// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>
#include <linux/numa.h>
#include <linux/llist.h>
#include <linux/cma.h>
#include <linux/migrate.h>
#include <linux/nospec.h>
#include <linux/delayacct.h>
#include <linux/memory.h>
#include <linux/mm_inline.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/page_owner.h>
#include "internal.h"
#include "hugetlb_vmemmap.h"

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

#ifdef CONFIG_CMA
static struct cma *hugetlb_cma[MAX_NUMNODES];
static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
{
	return cma_pages_valid(hugetlb_cma[folio_nid(folio)], &folio->page,
				1 << order);
}
#else
static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
{
	return false;
}
#endif
static unsigned long hugetlb_cma_size __initdata;

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static bool __initdata parsed_valid_hugepagesz = true;
static bool __initdata parsed_default_hugepagesz;
static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);
static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);

static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
	if (spool->count)
		return false;
	if (spool->max_hpages != -1)
		return spool->used_hpages == 0;
	if (spool->min_hpages != -1)
		return spool->rsv_hpages == spool->min_hpages;

	return true;
}

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
						unsigned long irq_flags)
{
	spin_unlock_irqrestore(&spool->lock, irq_flags);

	/* If no pages are used, and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool */
	if (subpool_is_free(spool)) {
		if (spool->min_hpages != -1)
			hugetlb_acct_memory(spool->hstate,
						-spool->min_hpages);
		kfree(spool);
	}
}

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = max_hpages;
	spool->hstate = h;
	spool->min_hpages = min_hpages;

	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	unsigned long flags;

	spin_lock_irqsave(&spool->lock, flags);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool, flags);
}

/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
					long delta)
{
	long ret = delta;

	if (!spool)
		return ret;

	spin_lock_irq(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;
			goto unlock_ret;
		}
	}

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->rsv_hpages) {
		if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those already taken on
			 * behalf of subpool.  Return difference.
			 */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;	/* reserves already accounted for */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock_irq(&spool->lock);
	return ret;
}
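
/*
 * Illustrative example of the minimum size accounting above (numbers are
 * hypothetical): a subpool created with min_hpages = 10 starts with
 * rsv_hpages = 10 pre-reserved from the global pool.  A get of delta = 3
 * is fully covered by that reserve, so the function returns 0 and
 * rsv_hpages drops to 7.  A later get of delta = 12 finds only 7 reserved
 * pages left, returns 12 - 7 = 5 and zeroes rsv_hpages: the caller must
 * adjust the global pools upward by 5.
 */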

/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
					long delta)
{
	long ret = delta;
	unsigned long flags;

	if (!spool)
		return delta;

	spin_lock_irqsave(&spool->lock, flags);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
	unlock_or_release_subpool(spool, flags);

	return ret;
}

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}

/*
 * hugetlb vma_lock helper routines
 */
void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		down_read(&vma_lock->rw_sema);
	}
}

void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		up_read(&vma_lock->rw_sema);
	}
}

void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		down_write(&vma_lock->rw_sema);
	}
}

void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		up_write(&vma_lock->rw_sema);
	}
}

int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

	if (!__vma_shareable_lock(vma))
		return 1;

	return down_write_trylock(&vma_lock->rw_sema);
}

void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		lockdep_assert_held(&vma_lock->rw_sema);
	}
}

void hugetlb_vma_lock_release(struct kref *kref)
{
	struct hugetlb_vma_lock *vma_lock = container_of(kref,
			struct hugetlb_vma_lock, refs);

	kfree(vma_lock);
}

static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
{
	struct vm_area_struct *vma = vma_lock->vma;

	/*
	 * The vma_lock structure may or may not be released as a result of
	 * the put; it certainly will no longer be attached to the vma, so
	 * clear the pointer.  The semaphore synchronizes access to the
	 * vma_lock->vma field.
	 */
	vma_lock->vma = NULL;
	vma->vm_private_data = NULL;
	up_write(&vma_lock->rw_sema);
	kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
}

static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		__hugetlb_vma_unlock_write_put(vma_lock);
	}
}

static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
{
	/*
	 * Only present in sharable vmas.
	 */
	if (!vma || !__vma_shareable_lock(vma))
		return;

	if (vma->vm_private_data) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		down_write(&vma_lock->rw_sema);
		__hugetlb_vma_unlock_write_put(vma_lock);
	}
}

static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
{
	struct hugetlb_vma_lock *vma_lock;

	/* Only establish in (VM_MAYSHARE) sharable vmas */
	if (!vma || !(vma->vm_flags & VM_MAYSHARE))
		return;

	/* Should never get here with non-NULL vm_private_data */
	if (vma->vm_private_data)
		return;

	vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
	if (!vma_lock) {
		/*
		 * If we cannot allocate the structure, then the vma cannot
		 * participate in pmd sharing.  This is only a possible
		 * performance enhancement and memory saving issue.
		 * However, the lock is also used to synchronize page
		 * faults with truncation.  If the lock is not present,
		 * unlikely races could leave pages in a file past i_size
		 * until the file is removed.  Warn in the unlikely case of
		 * allocation failure.
		 */
		pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
		return;
	}

	kref_init(&vma_lock->refs);
	init_rwsem(&vma_lock->rw_sema);
	vma_lock->vma = vma;
	vma->vm_private_data = vma_lock;
}

/* Helper that removes a struct file_region from the resv_map cache and returns
 * it for use.
 */
static struct file_region *
get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
{
	struct file_region *nrg;

	VM_BUG_ON(resv->region_cache_count <= 0);

	resv->region_cache_count--;
	nrg = list_first_entry(&resv->region_cache, struct file_region, link);
	list_del(&nrg->link);

	nrg->from = from;
	nrg->to = to;

	return nrg;
}

static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
					      struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	nrg->reservation_counter = rg->reservation_counter;
	nrg->css = rg->css;
	if (rg->css)
		css_get(rg->css);
#endif
}

/* Helper that records hugetlb_cgroup uncharge info. */
static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
						struct hstate *h,
						struct resv_map *resv,
						struct file_region *nrg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (h_cg) {
		nrg->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		nrg->css = &h_cg->css;
		/*
		 * The caller will hold exactly one h_cg->css reference for the
		 * whole contiguous reservation region.  But this area might be
		 * scattered when there are already some file_regions residing
		 * in it.  As a result, many file_regions may share only one
		 * css reference.  In order to ensure that one file_region must
		 * hold exactly one h_cg->css reference, we should do css_get
		 * for each file_region and leave the reference held by the
		 * caller untouched.
		 */
		css_get(&h_cg->css);
		if (!resv->pages_per_hpage)
			resv->pages_per_hpage = pages_per_huge_page(h);
		/* pages_per_hpage should be the same for all entries in
		 * a resv_map.
		 */
		VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
	} else {
		nrg->reservation_counter = NULL;
		nrg->css = NULL;
	}
#endif
}

static void put_uncharge_info(struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (rg->css)
		css_put(rg->css);
#endif
}

static bool has_same_uncharge_info(struct file_region *rg,
				   struct file_region *org)
{
#ifdef CONFIG_CGROUP_HUGETLB
	return rg->reservation_counter == org->reservation_counter &&
	       rg->css == org->css;

#else
	return true;
#endif
}

static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
{
	struct file_region *nrg, *prg;

	prg = list_prev_entry(rg, link);
	if (&prg->link != &resv->regions && prg->to == rg->from &&
	    has_same_uncharge_info(prg, rg)) {
		prg->to = rg->to;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);

		rg = prg;
	}

	nrg = list_next_entry(rg, link);
	if (&nrg->link != &resv->regions && nrg->from == rg->to &&
	    has_same_uncharge_info(nrg, rg)) {
		nrg->from = rg->from;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);
	}
}

static inline long
hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
		     long to, struct hstate *h, struct hugetlb_cgroup *cg,
		     long *regions_needed)
{
	struct file_region *nrg;

	if (!regions_needed) {
		nrg = get_file_region_entry_from_cache(map, from, to);
		record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
		list_add(&nrg->link, rg);
		coalesce_file_region(map, nrg);
	} else
		*regions_needed += 1;

	return to - from;
}

/*
 * Must be called with resv->lock held.
 *
 * Calling this with regions_needed != NULL will count the number of pages
 * to be added but will not modify the linked list.  regions_needed will
 * indicate the number of file_regions needed in the cache to carry out the
 * addition of the regions for this range.
 */
static long add_reservation_in_range(struct resv_map *resv, long f, long t,
				     struct hugetlb_cgroup *h_cg,
				     struct hstate *h, long *regions_needed)
{
	long add = 0;
	struct list_head *head = &resv->regions;
	long last_accounted_offset = f;
	struct file_region *iter, *trg = NULL;
	struct list_head *rg = NULL;

	if (regions_needed)
		*regions_needed = 0;

	/* In this loop, we essentially handle an entry for the range
	 * [last_accounted_offset, iter->from), at every iteration, with some
	 * bounds checking.
	 */
	list_for_each_entry_safe(iter, trg, head, link) {
		/* Skip irrelevant regions that start before our range. */
		if (iter->from < f) {
			/* If this region ends after the last accounted offset,
			 * then we need to update last_accounted_offset.
			 */
			if (iter->to > last_accounted_offset)
				last_accounted_offset = iter->to;
			continue;
		}

		/* When we find a region that starts beyond our range, we've
		 * finished.
		 */
		if (iter->from >= t) {
			rg = iter->link.prev;
			break;
		}

		/* Add an entry for last_accounted_offset -> iter->from, and
		 * update last_accounted_offset.
		 */
		if (iter->from > last_accounted_offset)
			add += hugetlb_resv_map_add(resv, iter->link.prev,
						    last_accounted_offset,
						    iter->from, h, h_cg,
						    regions_needed);

		last_accounted_offset = iter->to;
	}

	/* Handle the case where our range extends beyond
	 * last_accounted_offset.
	 */
	if (!rg)
		rg = head->prev;
	if (last_accounted_offset < t)
		add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
					    t, h, h_cg, regions_needed);

	return add;
}

/* Must be called with resv->lock acquired. Will drop lock to allocate entries.
 */
static int allocate_file_region_entries(struct resv_map *resv,
					int regions_needed)
	__must_hold(&resv->lock)
{
	LIST_HEAD(allocated_regions);
	int to_allocate = 0, i = 0;
	struct file_region *trg = NULL, *rg = NULL;

	VM_BUG_ON(regions_needed < 0);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * the number of in progress add operations plus regions_needed.
	 *
	 * This is a while loop because when we drop the lock, some other call
	 * to region_add or region_del may have consumed some region_entries,
	 * so we keep looping here until we finally have enough entries for
	 * (adds_in_progress + regions_needed).
	 */
	while (resv->region_cache_count <
	       (resv->adds_in_progress + regions_needed)) {
		to_allocate = resv->adds_in_progress + regions_needed -
			      resv->region_cache_count;

		/* At this point, we should have enough entries in the cache
		 * for all the existing adds_in_progress. We should only be
		 * needing to allocate for regions_needed.
		 */
		VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);

		spin_unlock(&resv->lock);
		for (i = 0; i < to_allocate; i++) {
			trg = kmalloc(sizeof(*trg), GFP_KERNEL);
			if (!trg)
				goto out_of_memory;
			list_add(&trg->link, &allocated_regions);
		}

		spin_lock(&resv->lock);

		list_splice(&allocated_regions, &resv->region_cache);
		resv->region_cache_count += to_allocate;
	}

	return 0;

out_of_memory:
	list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
		list_del(&rg->link);
		kfree(rg);
	}
	return -ENOMEM;
}

/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  Regions will be taken from the cache to fill in this range.
 * Sufficient regions should exist in the cache due to the previous
 * call to region_chg with the same range, but in some cases the cache will not
 * have sufficient entries due to races with other code doing region_add or
 * region_del.  The extra needed entries will be allocated.
 *
 * regions_needed is the out value provided by a previous call to region_chg.
 *
 * Return the number of new huge pages added to the map.  This number is greater
 * than or equal to zero.  If file_region entries needed to be allocated for
 * this operation and we were not able to allocate, it returns -ENOMEM.
 * region_add of regions of length 1 never allocates file_regions and cannot
 * fail; region_chg will always allocate at least 1 entry and a region_add for
 * 1 page will only require at most 1 entry.
 */
static long region_add(struct resv_map *resv, long f, long t,
		       long in_regions_needed, struct hstate *h,
		       struct hugetlb_cgroup *h_cg)
{
	long add = 0, actual_regions_needed = 0;

	spin_lock(&resv->lock);
retry:

	/* Count how many regions are actually needed to execute this add. */
	add_reservation_in_range(resv, f, t, NULL, NULL,
				 &actual_regions_needed);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * this add operation. Note that actual_regions_needed may be greater
	 * than in_regions_needed, as the resv_map may have been modified since
	 * the region_chg call. In this case, we need to make sure that we
	 * allocate extra entries, such that we have enough for all the
	 * existing adds_in_progress, plus the excess needed for this
	 * operation.
	 */
	if (actual_regions_needed > in_regions_needed &&
	    resv->region_cache_count <
		    resv->adds_in_progress +
			    (actual_regions_needed - in_regions_needed)) {
		/* region_add operation of range 1 should never need to
		 * allocate file_region entries.
		 */
		VM_BUG_ON(t - f <= 1);

		if (allocate_file_region_entries(
			    resv, actual_regions_needed - in_regions_needed)) {
			return -ENOMEM;
		}

		goto retry;
	}

	add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);

	resv->adds_in_progress -= in_regions_needed;

	spin_unlock(&resv->lock);
	return add;
}

/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  A number of new file_region structures are added to the cache as
 * placeholders, for the subsequent region_add call to use.  At least 1
 * file_region structure is added.
 *
 * out_regions_needed is the number of regions added to the
 * resv->adds_in_progress.  This value needs to be provided to a follow up call
 * to region_add or region_abort for proper accounting.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t).  This number is greater or equal to
 * zero.  -ENOMEM is returned if a new file_region structure or cache entry
 * is needed and can not be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t,
		       long *out_regions_needed)
{
	long chg = 0;

	spin_lock(&resv->lock);

	/* Count how many hugepages in this range are NOT represented. */
	chg = add_reservation_in_range(resv, f, t, NULL, NULL,
				       out_regions_needed);

	if (*out_regions_needed == 0)
		*out_regions_needed = 1;

	if (allocate_file_region_entries(resv, *out_regions_needed))
		return -ENOMEM;

	resv->adds_in_progress += *out_regions_needed;

	spin_unlock(&resv->lock);
	return chg;
}
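
/*
 * Typical calling sequence (illustrative sketch, not a function in this
 * file): a caller first invokes region_chg(resv, f, t, &regions_needed) to
 * learn how many pages in [f, t) are missing from the map and to stash
 * enough cache entries.  If the reservation goes ahead, it later calls
 * region_add(resv, f, t, regions_needed, h, h_cg); if it is abandoned, it
 * calls region_abort(resv, f, t, regions_needed) instead so that
 * adds_in_progress is rebalanced.
 */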

/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter.  regions_needed
 * is the value returned by the region_chg call; it is used to decrement
 * the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t,
			 long regions_needed)
{
	spin_lock(&resv->lock);
	VM_BUG_ON(!resv->region_cache_count);
	resv->adds_in_progress -= regions_needed;
	spin_unlock(&resv->lock);
}

/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	struct file_region *nrg = NULL;
	long del = 0;

retry:
	spin_lock(&resv->lock);
	list_for_each_entry_safe(rg, trg, head, link) {
		/*
		 * Skip regions before the range to be deleted.  file_region
		 * ranges are normally of the form [from, to).  However, there
		 * may be a "placeholder" entry in the map which is of the form
		 * (from, to) with from == to.  Check for placeholder entries
		 * at the beginning of the range to be deleted.
		 */
		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
			continue;

		if (rg->from >= t)
			break;

		if (f > rg->from && t < rg->to) { /* Must split region */
			/*
			 * Check for an entry in the cache before dropping
			 * lock and attempting allocation.
			 */
			if (!nrg &&
			    resv->region_cache_count > resv->adds_in_progress) {
				nrg = list_first_entry(&resv->region_cache,
						       struct file_region,
						       link);
				list_del(&nrg->link);
				resv->region_cache_count--;
			}

			if (!nrg) {
				spin_unlock(&resv->lock);
				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
				if (!nrg)
					return -ENOMEM;
				goto retry;
			}

			del += t - f;
			hugetlb_cgroup_uncharge_file_region(
				resv, rg, t - f, false);

			/* New entry for end of split region */
			nrg->from = t;
			nrg->to = rg->to;

			copy_hugetlb_cgroup_uncharge_info(nrg, rg);

			INIT_LIST_HEAD(&nrg->link);

			/* Original entry is trimmed */
			rg->to = f;

			list_add(&nrg->link, &rg->link);
			nrg = NULL;
			break;
		}

		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
			del += rg->to - rg->from;
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - rg->from, true);
			list_del(&rg->link);
			kfree(rg);
			continue;
		}

		if (f <= rg->from) {	/* Trim beginning of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    t - rg->from, false);

			del += t - rg->from;
			rg->from = t;
		} else {		/* Trim end of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - f, false);

			del += rg->to - f;
			rg->to = f;
		}
	}

	spin_unlock(&resv->lock);
	kfree(nrg);
	return del;
}

/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was freed
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode)
{
	struct hugepage_subpool *spool = subpool_inode(inode);
	long rsv_adjust;
	bool reserved = false;

	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
	if (rsv_adjust > 0) {
		struct hstate *h = hstate_inode(inode);

		if (!hugetlb_acct_memory(h, 1))
			reserved = true;
	} else if (!rsv_adjust) {
		reserved = true;
	}

	if (!reserved)
		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
}

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	spin_unlock(&resv->lock);

	return chg;
}
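
/*
 * Worked example of the reserve map (numbers are hypothetical): starting
 * from an empty map, region_chg(resv, 0, 4, ...) reports 4 missing pages
 * and a following region_add() leaves one region [0, 4).  A later
 * region_add() for [6, 8) leaves two regions, [0, 4) and [6, 8), and
 * region_count(resv, 2, 7) then returns 3 (two pages from the first
 * region plus one from the second).  region_del(resv, 1, 3) must split
 * [0, 4) into [0, 1) and [3, 4) and therefore may need to allocate a new
 * file_region descriptor.
 */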

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}
EXPORT_SYMBOL_GPL(linear_hugepage_index);

/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->pagesize)
		return vma->vm_ops->pagesize(vma);
	return PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific 'strong'
 * version of this symbol is required.
 */
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have its future faults succeed.
 *
 * With the exception of hugetlb_dup_vma_private() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held.  It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and it persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservation, i.e. where pages have been instantiated.
 */
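
/*
 * Illustrative example (the address is hypothetical): a MAP_PRIVATE vma
 * whose resv_map was kmalloc'ed at 0xffff888012345600 and which owns its
 * reservation stores 0xffff888012345601 in vm_private_data, i.e. the map
 * pointer with HPAGE_RESV_OWNER set in the low bits.  vma_resv_map()
 * recovers the pointer by masking with ~HPAGE_RESV_MASK, while
 * is_vma_resv_set() tests the low bits.
 */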

static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
					unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

static void
resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
					  struct hugetlb_cgroup *h_cg,
					  struct hstate *h)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (!h_cg || !h) {
		resv_map->reservation_counter = NULL;
		resv_map->pages_per_hpage = 0;
		resv_map->css = NULL;
	} else {
		resv_map->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		resv_map->pages_per_hpage = pages_per_huge_page(h);
		resv_map->css = &h_cg->css;
	}
#endif
}

struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

	if (!resv_map || !rg) {
		kfree(resv_map);
		kfree(rg);
		return NULL;
	}

	kref_init(&resv_map->refs);
	spin_lock_init(&resv_map->lock);
	INIT_LIST_HEAD(&resv_map->regions);

	resv_map->adds_in_progress = 0;
	/*
	 * Initialize these to 0. On shared mappings, 0's here indicate these
	 * fields don't do cgroup accounting. On private mappings, these will be
	 * re-initialized to the proper values, to indicate that hugetlb cgroup
	 * reservations are to be un-charged from here.
	 */
	resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);

	INIT_LIST_HEAD(&resv_map->region_cache);
	list_add(&rg->link, &resv_map->region_cache);
	resv_map->region_cache_count = 1;

	return resv_map;
}

void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
	struct list_head *head = &resv_map->region_cache;
	struct file_region *rg, *trg;

	/* Clear out any active regions before we release the map. */
	region_del(resv_map, 0, LONG_MAX);

	/* ... and any entries left in the cache */
	list_for_each_entry_safe(rg, trg, head, link) {
		list_del(&rg->link);
		kfree(rg);
	}

	VM_BUG_ON(resv_map->adds_in_progress);

	kfree(resv_map);
}

static inline struct resv_map *inode_resv_map(struct inode *inode)
{
	/*
	 * At inode evict time, i_mapping may not point to the original
	 * address space within the inode.  This original address space
	 * contains the pointer to the resv_map.  So, always use the
	 * address space embedded within the inode.
	 * The VERY common case is inode->mapping == &inode->i_data but
	 * this may not be true for device special inodes.
	 */
	return (struct resv_map *)(&inode->i_data)->private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (vma->vm_flags & VM_MAYSHARE) {
		struct address_space *mapping = vma->vm_file->f_mapping;
		struct inode *inode = mapping->host;

		return inode_resv_map(inode);

	} else {
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	}
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

	return (get_vma_private_data(vma) & flag) != 0;
}

void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	/*
	 * Clear vm_private_data
	 * - For shared mappings this is a per-vma semaphore that may be
	 *   allocated in a subsequent call to hugetlb_vm_op_open.
	 *   Before clearing, make sure the pointer is not associated with
	 *   this vma; clearing a pointer to a lock still attached to the
	 *   vma would leak the structure.  This is the case when called
	 *   via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has
	 *   already been called to allocate a new structure.
	 * - For MAP_PRIVATE mappings, this is the reserve map which does
	 *   not apply to children.  Faults generated by the children are
	 *   not guaranteed to succeed, even if read-only.
	 */
	if (vma->vm_flags & VM_MAYSHARE) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		if (vma_lock && vma_lock->vma != vma)
			vma->vm_private_data = NULL;
	} else
		vma->vm_private_data = NULL;
}

/*
 * Reset and decrement one ref on the hugepage private reservation.
 * Called with mm->mmap_lock writer semaphore held.
 * This function should only be used by move_vma() and operate on a
 * vma of the same size.  It should never come here with the last ref on
 * the reservation.
 */
void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	/*
	 * Clear the old hugetlb private page reservation.
	 * It has already been transferred to new_vma.
	 *
	 * During a mremap() operation of a hugetlb vma we call move_vma()
	 * which copies vma into new_vma and unmaps vma. After the copy
	 * operation both new_vma and vma share a reference to the resv_map
	 * struct, and at that point vma is about to be unmapped. We don't
	 * want to return the reservation to the pool at unmap of vma because
	 * the reservation still lives on in new_vma, so simply decrement the
	 * ref here and remove the resv_map reference from this vma.
	 */
	struct resv_map *reservations = vma_resv_map(vma);

	if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
		kref_put(&reservations->refs, resv_map_release);
	}

	hugetlb_dup_vma_private(vma);
}

/* Returns true if the VMA has associated reserve pages */
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{
	if (vma->vm_flags & VM_NORESERVE) {
		/*
		 * This address is already reserved by another process
		 * (chg == 0), so we should decrement the reserved count.
		 * Without decrementing, the reserve count remains after
		 * releasing the inode, because this allocated page will go
		 * into the page cache and is regarded as coming from the
		 * reserved pool in the releasing step.  Currently, we don't
		 * have any other solution to deal with this situation
		 * properly, so add a work-around here.
		 */
		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
			return true;
		else
			return false;
	}

	/* Shared mappings always use reserves */
	if (vma->vm_flags & VM_MAYSHARE) {
		/*
		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
		 * be a region map for all pages.  The only situation where
		 * there is no region map is if a hole was punched via
		 * fallocate.  In this case, there really are no reserves to
		 * use.  This situation is indicated if chg != 0.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	/*
	 * Only the process that called mmap() has reserves for
	 * private mappings.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Like the shared case above, a hole punch or truncate
		 * could have been performed on the private mapping.
		 * Examine the value of chg to determine if reserves
		 * actually exist or were previously consumed.
		 * Very Subtle - The value of chg comes from a previous
		 * call to vma_needs_reserves(). The reserve map for
		 * private mappings has different (opposite) semantics
		 * than that of shared mappings.  vma_needs_reserves()
		 * has already taken this difference in semantics into
		 * account.  Therefore, the meaning of chg is the same
		 * as in the shared case above.  Code could easily be
		 * combined, but keeping it separate draws attention to
		 * subtle differences.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	return false;
}

static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
{
	int nid = folio_nid(folio);

	lockdep_assert_held(&hugetlb_lock);
	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);

	list_move(&folio->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
	folio_set_hugetlb_freed(folio);
}

static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
								int nid)
{
	struct folio *folio;
	bool pin = !!(current->flags & PF_MEMALLOC_PIN);

	lockdep_assert_held(&hugetlb_lock);
	list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
		if (pin && !folio_is_longterm_pinnable(folio))
			continue;

		if (folio_test_hwpoison(folio))
			continue;

		list_move(&folio->lru, &h->hugepage_activelist);
		folio_ref_unfreeze(folio, 1);
		folio_clear_hugetlb_freed(folio);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		return folio;
	}

	return NULL;
}

static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
							int nid, nodemask_t *nmask)
{
	unsigned int cpuset_mems_cookie;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	int node = NUMA_NO_NODE;

	zonelist = node_zonelist(nid, gfp_mask);

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
		struct folio *folio;

		if (!cpuset_zone_allowed(zone, gfp_mask))
			continue;
		/*
		 * no need to ask again on the same node. Pool is node rather than
		 * zone aware
		 */
		if (zone_to_nid(zone) == node)
			continue;
		node = zone_to_nid(zone);

		folio = dequeue_hugetlb_folio_node_exact(h, node);
		if (folio)
			return folio;
	}
	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;

	return NULL;
}

static unsigned long available_huge_pages(struct hstate *h)
{
	return h->free_huge_pages - h->resv_huge_pages;
}

static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{
	struct folio *folio = NULL;
	struct mempolicy *mpol;
	gfp_t gfp_mask;
	nodemask_t *nodemask;
	int nid;

	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves.  This check ensures that reservations are
	 * not "stolen".
	 * The child may still get SIGKILLed
	 */
	if (!vma_has_reserves(vma, chg) && !available_huge_pages(h))
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && !available_huge_pages(h))
		goto err;

	gfp_mask = htlb_alloc_mask(h);
	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);

	if (mpol_is_preferred_many(mpol)) {
		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
							nid, nodemask);

		/* Fallback to all nodes if page==NULL */
		nodemask = NULL;
	}

	if (!folio)
		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
							nid, nodemask);

	if (folio && !avoid_reserve && vma_has_reserves(vma, chg)) {
		folio_set_hugetlb_restore_reserve(folio);
		h->resv_huge_pages--;
	}

	mpol_cond_put(mpol);
	return folio;

err:
	return NULL;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node_in(nid, *nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advances the
 * next node from which to allocate, handling wrap at the end of the
 * node mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}
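
/*
 * Illustrative example (node numbers are hypothetical): with
 * nodes_allowed = {0, 2, 3} and h->next_nid_to_alloc == 1,
 * hstate_next_node_to_alloc() first maps the disallowed node 1 to the
 * next allowed node, 2, returns it, and advances next_nid_to_alloc to 3;
 * the following call returns 3 and wraps next_nid_to_alloc back to 0.
 */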

/*
 * helper for remove_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)

/* used to demote non-gigantic huge pages as well */
static void __destroy_compound_gigantic_folio(struct folio *folio,
					unsigned int order, bool demote)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p;

	atomic_set(&folio->_entire_mapcount, 0);
	atomic_set(&folio->_nr_pages_mapped, 0);
	atomic_set(&folio->_pincount, 0);

	for (i = 1; i < nr_pages; i++) {
		p = folio_page(folio, i);
		p->mapping = NULL;
		clear_compound_head(p);
		if (!demote)
			set_page_refcounted(p);
	}

	__folio_clear_head(folio);
}

static void destroy_compound_hugetlb_folio_for_demote(struct folio *folio,
					unsigned int order)
{
	__destroy_compound_gigantic_folio(folio, order, true);
}

#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static void destroy_compound_gigantic_folio(struct folio *folio,
					unsigned int order)
{
	__destroy_compound_gigantic_folio(folio, order, false);
}

static void free_gigantic_folio(struct folio *folio, unsigned int order)
{
	/*
	 * If the page isn't allocated using the cma allocator,
	 * cma_release() returns false.
	 */
#ifdef CONFIG_CMA
	int nid = folio_nid(folio);

	if (cma_release(hugetlb_cma[nid], &folio->page, 1 << order))
		return;
#endif

	free_contig_range(folio_pfn(folio), 1 << order);
}

#ifdef CONFIG_CONTIG_ALLOC
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask)
{
	struct page *page;
	unsigned long nr_pages = pages_per_huge_page(h);
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

#ifdef CONFIG_CMA
	{
		int node;

		if (hugetlb_cma[nid]) {
			page = cma_alloc(hugetlb_cma[nid], nr_pages,
					huge_page_order(h), true);
			if (page)
				return page_folio(page);
		}

		if (!(gfp_mask & __GFP_THISNODE)) {
			for_each_node_mask(node, *nodemask) {
				if (node == nid || !hugetlb_cma[node])
					continue;

				page = cma_alloc(hugetlb_cma[node], nr_pages,
						huge_page_order(h), true);
				if (page)
					return page_folio(page);
			}
		}
	}
#endif

	page = alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
	return page ? page_folio(page) : NULL;
}

#else /* !CONFIG_CONTIG_ALLOC */
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
#endif /* CONFIG_CONTIG_ALLOC */

#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
static inline void free_gigantic_folio(struct folio *folio,
						unsigned int order) { }
static inline void destroy_compound_gigantic_folio(struct folio *folio,
						unsigned int order) { }
#endif

static inline void __clear_hugetlb_destructor(struct hstate *h,
						struct folio *folio)
{
	lockdep_assert_held(&hugetlb_lock);

	/*
	 * Very subtle
	 *
	 * For non-gigantic pages set the destructor to the normal compound
	 * page dtor.  This is needed in case someone takes an additional
	 * temporary ref to the page, and freeing is delayed until they drop
	 * their reference.
	 *
	 * For gigantic pages set the destructor to the null dtor.  This
	 * destructor will never be called.  Before freeing the gigantic
	 * page destroy_compound_gigantic_folio will turn the folio into a
	 * simple group of pages.  After this the destructor does not
	 * apply.
	 *
	 */
	if (hstate_is_gigantic(h))
		folio_set_compound_dtor(folio, NULL_COMPOUND_DTOR);
	else
		folio_set_compound_dtor(folio, COMPOUND_PAGE_DTOR);
}

/*
 * Remove hugetlb folio from lists.
 * If vmemmap exists for the folio, update dtor so that the folio appears
 * as just a compound page.  Otherwise, wait until after allocating vmemmap
 * to update dtor.
 *
 * A reference is held on the folio, except in the case of demote.
 *
 * Must be called with hugetlb lock held.
 */
static void __remove_hugetlb_folio(struct hstate *h, struct folio *folio,
							bool adjust_surplus,
							bool demote)
{
	int nid = folio_nid(folio);

	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio);
	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio);

	lockdep_assert_held(&hugetlb_lock);
	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	list_del(&folio->lru);

	if (folio_test_hugetlb_freed(folio)) {
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
	}
	if (adjust_surplus) {
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	}

	/*
	 * We can only clear the hugetlb destructor after allocating vmemmap
	 * pages.  Otherwise, someone (memory error handling) may try to write
	 * to tail struct pages.
	 */
	if (!folio_test_hugetlb_vmemmap_optimized(folio))
		__clear_hugetlb_destructor(h, folio);

	/*
	 * In the case of demote we do not ref count the page as it will soon
	 * be turned into a page of smaller size.
	 */
	if (!demote)
		folio_ref_unfreeze(folio, 1);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[nid]--;
}

static void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
							bool adjust_surplus)
{
	__remove_hugetlb_folio(h, folio, adjust_surplus, false);
}

static void remove_hugetlb_folio_for_demote(struct hstate *h, struct folio *folio,
							bool adjust_surplus)
{
	__remove_hugetlb_folio(h, folio, adjust_surplus, true);
}

static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
			     bool adjust_surplus)
{
	int zeroed;
	int nid = folio_nid(folio);

	VM_BUG_ON_FOLIO(!folio_test_hugetlb_vmemmap_optimized(folio), folio);

	lockdep_assert_held(&hugetlb_lock);

	INIT_LIST_HEAD(&folio->lru);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;

	if (adjust_surplus) {
		h->surplus_huge_pages++;
		h->surplus_huge_pages_node[nid]++;
	}

	folio_set_compound_dtor(folio, HUGETLB_PAGE_DTOR);
	folio_change_private(folio, NULL);
	/*
	 * We have to set hugetlb_vmemmap_optimized again as above
	 * folio_change_private(folio, NULL) cleared it.
	 */
	folio_set_hugetlb_vmemmap_optimized(folio);

	/*
	 * This folio is about to be managed by the hugetlb allocator and
	 * should have no users.  Drop our reference, and check for others
	 * just in case.
	 */
	zeroed = folio_put_testzero(folio);
	if (unlikely(!zeroed))
		/*
		 * It is VERY unlikely someone else has taken a ref
		 * on the folio.  In this case, we simply return as
		 * free_huge_folio() will be called when this other ref
		 * is dropped.
		 */
		return;

	arch_clear_hugepage_flags(&folio->page);
	enqueue_hugetlb_folio(h, folio);
}

static void __update_and_free_hugetlb_folio(struct hstate *h,
						struct folio *folio)
{
	int i;
	struct page *subpage;
	bool clear_dtor = folio_test_hugetlb_vmemmap_optimized(folio);

	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	/*
	 * If we don't know which subpages are hwpoisoned, we can't free
	 * the hugepage, so it's leaked intentionally.
	 */
	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
		return;

	if (hugetlb_vmemmap_restore(h, &folio->page)) {
		spin_lock_irq(&hugetlb_lock);
		/*
		 * If we cannot allocate vmemmap pages, just refuse to free the
		 * page, put the page back on the hugetlb free list and treat
		 * it as a surplus page.
		 */
		add_hugetlb_folio(h, folio, true);
		spin_unlock_irq(&hugetlb_lock);
		return;
	}

	/*
	 * Move PageHWPoison flag from head page to the raw error pages,
	 * which makes any healthy subpages reusable.
	 */
	if (unlikely(folio_test_hwpoison(folio)))
		folio_clear_hugetlb_hwpoison(folio);

	/*
	 * If vmemmap pages were allocated above, then we need to clear the
	 * hugetlb destructor under the hugetlb lock.
	 */
	if (clear_dtor) {
		spin_lock_irq(&hugetlb_lock);
		__clear_hugetlb_destructor(h, folio);
		spin_unlock_irq(&hugetlb_lock);
	}

	for (i = 0; i < pages_per_huge_page(h); i++) {
		subpage = folio_page(folio, i);
		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_private |
				1 << PG_writeback);
	}

	/*
	 * Non-gigantic pages demoted from CMA allocated gigantic pages
	 * need to be given back to CMA in free_gigantic_folio.
	 */
	if (hstate_is_gigantic(h) ||
	    hugetlb_cma_folio(folio, huge_page_order(h))) {
		destroy_compound_gigantic_folio(folio, huge_page_order(h));
		free_gigantic_folio(folio, huge_page_order(h));
	} else {
		__free_pages(&folio->page, huge_page_order(h));
	}
}

/*
 * As update_and_free_hugetlb_folio() can be called from any context, we
 * cannot use GFP_KERNEL to allocate vmemmap pages.  However, we can defer
 * the actual freeing to a workqueue to avoid using GFP_ATOMIC to allocate
 * the vmemmap pages.
 *
 * free_hpage_workfn() locklessly retrieves the linked list of pages to be
 * freed and frees them one-by-one.  As the page->mapping pointer is going
 * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
 * structure of a lockless linked list of huge pages to be freed.
 */
static LLIST_HEAD(hpage_freelist);

static void free_hpage_workfn(struct work_struct *work)
{
	struct llist_node *node;

	node = llist_del_all(&hpage_freelist);

	while (node) {
		struct page *page;
		struct hstate *h;

		page = container_of((struct address_space **)node,
				     struct page, mapping);
		node = node->next;
		page->mapping = NULL;
		/*
		 * The VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio) in
		 * folio_hstate() is going to trigger because a previous call
		 * to remove_hugetlb_folio() will call
		 * folio_set_compound_dtor(folio, NULL_COMPOUND_DTOR), so do
		 * not use folio_hstate() directly.
		 */
		h = size_to_hstate(page_size(page));

		__update_and_free_hugetlb_folio(h, page_folio(page));

		cond_resched();
	}
}
static DECLARE_WORK(free_hpage_work, free_hpage_workfn);

static inline void flush_free_hpage_work(struct hstate *h)
{
	if (hugetlb_vmemmap_optimizable(h))
		flush_work(&free_hpage_work);
}

static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
				 bool atomic)
{
	if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) {
		__update_and_free_hugetlb_folio(h, folio);
		return;
	}

	/*
	 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
	 *
	 * Only call schedule_work() if hpage_freelist is previously
	 * empty. Otherwise, schedule_work() had been called but the workfn
	 * hasn't retrieved the list yet.
1850 */ 1851 if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist)) 1852 schedule_work(&free_hpage_work); 1853 } 1854 1855 static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list) 1856 { 1857 struct page *page, *t_page; 1858 struct folio *folio; 1859 1860 list_for_each_entry_safe(page, t_page, list, lru) { 1861 folio = page_folio(page); 1862 update_and_free_hugetlb_folio(h, folio, false); 1863 cond_resched(); 1864 } 1865 } 1866 1867 struct hstate *size_to_hstate(unsigned long size) 1868 { 1869 struct hstate *h; 1870 1871 for_each_hstate(h) { 1872 if (huge_page_size(h) == size) 1873 return h; 1874 } 1875 return NULL; 1876 } 1877 1878 void free_huge_folio(struct folio *folio) 1879 { 1880 /* 1881 * Can't pass hstate in here because it is called from the 1882 * compound page destructor. 1883 */ 1884 struct hstate *h = folio_hstate(folio); 1885 int nid = folio_nid(folio); 1886 struct hugepage_subpool *spool = hugetlb_folio_subpool(folio); 1887 bool restore_reserve; 1888 unsigned long flags; 1889 1890 VM_BUG_ON_FOLIO(folio_ref_count(folio), folio); 1891 VM_BUG_ON_FOLIO(folio_mapcount(folio), folio); 1892 1893 hugetlb_set_folio_subpool(folio, NULL); 1894 if (folio_test_anon(folio)) 1895 __ClearPageAnonExclusive(&folio->page); 1896 folio->mapping = NULL; 1897 restore_reserve = folio_test_hugetlb_restore_reserve(folio); 1898 folio_clear_hugetlb_restore_reserve(folio); 1899 1900 /* 1901 * If HPageRestoreReserve was set on page, page allocation consumed a 1902 * reservation. If the page was associated with a subpool, there 1903 * would have been a page reserved in the subpool before allocation 1904 * via hugepage_subpool_get_pages(). Since we are 'restoring' the 1905 * reservation, do not call hugepage_subpool_put_pages() as this will 1906 * remove the reserved page from the subpool. 1907 */ 1908 if (!restore_reserve) { 1909 /* 1910 * A return code of zero implies that the subpool will be 1911 * under its minimum size if the reservation is not restored 1912 * after page is free. Therefore, force restore_reserve 1913 * operation. 
1914 */ 1915 if (hugepage_subpool_put_pages(spool, 1) == 0) 1916 restore_reserve = true; 1917 } 1918 1919 spin_lock_irqsave(&hugetlb_lock, flags); 1920 folio_clear_hugetlb_migratable(folio); 1921 hugetlb_cgroup_uncharge_folio(hstate_index(h), 1922 pages_per_huge_page(h), folio); 1923 hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h), 1924 pages_per_huge_page(h), folio); 1925 if (restore_reserve) 1926 h->resv_huge_pages++; 1927 1928 if (folio_test_hugetlb_temporary(folio)) { 1929 remove_hugetlb_folio(h, folio, false); 1930 spin_unlock_irqrestore(&hugetlb_lock, flags); 1931 update_and_free_hugetlb_folio(h, folio, true); 1932 } else if (h->surplus_huge_pages_node[nid]) { 1933 /* remove the page from active list */ 1934 remove_hugetlb_folio(h, folio, true); 1935 spin_unlock_irqrestore(&hugetlb_lock, flags); 1936 update_and_free_hugetlb_folio(h, folio, true); 1937 } else { 1938 arch_clear_hugepage_flags(&folio->page); 1939 enqueue_hugetlb_folio(h, folio); 1940 spin_unlock_irqrestore(&hugetlb_lock, flags); 1941 } 1942 } 1943 1944 /* 1945 * Must be called with the hugetlb lock held 1946 */ 1947 static void __prep_account_new_huge_page(struct hstate *h, int nid) 1948 { 1949 lockdep_assert_held(&hugetlb_lock); 1950 h->nr_huge_pages++; 1951 h->nr_huge_pages_node[nid]++; 1952 } 1953 1954 static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio) 1955 { 1956 hugetlb_vmemmap_optimize(h, &folio->page); 1957 INIT_LIST_HEAD(&folio->lru); 1958 folio_set_compound_dtor(folio, HUGETLB_PAGE_DTOR); 1959 hugetlb_set_folio_subpool(folio, NULL); 1960 set_hugetlb_cgroup(folio, NULL); 1961 set_hugetlb_cgroup_rsvd(folio, NULL); 1962 } 1963 1964 static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid) 1965 { 1966 __prep_new_hugetlb_folio(h, folio); 1967 spin_lock_irq(&hugetlb_lock); 1968 __prep_account_new_huge_page(h, nid); 1969 spin_unlock_irq(&hugetlb_lock); 1970 } 1971 1972 static bool __prep_compound_gigantic_folio(struct folio *folio, 1973 unsigned int order, bool demote) 1974 { 1975 int i, j; 1976 int nr_pages = 1 << order; 1977 struct page *p; 1978 1979 __folio_clear_reserved(folio); 1980 for (i = 0; i < nr_pages; i++) { 1981 p = folio_page(folio, i); 1982 1983 /* 1984 * For gigantic hugepages allocated through bootmem at 1985 * boot, it's safer to be consistent with the not-gigantic 1986 * hugepages and clear the PG_reserved bit from all tail pages 1987 * too. Otherwise drivers using get_user_pages() to access tail 1988 * pages may get the reference counting wrong if they see 1989 * PG_reserved set on a tail page (despite the head page not 1990 * having PG_reserved set). Enforcing this consistency between 1991 * head and tail pages allows drivers to optimize away a check 1992 * on the head page when they need know if put_page() is needed 1993 * after get_user_pages(). 1994 */ 1995 if (i != 0) /* head page cleared above */ 1996 __ClearPageReserved(p); 1997 /* 1998 * Subtle and very unlikely 1999 * 2000 * Gigantic 'page allocators' such as memblock or cma will 2001 * return a set of pages with each page ref counted. We need 2002 * to turn this set of pages into a compound page with tail 2003 * page ref counts set to zero. Code such as speculative page 2004 * cache adding could take a ref on a 'to be' tail page. 2005 * We need to respect any increased ref count, and only set 2006 * the ref count to zero if count is currently 1. If count 2007 * is not 1, we return an error. An error return indicates 2008 * the set of pages can not be converted to a gigantic page. 
2009 * The caller who allocated the pages should then discard the 2010 * pages using the appropriate free interface. 2011 * 2012 * In the case of demote, the ref count will be zero. 2013 */ 2014 if (!demote) { 2015 if (!page_ref_freeze(p, 1)) { 2016 pr_warn("HugeTLB page can not be used due to unexpected inflated ref count\n"); 2017 goto out_error; 2018 } 2019 } else { 2020 VM_BUG_ON_PAGE(page_count(p), p); 2021 } 2022 if (i != 0) 2023 set_compound_head(p, &folio->page); 2024 } 2025 __folio_set_head(folio); 2026 /* we rely on prep_new_hugetlb_folio to set the destructor */ 2027 folio_set_order(folio, order); 2028 atomic_set(&folio->_entire_mapcount, -1); 2029 atomic_set(&folio->_nr_pages_mapped, 0); 2030 atomic_set(&folio->_pincount, 0); 2031 return true; 2032 2033 out_error: 2034 /* undo page modifications made above */ 2035 for (j = 0; j < i; j++) { 2036 p = folio_page(folio, j); 2037 if (j != 0) 2038 clear_compound_head(p); 2039 set_page_refcounted(p); 2040 } 2041 /* need to clear PG_reserved on remaining tail pages */ 2042 for (; j < nr_pages; j++) { 2043 p = folio_page(folio, j); 2044 __ClearPageReserved(p); 2045 } 2046 return false; 2047 } 2048 2049 static bool prep_compound_gigantic_folio(struct folio *folio, 2050 unsigned int order) 2051 { 2052 return __prep_compound_gigantic_folio(folio, order, false); 2053 } 2054 2055 static bool prep_compound_gigantic_folio_for_demote(struct folio *folio, 2056 unsigned int order) 2057 { 2058 return __prep_compound_gigantic_folio(folio, order, true); 2059 } 2060 2061 /* 2062 * PageHuge() only returns true for hugetlbfs pages, but not for normal or 2063 * transparent huge pages. See the PageTransHuge() documentation for more 2064 * details. 2065 */ 2066 int PageHuge(struct page *page) 2067 { 2068 struct folio *folio; 2069 2070 if (!PageCompound(page)) 2071 return 0; 2072 folio = page_folio(page); 2073 return folio->_folio_dtor == HUGETLB_PAGE_DTOR; 2074 } 2075 EXPORT_SYMBOL_GPL(PageHuge); 2076 2077 /** 2078 * folio_test_hugetlb - Determine if the folio belongs to hugetlbfs 2079 * @folio: The folio to test. 2080 * 2081 * Context: Any context. Caller should have a reference on the folio to 2082 * prevent it from being turned into a tail page. 2083 * Return: True for hugetlbfs folios, false for anon folios or folios 2084 * belonging to other filesystems. 2085 */ 2086 bool folio_test_hugetlb(struct folio *folio) 2087 { 2088 if (!folio_test_large(folio)) 2089 return false; 2090 2091 return folio->_folio_dtor == HUGETLB_PAGE_DTOR; 2092 } 2093 EXPORT_SYMBOL_GPL(folio_test_hugetlb); 2094 2095 /* 2096 * Find and lock address space (mapping) in write mode. 2097 * 2098 * Upon entry, the page is locked which means that page_mapping() is 2099 * stable. Due to locking order, we can only trylock_write. If we can 2100 * not get the lock, simply return NULL to caller. 
2101 */ 2102 struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage) 2103 { 2104 struct address_space *mapping = page_mapping(hpage); 2105 2106 if (!mapping) 2107 return mapping; 2108 2109 if (i_mmap_trylock_write(mapping)) 2110 return mapping; 2111 2112 return NULL; 2113 } 2114 2115 pgoff_t hugetlb_basepage_index(struct page *page) 2116 { 2117 struct page *page_head = compound_head(page); 2118 pgoff_t index = page_index(page_head); 2119 unsigned long compound_idx; 2120 2121 if (compound_order(page_head) > MAX_ORDER) 2122 compound_idx = page_to_pfn(page) - page_to_pfn(page_head); 2123 else 2124 compound_idx = page - page_head; 2125 2126 return (index << compound_order(page_head)) + compound_idx; 2127 } 2128 2129 static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h, 2130 gfp_t gfp_mask, int nid, nodemask_t *nmask, 2131 nodemask_t *node_alloc_noretry) 2132 { 2133 int order = huge_page_order(h); 2134 struct page *page; 2135 bool alloc_try_hard = true; 2136 bool retry = true; 2137 2138 /* 2139 * By default we always try hard to allocate the page with 2140 * __GFP_RETRY_MAYFAIL flag. However, if we are allocating pages in 2141 * a loop (to adjust global huge page counts) and previous allocation 2142 * failed, do not continue to try hard on the same node. Use the 2143 * node_alloc_noretry bitmap to manage this state information. 2144 */ 2145 if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry)) 2146 alloc_try_hard = false; 2147 gfp_mask |= __GFP_COMP|__GFP_NOWARN; 2148 if (alloc_try_hard) 2149 gfp_mask |= __GFP_RETRY_MAYFAIL; 2150 if (nid == NUMA_NO_NODE) 2151 nid = numa_mem_id(); 2152 retry: 2153 page = __alloc_pages(gfp_mask, order, nid, nmask); 2154 2155 /* Freeze head page */ 2156 if (page && !page_ref_freeze(page, 1)) { 2157 __free_pages(page, order); 2158 if (retry) { /* retry once */ 2159 retry = false; 2160 goto retry; 2161 } 2162 /* WOW! twice in a row. */ 2163 pr_warn("HugeTLB head page unexpected inflated ref count\n"); 2164 page = NULL; 2165 } 2166 2167 /* 2168 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page this 2169 * indicates an overall state change. Clear bit so that we resume 2170 * normal 'try hard' allocations. 2171 */ 2172 if (node_alloc_noretry && page && !alloc_try_hard) 2173 node_clear(nid, *node_alloc_noretry); 2174 2175 /* 2176 * If we tried hard to get a page but failed, set bit so that 2177 * subsequent attempts will not try as hard until there is an 2178 * overall state change. 2179 */ 2180 if (node_alloc_noretry && !page && alloc_try_hard) 2181 node_set(nid, *node_alloc_noretry); 2182 2183 if (!page) { 2184 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); 2185 return NULL; 2186 } 2187 2188 __count_vm_event(HTLB_BUDDY_PGALLOC); 2189 return page_folio(page); 2190 } 2191 2192 /* 2193 * Common helper to allocate a fresh hugetlb page. All specific allocators 2194 * should use this function to get new hugetlb pages 2195 * 2196 * Note that returned page is 'frozen': ref count of head page and all tail 2197 * pages is zero. 
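 * The exception is alloc_pool_huge_page(), which hands the still-frozen
 * folio straight to free_huge_folio() to place it on the free lists.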
2198 */ 2199 static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h, 2200 gfp_t gfp_mask, int nid, nodemask_t *nmask, 2201 nodemask_t *node_alloc_noretry) 2202 { 2203 struct folio *folio; 2204 bool retry = false; 2205 2206 retry: 2207 if (hstate_is_gigantic(h)) 2208 folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask); 2209 else 2210 folio = alloc_buddy_hugetlb_folio(h, gfp_mask, 2211 nid, nmask, node_alloc_noretry); 2212 if (!folio) 2213 return NULL; 2214 if (hstate_is_gigantic(h)) { 2215 if (!prep_compound_gigantic_folio(folio, huge_page_order(h))) { 2216 /* 2217 * Rare failure to convert pages to compound page. 2218 * Free pages and try again - ONCE! 2219 */ 2220 free_gigantic_folio(folio, huge_page_order(h)); 2221 if (!retry) { 2222 retry = true; 2223 goto retry; 2224 } 2225 return NULL; 2226 } 2227 } 2228 prep_new_hugetlb_folio(h, folio, folio_nid(folio)); 2229 2230 return folio; 2231 } 2232 2233 /* 2234 * Allocates a fresh page to the hugetlb allocator pool in a node-interleaved 2235 * manner. 2236 */ 2237 static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, 2238 nodemask_t *node_alloc_noretry) 2239 { 2240 struct folio *folio; 2241 int nr_nodes, node; 2242 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; 2243 2244 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { 2245 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, node, 2246 nodes_allowed, node_alloc_noretry); 2247 if (folio) { 2248 free_huge_folio(folio); /* free it into the hugepage allocator */ 2249 return 1; 2250 } 2251 } 2252 2253 return 0; 2254 } 2255 2256 /* 2257 * Remove huge page from pool from next node to free. Attempt to keep 2258 * persistent huge pages more or less balanced over allowed nodes. 2259 * This routine only 'removes' the hugetlb page. The caller must make 2260 * an additional call to free the page to low level allocators. 2261 * Called with hugetlb_lock locked. 2262 */ 2263 static struct page *remove_pool_huge_page(struct hstate *h, 2264 nodemask_t *nodes_allowed, 2265 bool acct_surplus) 2266 { 2267 int nr_nodes, node; 2268 struct page *page = NULL; 2269 struct folio *folio; 2270 2271 lockdep_assert_held(&hugetlb_lock); 2272 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 2273 /* 2274 * If we're returning unused surplus pages, only examine 2275 * nodes with surplus pages. 2276 */ 2277 if ((!acct_surplus || h->surplus_huge_pages_node[node]) && 2278 !list_empty(&h->hugepage_freelists[node])) { 2279 page = list_entry(h->hugepage_freelists[node].next, 2280 struct page, lru); 2281 folio = page_folio(page); 2282 remove_hugetlb_folio(h, folio, acct_surplus); 2283 break; 2284 } 2285 } 2286 2287 return page; 2288 } 2289 2290 /* 2291 * Dissolve a given free hugepage into free buddy pages. This function does 2292 * nothing for in-use hugepages and non-hugepages. 2293 * This function returns the following values: 2294 * 2295 * -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages 2296 * when the system is under memory pressure and the feature of 2297 * freeing unused vmemmap pages associated with each hugetlb page 2298 * is enabled. 2299 * -EBUSY: failed to dissolve free hugepages or the hugepage is in-use 2300 * (allocated or reserved.)
2301 * 0: successfully dissolved free hugepages or the page is not a 2302 * hugepage (considered as already dissolved) 2303 */ 2304 int dissolve_free_huge_page(struct page *page) 2305 { 2306 int rc = -EBUSY; 2307 struct folio *folio = page_folio(page); 2308 2309 retry: 2310 /* Not to disrupt normal path by vainly holding hugetlb_lock */ 2311 if (!folio_test_hugetlb(folio)) 2312 return 0; 2313 2314 spin_lock_irq(&hugetlb_lock); 2315 if (!folio_test_hugetlb(folio)) { 2316 rc = 0; 2317 goto out; 2318 } 2319 2320 if (!folio_ref_count(folio)) { 2321 struct hstate *h = folio_hstate(folio); 2322 if (!available_huge_pages(h)) 2323 goto out; 2324 2325 /* 2326 * We should make sure that the page is already on the free list 2327 * when it is dissolved. 2328 */ 2329 if (unlikely(!folio_test_hugetlb_freed(folio))) { 2330 spin_unlock_irq(&hugetlb_lock); 2331 cond_resched(); 2332 2333 /* 2334 * Theoretically, we should return -EBUSY when we 2335 * encounter this race. In fact, we have a chance 2336 * to successfully dissolve the page if we do a 2337 * retry, because the race window is quite small. 2338 * Seizing this opportunity is an optimization that 2339 * increases the success rate of dissolving the page. 2340 */ 2341 goto retry; 2342 } 2343 2344 remove_hugetlb_folio(h, folio, false); 2345 h->max_huge_pages--; 2346 spin_unlock_irq(&hugetlb_lock); 2347 2348 /* 2349 * Normally update_and_free_hugetlb_folio will allocate required vmemmap 2350 * before freeing the page. update_and_free_hugetlb_folio will fail to 2351 * free the page if it can not allocate required vmemmap. We 2352 * need to adjust max_huge_pages if the page is not freed. 2353 * Attempt to allocate vmemmap here so that we can take 2354 * appropriate action on failure. 2355 */ 2356 rc = hugetlb_vmemmap_restore(h, &folio->page); 2357 if (!rc) { 2358 update_and_free_hugetlb_folio(h, folio, false); 2359 } else { 2360 spin_lock_irq(&hugetlb_lock); 2361 add_hugetlb_folio(h, folio, false); 2362 h->max_huge_pages++; 2363 spin_unlock_irq(&hugetlb_lock); 2364 } 2365 2366 return rc; 2367 } 2368 out: 2369 spin_unlock_irq(&hugetlb_lock); 2370 return rc; 2371 } 2372 2373 /* 2374 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to 2375 * make specified memory blocks removable from the system. 2376 * Note that this will dissolve a free gigantic hugepage completely, if any 2377 * part of it lies within the given range. 2378 * Also note that if dissolve_free_huge_page() returns with an error, all 2379 * free hugepages that were dissolved before that error are lost. 2380 */ 2381 int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn) 2382 { 2383 unsigned long pfn; 2384 struct page *page; 2385 int rc = 0; 2386 unsigned int order; 2387 struct hstate *h; 2388 2389 if (!hugepages_supported()) 2390 return rc; 2391 2392 order = huge_page_order(&default_hstate); 2393 for_each_hstate(h) 2394 order = min(order, huge_page_order(h)); 2395 2396 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) { 2397 page = pfn_to_page(pfn); 2398 rc = dissolve_free_huge_page(page); 2399 if (rc) 2400 break; 2401 } 2402 2403 return rc; 2404 } 2405 2406 /* 2407 * Allocates a fresh surplus page from the page allocator.
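 * The page is accounted in surplus_huge_pages unless doing so would exceed
 * nr_overcommit_huge_pages, in which case the freshly allocated page is
 * released again and NULL is returned.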
2408 */ 2409 static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h, 2410 gfp_t gfp_mask, int nid, nodemask_t *nmask) 2411 { 2412 struct folio *folio = NULL; 2413 2414 if (hstate_is_gigantic(h)) 2415 return NULL; 2416 2417 spin_lock_irq(&hugetlb_lock); 2418 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) 2419 goto out_unlock; 2420 spin_unlock_irq(&hugetlb_lock); 2421 2422 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL); 2423 if (!folio) 2424 return NULL; 2425 2426 spin_lock_irq(&hugetlb_lock); 2427 /* 2428 * We could have raced with the pool size change. 2429 * Double check that and simply deallocate the new page 2430 * if we would end up overcommitting the surpluses. Abuse 2431 * the temporary page flag to work around the nasty free_huge_folio 2432 * codeflow. 2433 */ 2434 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) { 2435 folio_set_hugetlb_temporary(folio); 2436 spin_unlock_irq(&hugetlb_lock); 2437 free_huge_folio(folio); 2438 return NULL; 2439 } 2440 2441 h->surplus_huge_pages++; 2442 h->surplus_huge_pages_node[folio_nid(folio)]++; 2443 2444 out_unlock: 2445 spin_unlock_irq(&hugetlb_lock); 2446 2447 return folio; 2448 } 2449 2450 static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, 2451 int nid, nodemask_t *nmask) 2452 { 2453 struct folio *folio; 2454 2455 if (hstate_is_gigantic(h)) 2456 return NULL; 2457 2458 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL); 2459 if (!folio) 2460 return NULL; 2461 2462 /* fresh huge pages are frozen */ 2463 folio_ref_unfreeze(folio, 1); 2464 /* 2465 * We do not account these pages as surplus because they are only 2466 * temporary and will be released properly on the last reference. 2467 */ 2468 folio_set_hugetlb_temporary(folio); 2469 2470 return folio; 2471 } 2472 2473 /* 2474 * Use the VMA's mpolicy to allocate a huge page from the buddy.
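 * For an MPOL_PREFERRED_MANY policy the preferred nodes are tried first
 * without direct reclaim; all nodes are then used as a fallback.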
2475 */ 2476 static 2477 struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h, 2478 struct vm_area_struct *vma, unsigned long addr) 2479 { 2480 struct folio *folio = NULL; 2481 struct mempolicy *mpol; 2482 gfp_t gfp_mask = htlb_alloc_mask(h); 2483 int nid; 2484 nodemask_t *nodemask; 2485 2486 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask); 2487 if (mpol_is_preferred_many(mpol)) { 2488 gfp_t gfp = gfp_mask | __GFP_NOWARN; 2489 2490 gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); 2491 folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask); 2492 2493 /* Fallback to all nodes if page==NULL */ 2494 nodemask = NULL; 2495 } 2496 2497 if (!folio) 2498 folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask); 2499 mpol_cond_put(mpol); 2500 return folio; 2501 } 2502 2503 /* folio migration callback function */ 2504 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, 2505 nodemask_t *nmask, gfp_t gfp_mask) 2506 { 2507 spin_lock_irq(&hugetlb_lock); 2508 if (available_huge_pages(h)) { 2509 struct folio *folio; 2510 2511 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, 2512 preferred_nid, nmask); 2513 if (folio) { 2514 spin_unlock_irq(&hugetlb_lock); 2515 return folio; 2516 } 2517 } 2518 spin_unlock_irq(&hugetlb_lock); 2519 2520 return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask); 2521 } 2522 2523 /* mempolicy aware migration callback */ 2524 struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma, 2525 unsigned long address) 2526 { 2527 struct mempolicy *mpol; 2528 nodemask_t *nodemask; 2529 struct folio *folio; 2530 gfp_t gfp_mask; 2531 int node; 2532 2533 gfp_mask = htlb_alloc_mask(h); 2534 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask); 2535 folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask); 2536 mpol_cond_put(mpol); 2537 2538 return folio; 2539 } 2540 2541 /* 2542 * Increase the hugetlb pool such that it can accommodate a reservation 2543 * of size 'delta'. 2544 */ 2545 static int gather_surplus_pages(struct hstate *h, long delta) 2546 __must_hold(&hugetlb_lock) 2547 { 2548 LIST_HEAD(surplus_list); 2549 struct folio *folio, *tmp; 2550 int ret; 2551 long i; 2552 long needed, allocated; 2553 bool alloc_ok = true; 2554 2555 lockdep_assert_held(&hugetlb_lock); 2556 needed = (h->resv_huge_pages + delta) - h->free_huge_pages; 2557 if (needed <= 0) { 2558 h->resv_huge_pages += delta; 2559 return 0; 2560 } 2561 2562 allocated = 0; 2563 2564 ret = -ENOMEM; 2565 retry: 2566 spin_unlock_irq(&hugetlb_lock); 2567 for (i = 0; i < needed; i++) { 2568 folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h), 2569 NUMA_NO_NODE, NULL); 2570 if (!folio) { 2571 alloc_ok = false; 2572 break; 2573 } 2574 list_add(&folio->lru, &surplus_list); 2575 cond_resched(); 2576 } 2577 allocated += i; 2578 2579 /* 2580 * After retaking hugetlb_lock, we need to recalculate 'needed' 2581 * because either resv_huge_pages or free_huge_pages may have changed. 2582 */ 2583 spin_lock_irq(&hugetlb_lock); 2584 needed = (h->resv_huge_pages + delta) - 2585 (h->free_huge_pages + allocated); 2586 if (needed > 0) { 2587 if (alloc_ok) 2588 goto retry; 2589 /* 2590 * We were not able to allocate enough pages to 2591 * satisfy the entire reservation so we free what 2592 * we've allocated so far. 2593 */ 2594 goto free; 2595 } 2596 /* 2597 * The surplus_list now contains _at_least_ the number of extra pages 2598 * needed to accommodate the reservation. 
Add the appropriate number 2599 * of pages to the hugetlb pool and free the extras back to the buddy 2600 * allocator. Commit the entire reservation here to prevent another 2601 * process from stealing the pages as they are added to the pool but 2602 * before they are reserved. 2603 */ 2604 needed += allocated; 2605 h->resv_huge_pages += delta; 2606 ret = 0; 2607 2608 /* Free the needed pages to the hugetlb pool */ 2609 list_for_each_entry_safe(folio, tmp, &surplus_list, lru) { 2610 if ((--needed) < 0) 2611 break; 2612 /* Add the page to the hugetlb allocator */ 2613 enqueue_hugetlb_folio(h, folio); 2614 } 2615 free: 2616 spin_unlock_irq(&hugetlb_lock); 2617 2618 /* 2619 * Free unnecessary surplus pages to the buddy allocator. 2620 * Pages have no ref count, call free_huge_folio directly. 2621 */ 2622 list_for_each_entry_safe(folio, tmp, &surplus_list, lru) 2623 free_huge_folio(folio); 2624 spin_lock_irq(&hugetlb_lock); 2625 2626 return ret; 2627 } 2628 2629 /* 2630 * This routine has two main purposes: 2631 * 1) Decrement the reservation count (resv_huge_pages) by the value passed 2632 * in unused_resv_pages. This corresponds to the prior adjustments made 2633 * to the associated reservation map. 2634 * 2) Free any unused surplus pages that may have been allocated to satisfy 2635 * the reservation. As many as unused_resv_pages may be freed. 2636 */ 2637 static void return_unused_surplus_pages(struct hstate *h, 2638 unsigned long unused_resv_pages) 2639 { 2640 unsigned long nr_pages; 2641 struct page *page; 2642 LIST_HEAD(page_list); 2643 2644 lockdep_assert_held(&hugetlb_lock); 2645 /* Uncommit the reservation */ 2646 h->resv_huge_pages -= unused_resv_pages; 2647 2648 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 2649 goto out; 2650 2651 /* 2652 * Part (or even all) of the reservation could have been backed 2653 * by pre-allocated pages. Only free surplus pages. 2654 */ 2655 nr_pages = min(unused_resv_pages, h->surplus_huge_pages); 2656 2657 /* 2658 * We want to release as many surplus pages as possible, spread 2659 * evenly across all nodes with memory. Iterate across these nodes 2660 * until we can no longer free unreserved surplus pages. This occurs 2661 * when the nodes with surplus pages have no free pages. 2662 * remove_pool_huge_page() will balance the freed pages across the 2663 * on-line nodes with memory and will handle the hstate accounting. 2664 */ 2665 while (nr_pages--) { 2666 page = remove_pool_huge_page(h, &node_states[N_MEMORY], 1); 2667 if (!page) 2668 goto out; 2669 2670 list_add(&page->lru, &page_list); 2671 } 2672 2673 out: 2674 spin_unlock_irq(&hugetlb_lock); 2675 update_and_free_pages_bulk(h, &page_list); 2676 spin_lock_irq(&hugetlb_lock); 2677 } 2678 2679 2680 /* 2681 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation 2682 * are used by the huge page allocation routines to manage reservations. 2683 * 2684 * vma_needs_reservation is called to determine if the huge page at addr 2685 * within the vma has an associated reservation. If a reservation is 2686 * needed, the value 1 is returned. The caller is then responsible for 2687 * managing the global reservation and subpool usage counts. After 2688 * the huge page has been allocated, vma_commit_reservation is called 2689 * to add the page to the reservation map. If the page allocation fails, 2690 * the reservation must be ended instead of committed. vma_end_reservation 2691 * is called in such cases. 
2692 * 2693 * In the normal case, vma_commit_reservation returns the same value 2694 * as the preceding vma_needs_reservation call. The only time this 2695 * is not the case is if a reserve map was changed between calls. It 2696 * is the responsibility of the caller to notice the difference and 2697 * take appropriate action. 2698 * 2699 * vma_add_reservation is used in error paths where a reservation must 2700 * be restored when a newly allocated huge page must be freed. It is 2701 * to be called after calling vma_needs_reservation to determine if a 2702 * reservation exists. 2703 * 2704 * vma_del_reservation is used in error paths where an entry in the reserve 2705 * map was created during huge page allocation and must be removed. It is to 2706 * be called after calling vma_needs_reservation to determine if a reservation 2707 * exists. 2708 */ 2709 enum vma_resv_mode { 2710 VMA_NEEDS_RESV, 2711 VMA_COMMIT_RESV, 2712 VMA_END_RESV, 2713 VMA_ADD_RESV, 2714 VMA_DEL_RESV, 2715 }; 2716 static long __vma_reservation_common(struct hstate *h, 2717 struct vm_area_struct *vma, unsigned long addr, 2718 enum vma_resv_mode mode) 2719 { 2720 struct resv_map *resv; 2721 pgoff_t idx; 2722 long ret; 2723 long dummy_out_regions_needed; 2724 2725 resv = vma_resv_map(vma); 2726 if (!resv) 2727 return 1; 2728 2729 idx = vma_hugecache_offset(h, vma, addr); 2730 switch (mode) { 2731 case VMA_NEEDS_RESV: 2732 ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed); 2733 /* We assume that vma_reservation_* routines always operate on 2734 * 1 page, and that adding to resv map a 1 page entry can only 2735 * ever require 1 region. 2736 */ 2737 VM_BUG_ON(dummy_out_regions_needed != 1); 2738 break; 2739 case VMA_COMMIT_RESV: 2740 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); 2741 /* region_add calls of range 1 should never fail. */ 2742 VM_BUG_ON(ret < 0); 2743 break; 2744 case VMA_END_RESV: 2745 region_abort(resv, idx, idx + 1, 1); 2746 ret = 0; 2747 break; 2748 case VMA_ADD_RESV: 2749 if (vma->vm_flags & VM_MAYSHARE) { 2750 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); 2751 /* region_add calls of range 1 should never fail. */ 2752 VM_BUG_ON(ret < 0); 2753 } else { 2754 region_abort(resv, idx, idx + 1, 1); 2755 ret = region_del(resv, idx, idx + 1); 2756 } 2757 break; 2758 case VMA_DEL_RESV: 2759 if (vma->vm_flags & VM_MAYSHARE) { 2760 region_abort(resv, idx, idx + 1, 1); 2761 ret = region_del(resv, idx, idx + 1); 2762 } else { 2763 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); 2764 /* region_add calls of range 1 should never fail. */ 2765 VM_BUG_ON(ret < 0); 2766 } 2767 break; 2768 default: 2769 BUG(); 2770 } 2771 2772 if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV) 2773 return ret; 2774 /* 2775 * We know private mapping must have HPAGE_RESV_OWNER set. 2776 * 2777 * In most cases, reserves always exist for private mappings. 2778 * However, a file associated with mapping could have been 2779 * hole punched or truncated after reserves were consumed. 2780 * As subsequent fault on such a range will not use reserves. 2781 * Subtle - The reserve map for private mappings has the 2782 * opposite meaning than that of shared mappings. If NO 2783 * entry is in the reserve map, it means a reservation exists. 2784 * If an entry exists in the reserve map, it means the 2785 * reservation has already been consumed. As a result, the 2786 * return value of this routine is the opposite of the 2787 * value returned from reserve map manipulation routines above. 
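 * Hence the inversion below: a positive region count maps to 0 (no
 * reservation needed) and 0 maps to 1 (reservation needed); negative
 * error values are passed through unchanged.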
2788 */ 2789 if (ret > 0) 2790 return 0; 2791 if (ret == 0) 2792 return 1; 2793 return ret; 2794 } 2795 2796 static long vma_needs_reservation(struct hstate *h, 2797 struct vm_area_struct *vma, unsigned long addr) 2798 { 2799 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); 2800 } 2801 2802 static long vma_commit_reservation(struct hstate *h, 2803 struct vm_area_struct *vma, unsigned long addr) 2804 { 2805 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); 2806 } 2807 2808 static void vma_end_reservation(struct hstate *h, 2809 struct vm_area_struct *vma, unsigned long addr) 2810 { 2811 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); 2812 } 2813 2814 static long vma_add_reservation(struct hstate *h, 2815 struct vm_area_struct *vma, unsigned long addr) 2816 { 2817 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); 2818 } 2819 2820 static long vma_del_reservation(struct hstate *h, 2821 struct vm_area_struct *vma, unsigned long addr) 2822 { 2823 return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV); 2824 } 2825 2826 /* 2827 * This routine is called to restore reservation information on error paths. 2828 * It should ONLY be called for folios allocated via alloc_hugetlb_folio(), 2829 * and the hugetlb mutex should remain held when calling this routine. 2830 * 2831 * It handles two specific cases: 2832 * 1) A reservation was in place and the folio consumed the reservation. 2833 * hugetlb_restore_reserve is set in the folio. 2834 * 2) No reservation was in place for the page, so hugetlb_restore_reserve is 2835 * not set. However, alloc_hugetlb_folio always updates the reserve map. 2836 * 2837 * In case 1, free_huge_folio later in the error path will increment the 2838 * global reserve count. But, free_huge_folio does not have enough context 2839 * to adjust the reservation map. This case deals primarily with private 2840 * mappings. Adjust the reserve map here to be consistent with global 2841 * reserve count adjustments to be made by free_huge_folio. Make sure the 2842 * reserve map indicates there is a reservation present. 2843 * 2844 * In case 2, simply undo reserve map modifications done by alloc_hugetlb_folio. 2845 */ 2846 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, 2847 unsigned long address, struct folio *folio) 2848 { 2849 long rc = vma_needs_reservation(h, vma, address); 2850 2851 if (folio_test_hugetlb_restore_reserve(folio)) { 2852 if (unlikely(rc < 0)) 2853 /* 2854 * Rare out of memory condition in reserve map 2855 * manipulation. Clear hugetlb_restore_reserve so 2856 * that global reserve count will not be incremented 2857 * by free_huge_folio. This will make it appear 2858 * as though the reservation for this folio was 2859 * consumed. This may prevent the task from 2860 * faulting in the folio at a later time. This 2861 * is better than inconsistent global huge page 2862 * accounting of reserve counts. 2863 */ 2864 folio_clear_hugetlb_restore_reserve(folio); 2865 else if (rc) 2866 (void)vma_add_reservation(h, vma, address); 2867 else 2868 vma_end_reservation(h, vma, address); 2869 } else { 2870 if (!rc) { 2871 /* 2872 * This indicates there is an entry in the reserve map 2873 * not added by alloc_hugetlb_folio. We know it was added 2874 * before the alloc_hugetlb_folio call, otherwise 2875 * hugetlb_restore_reserve would be set on the folio. 2876 * Remove the entry so that a subsequent allocation 2877 * does not consume a reservation. 
2878 */ 2879 rc = vma_del_reservation(h, vma, address); 2880 if (rc < 0) 2881 /* 2882 * VERY rare out of memory condition. Since 2883 * we can not delete the entry, set 2884 * hugetlb_restore_reserve so that the reserve 2885 * count will be incremented when the folio 2886 * is freed. This reserve will be consumed 2887 * on a subsequent allocation. 2888 */ 2889 folio_set_hugetlb_restore_reserve(folio); 2890 } else if (rc < 0) { 2891 /* 2892 * Rare out of memory condition from 2893 * vma_needs_reservation call. Memory allocation is 2894 * only attempted if a new entry is needed. Therefore, 2895 * this implies there is not an entry in the 2896 * reserve map. 2897 * 2898 * For shared mappings, no entry in the map indicates 2899 * no reservation. We are done. 2900 */ 2901 if (!(vma->vm_flags & VM_MAYSHARE)) 2902 /* 2903 * For private mappings, no entry indicates 2904 * a reservation is present. Since we can 2905 * not add an entry, set hugetlb_restore_reserve 2906 * on the folio so reserve count will be 2907 * incremented when freed. This reserve will 2908 * be consumed on a subsequent allocation. 2909 */ 2910 folio_set_hugetlb_restore_reserve(folio); 2911 } else 2912 /* 2913 * No reservation present, do nothing 2914 */ 2915 vma_end_reservation(h, vma, address); 2916 } 2917 } 2918 2919 /* 2920 * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve 2921 * the old one 2922 * @h: struct hstate old page belongs to 2923 * @old_folio: Old folio to dissolve 2924 * @list: List to isolate the page in case we need to 2925 * Returns 0 on success, otherwise negated error. 2926 */ 2927 static int alloc_and_dissolve_hugetlb_folio(struct hstate *h, 2928 struct folio *old_folio, struct list_head *list) 2929 { 2930 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; 2931 int nid = folio_nid(old_folio); 2932 struct folio *new_folio; 2933 int ret = 0; 2934 2935 /* 2936 * Before dissolving the folio, we need to allocate a new one for the 2937 * pool to remain stable. Here, we allocate the folio and 'prep' it 2938 * by doing everything but actually updating counters and adding to 2939 * the pool. This simplifies and let us do most of the processing 2940 * under the lock. 2941 */ 2942 new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, NULL, NULL); 2943 if (!new_folio) 2944 return -ENOMEM; 2945 __prep_new_hugetlb_folio(h, new_folio); 2946 2947 retry: 2948 spin_lock_irq(&hugetlb_lock); 2949 if (!folio_test_hugetlb(old_folio)) { 2950 /* 2951 * Freed from under us. Drop new_folio too. 2952 */ 2953 goto free_new; 2954 } else if (folio_ref_count(old_folio)) { 2955 bool isolated; 2956 2957 /* 2958 * Someone has grabbed the folio, try to isolate it here. 2959 * Fail with -EBUSY if not possible. 2960 */ 2961 spin_unlock_irq(&hugetlb_lock); 2962 isolated = isolate_hugetlb(old_folio, list); 2963 ret = isolated ? 0 : -EBUSY; 2964 spin_lock_irq(&hugetlb_lock); 2965 goto free_new; 2966 } else if (!folio_test_hugetlb_freed(old_folio)) { 2967 /* 2968 * Folio's refcount is 0 but it has not been enqueued in the 2969 * freelist yet. Race window is small, so we can succeed here if 2970 * we retry. 2971 */ 2972 spin_unlock_irq(&hugetlb_lock); 2973 cond_resched(); 2974 goto retry; 2975 } else { 2976 /* 2977 * Ok, old_folio is still a genuine free hugepage. Remove it from 2978 * the freelist and decrease the counters. These will be 2979 * incremented again when calling __prep_account_new_huge_page() 2980 * and enqueue_hugetlb_folio() for new_folio. 
The counters will 2981 * remain stable since this happens under the lock. 2982 */ 2983 remove_hugetlb_folio(h, old_folio, false); 2984 2985 /* 2986 * Ref count on new_folio is already zero as it was dropped 2987 * earlier. It can be directly added to the pool free list. 2988 */ 2989 __prep_account_new_huge_page(h, nid); 2990 enqueue_hugetlb_folio(h, new_folio); 2991 2992 /* 2993 * Folio has been replaced, we can safely free the old one. 2994 */ 2995 spin_unlock_irq(&hugetlb_lock); 2996 update_and_free_hugetlb_folio(h, old_folio, false); 2997 } 2998 2999 return ret; 3000 3001 free_new: 3002 spin_unlock_irq(&hugetlb_lock); 3003 /* Folio has a zero ref count, but needs a ref to be freed */ 3004 folio_ref_unfreeze(new_folio, 1); 3005 update_and_free_hugetlb_folio(h, new_folio, false); 3006 3007 return ret; 3008 } 3009 3010 int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list) 3011 { 3012 struct hstate *h; 3013 struct folio *folio = page_folio(page); 3014 int ret = -EBUSY; 3015 3016 /* 3017 * The page might have been dissolved from under our feet, so make sure 3018 * to carefully check the state under the lock. 3019 * Return success when racing as if we dissolved the page ourselves. 3020 */ 3021 spin_lock_irq(&hugetlb_lock); 3022 if (folio_test_hugetlb(folio)) { 3023 h = folio_hstate(folio); 3024 } else { 3025 spin_unlock_irq(&hugetlb_lock); 3026 return 0; 3027 } 3028 spin_unlock_irq(&hugetlb_lock); 3029 3030 /* 3031 * Fence off gigantic pages as there is a cyclic dependency between 3032 * alloc_contig_range and them. Return -ENOMEM as this has the effect 3033 * of bailing out right away without further retrying. 3034 */ 3035 if (hstate_is_gigantic(h)) 3036 return -ENOMEM; 3037 3038 if (folio_ref_count(folio) && isolate_hugetlb(folio, list)) 3039 ret = 0; 3040 else if (!folio_ref_count(folio)) 3041 ret = alloc_and_dissolve_hugetlb_folio(h, folio, list); 3042 3043 return ret; 3044 } 3045 3046 struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, 3047 unsigned long addr, int avoid_reserve) 3048 { 3049 struct hugepage_subpool *spool = subpool_vma(vma); 3050 struct hstate *h = hstate_vma(vma); 3051 struct folio *folio; 3052 long map_chg, map_commit; 3053 long gbl_chg; 3054 int ret, idx; 3055 struct hugetlb_cgroup *h_cg = NULL; 3056 bool deferred_reserve; 3057 3058 idx = hstate_index(h); 3059 /* 3060 * Examine the region/reserve map to determine if the process 3061 * has a reservation for the page to be allocated. A return 3062 * code of zero indicates a reservation exists (no change). 3063 */ 3064 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); 3065 if (map_chg < 0) 3066 return ERR_PTR(-ENOMEM); 3067 3068 /* 3069 * Processes that did not create the mapping will have no 3070 * reserves as indicated by the region/reserve map. Check 3071 * that the allocation will not exceed the subpool limit. 3072 * Allocations for MAP_NORESERVE mappings also need to be 3073 * checked against any subpool limit. 3074 */ 3075 if (map_chg || avoid_reserve) { 3076 gbl_chg = hugepage_subpool_get_pages(spool, 1); 3077 if (gbl_chg < 0) { 3078 vma_end_reservation(h, vma, addr); 3079 return ERR_PTR(-ENOSPC); 3080 } 3081 3082 /* 3083 * Even though there was no reservation in the region/reserve 3084 * map, there could be reservations associated with the 3085 * subpool that can be used. This would be indicated if the 3086 * return value of hugepage_subpool_get_pages() is zero. 3087 * However, if avoid_reserve is specified we still avoid even 3088 * the subpool reservations. 
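 * Setting gbl_chg to 1 below makes the dequeue path treat this allocation
 * as one that must take a page from the global free pool.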
3089 */ 3090 if (avoid_reserve) 3091 gbl_chg = 1; 3092 } 3093 3094 /* If this allocation is not consuming a reservation, charge it now. 3095 */ 3096 deferred_reserve = map_chg || avoid_reserve; 3097 if (deferred_reserve) { 3098 ret = hugetlb_cgroup_charge_cgroup_rsvd( 3099 idx, pages_per_huge_page(h), &h_cg); 3100 if (ret) 3101 goto out_subpool_put; 3102 } 3103 3104 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); 3105 if (ret) 3106 goto out_uncharge_cgroup_reservation; 3107 3108 spin_lock_irq(&hugetlb_lock); 3109 /* 3110 * gbl_chg is passed to indicate whether or not a page must be taken 3111 * from the global free pool (global change). gbl_chg == 0 indicates 3112 * a reservation exists for the allocation. 3113 */ 3114 folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg); 3115 if (!folio) { 3116 spin_unlock_irq(&hugetlb_lock); 3117 folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr); 3118 if (!folio) 3119 goto out_uncharge_cgroup; 3120 spin_lock_irq(&hugetlb_lock); 3121 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) { 3122 folio_set_hugetlb_restore_reserve(folio); 3123 h->resv_huge_pages--; 3124 } 3125 list_add(&folio->lru, &h->hugepage_activelist); 3126 folio_ref_unfreeze(folio, 1); 3127 /* Fall through */ 3128 } 3129 3130 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio); 3131 /* If allocation is not consuming a reservation, also store the 3132 * hugetlb_cgroup pointer on the page. 3133 */ 3134 if (deferred_reserve) { 3135 hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h), 3136 h_cg, folio); 3137 } 3138 3139 spin_unlock_irq(&hugetlb_lock); 3140 3141 hugetlb_set_folio_subpool(folio, spool); 3142 3143 map_commit = vma_commit_reservation(h, vma, addr); 3144 if (unlikely(map_chg > map_commit)) { 3145 /* 3146 * The page was added to the reservation map between 3147 * vma_needs_reservation and vma_commit_reservation. 3148 * This indicates a race with hugetlb_reserve_pages. 3149 * Adjust for the subpool count incremented above AND 3150 * in hugetlb_reserve_pages for the same page. Also, 3151 * the reservation count added in hugetlb_reserve_pages 3152 * no longer applies.
3153 */ 3154 long rsv_adjust; 3155 3156 rsv_adjust = hugepage_subpool_put_pages(spool, 1); 3157 hugetlb_acct_memory(h, -rsv_adjust); 3158 if (deferred_reserve) 3159 hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h), 3160 pages_per_huge_page(h), folio); 3161 } 3162 return folio; 3163 3164 out_uncharge_cgroup: 3165 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); 3166 out_uncharge_cgroup_reservation: 3167 if (deferred_reserve) 3168 hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h), 3169 h_cg); 3170 out_subpool_put: 3171 if (map_chg || avoid_reserve) 3172 hugepage_subpool_put_pages(spool, 1); 3173 vma_end_reservation(h, vma, addr); 3174 return ERR_PTR(-ENOSPC); 3175 } 3176 3177 int alloc_bootmem_huge_page(struct hstate *h, int nid) 3178 __attribute__ ((weak, alias("__alloc_bootmem_huge_page"))); 3179 int __alloc_bootmem_huge_page(struct hstate *h, int nid) 3180 { 3181 struct huge_bootmem_page *m = NULL; /* initialize for clang */ 3182 int nr_nodes, node; 3183 3184 /* do node specific alloc */ 3185 if (nid != NUMA_NO_NODE) { 3186 m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h), 3187 0, MEMBLOCK_ALLOC_ACCESSIBLE, nid); 3188 if (!m) 3189 return 0; 3190 goto found; 3191 } 3192 /* allocate from next node when distributing huge pages */ 3193 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) { 3194 m = memblock_alloc_try_nid_raw( 3195 huge_page_size(h), huge_page_size(h), 3196 0, MEMBLOCK_ALLOC_ACCESSIBLE, node); 3197 /* 3198 * Use the beginning of the huge page to store the 3199 * huge_bootmem_page struct (until gather_bootmem 3200 * puts them into the mem_map). 3201 */ 3202 if (!m) 3203 return 0; 3204 goto found; 3205 } 3206 3207 found: 3208 /* Put them into a private list first because mem_map is not up yet */ 3209 INIT_LIST_HEAD(&m->list); 3210 list_add(&m->list, &huge_boot_pages); 3211 m->hstate = h; 3212 return 1; 3213 } 3214 3215 /* 3216 * Put bootmem huge pages into the standard lists after mem_map is up. 3217 * Note: This only applies to gigantic (order > MAX_ORDER) pages. 3218 */ 3219 static void __init gather_bootmem_prealloc(void) 3220 { 3221 struct huge_bootmem_page *m; 3222 3223 list_for_each_entry(m, &huge_boot_pages, list) { 3224 struct page *page = virt_to_page(m); 3225 struct folio *folio = page_folio(page); 3226 struct hstate *h = m->hstate; 3227 3228 VM_BUG_ON(!hstate_is_gigantic(h)); 3229 WARN_ON(folio_ref_count(folio) != 1); 3230 if (prep_compound_gigantic_folio(folio, huge_page_order(h))) { 3231 WARN_ON(folio_test_reserved(folio)); 3232 prep_new_hugetlb_folio(h, folio, folio_nid(folio)); 3233 free_huge_folio(folio); /* add to the hugepage allocator */ 3234 } else { 3235 /* VERY unlikely inflated ref count on a tail page */ 3236 free_gigantic_folio(folio, huge_page_order(h)); 3237 } 3238 3239 /* 3240 * We need to restore the 'stolen' pages to totalram_pages 3241 * in order to fix confusing memory reports from free(1) and 3242 * other side-effects, like CommitLimit going negative. 
3243 */ 3244 adjust_managed_page_count(page, pages_per_huge_page(h)); 3245 cond_resched(); 3246 } 3247 } 3248 static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid) 3249 { 3250 unsigned long i; 3251 char buf[32]; 3252 3253 for (i = 0; i < h->max_huge_pages_node[nid]; ++i) { 3254 if (hstate_is_gigantic(h)) { 3255 if (!alloc_bootmem_huge_page(h, nid)) 3256 break; 3257 } else { 3258 struct folio *folio; 3259 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; 3260 3261 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, 3262 &node_states[N_MEMORY], NULL); 3263 if (!folio) 3264 break; 3265 free_huge_folio(folio); /* free it into the hugepage allocator */ 3266 } 3267 cond_resched(); 3268 } 3269 if (i == h->max_huge_pages_node[nid]) 3270 return; 3271 3272 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3273 pr_warn("HugeTLB: allocating %u of page size %s failed node%d. Only allocated %lu hugepages.\n", 3274 h->max_huge_pages_node[nid], buf, nid, i); 3275 h->max_huge_pages -= (h->max_huge_pages_node[nid] - i); 3276 h->max_huge_pages_node[nid] = i; 3277 } 3278 3279 static void __init hugetlb_hstate_alloc_pages(struct hstate *h) 3280 { 3281 unsigned long i; 3282 nodemask_t *node_alloc_noretry; 3283 bool node_specific_alloc = false; 3284 3285 /* skip gigantic hugepages allocation if hugetlb_cma enabled */ 3286 if (hstate_is_gigantic(h) && hugetlb_cma_size) { 3287 pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n"); 3288 return; 3289 } 3290 3291 /* do node specific alloc */ 3292 for_each_online_node(i) { 3293 if (h->max_huge_pages_node[i] > 0) { 3294 hugetlb_hstate_alloc_pages_onenode(h, i); 3295 node_specific_alloc = true; 3296 } 3297 } 3298 3299 if (node_specific_alloc) 3300 return; 3301 3302 /* below will do all node balanced alloc */ 3303 if (!hstate_is_gigantic(h)) { 3304 /* 3305 * Bit mask controlling how hard we retry per-node allocations. 3306 * Ignore errors as lower level routines can deal with 3307 * node_alloc_noretry == NULL. If this kmalloc fails at boot 3308 * time, we are likely in bigger trouble. 3309 */ 3310 node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry), 3311 GFP_KERNEL); 3312 } else { 3313 /* allocations done at boot time */ 3314 node_alloc_noretry = NULL; 3315 } 3316 3317 /* bit mask controlling how hard we retry per-node allocations */ 3318 if (node_alloc_noretry) 3319 nodes_clear(*node_alloc_noretry); 3320 3321 for (i = 0; i < h->max_huge_pages; ++i) { 3322 if (hstate_is_gigantic(h)) { 3323 if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE)) 3324 break; 3325 } else if (!alloc_pool_huge_page(h, 3326 &node_states[N_MEMORY], 3327 node_alloc_noretry)) 3328 break; 3329 cond_resched(); 3330 } 3331 if (i < h->max_huge_pages) { 3332 char buf[32]; 3333 3334 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3335 pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n", 3336 h->max_huge_pages, buf, i); 3337 h->max_huge_pages = i; 3338 } 3339 kfree(node_alloc_noretry); 3340 } 3341 3342 static void __init hugetlb_init_hstates(void) 3343 { 3344 struct hstate *h, *h2; 3345 3346 for_each_hstate(h) { 3347 /* oversize hugepages were init'ed in early boot */ 3348 if (!hstate_is_gigantic(h)) 3349 hugetlb_hstate_alloc_pages(h); 3350 3351 /* 3352 * Set demote order for each hstate. Note that 3353 * h->demote_order is initially 0. 3354 * - We can not demote gigantic pages if runtime freeing 3355 * is not supported, so skip this. 
3356 * - If CMA allocation is possible, we can not demote 3357 * HUGETLB_PAGE_ORDER or smaller size pages. 3358 */ 3359 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 3360 continue; 3361 if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER) 3362 continue; 3363 for_each_hstate(h2) { 3364 if (h2 == h) 3365 continue; 3366 if (h2->order < h->order && 3367 h2->order > h->demote_order) 3368 h->demote_order = h2->order; 3369 } 3370 } 3371 } 3372 3373 static void __init report_hugepages(void) 3374 { 3375 struct hstate *h; 3376 3377 for_each_hstate(h) { 3378 char buf[32]; 3379 3380 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3381 pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n", 3382 buf, h->free_huge_pages); 3383 pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n", 3384 hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf); 3385 } 3386 } 3387 3388 #ifdef CONFIG_HIGHMEM 3389 static void try_to_free_low(struct hstate *h, unsigned long count, 3390 nodemask_t *nodes_allowed) 3391 { 3392 int i; 3393 LIST_HEAD(page_list); 3394 3395 lockdep_assert_held(&hugetlb_lock); 3396 if (hstate_is_gigantic(h)) 3397 return; 3398 3399 /* 3400 * Collect pages to be freed on a list, and free after dropping lock 3401 */ 3402 for_each_node_mask(i, *nodes_allowed) { 3403 struct page *page, *next; 3404 struct list_head *freel = &h->hugepage_freelists[i]; 3405 list_for_each_entry_safe(page, next, freel, lru) { 3406 if (count >= h->nr_huge_pages) 3407 goto out; 3408 if (PageHighMem(page)) 3409 continue; 3410 remove_hugetlb_folio(h, page_folio(page), false); 3411 list_add(&page->lru, &page_list); 3412 } 3413 } 3414 3415 out: 3416 spin_unlock_irq(&hugetlb_lock); 3417 update_and_free_pages_bulk(h, &page_list); 3418 spin_lock_irq(&hugetlb_lock); 3419 } 3420 #else 3421 static inline void try_to_free_low(struct hstate *h, unsigned long count, 3422 nodemask_t *nodes_allowed) 3423 { 3424 } 3425 #endif 3426 3427 /* 3428 * Increment or decrement surplus_huge_pages. Keep node-specific counters 3429 * balanced by operating on them in a round-robin fashion. 3430 * Returns 1 if an adjustment was made. 3431 */ 3432 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, 3433 int delta) 3434 { 3435 int nr_nodes, node; 3436 3437 lockdep_assert_held(&hugetlb_lock); 3438 VM_BUG_ON(delta != -1 && delta != 1); 3439 3440 if (delta < 0) { 3441 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { 3442 if (h->surplus_huge_pages_node[node]) 3443 goto found; 3444 } 3445 } else { 3446 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 3447 if (h->surplus_huge_pages_node[node] < 3448 h->nr_huge_pages_node[node]) 3449 goto found; 3450 } 3451 } 3452 return 0; 3453 3454 found: 3455 h->surplus_huge_pages += delta; 3456 h->surplus_huge_pages_node[node] += delta; 3457 return 1; 3458 } 3459 3460 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) 3461 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, 3462 nodemask_t *nodes_allowed) 3463 { 3464 unsigned long min_count, ret; 3465 struct page *page; 3466 LIST_HEAD(page_list); 3467 NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL); 3468 3469 /* 3470 * Bit mask controlling how hard we retry per-node allocations. 3471 * If we can not allocate the bit mask, do not attempt to allocate 3472 * the requested huge pages. 
3473 */ 3474 if (node_alloc_noretry) 3475 nodes_clear(*node_alloc_noretry); 3476 else 3477 return -ENOMEM; 3478 3479 /* 3480 * resize_lock mutex prevents concurrent adjustments to number of 3481 * pages in hstate via the proc/sysfs interfaces. 3482 */ 3483 mutex_lock(&h->resize_lock); 3484 flush_free_hpage_work(h); 3485 spin_lock_irq(&hugetlb_lock); 3486 3487 /* 3488 * Check for a node specific request. 3489 * Changing node specific huge page count may require a corresponding 3490 * change to the global count. In any case, the passed node mask 3491 * (nodes_allowed) will restrict alloc/free to the specified node. 3492 */ 3493 if (nid != NUMA_NO_NODE) { 3494 unsigned long old_count = count; 3495 3496 count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; 3497 /* 3498 * User may have specified a large count value which caused the 3499 * above calculation to overflow. In this case, they wanted 3500 * to allocate as many huge pages as possible. Set count to 3501 * largest possible value to align with their intention. 3502 */ 3503 if (count < old_count) 3504 count = ULONG_MAX; 3505 } 3506 3507 /* 3508 * Gigantic pages runtime allocation depend on the capability for large 3509 * page range allocation. 3510 * If the system does not provide this feature, return an error when 3511 * the user tries to allocate gigantic pages but let the user free the 3512 * boottime allocated gigantic pages. 3513 */ 3514 if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) { 3515 if (count > persistent_huge_pages(h)) { 3516 spin_unlock_irq(&hugetlb_lock); 3517 mutex_unlock(&h->resize_lock); 3518 NODEMASK_FREE(node_alloc_noretry); 3519 return -EINVAL; 3520 } 3521 /* Fall through to decrease pool */ 3522 } 3523 3524 /* 3525 * Increase the pool size 3526 * First take pages out of surplus state. Then make up the 3527 * remaining difference by allocating fresh huge pages. 3528 * 3529 * We might race with alloc_surplus_hugetlb_folio() here and be unable 3530 * to convert a surplus huge page to a normal huge page. That is 3531 * not critical, though, it just means the overall size of the 3532 * pool might be one hugepage larger than it needs to be, but 3533 * within all the constraints specified by the sysctls. 3534 */ 3535 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { 3536 if (!adjust_pool_surplus(h, nodes_allowed, -1)) 3537 break; 3538 } 3539 3540 while (count > persistent_huge_pages(h)) { 3541 /* 3542 * If this allocation races such that we no longer need the 3543 * page, free_huge_folio will handle it by freeing the page 3544 * and reducing the surplus. 3545 */ 3546 spin_unlock_irq(&hugetlb_lock); 3547 3548 /* yield cpu to avoid soft lockup */ 3549 cond_resched(); 3550 3551 ret = alloc_pool_huge_page(h, nodes_allowed, 3552 node_alloc_noretry); 3553 spin_lock_irq(&hugetlb_lock); 3554 if (!ret) 3555 goto out; 3556 3557 /* Bail for signals. Probably ctrl-c from user */ 3558 if (signal_pending(current)) 3559 goto out; 3560 } 3561 3562 /* 3563 * Decrease the pool size 3564 * First return free pages to the buddy allocator (being careful 3565 * to keep enough around to satisfy reservations). Then place 3566 * pages into surplus state as needed so the pool will shrink 3567 * to the desired size as pages become free. 3568 * 3569 * By placing pages into the surplus state independent of the 3570 * overcommit value, we are allowing the surplus pool size to 3571 * exceed overcommit. There are few sane options here. 
Since 3572 * alloc_surplus_hugetlb_folio() is checking the global counter, 3573 * though, we'll note that we're not allowed to exceed surplus 3574 * and won't grow the pool anywhere else. Not until one of the 3575 * sysctls is changed, or the surplus pages go out of use. 3576 */ 3577 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; 3578 min_count = max(count, min_count); 3579 try_to_free_low(h, min_count, nodes_allowed); 3580 3581 /* 3582 * Collect pages to be removed on list without dropping lock 3583 */ 3584 while (min_count < persistent_huge_pages(h)) { 3585 page = remove_pool_huge_page(h, nodes_allowed, 0); 3586 if (!page) 3587 break; 3588 3589 list_add(&page->lru, &page_list); 3590 } 3591 /* free the pages after dropping lock */ 3592 spin_unlock_irq(&hugetlb_lock); 3593 update_and_free_pages_bulk(h, &page_list); 3594 flush_free_hpage_work(h); 3595 spin_lock_irq(&hugetlb_lock); 3596 3597 while (count < persistent_huge_pages(h)) { 3598 if (!adjust_pool_surplus(h, nodes_allowed, 1)) 3599 break; 3600 } 3601 out: 3602 h->max_huge_pages = persistent_huge_pages(h); 3603 spin_unlock_irq(&hugetlb_lock); 3604 mutex_unlock(&h->resize_lock); 3605 3606 NODEMASK_FREE(node_alloc_noretry); 3607 3608 return 0; 3609 } 3610 3611 static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio) 3612 { 3613 int i, nid = folio_nid(folio); 3614 struct hstate *target_hstate; 3615 struct page *subpage; 3616 struct folio *inner_folio; 3617 int rc = 0; 3618 3619 target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order); 3620 3621 remove_hugetlb_folio_for_demote(h, folio, false); 3622 spin_unlock_irq(&hugetlb_lock); 3623 3624 rc = hugetlb_vmemmap_restore(h, &folio->page); 3625 if (rc) { 3626 /* Allocation of vmemmap failed, we can not demote the folio */ 3627 spin_lock_irq(&hugetlb_lock); 3628 folio_ref_unfreeze(folio, 1); 3629 add_hugetlb_folio(h, folio, false); 3630 return rc; 3631 } 3632 3633 /* 3634 * Use destroy_compound_hugetlb_folio_for_demote for all huge page 3635 * sizes as it will not ref count folios. 3636 */ 3637 destroy_compound_hugetlb_folio_for_demote(folio, huge_page_order(h)); 3638 3639 /* 3640 * Taking target hstate mutex synchronizes with set_max_huge_pages. 3641 * Without the mutex, pages added to target hstate could be marked 3642 * as surplus. 3643 * 3644 * Note that we already hold h->resize_lock. To prevent deadlock, 3645 * use the convention of always taking larger size hstate mutex first. 3646 */ 3647 mutex_lock(&target_hstate->resize_lock); 3648 for (i = 0; i < pages_per_huge_page(h); 3649 i += pages_per_huge_page(target_hstate)) { 3650 subpage = folio_page(folio, i); 3651 inner_folio = page_folio(subpage); 3652 if (hstate_is_gigantic(target_hstate)) 3653 prep_compound_gigantic_folio_for_demote(inner_folio, 3654 target_hstate->order); 3655 else 3656 prep_compound_page(subpage, target_hstate->order); 3657 folio_change_private(inner_folio, NULL); 3658 prep_new_hugetlb_folio(target_hstate, inner_folio, nid); 3659 free_huge_folio(inner_folio); 3660 } 3661 mutex_unlock(&target_hstate->resize_lock); 3662 3663 spin_lock_irq(&hugetlb_lock); 3664 3665 /* 3666 * Not absolutely necessary, but for consistency update max_huge_pages 3667 * based on pool changes for the demoted page.
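 * One page of the source hstate becomes pages_per_huge_page(h) /
 * pages_per_huge_page(target_hstate) pages of the target hstate.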
3668 */ 3669 h->max_huge_pages--; 3670 target_hstate->max_huge_pages += 3671 pages_per_huge_page(h) / pages_per_huge_page(target_hstate); 3672 3673 return rc; 3674 } 3675 3676 static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed) 3677 __must_hold(&hugetlb_lock) 3678 { 3679 int nr_nodes, node; 3680 struct folio *folio; 3681 3682 lockdep_assert_held(&hugetlb_lock); 3683 3684 /* We should never get here if no demote order */ 3685 if (!h->demote_order) { 3686 pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n"); 3687 return -EINVAL; /* internal error */ 3688 } 3689 3690 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 3691 list_for_each_entry(folio, &h->hugepage_freelists[node], lru) { 3692 if (folio_test_hwpoison(folio)) 3693 continue; 3694 return demote_free_hugetlb_folio(h, folio); 3695 } 3696 } 3697 3698 /* 3699 * Only way to get here is if all pages on free lists are poisoned. 3700 * Return -EBUSY so that caller will not retry. 3701 */ 3702 return -EBUSY; 3703 } 3704 3705 #define HSTATE_ATTR_RO(_name) \ 3706 static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 3707 3708 #define HSTATE_ATTR_WO(_name) \ 3709 static struct kobj_attribute _name##_attr = __ATTR_WO(_name) 3710 3711 #define HSTATE_ATTR(_name) \ 3712 static struct kobj_attribute _name##_attr = __ATTR_RW(_name) 3713 3714 static struct kobject *hugepages_kobj; 3715 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 3716 3717 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp); 3718 3719 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp) 3720 { 3721 int i; 3722 3723 for (i = 0; i < HUGE_MAX_HSTATE; i++) 3724 if (hstate_kobjs[i] == kobj) { 3725 if (nidp) 3726 *nidp = NUMA_NO_NODE; 3727 return &hstates[i]; 3728 } 3729 3730 return kobj_to_node_hstate(kobj, nidp); 3731 } 3732 3733 static ssize_t nr_hugepages_show_common(struct kobject *kobj, 3734 struct kobj_attribute *attr, char *buf) 3735 { 3736 struct hstate *h; 3737 unsigned long nr_huge_pages; 3738 int nid; 3739 3740 h = kobj_to_hstate(kobj, &nid); 3741 if (nid == NUMA_NO_NODE) 3742 nr_huge_pages = h->nr_huge_pages; 3743 else 3744 nr_huge_pages = h->nr_huge_pages_node[nid]; 3745 3746 return sysfs_emit(buf, "%lu\n", nr_huge_pages); 3747 } 3748 3749 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy, 3750 struct hstate *h, int nid, 3751 unsigned long count, size_t len) 3752 { 3753 int err; 3754 nodemask_t nodes_allowed, *n_mask; 3755 3756 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 3757 return -EINVAL; 3758 3759 if (nid == NUMA_NO_NODE) { 3760 /* 3761 * global hstate attribute 3762 */ 3763 if (!(obey_mempolicy && 3764 init_nodemask_of_mempolicy(&nodes_allowed))) 3765 n_mask = &node_states[N_MEMORY]; 3766 else 3767 n_mask = &nodes_allowed; 3768 } else { 3769 /* 3770 * Node specific request. count adjustment happens in 3771 * set_max_huge_pages() after acquiring hugetlb_lock. 3772 */ 3773 init_nodemask_of_node(&nodes_allowed, nid); 3774 n_mask = &nodes_allowed; 3775 } 3776 3777 err = set_max_huge_pages(h, count, nid, n_mask); 3778 3779 return err ? 
err : len; 3780 } 3781 3782 static ssize_t nr_hugepages_store_common(bool obey_mempolicy, 3783 struct kobject *kobj, const char *buf, 3784 size_t len) 3785 { 3786 struct hstate *h; 3787 unsigned long count; 3788 int nid; 3789 int err; 3790 3791 err = kstrtoul(buf, 10, &count); 3792 if (err) 3793 return err; 3794 3795 h = kobj_to_hstate(kobj, &nid); 3796 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len); 3797 } 3798 3799 static ssize_t nr_hugepages_show(struct kobject *kobj, 3800 struct kobj_attribute *attr, char *buf) 3801 { 3802 return nr_hugepages_show_common(kobj, attr, buf); 3803 } 3804 3805 static ssize_t nr_hugepages_store(struct kobject *kobj, 3806 struct kobj_attribute *attr, const char *buf, size_t len) 3807 { 3808 return nr_hugepages_store_common(false, kobj, buf, len); 3809 } 3810 HSTATE_ATTR(nr_hugepages); 3811 3812 #ifdef CONFIG_NUMA 3813 3814 /* 3815 * hstate attribute for optionally mempolicy-based constraint on persistent 3816 * huge page alloc/free. 3817 */ 3818 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj, 3819 struct kobj_attribute *attr, 3820 char *buf) 3821 { 3822 return nr_hugepages_show_common(kobj, attr, buf); 3823 } 3824 3825 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj, 3826 struct kobj_attribute *attr, const char *buf, size_t len) 3827 { 3828 return nr_hugepages_store_common(true, kobj, buf, len); 3829 } 3830 HSTATE_ATTR(nr_hugepages_mempolicy); 3831 #endif 3832 3833 3834 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, 3835 struct kobj_attribute *attr, char *buf) 3836 { 3837 struct hstate *h = kobj_to_hstate(kobj, NULL); 3838 return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages); 3839 } 3840 3841 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, 3842 struct kobj_attribute *attr, const char *buf, size_t count) 3843 { 3844 int err; 3845 unsigned long input; 3846 struct hstate *h = kobj_to_hstate(kobj, NULL); 3847 3848 if (hstate_is_gigantic(h)) 3849 return -EINVAL; 3850 3851 err = kstrtoul(buf, 10, &input); 3852 if (err) 3853 return err; 3854 3855 spin_lock_irq(&hugetlb_lock); 3856 h->nr_overcommit_huge_pages = input; 3857 spin_unlock_irq(&hugetlb_lock); 3858 3859 return count; 3860 } 3861 HSTATE_ATTR(nr_overcommit_hugepages); 3862 3863 static ssize_t free_hugepages_show(struct kobject *kobj, 3864 struct kobj_attribute *attr, char *buf) 3865 { 3866 struct hstate *h; 3867 unsigned long free_huge_pages; 3868 int nid; 3869 3870 h = kobj_to_hstate(kobj, &nid); 3871 if (nid == NUMA_NO_NODE) 3872 free_huge_pages = h->free_huge_pages; 3873 else 3874 free_huge_pages = h->free_huge_pages_node[nid]; 3875 3876 return sysfs_emit(buf, "%lu\n", free_huge_pages); 3877 } 3878 HSTATE_ATTR_RO(free_hugepages); 3879 3880 static ssize_t resv_hugepages_show(struct kobject *kobj, 3881 struct kobj_attribute *attr, char *buf) 3882 { 3883 struct hstate *h = kobj_to_hstate(kobj, NULL); 3884 return sysfs_emit(buf, "%lu\n", h->resv_huge_pages); 3885 } 3886 HSTATE_ATTR_RO(resv_hugepages); 3887 3888 static ssize_t surplus_hugepages_show(struct kobject *kobj, 3889 struct kobj_attribute *attr, char *buf) 3890 { 3891 struct hstate *h; 3892 unsigned long surplus_huge_pages; 3893 int nid; 3894 3895 h = kobj_to_hstate(kobj, &nid); 3896 if (nid == NUMA_NO_NODE) 3897 surplus_huge_pages = h->surplus_huge_pages; 3898 else 3899 surplus_huge_pages = h->surplus_huge_pages_node[nid]; 3900 3901 return sysfs_emit(buf, "%lu\n", surplus_huge_pages); 3902 } 3903 HSTATE_ATTR_RO(surplus_hugepages); 3904 3905 static 
ssize_t demote_store(struct kobject *kobj, 3906 struct kobj_attribute *attr, const char *buf, size_t len) 3907 { 3908 unsigned long nr_demote; 3909 unsigned long nr_available; 3910 nodemask_t nodes_allowed, *n_mask; 3911 struct hstate *h; 3912 int err; 3913 int nid; 3914 3915 err = kstrtoul(buf, 10, &nr_demote); 3916 if (err) 3917 return err; 3918 h = kobj_to_hstate(kobj, &nid); 3919 3920 if (nid != NUMA_NO_NODE) { 3921 init_nodemask_of_node(&nodes_allowed, nid); 3922 n_mask = &nodes_allowed; 3923 } else { 3924 n_mask = &node_states[N_MEMORY]; 3925 } 3926 3927 /* Synchronize with other sysfs operations modifying huge pages */ 3928 mutex_lock(&h->resize_lock); 3929 spin_lock_irq(&hugetlb_lock); 3930 3931 while (nr_demote) { 3932 /* 3933 * Check for available pages to demote each time through the 3934 * loop as demote_pool_huge_page will drop hugetlb_lock. 3935 */ 3936 if (nid != NUMA_NO_NODE) 3937 nr_available = h->free_huge_pages_node[nid]; 3938 else 3939 nr_available = h->free_huge_pages; 3940 nr_available -= h->resv_huge_pages; 3941 if (!nr_available) 3942 break; 3943 3944 err = demote_pool_huge_page(h, n_mask); 3945 if (err) 3946 break; 3947 3948 nr_demote--; 3949 } 3950 3951 spin_unlock_irq(&hugetlb_lock); 3952 mutex_unlock(&h->resize_lock); 3953 3954 if (err) 3955 return err; 3956 return len; 3957 } 3958 HSTATE_ATTR_WO(demote); 3959 3960 static ssize_t demote_size_show(struct kobject *kobj, 3961 struct kobj_attribute *attr, char *buf) 3962 { 3963 struct hstate *h = kobj_to_hstate(kobj, NULL); 3964 unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K; 3965 3966 return sysfs_emit(buf, "%lukB\n", demote_size); 3967 } 3968 3969 static ssize_t demote_size_store(struct kobject *kobj, 3970 struct kobj_attribute *attr, 3971 const char *buf, size_t count) 3972 { 3973 struct hstate *h, *demote_hstate; 3974 unsigned long demote_size; 3975 unsigned int demote_order; 3976 3977 demote_size = (unsigned long)memparse(buf, NULL); 3978 3979 demote_hstate = size_to_hstate(demote_size); 3980 if (!demote_hstate) 3981 return -EINVAL; 3982 demote_order = demote_hstate->order; 3983 if (demote_order < HUGETLB_PAGE_ORDER) 3984 return -EINVAL; 3985 3986 /* demote order must be smaller than hstate order */ 3987 h = kobj_to_hstate(kobj, NULL); 3988 if (demote_order >= h->order) 3989 return -EINVAL; 3990 3991 /* resize_lock synchronizes access to demote size and writes */ 3992 mutex_lock(&h->resize_lock); 3993 h->demote_order = demote_order; 3994 mutex_unlock(&h->resize_lock); 3995 3996 return count; 3997 } 3998 HSTATE_ATTR(demote_size); 3999 4000 static struct attribute *hstate_attrs[] = { 4001 &nr_hugepages_attr.attr, 4002 &nr_overcommit_hugepages_attr.attr, 4003 &free_hugepages_attr.attr, 4004 &resv_hugepages_attr.attr, 4005 &surplus_hugepages_attr.attr, 4006 #ifdef CONFIG_NUMA 4007 &nr_hugepages_mempolicy_attr.attr, 4008 #endif 4009 NULL, 4010 }; 4011 4012 static const struct attribute_group hstate_attr_group = { 4013 .attrs = hstate_attrs, 4014 }; 4015 4016 static struct attribute *hstate_demote_attrs[] = { 4017 &demote_size_attr.attr, 4018 &demote_attr.attr, 4019 NULL, 4020 }; 4021 4022 static const struct attribute_group hstate_demote_attr_group = { 4023 .attrs = hstate_demote_attrs, 4024 }; 4025 4026 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, 4027 struct kobject **hstate_kobjs, 4028 const struct attribute_group *hstate_attr_group) 4029 { 4030 int retval; 4031 int hi = hstate_index(h); 4032 4033 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
4034 if (!hstate_kobjs[hi]) 4035 return -ENOMEM; 4036 4037 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group); 4038 if (retval) { 4039 kobject_put(hstate_kobjs[hi]); 4040 hstate_kobjs[hi] = NULL; 4041 return retval; 4042 } 4043 4044 if (h->demote_order) { 4045 retval = sysfs_create_group(hstate_kobjs[hi], 4046 &hstate_demote_attr_group); 4047 if (retval) { 4048 pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name); 4049 sysfs_remove_group(hstate_kobjs[hi], hstate_attr_group); 4050 kobject_put(hstate_kobjs[hi]); 4051 hstate_kobjs[hi] = NULL; 4052 return retval; 4053 } 4054 } 4055 4056 return 0; 4057 } 4058 4059 #ifdef CONFIG_NUMA 4060 static bool hugetlb_sysfs_initialized __ro_after_init; 4061 4062 /* 4063 * node_hstate/s - associate per node hstate attributes, via their kobjects, 4064 * with node devices in node_devices[] using a parallel array. The array 4065 * index of a node device or _hstate == node id. 4066 * This is here to avoid any static dependency of the node device driver, in 4067 * the base kernel, on the hugetlb module. 4068 */ 4069 struct node_hstate { 4070 struct kobject *hugepages_kobj; 4071 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 4072 }; 4073 static struct node_hstate node_hstates[MAX_NUMNODES]; 4074 4075 /* 4076 * A subset of global hstate attributes for node devices 4077 */ 4078 static struct attribute *per_node_hstate_attrs[] = { 4079 &nr_hugepages_attr.attr, 4080 &free_hugepages_attr.attr, 4081 &surplus_hugepages_attr.attr, 4082 NULL, 4083 }; 4084 4085 static const struct attribute_group per_node_hstate_attr_group = { 4086 .attrs = per_node_hstate_attrs, 4087 }; 4088 4089 /* 4090 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj. 4091 * Returns node id via non-NULL nidp. 4092 */ 4093 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 4094 { 4095 int nid; 4096 4097 for (nid = 0; nid < nr_node_ids; nid++) { 4098 struct node_hstate *nhs = &node_hstates[nid]; 4099 int i; 4100 for (i = 0; i < HUGE_MAX_HSTATE; i++) 4101 if (nhs->hstate_kobjs[i] == kobj) { 4102 if (nidp) 4103 *nidp = nid; 4104 return &hstates[i]; 4105 } 4106 } 4107 4108 BUG(); 4109 return NULL; 4110 } 4111 4112 /* 4113 * Unregister hstate attributes from a single node device. 4114 * No-op if no hstate attributes attached. 4115 */ 4116 void hugetlb_unregister_node(struct node *node) 4117 { 4118 struct hstate *h; 4119 struct node_hstate *nhs = &node_hstates[node->dev.id]; 4120 4121 if (!nhs->hugepages_kobj) 4122 return; /* no hstate attributes */ 4123 4124 for_each_hstate(h) { 4125 int idx = hstate_index(h); 4126 struct kobject *hstate_kobj = nhs->hstate_kobjs[idx]; 4127 4128 if (!hstate_kobj) 4129 continue; 4130 if (h->demote_order) 4131 sysfs_remove_group(hstate_kobj, &hstate_demote_attr_group); 4132 sysfs_remove_group(hstate_kobj, &per_node_hstate_attr_group); 4133 kobject_put(hstate_kobj); 4134 nhs->hstate_kobjs[idx] = NULL; 4135 } 4136 4137 kobject_put(nhs->hugepages_kobj); 4138 nhs->hugepages_kobj = NULL; 4139 } 4140 4141 4142 /* 4143 * Register hstate attributes for a single node device. 4144 * No-op if attributes already registered. 
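 * As an illustrative example (the path layout is assumed from the node device kobjects, not spelled out in this file): after registration, node 1's 2MB pool is expected to appear as /sys/devices/system/node/node1/hugepages/hugepages-2048kB/, carrying the per-node subset of attributes (nr_hugepages, free_hugepages, surplus_hugepages) listed in per_node_hstate_attrs above.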
4145 */ 4146 void hugetlb_register_node(struct node *node) 4147 { 4148 struct hstate *h; 4149 struct node_hstate *nhs = &node_hstates[node->dev.id]; 4150 int err; 4151 4152 if (!hugetlb_sysfs_initialized) 4153 return; 4154 4155 if (nhs->hugepages_kobj) 4156 return; /* already allocated */ 4157 4158 nhs->hugepages_kobj = kobject_create_and_add("hugepages", 4159 &node->dev.kobj); 4160 if (!nhs->hugepages_kobj) 4161 return; 4162 4163 for_each_hstate(h) { 4164 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, 4165 nhs->hstate_kobjs, 4166 &per_node_hstate_attr_group); 4167 if (err) { 4168 pr_err("HugeTLB: Unable to add hstate %s for node %d\n", 4169 h->name, node->dev.id); 4170 hugetlb_unregister_node(node); 4171 break; 4172 } 4173 } 4174 } 4175 4176 /* 4177 * hugetlb init time: register hstate attributes for all registered node 4178 * devices of nodes that have memory. All on-line nodes should have 4179 * registered their associated device by this time. 4180 */ 4181 static void __init hugetlb_register_all_nodes(void) 4182 { 4183 int nid; 4184 4185 for_each_online_node(nid) 4186 hugetlb_register_node(node_devices[nid]); 4187 } 4188 #else /* !CONFIG_NUMA */ 4189 4190 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 4191 { 4192 BUG(); 4193 if (nidp) 4194 *nidp = -1; 4195 return NULL; 4196 } 4197 4198 static void hugetlb_register_all_nodes(void) { } 4199 4200 #endif 4201 4202 #ifdef CONFIG_CMA 4203 static void __init hugetlb_cma_check(void); 4204 #else 4205 static inline __init void hugetlb_cma_check(void) 4206 { 4207 } 4208 #endif 4209 4210 static void __init hugetlb_sysfs_init(void) 4211 { 4212 struct hstate *h; 4213 int err; 4214 4215 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj); 4216 if (!hugepages_kobj) 4217 return; 4218 4219 for_each_hstate(h) { 4220 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, 4221 hstate_kobjs, &hstate_attr_group); 4222 if (err) 4223 pr_err("HugeTLB: Unable to add hstate %s", h->name); 4224 } 4225 4226 #ifdef CONFIG_NUMA 4227 hugetlb_sysfs_initialized = true; 4228 #endif 4229 hugetlb_register_all_nodes(); 4230 } 4231 4232 #ifdef CONFIG_SYSCTL 4233 static void hugetlb_sysctl_init(void); 4234 #else 4235 static inline void hugetlb_sysctl_init(void) { } 4236 #endif 4237 4238 static int __init hugetlb_init(void) 4239 { 4240 int i; 4241 4242 BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE < 4243 __NR_HPAGEFLAGS); 4244 4245 if (!hugepages_supported()) { 4246 if (hugetlb_max_hstate || default_hstate_max_huge_pages) 4247 pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n"); 4248 return 0; 4249 } 4250 4251 /* 4252 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists. Some 4253 * architectures depend on setup being done here. 4254 */ 4255 hugetlb_add_hstate(HUGETLB_PAGE_ORDER); 4256 if (!parsed_default_hugepagesz) { 4257 /* 4258 * If we did not parse a default huge page size, set 4259 * default_hstate_idx to HPAGE_SIZE hstate. And, if the 4260 * number of huge pages for this default size was implicitly 4261 * specified, set that here as well. 4262 * Note that the implicit setting will overwrite an explicit 4263 * setting. A warning will be printed in this case. 
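 * For instance (assuming an architecture whose HPAGE_SIZE is 2MB), the command line "hugepages=256 hugepagesz=2M hugepages=128" reaches this point with an implicit count of 256 and an explicit count of 128 for the same hstate; the implicit 256 wins and the pr_warn() calls below report the ignored value.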
4264 */ 4265 default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE)); 4266 if (default_hstate_max_huge_pages) { 4267 if (default_hstate.max_huge_pages) { 4268 char buf[32]; 4269 4270 string_get_size(huge_page_size(&default_hstate), 4271 1, STRING_UNITS_2, buf, 32); 4272 pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n", 4273 default_hstate.max_huge_pages, buf); 4274 pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n", 4275 default_hstate_max_huge_pages); 4276 } 4277 default_hstate.max_huge_pages = 4278 default_hstate_max_huge_pages; 4279 4280 for_each_online_node(i) 4281 default_hstate.max_huge_pages_node[i] = 4282 default_hugepages_in_node[i]; 4283 } 4284 } 4285 4286 hugetlb_cma_check(); 4287 hugetlb_init_hstates(); 4288 gather_bootmem_prealloc(); 4289 report_hugepages(); 4290 4291 hugetlb_sysfs_init(); 4292 hugetlb_cgroup_file_init(); 4293 hugetlb_sysctl_init(); 4294 4295 #ifdef CONFIG_SMP 4296 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus()); 4297 #else 4298 num_fault_mutexes = 1; 4299 #endif 4300 hugetlb_fault_mutex_table = 4301 kmalloc_array(num_fault_mutexes, sizeof(struct mutex), 4302 GFP_KERNEL); 4303 BUG_ON(!hugetlb_fault_mutex_table); 4304 4305 for (i = 0; i < num_fault_mutexes; i++) 4306 mutex_init(&hugetlb_fault_mutex_table[i]); 4307 return 0; 4308 } 4309 subsys_initcall(hugetlb_init); 4310 4311 /* Overwritten by architectures with more huge page sizes */ 4312 bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size) 4313 { 4314 return size == HPAGE_SIZE; 4315 } 4316 4317 void __init hugetlb_add_hstate(unsigned int order) 4318 { 4319 struct hstate *h; 4320 unsigned long i; 4321 4322 if (size_to_hstate(PAGE_SIZE << order)) { 4323 return; 4324 } 4325 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE); 4326 BUG_ON(order == 0); 4327 h = &hstates[hugetlb_max_hstate++]; 4328 mutex_init(&h->resize_lock); 4329 h->order = order; 4330 h->mask = ~(huge_page_size(h) - 1); 4331 for (i = 0; i < MAX_NUMNODES; ++i) 4332 INIT_LIST_HEAD(&h->hugepage_freelists[i]); 4333 INIT_LIST_HEAD(&h->hugepage_activelist); 4334 h->next_nid_to_alloc = first_memory_node; 4335 h->next_nid_to_free = first_memory_node; 4336 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", 4337 huge_page_size(h)/SZ_1K); 4338 4339 parsed_hstate = h; 4340 } 4341 4342 bool __init __weak hugetlb_node_alloc_supported(void) 4343 { 4344 return true; 4345 } 4346 4347 static void __init hugepages_clear_pages_in_node(void) 4348 { 4349 if (!hugetlb_max_hstate) { 4350 default_hstate_max_huge_pages = 0; 4351 memset(default_hugepages_in_node, 0, 4352 sizeof(default_hugepages_in_node)); 4353 } else { 4354 parsed_hstate->max_huge_pages = 0; 4355 memset(parsed_hstate->max_huge_pages_node, 0, 4356 sizeof(parsed_hstate->max_huge_pages_node)); 4357 } 4358 } 4359 4360 /* 4361 * hugepages command line processing 4362 * hugepages normally follows a valid hugepagesz or default_hugepagesz 4363 * specification. If not, ignore the hugepages value. hugepages can also 4364 * be the first huge page command line option in which case it implicitly 4365 * specifies the number of huge pages for the default size.
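 * For example, "hugepagesz=2M hugepages=512" requests 512 pages of the 2MB size, while a leading bare "hugepages=64" applies to the default size. Where the architecture supports node specific allocation, a per-node form such as "hugepages=0:128,1:128" asks for 128 pages on each of nodes 0 and 1, handled by the node-format branch of hugepages_setup() below.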
4366 */ 4367 static int __init hugepages_setup(char *s) 4368 { 4369 unsigned long *mhp; 4370 static unsigned long *last_mhp; 4371 int node = NUMA_NO_NODE; 4372 int count; 4373 unsigned long tmp; 4374 char *p = s; 4375 4376 if (!parsed_valid_hugepagesz) { 4377 pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s); 4378 parsed_valid_hugepagesz = true; 4379 return 1; 4380 } 4381 4382 /* 4383 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter 4384 * yet, so this hugepages= parameter goes to the "default hstate". 4385 * Otherwise, it goes with the previously parsed hugepagesz or 4386 * default_hugepagesz. 4387 */ 4388 else if (!hugetlb_max_hstate) 4389 mhp = &default_hstate_max_huge_pages; 4390 else 4391 mhp = &parsed_hstate->max_huge_pages; 4392 4393 if (mhp == last_mhp) { 4394 pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s); 4395 return 1; 4396 } 4397 4398 while (*p) { 4399 count = 0; 4400 if (sscanf(p, "%lu%n", &tmp, &count) != 1) 4401 goto invalid; 4402 /* Parameter is node format */ 4403 if (p[count] == ':') { 4404 if (!hugetlb_node_alloc_supported()) { 4405 pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n"); 4406 return 1; 4407 } 4408 if (tmp >= MAX_NUMNODES || !node_online(tmp)) 4409 goto invalid; 4410 node = array_index_nospec(tmp, MAX_NUMNODES); 4411 p += count + 1; 4412 /* Parse hugepages */ 4413 if (sscanf(p, "%lu%n", &tmp, &count) != 1) 4414 goto invalid; 4415 if (!hugetlb_max_hstate) 4416 default_hugepages_in_node[node] = tmp; 4417 else 4418 parsed_hstate->max_huge_pages_node[node] = tmp; 4419 *mhp += tmp; 4420 /* Go to parse next node*/ 4421 if (p[count] == ',') 4422 p += count + 1; 4423 else 4424 break; 4425 } else { 4426 if (p != s) 4427 goto invalid; 4428 *mhp = tmp; 4429 break; 4430 } 4431 } 4432 4433 /* 4434 * Global state is always initialized later in hugetlb_init. 4435 * But we need to allocate gigantic hstates here early to still 4436 * use the bootmem allocator. 4437 */ 4438 if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate)) 4439 hugetlb_hstate_alloc_pages(parsed_hstate); 4440 4441 last_mhp = mhp; 4442 4443 return 1; 4444 4445 invalid: 4446 pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p); 4447 hugepages_clear_pages_in_node(); 4448 return 1; 4449 } 4450 __setup("hugepages=", hugepages_setup); 4451 4452 /* 4453 * hugepagesz command line processing 4454 * A specific huge page size can only be specified once with hugepagesz. 4455 * hugepagesz is followed by hugepages on the command line. The global 4456 * variable 'parsed_valid_hugepagesz' is used to determine if prior 4457 * hugepagesz argument was valid. 4458 */ 4459 static int __init hugepagesz_setup(char *s) 4460 { 4461 unsigned long size; 4462 struct hstate *h; 4463 4464 parsed_valid_hugepagesz = false; 4465 size = (unsigned long)memparse(s, NULL); 4466 4467 if (!arch_hugetlb_valid_size(size)) { 4468 pr_err("HugeTLB: unsupported hugepagesz=%s\n", s); 4469 return 1; 4470 } 4471 4472 h = size_to_hstate(size); 4473 if (h) { 4474 /* 4475 * hstate for this size already exists. This is normally 4476 * an error, but is allowed if the existing hstate is the 4477 * default hstate. More specifically, it is only allowed if 4478 * the number of huge pages for the default hstate was not 4479 * previously specified. 
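 * Concretely (illustrative command lines, not taken from documentation): "hugepagesz=2M hugepages=8 hugepagesz=2M" is rejected as a duplicate, whereas "default_hugepagesz=1G hugepagesz=1G hugepages=4" is accepted because the 1GB hstate exists only as the default size and no page count was given for it earlier.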
4480 */ 4481 if (!parsed_default_hugepagesz || h != &default_hstate || 4482 default_hstate.max_huge_pages) { 4483 pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s); 4484 return 1; 4485 } 4486 4487 /* 4488 * No need to call hugetlb_add_hstate() as hstate already 4489 * exists. But, do set parsed_hstate so that a following 4490 * hugepages= parameter will be applied to this hstate. 4491 */ 4492 parsed_hstate = h; 4493 parsed_valid_hugepagesz = true; 4494 return 1; 4495 } 4496 4497 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); 4498 parsed_valid_hugepagesz = true; 4499 return 1; 4500 } 4501 __setup("hugepagesz=", hugepagesz_setup); 4502 4503 /* 4504 * default_hugepagesz command line input 4505 * Only one instance of default_hugepagesz allowed on command line. 4506 */ 4507 static int __init default_hugepagesz_setup(char *s) 4508 { 4509 unsigned long size; 4510 int i; 4511 4512 parsed_valid_hugepagesz = false; 4513 if (parsed_default_hugepagesz) { 4514 pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s); 4515 return 1; 4516 } 4517 4518 size = (unsigned long)memparse(s, NULL); 4519 4520 if (!arch_hugetlb_valid_size(size)) { 4521 pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s); 4522 return 1; 4523 } 4524 4525 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); 4526 parsed_valid_hugepagesz = true; 4527 parsed_default_hugepagesz = true; 4528 default_hstate_idx = hstate_index(size_to_hstate(size)); 4529 4530 /* 4531 * The number of default huge pages (for this size) could have been 4532 * specified as the first hugetlb parameter: hugepages=X. If so, 4533 * then default_hstate_max_huge_pages is set. If the default huge 4534 * page size is gigantic (> MAX_ORDER), then the pages must be 4535 * allocated here from bootmem allocator. 
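 * For example, with "hugepages=4 default_hugepagesz=1G" the count of 4 is recorded before the size is known; it is applied to the 1GB hstate here and, because 1GB pages are gigantic, they are reserved from the bootmem allocator at this point rather than at runtime.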
4536 */ 4537 if (default_hstate_max_huge_pages) { 4538 default_hstate.max_huge_pages = default_hstate_max_huge_pages; 4539 for_each_online_node(i) 4540 default_hstate.max_huge_pages_node[i] = 4541 default_hugepages_in_node[i]; 4542 if (hstate_is_gigantic(&default_hstate)) 4543 hugetlb_hstate_alloc_pages(&default_hstate); 4544 default_hstate_max_huge_pages = 0; 4545 } 4546 4547 return 1; 4548 } 4549 __setup("default_hugepagesz=", default_hugepagesz_setup); 4550 4551 static nodemask_t *policy_mbind_nodemask(gfp_t gfp) 4552 { 4553 #ifdef CONFIG_NUMA 4554 struct mempolicy *mpol = get_task_policy(current); 4555 4556 /* 4557 * Only enforce MPOL_BIND policy which overlaps with cpuset policy 4558 * (from policy_nodemask) specifically for hugetlb case 4559 */ 4560 if (mpol->mode == MPOL_BIND && 4561 (apply_policy_zone(mpol, gfp_zone(gfp)) && 4562 cpuset_nodemask_valid_mems_allowed(&mpol->nodes))) 4563 return &mpol->nodes; 4564 #endif 4565 return NULL; 4566 } 4567 4568 static unsigned int allowed_mems_nr(struct hstate *h) 4569 { 4570 int node; 4571 unsigned int nr = 0; 4572 nodemask_t *mbind_nodemask; 4573 unsigned int *array = h->free_huge_pages_node; 4574 gfp_t gfp_mask = htlb_alloc_mask(h); 4575 4576 mbind_nodemask = policy_mbind_nodemask(gfp_mask); 4577 for_each_node_mask(node, cpuset_current_mems_allowed) { 4578 if (!mbind_nodemask || node_isset(node, *mbind_nodemask)) 4579 nr += array[node]; 4580 } 4581 4582 return nr; 4583 } 4584 4585 #ifdef CONFIG_SYSCTL 4586 static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write, 4587 void *buffer, size_t *length, 4588 loff_t *ppos, unsigned long *out) 4589 { 4590 struct ctl_table dup_table; 4591 4592 /* 4593 * In order to avoid races with __do_proc_doulongvec_minmax(), we 4594 * can duplicate the @table and alter the duplicate of it. 
4595 */ 4596 dup_table = *table; 4597 dup_table.data = out; 4598 4599 return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos); 4600 } 4601 4602 static int hugetlb_sysctl_handler_common(bool obey_mempolicy, 4603 struct ctl_table *table, int write, 4604 void *buffer, size_t *length, loff_t *ppos) 4605 { 4606 struct hstate *h = &default_hstate; 4607 unsigned long tmp = h->max_huge_pages; 4608 int ret; 4609 4610 if (!hugepages_supported()) 4611 return -EOPNOTSUPP; 4612 4613 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, 4614 &tmp); 4615 if (ret) 4616 goto out; 4617 4618 if (write) 4619 ret = __nr_hugepages_store_common(obey_mempolicy, h, 4620 NUMA_NO_NODE, tmp, *length); 4621 out: 4622 return ret; 4623 } 4624 4625 static int hugetlb_sysctl_handler(struct ctl_table *table, int write, 4626 void *buffer, size_t *length, loff_t *ppos) 4627 { 4628 4629 return hugetlb_sysctl_handler_common(false, table, write, 4630 buffer, length, ppos); 4631 } 4632 4633 #ifdef CONFIG_NUMA 4634 static int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write, 4635 void *buffer, size_t *length, loff_t *ppos) 4636 { 4637 return hugetlb_sysctl_handler_common(true, table, write, 4638 buffer, length, ppos); 4639 } 4640 #endif /* CONFIG_NUMA */ 4641 4642 static int hugetlb_overcommit_handler(struct ctl_table *table, int write, 4643 void *buffer, size_t *length, loff_t *ppos) 4644 { 4645 struct hstate *h = &default_hstate; 4646 unsigned long tmp; 4647 int ret; 4648 4649 if (!hugepages_supported()) 4650 return -EOPNOTSUPP; 4651 4652 tmp = h->nr_overcommit_huge_pages; 4653 4654 if (write && hstate_is_gigantic(h)) 4655 return -EINVAL; 4656 4657 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, 4658 &tmp); 4659 if (ret) 4660 goto out; 4661 4662 if (write) { 4663 spin_lock_irq(&hugetlb_lock); 4664 h->nr_overcommit_huge_pages = tmp; 4665 spin_unlock_irq(&hugetlb_lock); 4666 } 4667 out: 4668 return ret; 4669 } 4670 4671 static struct ctl_table hugetlb_table[] = { 4672 { 4673 .procname = "nr_hugepages", 4674 .data = NULL, 4675 .maxlen = sizeof(unsigned long), 4676 .mode = 0644, 4677 .proc_handler = hugetlb_sysctl_handler, 4678 }, 4679 #ifdef CONFIG_NUMA 4680 { 4681 .procname = "nr_hugepages_mempolicy", 4682 .data = NULL, 4683 .maxlen = sizeof(unsigned long), 4684 .mode = 0644, 4685 .proc_handler = &hugetlb_mempolicy_sysctl_handler, 4686 }, 4687 #endif 4688 { 4689 .procname = "hugetlb_shm_group", 4690 .data = &sysctl_hugetlb_shm_group, 4691 .maxlen = sizeof(gid_t), 4692 .mode = 0644, 4693 .proc_handler = proc_dointvec, 4694 }, 4695 { 4696 .procname = "nr_overcommit_hugepages", 4697 .data = NULL, 4698 .maxlen = sizeof(unsigned long), 4699 .mode = 0644, 4700 .proc_handler = hugetlb_overcommit_handler, 4701 }, 4702 { } 4703 }; 4704 4705 static void hugetlb_sysctl_init(void) 4706 { 4707 register_sysctl_init("vm", hugetlb_table); 4708 } 4709 #endif /* CONFIG_SYSCTL */ 4710 4711 void hugetlb_report_meminfo(struct seq_file *m) 4712 { 4713 struct hstate *h; 4714 unsigned long total = 0; 4715 4716 if (!hugepages_supported()) 4717 return; 4718 4719 for_each_hstate(h) { 4720 unsigned long count = h->nr_huge_pages; 4721 4722 total += huge_page_size(h) * count; 4723 4724 if (h == &default_hstate) 4725 seq_printf(m, 4726 "HugePages_Total: %5lu\n" 4727 "HugePages_Free: %5lu\n" 4728 "HugePages_Rsvd: %5lu\n" 4729 "HugePages_Surp: %5lu\n" 4730 "Hugepagesize: %8lu kB\n", 4731 count, 4732 h->free_huge_pages, 4733 h->resv_huge_pages, 4734 h->surplus_huge_pages, 4735 
huge_page_size(h) / SZ_1K); 4736 } 4737 4738 seq_printf(m, "Hugetlb: %8lu kB\n", total / SZ_1K); 4739 } 4740 4741 int hugetlb_report_node_meminfo(char *buf, int len, int nid) 4742 { 4743 struct hstate *h = &default_hstate; 4744 4745 if (!hugepages_supported()) 4746 return 0; 4747 4748 return sysfs_emit_at(buf, len, 4749 "Node %d HugePages_Total: %5u\n" 4750 "Node %d HugePages_Free: %5u\n" 4751 "Node %d HugePages_Surp: %5u\n", 4752 nid, h->nr_huge_pages_node[nid], 4753 nid, h->free_huge_pages_node[nid], 4754 nid, h->surplus_huge_pages_node[nid]); 4755 } 4756 4757 void hugetlb_show_meminfo_node(int nid) 4758 { 4759 struct hstate *h; 4760 4761 if (!hugepages_supported()) 4762 return; 4763 4764 for_each_hstate(h) 4765 printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n", 4766 nid, 4767 h->nr_huge_pages_node[nid], 4768 h->free_huge_pages_node[nid], 4769 h->surplus_huge_pages_node[nid], 4770 huge_page_size(h) / SZ_1K); 4771 } 4772 4773 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm) 4774 { 4775 seq_printf(m, "HugetlbPages:\t%8lu kB\n", 4776 K(atomic_long_read(&mm->hugetlb_usage))); 4777 } 4778 4779 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */ 4780 unsigned long hugetlb_total_pages(void) 4781 { 4782 struct hstate *h; 4783 unsigned long nr_total_pages = 0; 4784 4785 for_each_hstate(h) 4786 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); 4787 return nr_total_pages; 4788 } 4789 4790 static int hugetlb_acct_memory(struct hstate *h, long delta) 4791 { 4792 int ret = -ENOMEM; 4793 4794 if (!delta) 4795 return 0; 4796 4797 spin_lock_irq(&hugetlb_lock); 4798 /* 4799 * When cpuset is configured, it breaks the strict hugetlb page 4800 * reservation as the accounting is done on a global variable. Such 4801 * reservation is completely rubbish in the presence of cpuset because 4802 * the reservation is not checked against page availability for the 4803 * current cpuset. The application can still potentially be OOM'ed by the 4804 * kernel for lack of a free htlb page in the cpuset that the task is in. 4805 * Attempting to enforce strict accounting with cpuset is almost 4806 * impossible (or too ugly) because cpuset is so fluid that a 4807 * task or memory node can be dynamically moved between cpusets. 4808 * 4809 * The change of semantics for shared hugetlb mapping with cpuset is 4810 * undesirable. However, in order to preserve some of the semantics, 4811 * we fall back to check against current free page availability as 4812 * a best attempt and hopefully to minimize the impact of changing 4813 * semantics that cpuset has. 4814 * 4815 * Apart from cpuset, we also have the memory policy mechanism, which 4816 * also determines from which node the kernel will allocate memory 4817 * in a NUMA system. So, similar to cpuset, we should also consider 4818 * the memory policy of the current task, as described 4819 * above. 4820 */ 4821 if (delta > 0) { 4822 if (gather_surplus_pages(h, delta) < 0) 4823 goto out; 4824 4825 if (delta > allowed_mems_nr(h)) { 4826 return_unused_surplus_pages(h, delta); 4827 goto out; 4828 } 4829 } 4830 4831 ret = 0; 4832 if (delta < 0) 4833 return_unused_surplus_pages(h, (unsigned long) -delta); 4834 4835 out: 4836 spin_unlock_irq(&hugetlb_lock); 4837 return ret; 4838 } 4839 4840 static void hugetlb_vm_op_open(struct vm_area_struct *vma) 4841 { 4842 struct resv_map *resv = vma_resv_map(vma); 4843 4844 /* 4845 * HPAGE_RESV_OWNER indicates a private mapping.
4846 * This new VMA should share its sibling's reservation map if present. 4847 * The VMA will only ever have a valid reservation map pointer where 4848 * it is being copied for another still existing VMA. As that VMA 4849 * has a reference to the reservation map it cannot disappear until 4850 * after this open call completes. It is therefore safe to take a 4851 * new reference here without additional locking. 4852 */ 4853 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { 4854 resv_map_dup_hugetlb_cgroup_uncharge_info(resv); 4855 kref_get(&resv->refs); 4856 } 4857 4858 /* 4859 * vma_lock structure for sharable mappings is vma specific. 4860 * Clear old pointer (if copied via vm_area_dup) and allocate 4861 * new structure. Before clearing, make sure vma_lock is not 4862 * for this vma. 4863 */ 4864 if (vma->vm_flags & VM_MAYSHARE) { 4865 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; 4866 4867 if (vma_lock) { 4868 if (vma_lock->vma != vma) { 4869 vma->vm_private_data = NULL; 4870 hugetlb_vma_lock_alloc(vma); 4871 } else 4872 pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__); 4873 } else 4874 hugetlb_vma_lock_alloc(vma); 4875 } 4876 } 4877 4878 static void hugetlb_vm_op_close(struct vm_area_struct *vma) 4879 { 4880 struct hstate *h = hstate_vma(vma); 4881 struct resv_map *resv; 4882 struct hugepage_subpool *spool = subpool_vma(vma); 4883 unsigned long reserve, start, end; 4884 long gbl_reserve; 4885 4886 hugetlb_vma_lock_free(vma); 4887 4888 resv = vma_resv_map(vma); 4889 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER)) 4890 return; 4891 4892 start = vma_hugecache_offset(h, vma, vma->vm_start); 4893 end = vma_hugecache_offset(h, vma, vma->vm_end); 4894 4895 reserve = (end - start) - region_count(resv, start, end); 4896 hugetlb_cgroup_uncharge_counter(resv, start, end); 4897 if (reserve) { 4898 /* 4899 * Decrement reserve counts. The global reserve count may be 4900 * adjusted if the subpool has a minimum size. 4901 */ 4902 gbl_reserve = hugepage_subpool_put_pages(spool, reserve); 4903 hugetlb_acct_memory(h, -gbl_reserve); 4904 } 4905 4906 kref_put(&resv->refs, resv_map_release); 4907 } 4908 4909 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr) 4910 { 4911 if (addr & ~(huge_page_mask(hstate_vma(vma)))) 4912 return -EINVAL; 4913 4914 /* 4915 * PMD sharing is only possible for PUD_SIZE-aligned address ranges 4916 * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this 4917 * split, unshare PMDs in the PUD_SIZE interval surrounding addr now. 4918 */ 4919 if (addr & ~PUD_MASK) { 4920 /* 4921 * hugetlb_vm_op_split is called right before we attempt to 4922 * split the VMA. We will need to unshare PMDs in the old and 4923 * new VMAs, so let's unshare before we split. 4924 */ 4925 unsigned long floor = addr & PUD_MASK; 4926 unsigned long ceil = floor + PUD_SIZE; 4927 4928 if (floor >= vma->vm_start && ceil <= vma->vm_end) 4929 hugetlb_unshare_pmds(vma, floor, ceil); 4930 } 4931 4932 return 0; 4933 } 4934 4935 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma) 4936 { 4937 return huge_page_size(hstate_vma(vma)); 4938 } 4939 4940 /* 4941 * We cannot handle pagefaults against hugetlb pages at all. They cause 4942 * handle_mm_fault() to try to instantiate regular-sized pages in the 4943 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get 4944 * this far.
4945 */ 4946 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf) 4947 { 4948 BUG(); 4949 return 0; 4950 } 4951 4952 /* 4953 * When a new function is introduced to vm_operations_struct and added 4954 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops. 4955 * This is because under System V memory model, mappings created via 4956 * shmget/shmat with "huge page" specified are backed by hugetlbfs files, 4957 * their original vm_ops are overwritten with shm_vm_ops. 4958 */ 4959 const struct vm_operations_struct hugetlb_vm_ops = { 4960 .fault = hugetlb_vm_op_fault, 4961 .open = hugetlb_vm_op_open, 4962 .close = hugetlb_vm_op_close, 4963 .may_split = hugetlb_vm_op_split, 4964 .pagesize = hugetlb_vm_op_pagesize, 4965 }; 4966 4967 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, 4968 int writable) 4969 { 4970 pte_t entry; 4971 unsigned int shift = huge_page_shift(hstate_vma(vma)); 4972 4973 if (writable) { 4974 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page, 4975 vma->vm_page_prot))); 4976 } else { 4977 entry = huge_pte_wrprotect(mk_huge_pte(page, 4978 vma->vm_page_prot)); 4979 } 4980 entry = pte_mkyoung(entry); 4981 entry = arch_make_huge_pte(entry, shift, vma->vm_flags); 4982 4983 return entry; 4984 } 4985 4986 static void set_huge_ptep_writable(struct vm_area_struct *vma, 4987 unsigned long address, pte_t *ptep) 4988 { 4989 pte_t entry; 4990 4991 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep))); 4992 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) 4993 update_mmu_cache(vma, address, ptep); 4994 } 4995 4996 bool is_hugetlb_entry_migration(pte_t pte) 4997 { 4998 swp_entry_t swp; 4999 5000 if (huge_pte_none(pte) || pte_present(pte)) 5001 return false; 5002 swp = pte_to_swp_entry(pte); 5003 if (is_migration_entry(swp)) 5004 return true; 5005 else 5006 return false; 5007 } 5008 5009 static bool is_hugetlb_entry_hwpoisoned(pte_t pte) 5010 { 5011 swp_entry_t swp; 5012 5013 if (huge_pte_none(pte) || pte_present(pte)) 5014 return false; 5015 swp = pte_to_swp_entry(pte); 5016 if (is_hwpoison_entry(swp)) 5017 return true; 5018 else 5019 return false; 5020 } 5021 5022 static void 5023 hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr, 5024 struct folio *new_folio, pte_t old) 5025 { 5026 pte_t newpte = make_huge_pte(vma, &new_folio->page, 1); 5027 5028 __folio_mark_uptodate(new_folio); 5029 hugepage_add_new_anon_rmap(new_folio, vma, addr); 5030 if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old)) 5031 newpte = huge_pte_mkuffd_wp(newpte); 5032 set_huge_pte_at(vma->vm_mm, addr, ptep, newpte); 5033 hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm); 5034 folio_set_hugetlb_migratable(new_folio); 5035 } 5036 5037 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 5038 struct vm_area_struct *dst_vma, 5039 struct vm_area_struct *src_vma) 5040 { 5041 pte_t *src_pte, *dst_pte, entry; 5042 struct folio *pte_folio; 5043 unsigned long addr; 5044 bool cow = is_cow_mapping(src_vma->vm_flags); 5045 struct hstate *h = hstate_vma(src_vma); 5046 unsigned long sz = huge_page_size(h); 5047 unsigned long npages = pages_per_huge_page(h); 5048 struct mmu_notifier_range range; 5049 unsigned long last_addr_mask; 5050 int ret = 0; 5051 5052 if (cow) { 5053 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src, 5054 src_vma->vm_start, 5055 src_vma->vm_end); 5056 mmu_notifier_invalidate_range_start(&range); 5057 vma_assert_write_locked(src_vma); 5058 
raw_write_seqcount_begin(&src->write_protect_seq); 5059 } else { 5060 /* 5061 * For shared mappings the vma lock must be held before 5062 * calling hugetlb_walk() in the src vma. Otherwise, the 5063 * returned ptep could go away if part of a shared pmd and 5064 * another thread calls huge_pmd_unshare. 5065 */ 5066 hugetlb_vma_lock_read(src_vma); 5067 } 5068 5069 last_addr_mask = hugetlb_mask_last_page(h); 5070 for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) { 5071 spinlock_t *src_ptl, *dst_ptl; 5072 src_pte = hugetlb_walk(src_vma, addr, sz); 5073 if (!src_pte) { 5074 addr |= last_addr_mask; 5075 continue; 5076 } 5077 dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz); 5078 if (!dst_pte) { 5079 ret = -ENOMEM; 5080 break; 5081 } 5082 5083 /* 5084 * If the pagetables are shared don't copy or take references. 5085 * 5086 * dst_pte == src_pte is the common case of src/dest sharing. 5087 * However, src could have 'unshared' and dst shares with 5088 * another vma. So page_count of ptep page is checked instead 5089 * to reliably determine whether pte is shared. 5090 */ 5091 if (page_count(virt_to_page(dst_pte)) > 1) { 5092 addr |= last_addr_mask; 5093 continue; 5094 } 5095 5096 dst_ptl = huge_pte_lock(h, dst, dst_pte); 5097 src_ptl = huge_pte_lockptr(h, src, src_pte); 5098 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 5099 entry = huge_ptep_get(src_pte); 5100 again: 5101 if (huge_pte_none(entry)) { 5102 /* 5103 * Skip if src entry none. 5104 */ 5105 ; 5106 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) { 5107 if (!userfaultfd_wp(dst_vma)) 5108 entry = huge_pte_clear_uffd_wp(entry); 5109 set_huge_pte_at(dst, addr, dst_pte, entry); 5110 } else if (unlikely(is_hugetlb_entry_migration(entry))) { 5111 swp_entry_t swp_entry = pte_to_swp_entry(entry); 5112 bool uffd_wp = pte_swp_uffd_wp(entry); 5113 5114 if (!is_readable_migration_entry(swp_entry) && cow) { 5115 /* 5116 * COW mappings require pages in both 5117 * parent and child to be set to read. 5118 */ 5119 swp_entry = make_readable_migration_entry( 5120 swp_offset(swp_entry)); 5121 entry = swp_entry_to_pte(swp_entry); 5122 if (userfaultfd_wp(src_vma) && uffd_wp) 5123 entry = pte_swp_mkuffd_wp(entry); 5124 set_huge_pte_at(src, addr, src_pte, entry); 5125 } 5126 if (!userfaultfd_wp(dst_vma)) 5127 entry = huge_pte_clear_uffd_wp(entry); 5128 set_huge_pte_at(dst, addr, dst_pte, entry); 5129 } else if (unlikely(is_pte_marker(entry))) { 5130 pte_marker marker = copy_pte_marker( 5131 pte_to_swp_entry(entry), dst_vma); 5132 5133 if (marker) 5134 set_huge_pte_at(dst, addr, dst_pte, 5135 make_pte_marker(marker)); 5136 } else { 5137 entry = huge_ptep_get(src_pte); 5138 pte_folio = page_folio(pte_page(entry)); 5139 folio_get(pte_folio); 5140 5141 /* 5142 * Failing to duplicate the anon rmap is a rare case 5143 * where we see pinned hugetlb pages while they're 5144 * prone to COW. We need to do the COW earlier during 5145 * fork. 5146 * 5147 * When pre-allocating the page or copying data, we 5148 * need to be without the pgtable locks since we could 5149 * sleep during the process. 
5150 */ 5151 if (!folio_test_anon(pte_folio)) { 5152 page_dup_file_rmap(&pte_folio->page, true); 5153 } else if (page_try_dup_anon_rmap(&pte_folio->page, 5154 true, src_vma)) { 5155 pte_t src_pte_old = entry; 5156 struct folio *new_folio; 5157 5158 spin_unlock(src_ptl); 5159 spin_unlock(dst_ptl); 5160 /* Do not use reserve as it's private owned */ 5161 new_folio = alloc_hugetlb_folio(dst_vma, addr, 1); 5162 if (IS_ERR(new_folio)) { 5163 folio_put(pte_folio); 5164 ret = PTR_ERR(new_folio); 5165 break; 5166 } 5167 ret = copy_user_large_folio(new_folio, 5168 pte_folio, 5169 addr, dst_vma); 5170 folio_put(pte_folio); 5171 if (ret) { 5172 folio_put(new_folio); 5173 break; 5174 } 5175 5176 /* Install the new hugetlb folio if src pte stable */ 5177 dst_ptl = huge_pte_lock(h, dst, dst_pte); 5178 src_ptl = huge_pte_lockptr(h, src, src_pte); 5179 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 5180 entry = huge_ptep_get(src_pte); 5181 if (!pte_same(src_pte_old, entry)) { 5182 restore_reserve_on_error(h, dst_vma, addr, 5183 new_folio); 5184 folio_put(new_folio); 5185 /* huge_ptep of dst_pte won't change as in child */ 5186 goto again; 5187 } 5188 hugetlb_install_folio(dst_vma, dst_pte, addr, 5189 new_folio, src_pte_old); 5190 spin_unlock(src_ptl); 5191 spin_unlock(dst_ptl); 5192 continue; 5193 } 5194 5195 if (cow) { 5196 /* 5197 * No need to notify as we are downgrading page 5198 * table protection not changing it to point 5199 * to a new page. 5200 * 5201 * See Documentation/mm/mmu_notifier.rst 5202 */ 5203 huge_ptep_set_wrprotect(src, addr, src_pte); 5204 entry = huge_pte_wrprotect(entry); 5205 } 5206 5207 if (!userfaultfd_wp(dst_vma)) 5208 entry = huge_pte_clear_uffd_wp(entry); 5209 5210 set_huge_pte_at(dst, addr, dst_pte, entry); 5211 hugetlb_count_add(npages, dst); 5212 } 5213 spin_unlock(src_ptl); 5214 spin_unlock(dst_ptl); 5215 } 5216 5217 if (cow) { 5218 raw_write_seqcount_end(&src->write_protect_seq); 5219 mmu_notifier_invalidate_range_end(&range); 5220 } else { 5221 hugetlb_vma_unlock_read(src_vma); 5222 } 5223 5224 return ret; 5225 } 5226 5227 static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr, 5228 unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte) 5229 { 5230 struct hstate *h = hstate_vma(vma); 5231 struct mm_struct *mm = vma->vm_mm; 5232 spinlock_t *src_ptl, *dst_ptl; 5233 pte_t pte; 5234 5235 dst_ptl = huge_pte_lock(h, mm, dst_pte); 5236 src_ptl = huge_pte_lockptr(h, mm, src_pte); 5237 5238 /* 5239 * We don't have to worry about the ordering of src and dst ptlocks 5240 * because exclusive mmap_lock (or the i_mmap_lock) prevents deadlock. 
5241 */ 5242 if (src_ptl != dst_ptl) 5243 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 5244 5245 pte = huge_ptep_get_and_clear(mm, old_addr, src_pte); 5246 set_huge_pte_at(mm, new_addr, dst_pte, pte); 5247 5248 if (src_ptl != dst_ptl) 5249 spin_unlock(src_ptl); 5250 spin_unlock(dst_ptl); 5251 } 5252 5253 int move_hugetlb_page_tables(struct vm_area_struct *vma, 5254 struct vm_area_struct *new_vma, 5255 unsigned long old_addr, unsigned long new_addr, 5256 unsigned long len) 5257 { 5258 struct hstate *h = hstate_vma(vma); 5259 struct address_space *mapping = vma->vm_file->f_mapping; 5260 unsigned long sz = huge_page_size(h); 5261 struct mm_struct *mm = vma->vm_mm; 5262 unsigned long old_end = old_addr + len; 5263 unsigned long last_addr_mask; 5264 pte_t *src_pte, *dst_pte; 5265 struct mmu_notifier_range range; 5266 bool shared_pmd = false; 5267 5268 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, old_addr, 5269 old_end); 5270 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 5271 /* 5272 * In case of shared PMDs, we should cover the maximum possible 5273 * range. 5274 */ 5275 flush_cache_range(vma, range.start, range.end); 5276 5277 mmu_notifier_invalidate_range_start(&range); 5278 last_addr_mask = hugetlb_mask_last_page(h); 5279 /* Prevent race with file truncation */ 5280 hugetlb_vma_lock_write(vma); 5281 i_mmap_lock_write(mapping); 5282 for (; old_addr < old_end; old_addr += sz, new_addr += sz) { 5283 src_pte = hugetlb_walk(vma, old_addr, sz); 5284 if (!src_pte) { 5285 old_addr |= last_addr_mask; 5286 new_addr |= last_addr_mask; 5287 continue; 5288 } 5289 if (huge_pte_none(huge_ptep_get(src_pte))) 5290 continue; 5291 5292 if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) { 5293 shared_pmd = true; 5294 old_addr |= last_addr_mask; 5295 new_addr |= last_addr_mask; 5296 continue; 5297 } 5298 5299 dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz); 5300 if (!dst_pte) 5301 break; 5302 5303 move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte); 5304 } 5305 5306 if (shared_pmd) 5307 flush_hugetlb_tlb_range(vma, range.start, range.end); 5308 else 5309 flush_hugetlb_tlb_range(vma, old_end - len, old_end); 5310 mmu_notifier_invalidate_range_end(&range); 5311 i_mmap_unlock_write(mapping); 5312 hugetlb_vma_unlock_write(vma); 5313 5314 return len + old_addr - old_end; 5315 } 5316 5317 static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, 5318 unsigned long start, unsigned long end, 5319 struct page *ref_page, zap_flags_t zap_flags) 5320 { 5321 struct mm_struct *mm = vma->vm_mm; 5322 unsigned long address; 5323 pte_t *ptep; 5324 pte_t pte; 5325 spinlock_t *ptl; 5326 struct page *page; 5327 struct hstate *h = hstate_vma(vma); 5328 unsigned long sz = huge_page_size(h); 5329 unsigned long last_addr_mask; 5330 bool force_flush = false; 5331 5332 WARN_ON(!is_vm_hugetlb_page(vma)); 5333 BUG_ON(start & ~huge_page_mask(h)); 5334 BUG_ON(end & ~huge_page_mask(h)); 5335 5336 /* 5337 * This is a hugetlb vma, all the pte entries should point 5338 * to huge page. 
5339 */ 5340 tlb_change_page_size(tlb, sz); 5341 tlb_start_vma(tlb, vma); 5342 5343 last_addr_mask = hugetlb_mask_last_page(h); 5344 address = start; 5345 for (; address < end; address += sz) { 5346 ptep = hugetlb_walk(vma, address, sz); 5347 if (!ptep) { 5348 address |= last_addr_mask; 5349 continue; 5350 } 5351 5352 ptl = huge_pte_lock(h, mm, ptep); 5353 if (huge_pmd_unshare(mm, vma, address, ptep)) { 5354 spin_unlock(ptl); 5355 tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE); 5356 force_flush = true; 5357 address |= last_addr_mask; 5358 continue; 5359 } 5360 5361 pte = huge_ptep_get(ptep); 5362 if (huge_pte_none(pte)) { 5363 spin_unlock(ptl); 5364 continue; 5365 } 5366 5367 /* 5368 * Migrating hugepage or HWPoisoned hugepage is already 5369 * unmapped and its refcount is dropped, so just clear pte here. 5370 */ 5371 if (unlikely(!pte_present(pte))) { 5372 /* 5373 * If the pte was wr-protected by uffd-wp in any of the 5374 * swap forms, meanwhile the caller does not want to 5375 * drop the uffd-wp bit in this zap, then replace the 5376 * pte with a marker. 5377 */ 5378 if (pte_swp_uffd_wp_any(pte) && 5379 !(zap_flags & ZAP_FLAG_DROP_MARKER)) 5380 set_huge_pte_at(mm, address, ptep, 5381 make_pte_marker(PTE_MARKER_UFFD_WP)); 5382 else 5383 huge_pte_clear(mm, address, ptep, sz); 5384 spin_unlock(ptl); 5385 continue; 5386 } 5387 5388 page = pte_page(pte); 5389 /* 5390 * If a reference page is supplied, it is because a specific 5391 * page is being unmapped, not a range. Ensure the page we 5392 * are about to unmap is the actual page of interest. 5393 */ 5394 if (ref_page) { 5395 if (page != ref_page) { 5396 spin_unlock(ptl); 5397 continue; 5398 } 5399 /* 5400 * Mark the VMA as having unmapped its page so that 5401 * future faults in this VMA will fail rather than 5402 * looking like data was lost 5403 */ 5404 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); 5405 } 5406 5407 pte = huge_ptep_get_and_clear(mm, address, ptep); 5408 tlb_remove_huge_tlb_entry(h, tlb, ptep, address); 5409 if (huge_pte_dirty(pte)) 5410 set_page_dirty(page); 5411 /* Leave a uffd-wp pte marker if needed */ 5412 if (huge_pte_uffd_wp(pte) && 5413 !(zap_flags & ZAP_FLAG_DROP_MARKER)) 5414 set_huge_pte_at(mm, address, ptep, 5415 make_pte_marker(PTE_MARKER_UFFD_WP)); 5416 hugetlb_count_sub(pages_per_huge_page(h), mm); 5417 page_remove_rmap(page, vma, true); 5418 5419 spin_unlock(ptl); 5420 tlb_remove_page_size(tlb, page, huge_page_size(h)); 5421 /* 5422 * Bail out after unmapping reference page if supplied 5423 */ 5424 if (ref_page) 5425 break; 5426 } 5427 tlb_end_vma(tlb, vma); 5428 5429 /* 5430 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We 5431 * could defer the flush until now, since by holding i_mmap_rwsem we 5432 * guaranteed that the last reference would not be dropped. But we must 5433 * do the flushing before we return, as otherwise i_mmap_rwsem will be 5434 * dropped and the last reference to the shared PMDs page might be 5435 * dropped as well. 5436 * 5437 * In theory we could defer the freeing of the PMD pages as well, but 5438 * huge_pmd_unshare() relies on the exact page_count for the PMD page to 5439 * detect sharing, so we cannot defer the release of the page either. 5440 * Instead, do flush now.
5441 */ 5442 if (force_flush) 5443 tlb_flush_mmu_tlbonly(tlb); 5444 } 5445 5446 void __unmap_hugepage_range_final(struct mmu_gather *tlb, 5447 struct vm_area_struct *vma, unsigned long start, 5448 unsigned long end, struct page *ref_page, 5449 zap_flags_t zap_flags) 5450 { 5451 hugetlb_vma_lock_write(vma); 5452 i_mmap_lock_write(vma->vm_file->f_mapping); 5453 5454 /* mmu notification performed in caller */ 5455 __unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags); 5456 5457 if (zap_flags & ZAP_FLAG_UNMAP) { /* final unmap */ 5458 /* 5459 * Unlock and free the vma lock before releasing i_mmap_rwsem. 5460 * When the vma_lock is freed, this makes the vma ineligible 5461 * for pmd sharing. And, i_mmap_rwsem is required to set up 5462 * pmd sharing. This is important as page tables for this 5463 * unmapped range will be asynchronously deleted. If the page 5464 * tables are shared, there will be issues when accessed by 5465 * someone else. 5466 */ 5467 __hugetlb_vma_unlock_write_free(vma); 5468 i_mmap_unlock_write(vma->vm_file->f_mapping); 5469 } else { 5470 i_mmap_unlock_write(vma->vm_file->f_mapping); 5471 hugetlb_vma_unlock_write(vma); 5472 } 5473 } 5474 5475 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 5476 unsigned long end, struct page *ref_page, 5477 zap_flags_t zap_flags) 5478 { 5479 struct mmu_notifier_range range; 5480 struct mmu_gather tlb; 5481 5482 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, 5483 start, end); 5484 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 5485 mmu_notifier_invalidate_range_start(&range); 5486 tlb_gather_mmu(&tlb, vma->vm_mm); 5487 5488 __unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags); 5489 5490 mmu_notifier_invalidate_range_end(&range); 5491 tlb_finish_mmu(&tlb); 5492 } 5493 5494 /* 5495 * This is called when the original mapper is failing to COW a MAP_PRIVATE 5496 * mapping it owns the reserve page for. The intention is to unmap the page 5497 * from other VMAs and let the children be SIGKILLed if they are faulting the 5498 * same region. 5499 */ 5500 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, 5501 struct page *page, unsigned long address) 5502 { 5503 struct hstate *h = hstate_vma(vma); 5504 struct vm_area_struct *iter_vma; 5505 struct address_space *mapping; 5506 pgoff_t pgoff; 5507 5508 /* 5509 * vm_pgoff is in PAGE_SIZE units, hence the different calculation 5510 * from page cache lookup which is in HPAGE_SIZE units. 5511 */ 5512 address = address & huge_page_mask(h); 5513 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + 5514 vma->vm_pgoff; 5515 mapping = vma->vm_file->f_mapping; 5516 5517 /* 5518 * Take the mapping lock for the duration of the table walk. As 5519 * this mapping should be shared between all the VMAs, 5520 * __unmap_hugepage_range() is called as the lock is already held 5521 */ 5522 i_mmap_lock_write(mapping); 5523 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) { 5524 /* Do not unmap the current VMA */ 5525 if (iter_vma == vma) 5526 continue; 5527 5528 /* 5529 * Shared VMAs have their own reserves and do not affect 5530 * MAP_PRIVATE accounting but it is possible that a shared 5531 * VMA is using the same page so check and skip such VMAs. 5532 */ 5533 if (iter_vma->vm_flags & VM_MAYSHARE) 5534 continue; 5535 5536 /* 5537 * Unmap the page from other VMAs without their own reserves. 5538 * They get marked to be SIGKILLed if they fault in these 5539 * areas.
This is because a future no-page fault on this VMA 5540 * could insert a zeroed page instead of the data existing 5541 * from the time of fork. This would look like data corruption 5542 */ 5543 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) 5544 unmap_hugepage_range(iter_vma, address, 5545 address + huge_page_size(h), page, 0); 5546 } 5547 i_mmap_unlock_write(mapping); 5548 } 5549 5550 /* 5551 * hugetlb_wp() should be called with page lock of the original hugepage held. 5552 * Called with hugetlb_fault_mutex_table held and pte_page locked so we 5553 * cannot race with other handlers or page migration. 5554 * Keep the pte_same checks anyway to make transition from the mutex easier. 5555 */ 5556 static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma, 5557 unsigned long address, pte_t *ptep, unsigned int flags, 5558 struct folio *pagecache_folio, spinlock_t *ptl) 5559 { 5560 const bool unshare = flags & FAULT_FLAG_UNSHARE; 5561 pte_t pte = huge_ptep_get(ptep); 5562 struct hstate *h = hstate_vma(vma); 5563 struct folio *old_folio; 5564 struct folio *new_folio; 5565 int outside_reserve = 0; 5566 vm_fault_t ret = 0; 5567 unsigned long haddr = address & huge_page_mask(h); 5568 struct mmu_notifier_range range; 5569 5570 /* 5571 * Never handle CoW for uffd-wp protected pages. It should be only 5572 * handled when the uffd-wp protection is removed. 5573 * 5574 * Note that only the CoW optimization path (in hugetlb_no_page()) 5575 * can trigger this, because hugetlb_fault() will always resolve 5576 * uffd-wp bit first. 5577 */ 5578 if (!unshare && huge_pte_uffd_wp(pte)) 5579 return 0; 5580 5581 /* 5582 * hugetlb does not support FOLL_FORCE-style write faults that keep the 5583 * PTE mapped R/O such as maybe_mkwrite() would do. 5584 */ 5585 if (WARN_ON_ONCE(!unshare && !(vma->vm_flags & VM_WRITE))) 5586 return VM_FAULT_SIGSEGV; 5587 5588 /* Let's take out MAP_SHARED mappings first. */ 5589 if (vma->vm_flags & VM_MAYSHARE) { 5590 set_huge_ptep_writable(vma, haddr, ptep); 5591 return 0; 5592 } 5593 5594 old_folio = page_folio(pte_page(pte)); 5595 5596 delayacct_wpcopy_start(); 5597 5598 retry_avoidcopy: 5599 /* 5600 * If no-one else is actually using this page, we're the exclusive 5601 * owner and can reuse this page. 5602 */ 5603 if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) { 5604 if (!PageAnonExclusive(&old_folio->page)) 5605 page_move_anon_rmap(&old_folio->page, vma); 5606 if (likely(!unshare)) 5607 set_huge_ptep_writable(vma, haddr, ptep); 5608 5609 delayacct_wpcopy_end(); 5610 return 0; 5611 } 5612 VM_BUG_ON_PAGE(folio_test_anon(old_folio) && 5613 PageAnonExclusive(&old_folio->page), &old_folio->page); 5614 5615 /* 5616 * If the process that created a MAP_PRIVATE mapping is about to 5617 * perform a COW due to a shared page count, attempt to satisfy 5618 * the allocation without using the existing reserves. The pagecache 5619 * page is used to determine if the reserve at this address was 5620 * consumed or not. If reserves were used, a partial faulted mapping 5621 * at the time of fork() could consume its reserves on COW instead 5622 * of the full address range. 5623 */ 5624 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && 5625 old_folio != pagecache_folio) 5626 outside_reserve = 1; 5627 5628 folio_get(old_folio); 5629 5630 /* 5631 * Drop page table lock as buddy allocator may be called. It will 5632 * be acquired again before returning to the caller, as expected. 
5633 */ 5634 spin_unlock(ptl); 5635 new_folio = alloc_hugetlb_folio(vma, haddr, outside_reserve); 5636 5637 if (IS_ERR(new_folio)) { 5638 /* 5639 * If a process owning a MAP_PRIVATE mapping fails to COW, 5640 * it is due to references held by a child and an insufficient 5641 * huge page pool. To guarantee the original mappers 5642 * reliability, unmap the page from child processes. The child 5643 * may get SIGKILLed if it later faults. 5644 */ 5645 if (outside_reserve) { 5646 struct address_space *mapping = vma->vm_file->f_mapping; 5647 pgoff_t idx; 5648 u32 hash; 5649 5650 folio_put(old_folio); 5651 /* 5652 * Drop hugetlb_fault_mutex and vma_lock before 5653 * unmapping. unmapping needs to hold vma_lock 5654 * in write mode. Dropping vma_lock in read mode 5655 * here is OK as COW mappings do not interact with 5656 * PMD sharing. 5657 * 5658 * Reacquire both after unmap operation. 5659 */ 5660 idx = vma_hugecache_offset(h, vma, haddr); 5661 hash = hugetlb_fault_mutex_hash(mapping, idx); 5662 hugetlb_vma_unlock_read(vma); 5663 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 5664 5665 unmap_ref_private(mm, vma, &old_folio->page, haddr); 5666 5667 mutex_lock(&hugetlb_fault_mutex_table[hash]); 5668 hugetlb_vma_lock_read(vma); 5669 spin_lock(ptl); 5670 ptep = hugetlb_walk(vma, haddr, huge_page_size(h)); 5671 if (likely(ptep && 5672 pte_same(huge_ptep_get(ptep), pte))) 5673 goto retry_avoidcopy; 5674 /* 5675 * race occurs while re-acquiring page table 5676 * lock, and our job is done. 5677 */ 5678 delayacct_wpcopy_end(); 5679 return 0; 5680 } 5681 5682 ret = vmf_error(PTR_ERR(new_folio)); 5683 goto out_release_old; 5684 } 5685 5686 /* 5687 * When the original hugepage is shared one, it does not have 5688 * anon_vma prepared. 5689 */ 5690 if (unlikely(anon_vma_prepare(vma))) { 5691 ret = VM_FAULT_OOM; 5692 goto out_release_all; 5693 } 5694 5695 if (copy_user_large_folio(new_folio, old_folio, address, vma)) { 5696 ret = VM_FAULT_HWPOISON_LARGE; 5697 goto out_release_all; 5698 } 5699 __folio_mark_uptodate(new_folio); 5700 5701 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, haddr, 5702 haddr + huge_page_size(h)); 5703 mmu_notifier_invalidate_range_start(&range); 5704 5705 /* 5706 * Retake the page table lock to check for racing updates 5707 * before the page tables are altered 5708 */ 5709 spin_lock(ptl); 5710 ptep = hugetlb_walk(vma, haddr, huge_page_size(h)); 5711 if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) { 5712 pte_t newpte = make_huge_pte(vma, &new_folio->page, !unshare); 5713 5714 /* Break COW or unshare */ 5715 huge_ptep_clear_flush(vma, haddr, ptep); 5716 page_remove_rmap(&old_folio->page, vma, true); 5717 hugepage_add_new_anon_rmap(new_folio, vma, haddr); 5718 if (huge_pte_uffd_wp(pte)) 5719 newpte = huge_pte_mkuffd_wp(newpte); 5720 set_huge_pte_at(mm, haddr, ptep, newpte); 5721 folio_set_hugetlb_migratable(new_folio); 5722 /* Make the old page be freed below */ 5723 new_folio = old_folio; 5724 } 5725 spin_unlock(ptl); 5726 mmu_notifier_invalidate_range_end(&range); 5727 out_release_all: 5728 /* 5729 * No restore in case of successful pagetable update (Break COW or 5730 * unshare) 5731 */ 5732 if (new_folio != old_folio) 5733 restore_reserve_on_error(h, vma, haddr, new_folio); 5734 folio_put(new_folio); 5735 out_release_old: 5736 folio_put(old_folio); 5737 5738 spin_lock(ptl); /* Caller expects lock to be held */ 5739 5740 delayacct_wpcopy_end(); 5741 return ret; 5742 } 5743 5744 /* 5745 * Return whether there is a pagecache page to back given address within VMA. 
5746 */ 5747 static bool hugetlbfs_pagecache_present(struct hstate *h, 5748 struct vm_area_struct *vma, unsigned long address) 5749 { 5750 struct address_space *mapping = vma->vm_file->f_mapping; 5751 pgoff_t idx = vma_hugecache_offset(h, vma, address); 5752 struct folio *folio; 5753 5754 folio = filemap_get_folio(mapping, idx); 5755 if (IS_ERR(folio)) 5756 return false; 5757 folio_put(folio); 5758 return true; 5759 } 5760 5761 int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping, 5762 pgoff_t idx) 5763 { 5764 struct inode *inode = mapping->host; 5765 struct hstate *h = hstate_inode(inode); 5766 int err; 5767 5768 __folio_set_locked(folio); 5769 err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL); 5770 5771 if (unlikely(err)) { 5772 __folio_clear_locked(folio); 5773 return err; 5774 } 5775 folio_clear_hugetlb_restore_reserve(folio); 5776 5777 /* 5778 * mark folio dirty so that it will not be removed from cache/file 5779 * by non-hugetlbfs specific code paths. 5780 */ 5781 folio_mark_dirty(folio); 5782 5783 spin_lock(&inode->i_lock); 5784 inode->i_blocks += blocks_per_huge_page(h); 5785 spin_unlock(&inode->i_lock); 5786 return 0; 5787 } 5788 5789 static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma, 5790 struct address_space *mapping, 5791 pgoff_t idx, 5792 unsigned int flags, 5793 unsigned long haddr, 5794 unsigned long addr, 5795 unsigned long reason) 5796 { 5797 u32 hash; 5798 struct vm_fault vmf = { 5799 .vma = vma, 5800 .address = haddr, 5801 .real_address = addr, 5802 .flags = flags, 5803 5804 /* 5805 * Hard to debug if it ends up being 5806 * used by a callee that assumes 5807 * something about the other 5808 * uninitialized fields... same as in 5809 * memory.c 5810 */ 5811 }; 5812 5813 /* 5814 * vma_lock and hugetlb_fault_mutex must be dropped before handling 5815 * userfault. Also mmap_lock could be dropped due to handling 5816 * userfault, any vma operation should be careful from here. 5817 */ 5818 hugetlb_vma_unlock_read(vma); 5819 hash = hugetlb_fault_mutex_hash(mapping, idx); 5820 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 5821 return handle_userfault(&vmf, reason); 5822 } 5823 5824 /* 5825 * Recheck pte with pgtable lock. Returns true if pte didn't change, or 5826 * false if pte changed or is changing. 5827 */ 5828 static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, 5829 pte_t *ptep, pte_t old_pte) 5830 { 5831 spinlock_t *ptl; 5832 bool same; 5833 5834 ptl = huge_pte_lock(h, mm, ptep); 5835 same = pte_same(huge_ptep_get(ptep), old_pte); 5836 spin_unlock(ptl); 5837 5838 return same; 5839 } 5840 5841 static vm_fault_t hugetlb_no_page(struct mm_struct *mm, 5842 struct vm_area_struct *vma, 5843 struct address_space *mapping, pgoff_t idx, 5844 unsigned long address, pte_t *ptep, 5845 pte_t old_pte, unsigned int flags) 5846 { 5847 struct hstate *h = hstate_vma(vma); 5848 vm_fault_t ret = VM_FAULT_SIGBUS; 5849 int anon_rmap = 0; 5850 unsigned long size; 5851 struct folio *folio; 5852 pte_t new_pte; 5853 spinlock_t *ptl; 5854 unsigned long haddr = address & huge_page_mask(h); 5855 bool new_folio, new_pagecache_folio = false; 5856 u32 hash = hugetlb_fault_mutex_hash(mapping, idx); 5857 5858 /* 5859 * Currently, we are forced to kill the process in the event the 5860 * original mapper has unmapped pages from the child due to a failed 5861 * COW/unsharing. Warn that such a situation has occurred as it may not 5862 * be obvious. 
5863 */ 5864 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { 5865 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n", 5866 current->pid); 5867 goto out; 5868 } 5869 5870 /* 5871 * Use page lock to guard against racing truncation 5872 * before we get page_table_lock. 5873 */ 5874 new_folio = false; 5875 folio = filemap_lock_folio(mapping, idx); 5876 if (IS_ERR(folio)) { 5877 size = i_size_read(mapping->host) >> huge_page_shift(h); 5878 if (idx >= size) 5879 goto out; 5880 /* Check for page in userfault range */ 5881 if (userfaultfd_missing(vma)) { 5882 /* 5883 * Since hugetlb_no_page() was examining pte 5884 * without pgtable lock, we need to re-test under 5885 * lock because the pte may not be stable and could 5886 * have changed from under us. Try to detect 5887 * either changed or currently-changing ptes and retry 5888 * properly when needed. 5889 * 5890 * Note that userfaultfd is actually fine with 5891 * false positives (e.g. caused by a changed pte), 5892 * but not with wrong logical events (e.g. caused by 5893 * reading a pte while it is changing). The latter can 5894 * confuse userspace, so the strictness is very 5895 * much preferred. E.g., a MISSING event should 5896 * never happen on the page after UFFDIO_COPY has 5897 * correctly installed the page and returned. 5898 */ 5899 if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) { 5900 ret = 0; 5901 goto out; 5902 } 5903 5904 return hugetlb_handle_userfault(vma, mapping, idx, flags, 5905 haddr, address, 5906 VM_UFFD_MISSING); 5907 } 5908 5909 folio = alloc_hugetlb_folio(vma, haddr, 0); 5910 if (IS_ERR(folio)) { 5911 /* 5912 * Returning an error will result in the faulting task being 5913 * sent SIGBUS. The hugetlb fault mutex prevents two 5914 * tasks from racing to fault in the same page, which 5915 * could result in spurious allocation failures. 5916 * Page migration does not take the fault mutex, but 5917 * does a clear then write of ptes under the page table 5918 * lock. Page fault code could race with migration, 5919 * notice the cleared pte and try to allocate a page 5920 * here. Before returning an error, get the ptl and make 5921 * sure there really is no pte entry. 5922 */ 5923 if (hugetlb_pte_stable(h, mm, ptep, old_pte)) 5924 ret = vmf_error(PTR_ERR(folio)); 5925 else 5926 ret = 0; 5927 goto out; 5928 } 5929 clear_huge_page(&folio->page, address, pages_per_huge_page(h)); 5930 __folio_mark_uptodate(folio); 5931 new_folio = true; 5932 5933 if (vma->vm_flags & VM_MAYSHARE) { 5934 int err = hugetlb_add_to_page_cache(folio, mapping, idx); 5935 if (err) { 5936 /* 5937 * err can't be -EEXIST, which would imply someone 5938 * else consumed the reservation, since the hugetlb 5939 * fault mutex is held when adding a hugetlb page 5940 * to the page cache. So it's safe to call 5941 * restore_reserve_on_error() here. 5942 */ 5943 restore_reserve_on_error(h, vma, haddr, folio); 5944 folio_put(folio); 5945 goto out; 5946 } 5947 new_pagecache_folio = true; 5948 } else { 5949 folio_lock(folio); 5950 if (unlikely(anon_vma_prepare(vma))) { 5951 ret = VM_FAULT_OOM; 5952 goto backout_unlocked; 5953 } 5954 anon_rmap = 1; 5955 } 5956 } else { 5957 /* 5958 * If a memory error occurs between mmap() and fault, some processes 5959 * don't have a hwpoisoned swap entry for the errored virtual address. 5960 * So we need to block the hugepage fault by checking the PG_hwpoison bit. 5961 */ 5962 if (unlikely(folio_test_hwpoison(folio))) { 5963 ret = VM_FAULT_HWPOISON_LARGE | 5964 VM_FAULT_SET_HINDEX(hstate_index(h)); 5965 goto backout_unlocked; 5966 } 5967 5968 /* Check for page in userfault range.
*/ 5969 if (userfaultfd_minor(vma)) { 5970 folio_unlock(folio); 5971 folio_put(folio); 5972 /* See comment in userfaultfd_missing() block above */ 5973 if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) { 5974 ret = 0; 5975 goto out; 5976 } 5977 return hugetlb_handle_userfault(vma, mapping, idx, flags, 5978 haddr, address, 5979 VM_UFFD_MINOR); 5980 } 5981 } 5982 5983 /* 5984 * If we are going to COW a private mapping later, we examine the 5985 * pending reservations for this page now. This will ensure that 5986 * any allocations necessary to record that reservation occur outside 5987 * the spinlock. 5988 */ 5989 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { 5990 if (vma_needs_reservation(h, vma, haddr) < 0) { 5991 ret = VM_FAULT_OOM; 5992 goto backout_unlocked; 5993 } 5994 /* Just decrements count, does not deallocate */ 5995 vma_end_reservation(h, vma, haddr); 5996 } 5997 5998 ptl = huge_pte_lock(h, mm, ptep); 5999 ret = 0; 6000 /* If pte changed from under us, retry */ 6001 if (!pte_same(huge_ptep_get(ptep), old_pte)) 6002 goto backout; 6003 6004 if (anon_rmap) 6005 hugepage_add_new_anon_rmap(folio, vma, haddr); 6006 else 6007 page_dup_file_rmap(&folio->page, true); 6008 new_pte = make_huge_pte(vma, &folio->page, ((vma->vm_flags & VM_WRITE) 6009 && (vma->vm_flags & VM_SHARED))); 6010 /* 6011 * If this pte was previously wr-protected, keep it wr-protected even 6012 * if populated. 6013 */ 6014 if (unlikely(pte_marker_uffd_wp(old_pte))) 6015 new_pte = huge_pte_mkuffd_wp(new_pte); 6016 set_huge_pte_at(mm, haddr, ptep, new_pte); 6017 6018 hugetlb_count_add(pages_per_huge_page(h), mm); 6019 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { 6020 /* Optimization, do the COW without a second fault */ 6021 ret = hugetlb_wp(mm, vma, address, ptep, flags, folio, ptl); 6022 } 6023 6024 spin_unlock(ptl); 6025 6026 /* 6027 * Only set hugetlb_migratable in newly allocated pages. Existing pages 6028 * found in the pagecache may not have hugetlb_migratable if they have 6029 * been isolated for migration. 6030 */ 6031 if (new_folio) 6032 folio_set_hugetlb_migratable(folio); 6033 6034 folio_unlock(folio); 6035 out: 6036 hugetlb_vma_unlock_read(vma); 6037 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6038 return ret; 6039 6040 backout: 6041 spin_unlock(ptl); 6042 backout_unlocked: 6043 if (new_folio && !new_pagecache_folio) 6044 restore_reserve_on_error(h, vma, haddr, folio); 6045 6046 folio_unlock(folio); 6047 folio_put(folio); 6048 goto out; 6049 } 6050 6051 #ifdef CONFIG_SMP 6052 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx) 6053 { 6054 unsigned long key[2]; 6055 u32 hash; 6056 6057 key[0] = (unsigned long) mapping; 6058 key[1] = idx; 6059 6060 hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0); 6061 6062 return hash & (num_fault_mutexes - 1); 6063 } 6064 #else 6065 /* 6066 * For uniprocessor systems we always use a single mutex, so just 6067 * return 0 and avoid the hashing overhead. 
6068 */ 6069 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx) 6070 { 6071 return 0; 6072 } 6073 #endif 6074 6075 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, 6076 unsigned long address, unsigned int flags) 6077 { 6078 pte_t *ptep, entry; 6079 spinlock_t *ptl; 6080 vm_fault_t ret; 6081 u32 hash; 6082 pgoff_t idx; 6083 struct folio *folio = NULL; 6084 struct folio *pagecache_folio = NULL; 6085 struct hstate *h = hstate_vma(vma); 6086 struct address_space *mapping; 6087 int need_wait_lock = 0; 6088 unsigned long haddr = address & huge_page_mask(h); 6089 6090 /* TODO: Handle faults under the VMA lock */ 6091 if (flags & FAULT_FLAG_VMA_LOCK) { 6092 vma_end_read(vma); 6093 return VM_FAULT_RETRY; 6094 } 6095 6096 /* 6097 * Serialize hugepage allocation and instantiation, so that we don't 6098 * get spurious allocation failures if two CPUs race to instantiate 6099 * the same page in the page cache. 6100 */ 6101 mapping = vma->vm_file->f_mapping; 6102 idx = vma_hugecache_offset(h, vma, haddr); 6103 hash = hugetlb_fault_mutex_hash(mapping, idx); 6104 mutex_lock(&hugetlb_fault_mutex_table[hash]); 6105 6106 /* 6107 * Acquire vma lock before calling huge_pte_alloc and hold 6108 * until finished with ptep. This prevents huge_pmd_unshare from 6109 * being called elsewhere and making the ptep no longer valid. 6110 */ 6111 hugetlb_vma_lock_read(vma); 6112 ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h)); 6113 if (!ptep) { 6114 hugetlb_vma_unlock_read(vma); 6115 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6116 return VM_FAULT_OOM; 6117 } 6118 6119 entry = huge_ptep_get(ptep); 6120 if (huge_pte_none_mostly(entry)) { 6121 if (is_pte_marker(entry)) { 6122 pte_marker marker = 6123 pte_marker_get(pte_to_swp_entry(entry)); 6124 6125 if (marker & PTE_MARKER_POISONED) { 6126 ret = VM_FAULT_HWPOISON_LARGE; 6127 goto out_mutex; 6128 } 6129 } 6130 6131 /* 6132 * Other PTE markers should be handled the same way as none PTE. 6133 * 6134 * hugetlb_no_page will drop vma lock and hugetlb fault 6135 * mutex internally, which make us return immediately. 6136 */ 6137 return hugetlb_no_page(mm, vma, mapping, idx, address, ptep, 6138 entry, flags); 6139 } 6140 6141 ret = 0; 6142 6143 /* 6144 * entry could be a migration/hwpoison entry at this point, so this 6145 * check prevents the kernel from going below assuming that we have 6146 * an active hugepage in pagecache. This goto expects the 2nd page 6147 * fault, and is_hugetlb_entry_(migration|hwpoisoned) check will 6148 * properly handle it. 6149 */ 6150 if (!pte_present(entry)) { 6151 if (unlikely(is_hugetlb_entry_migration(entry))) { 6152 /* 6153 * Release the hugetlb fault lock now, but retain 6154 * the vma lock, because it is needed to guard the 6155 * huge_pte_lockptr() later in 6156 * migration_entry_wait_huge(). The vma lock will 6157 * be released there. 6158 */ 6159 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6160 migration_entry_wait_huge(vma, ptep); 6161 return 0; 6162 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) 6163 ret = VM_FAULT_HWPOISON_LARGE | 6164 VM_FAULT_SET_HINDEX(hstate_index(h)); 6165 goto out_mutex; 6166 } 6167 6168 /* 6169 * If we are going to COW/unshare the mapping later, we examine the 6170 * pending reservations for this page now. This will ensure that any 6171 * allocations necessary to record that reservation occur outside the 6172 * spinlock. Also lookup the pagecache page now as it is used to 6173 * determine if a reservation has been consumed. 
6174 */ 6175 if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) && 6176 !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(entry)) { 6177 if (vma_needs_reservation(h, vma, haddr) < 0) { 6178 ret = VM_FAULT_OOM; 6179 goto out_mutex; 6180 } 6181 /* Just decrements count, does not deallocate */ 6182 vma_end_reservation(h, vma, haddr); 6183 6184 pagecache_folio = filemap_lock_folio(mapping, idx); 6185 if (IS_ERR(pagecache_folio)) 6186 pagecache_folio = NULL; 6187 } 6188 6189 ptl = huge_pte_lock(h, mm, ptep); 6190 6191 /* Check for a racing update before calling hugetlb_wp() */ 6192 if (unlikely(!pte_same(entry, huge_ptep_get(ptep)))) 6193 goto out_ptl; 6194 6195 /* Handle userfault-wp first, before trying to lock more pages */ 6196 if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(ptep)) && 6197 (flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) { 6198 struct vm_fault vmf = { 6199 .vma = vma, 6200 .address = haddr, 6201 .real_address = address, 6202 .flags = flags, 6203 }; 6204 6205 spin_unlock(ptl); 6206 if (pagecache_folio) { 6207 folio_unlock(pagecache_folio); 6208 folio_put(pagecache_folio); 6209 } 6210 hugetlb_vma_unlock_read(vma); 6211 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6212 return handle_userfault(&vmf, VM_UFFD_WP); 6213 } 6214 6215 /* 6216 * hugetlb_wp() requires page locks of pte_page(entry) and 6217 * pagecache_folio, so here we need to take the former one 6218 * when folio != pagecache_folio or !pagecache_folio. 6219 */ 6220 folio = page_folio(pte_page(entry)); 6221 if (folio != pagecache_folio) 6222 if (!folio_trylock(folio)) { 6223 need_wait_lock = 1; 6224 goto out_ptl; 6225 } 6226 6227 folio_get(folio); 6228 6229 if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) { 6230 if (!huge_pte_write(entry)) { 6231 ret = hugetlb_wp(mm, vma, address, ptep, flags, 6232 pagecache_folio, ptl); 6233 goto out_put_page; 6234 } else if (likely(flags & FAULT_FLAG_WRITE)) { 6235 entry = huge_pte_mkdirty(entry); 6236 } 6237 } 6238 entry = pte_mkyoung(entry); 6239 if (huge_ptep_set_access_flags(vma, haddr, ptep, entry, 6240 flags & FAULT_FLAG_WRITE)) 6241 update_mmu_cache(vma, haddr, ptep); 6242 out_put_page: 6243 if (folio != pagecache_folio) 6244 folio_unlock(folio); 6245 folio_put(folio); 6246 out_ptl: 6247 spin_unlock(ptl); 6248 6249 if (pagecache_folio) { 6250 folio_unlock(pagecache_folio); 6251 folio_put(pagecache_folio); 6252 } 6253 out_mutex: 6254 hugetlb_vma_unlock_read(vma); 6255 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 6256 /* 6257 * Generally it's safe to hold a refcount while waiting for the page lock. But 6258 * here we only wait to defer the next page fault and avoid a busy loop, and 6259 * the page is not used after it is unlocked and before we return from the 6260 * current page fault. So we are safe from accessing a freed page, even if we 6261 * wait here without taking a refcount. 6262 */ 6263 if (need_wait_lock) 6264 folio_wait_locked(folio); 6265 return ret; 6266 } 6267 6268 #ifdef CONFIG_USERFAULTFD 6269 /* 6270 * Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte 6271 * with modifications for hugetlb pages.
6272 */ 6273 int hugetlb_mfill_atomic_pte(pte_t *dst_pte, 6274 struct vm_area_struct *dst_vma, 6275 unsigned long dst_addr, 6276 unsigned long src_addr, 6277 uffd_flags_t flags, 6278 struct folio **foliop) 6279 { 6280 struct mm_struct *dst_mm = dst_vma->vm_mm; 6281 bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE); 6282 bool wp_enabled = (flags & MFILL_ATOMIC_WP); 6283 struct hstate *h = hstate_vma(dst_vma); 6284 struct address_space *mapping = dst_vma->vm_file->f_mapping; 6285 pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr); 6286 unsigned long size; 6287 int vm_shared = dst_vma->vm_flags & VM_SHARED; 6288 pte_t _dst_pte; 6289 spinlock_t *ptl; 6290 int ret = -ENOMEM; 6291 struct folio *folio; 6292 int writable; 6293 bool folio_in_pagecache = false; 6294 6295 if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) { 6296 ptl = huge_pte_lock(h, dst_mm, dst_pte); 6297 6298 /* Don't overwrite any existing PTEs (even markers) */ 6299 if (!huge_pte_none(huge_ptep_get(dst_pte))) { 6300 spin_unlock(ptl); 6301 return -EEXIST; 6302 } 6303 6304 _dst_pte = make_pte_marker(PTE_MARKER_POISONED); 6305 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); 6306 6307 /* No need to invalidate - it was non-present before */ 6308 update_mmu_cache(dst_vma, dst_addr, dst_pte); 6309 6310 spin_unlock(ptl); 6311 return 0; 6312 } 6313 6314 if (is_continue) { 6315 ret = -EFAULT; 6316 folio = filemap_lock_folio(mapping, idx); 6317 if (IS_ERR(folio)) 6318 goto out; 6319 folio_in_pagecache = true; 6320 } else if (!*foliop) { 6321 /* If a folio already exists, then it's UFFDIO_COPY for 6322 * a non-missing case. Return -EEXIST. 6323 */ 6324 if (vm_shared && 6325 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { 6326 ret = -EEXIST; 6327 goto out; 6328 } 6329 6330 folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0); 6331 if (IS_ERR(folio)) { 6332 ret = -ENOMEM; 6333 goto out; 6334 } 6335 6336 ret = copy_folio_from_user(folio, (const void __user *) src_addr, 6337 false); 6338 6339 /* fallback to copy_from_user outside mmap_lock */ 6340 if (unlikely(ret)) { 6341 ret = -ENOENT; 6342 /* Free the allocated folio which may have 6343 * consumed a reservation. 6344 */ 6345 restore_reserve_on_error(h, dst_vma, dst_addr, folio); 6346 folio_put(folio); 6347 6348 /* Allocate a temporary folio to hold the copied 6349 * contents. 6350 */ 6351 folio = alloc_hugetlb_folio_vma(h, dst_vma, dst_addr); 6352 if (!folio) { 6353 ret = -ENOMEM; 6354 goto out; 6355 } 6356 *foliop = folio; 6357 /* Set the outparam foliop and return to the caller to 6358 * copy the contents outside the lock. Don't free the 6359 * folio. 6360 */ 6361 goto out; 6362 } 6363 } else { 6364 if (vm_shared && 6365 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { 6366 folio_put(*foliop); 6367 ret = -EEXIST; 6368 *foliop = NULL; 6369 goto out; 6370 } 6371 6372 folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0); 6373 if (IS_ERR(folio)) { 6374 folio_put(*foliop); 6375 ret = -ENOMEM; 6376 *foliop = NULL; 6377 goto out; 6378 } 6379 ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma); 6380 folio_put(*foliop); 6381 *foliop = NULL; 6382 if (ret) { 6383 folio_put(folio); 6384 goto out; 6385 } 6386 } 6387 6388 /* 6389 * The memory barrier inside __folio_mark_uptodate makes sure that 6390 * preceding stores to the page contents become visible before 6391 * the set_pte_at() write. 6392 */ 6393 __folio_mark_uptodate(folio); 6394 6395 /* Add shared, newly allocated pages to the page cache. 
*/ 6396 if (vm_shared && !is_continue) { 6397 size = i_size_read(mapping->host) >> huge_page_shift(h); 6398 ret = -EFAULT; 6399 if (idx >= size) 6400 goto out_release_nounlock; 6401 6402 /* 6403 * Serialization between remove_inode_hugepages() and 6404 * hugetlb_add_to_page_cache() below happens through the 6405 * hugetlb_fault_mutex_table that here must be hold by 6406 * the caller. 6407 */ 6408 ret = hugetlb_add_to_page_cache(folio, mapping, idx); 6409 if (ret) 6410 goto out_release_nounlock; 6411 folio_in_pagecache = true; 6412 } 6413 6414 ptl = huge_pte_lock(h, dst_mm, dst_pte); 6415 6416 ret = -EIO; 6417 if (folio_test_hwpoison(folio)) 6418 goto out_release_unlock; 6419 6420 /* 6421 * We allow to overwrite a pte marker: consider when both MISSING|WP 6422 * registered, we firstly wr-protect a none pte which has no page cache 6423 * page backing it, then access the page. 6424 */ 6425 ret = -EEXIST; 6426 if (!huge_pte_none_mostly(huge_ptep_get(dst_pte))) 6427 goto out_release_unlock; 6428 6429 if (folio_in_pagecache) 6430 page_dup_file_rmap(&folio->page, true); 6431 else 6432 hugepage_add_new_anon_rmap(folio, dst_vma, dst_addr); 6433 6434 /* 6435 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY 6436 * with wp flag set, don't set pte write bit. 6437 */ 6438 if (wp_enabled || (is_continue && !vm_shared)) 6439 writable = 0; 6440 else 6441 writable = dst_vma->vm_flags & VM_WRITE; 6442 6443 _dst_pte = make_huge_pte(dst_vma, &folio->page, writable); 6444 /* 6445 * Always mark UFFDIO_COPY page dirty; note that this may not be 6446 * extremely important for hugetlbfs for now since swapping is not 6447 * supported, but we should still be clear in that this page cannot be 6448 * thrown away at will, even if write bit not set. 6449 */ 6450 _dst_pte = huge_pte_mkdirty(_dst_pte); 6451 _dst_pte = pte_mkyoung(_dst_pte); 6452 6453 if (wp_enabled) 6454 _dst_pte = huge_pte_mkuffd_wp(_dst_pte); 6455 6456 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); 6457 6458 hugetlb_count_add(pages_per_huge_page(h), dst_mm); 6459 6460 /* No need to invalidate - it was non-present before */ 6461 update_mmu_cache(dst_vma, dst_addr, dst_pte); 6462 6463 spin_unlock(ptl); 6464 if (!is_continue) 6465 folio_set_hugetlb_migratable(folio); 6466 if (vm_shared || is_continue) 6467 folio_unlock(folio); 6468 ret = 0; 6469 out: 6470 return ret; 6471 out_release_unlock: 6472 spin_unlock(ptl); 6473 if (vm_shared || is_continue) 6474 folio_unlock(folio); 6475 out_release_nounlock: 6476 if (!folio_in_pagecache) 6477 restore_reserve_on_error(h, dst_vma, dst_addr, folio); 6478 folio_put(folio); 6479 goto out; 6480 } 6481 #endif /* CONFIG_USERFAULTFD */ 6482 6483 struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma, 6484 unsigned long address, unsigned int flags, 6485 unsigned int *page_mask) 6486 { 6487 struct hstate *h = hstate_vma(vma); 6488 struct mm_struct *mm = vma->vm_mm; 6489 unsigned long haddr = address & huge_page_mask(h); 6490 struct page *page = NULL; 6491 spinlock_t *ptl; 6492 pte_t *pte, entry; 6493 int ret; 6494 6495 hugetlb_vma_lock_read(vma); 6496 pte = hugetlb_walk(vma, haddr, huge_page_size(h)); 6497 if (!pte) 6498 goto out_unlock; 6499 6500 ptl = huge_pte_lock(h, mm, pte); 6501 entry = huge_ptep_get(pte); 6502 if (pte_present(entry)) { 6503 page = pte_page(entry); 6504 6505 if (!huge_pte_write(entry)) { 6506 if (flags & FOLL_WRITE) { 6507 page = NULL; 6508 goto out; 6509 } 6510 6511 if (gup_must_unshare(vma, flags, page)) { 6512 /* Tell the caller to do unsharing */ 6513 page = 
ERR_PTR(-EMLINK); 6514 goto out; 6515 } 6516 } 6517 6518 page += ((address & ~huge_page_mask(h)) >> PAGE_SHIFT); 6519 6520 /* 6521 * Note that page may be a sub-page, and with vmemmap 6522 * optimizations the page struct may be read only. 6523 * try_grab_page() will increase the ref count on the 6524 * head page, so this will be OK. 6525 * 6526 * try_grab_page() should always be able to get the page here, 6527 * because we hold the ptl lock and have verified pte_present(). 6528 */ 6529 ret = try_grab_page(page, flags); 6530 6531 if (WARN_ON_ONCE(ret)) { 6532 page = ERR_PTR(ret); 6533 goto out; 6534 } 6535 6536 *page_mask = (1U << huge_page_order(h)) - 1; 6537 } 6538 out: 6539 spin_unlock(ptl); 6540 out_unlock: 6541 hugetlb_vma_unlock_read(vma); 6542 6543 /* 6544 * Fixup retval for dump requests: if pagecache doesn't exist, 6545 * don't try to allocate a new page but just skip it. 6546 */ 6547 if (!page && (flags & FOLL_DUMP) && 6548 !hugetlbfs_pagecache_present(h, vma, address)) 6549 page = ERR_PTR(-EFAULT); 6550 6551 return page; 6552 } 6553 6554 long hugetlb_change_protection(struct vm_area_struct *vma, 6555 unsigned long address, unsigned long end, 6556 pgprot_t newprot, unsigned long cp_flags) 6557 { 6558 struct mm_struct *mm = vma->vm_mm; 6559 unsigned long start = address; 6560 pte_t *ptep; 6561 pte_t pte; 6562 struct hstate *h = hstate_vma(vma); 6563 long pages = 0, psize = huge_page_size(h); 6564 bool shared_pmd = false; 6565 struct mmu_notifier_range range; 6566 unsigned long last_addr_mask; 6567 bool uffd_wp = cp_flags & MM_CP_UFFD_WP; 6568 bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE; 6569 6570 /* 6571 * In the case of shared PMDs, the area to flush could be beyond 6572 * start/end. Set range.start/range.end to cover the maximum possible 6573 * range if PMD sharing is possible. 6574 */ 6575 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 6576 0, mm, start, end); 6577 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 6578 6579 BUG_ON(address >= end); 6580 flush_cache_range(vma, range.start, range.end); 6581 6582 mmu_notifier_invalidate_range_start(&range); 6583 hugetlb_vma_lock_write(vma); 6584 i_mmap_lock_write(vma->vm_file->f_mapping); 6585 last_addr_mask = hugetlb_mask_last_page(h); 6586 for (; address < end; address += psize) { 6587 spinlock_t *ptl; 6588 ptep = hugetlb_walk(vma, address, psize); 6589 if (!ptep) { 6590 if (!uffd_wp) { 6591 address |= last_addr_mask; 6592 continue; 6593 } 6594 /* 6595 * Userfaultfd wr-protect requires pgtable 6596 * pre-allocations to install pte markers. 6597 */ 6598 ptep = huge_pte_alloc(mm, vma, address, psize); 6599 if (!ptep) { 6600 pages = -ENOMEM; 6601 break; 6602 } 6603 } 6604 ptl = huge_pte_lock(h, mm, ptep); 6605 if (huge_pmd_unshare(mm, vma, address, ptep)) { 6606 /* 6607 * When uffd-wp is enabled on the vma, unshare 6608 * shouldn't happen at all. Warn about it if it 6609 * happened due to some reason. 6610 */ 6611 WARN_ON_ONCE(uffd_wp || uffd_wp_resolve); 6612 pages++; 6613 spin_unlock(ptl); 6614 shared_pmd = true; 6615 address |= last_addr_mask; 6616 continue; 6617 } 6618 pte = huge_ptep_get(ptep); 6619 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) { 6620 /* Nothing to do. 
*/ 6621 } else if (unlikely(is_hugetlb_entry_migration(pte))) { 6622 swp_entry_t entry = pte_to_swp_entry(pte); 6623 struct page *page = pfn_swap_entry_to_page(entry); 6624 pte_t newpte = pte; 6625 6626 if (is_writable_migration_entry(entry)) { 6627 if (PageAnon(page)) 6628 entry = make_readable_exclusive_migration_entry( 6629 swp_offset(entry)); 6630 else 6631 entry = make_readable_migration_entry( 6632 swp_offset(entry)); 6633 newpte = swp_entry_to_pte(entry); 6634 pages++; 6635 } 6636 6637 if (uffd_wp) 6638 newpte = pte_swp_mkuffd_wp(newpte); 6639 else if (uffd_wp_resolve) 6640 newpte = pte_swp_clear_uffd_wp(newpte); 6641 if (!pte_same(pte, newpte)) 6642 set_huge_pte_at(mm, address, ptep, newpte); 6643 } else if (unlikely(is_pte_marker(pte))) { 6644 /* No other markers apply for now. */ 6645 WARN_ON_ONCE(!pte_marker_uffd_wp(pte)); 6646 if (uffd_wp_resolve) 6647 /* Safe to modify directly (non-present->none). */ 6648 huge_pte_clear(mm, address, ptep, psize); 6649 } else if (!huge_pte_none(pte)) { 6650 pte_t old_pte; 6651 unsigned int shift = huge_page_shift(hstate_vma(vma)); 6652 6653 old_pte = huge_ptep_modify_prot_start(vma, address, ptep); 6654 pte = huge_pte_modify(old_pte, newprot); 6655 pte = arch_make_huge_pte(pte, shift, vma->vm_flags); 6656 if (uffd_wp) 6657 pte = huge_pte_mkuffd_wp(pte); 6658 else if (uffd_wp_resolve) 6659 pte = huge_pte_clear_uffd_wp(pte); 6660 huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte); 6661 pages++; 6662 } else { 6663 /* None pte */ 6664 if (unlikely(uffd_wp)) 6665 /* Safe to modify directly (none->non-present). */ 6666 set_huge_pte_at(mm, address, ptep, 6667 make_pte_marker(PTE_MARKER_UFFD_WP)); 6668 } 6669 spin_unlock(ptl); 6670 } 6671 /* 6672 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare 6673 * may have cleared our pud entry and done put_page on the page table: 6674 * once we release i_mmap_rwsem, another task can do the final put_page 6675 * and that page table be reused and filled with junk. If we actually 6676 * did unshare a page of pmds, flush the range corresponding to the pud. 6677 */ 6678 if (shared_pmd) 6679 flush_hugetlb_tlb_range(vma, range.start, range.end); 6680 else 6681 flush_hugetlb_tlb_range(vma, start, end); 6682 /* 6683 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs() we are 6684 * downgrading page table protection not changing it to point to a new 6685 * page. 6686 * 6687 * See Documentation/mm/mmu_notifier.rst 6688 */ 6689 i_mmap_unlock_write(vma->vm_file->f_mapping); 6690 hugetlb_vma_unlock_write(vma); 6691 mmu_notifier_invalidate_range_end(&range); 6692 6693 return pages > 0 ? (pages << h->order) : pages; 6694 } 6695 6696 /* Return true if reservation was successful, false otherwise. */ 6697 bool hugetlb_reserve_pages(struct inode *inode, 6698 long from, long to, 6699 struct vm_area_struct *vma, 6700 vm_flags_t vm_flags) 6701 { 6702 long chg = -1, add = -1; 6703 struct hstate *h = hstate_inode(inode); 6704 struct hugepage_subpool *spool = subpool_inode(inode); 6705 struct resv_map *resv_map; 6706 struct hugetlb_cgroup *h_cg = NULL; 6707 long gbl_reserve, regions_needed = 0; 6708 6709 /* This should never happen */ 6710 if (from > to) { 6711 VM_WARN(1, "%s called with a negative range\n", __func__); 6712 return false; 6713 } 6714 6715 /* 6716 * vma specific semaphore used for pmd sharing and fault/truncation 6717 * synchronization 6718 */ 6719 hugetlb_vma_lock_alloc(vma); 6720 6721 /* 6722 * Only apply hugepage reservation if asked. 
At fault time, an 6723 * attempt will be made for VM_NORESERVE to allocate a page 6724 * without using reserves 6725 */ 6726 if (vm_flags & VM_NORESERVE) 6727 return true; 6728 6729 /* 6730 * Shared mappings base their reservation on the number of pages that 6731 * are already allocated on behalf of the file. Private mappings need 6732 * to reserve the full area even if read-only as mprotect() may be 6733 * called to make the mapping read-write. Assume !vma is a shm mapping 6734 */ 6735 if (!vma || vma->vm_flags & VM_MAYSHARE) { 6736 /* 6737 * resv_map can not be NULL as hugetlb_reserve_pages is only 6738 * called for inodes for which resv_maps were created (see 6739 * hugetlbfs_get_inode). 6740 */ 6741 resv_map = inode_resv_map(inode); 6742 6743 chg = region_chg(resv_map, from, to, ®ions_needed); 6744 } else { 6745 /* Private mapping. */ 6746 resv_map = resv_map_alloc(); 6747 if (!resv_map) 6748 goto out_err; 6749 6750 chg = to - from; 6751 6752 set_vma_resv_map(vma, resv_map); 6753 set_vma_resv_flags(vma, HPAGE_RESV_OWNER); 6754 } 6755 6756 if (chg < 0) 6757 goto out_err; 6758 6759 if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h), 6760 chg * pages_per_huge_page(h), &h_cg) < 0) 6761 goto out_err; 6762 6763 if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) { 6764 /* For private mappings, the hugetlb_cgroup uncharge info hangs 6765 * of the resv_map. 6766 */ 6767 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h); 6768 } 6769 6770 /* 6771 * There must be enough pages in the subpool for the mapping. If 6772 * the subpool has a minimum size, there may be some global 6773 * reservations already in place (gbl_reserve). 6774 */ 6775 gbl_reserve = hugepage_subpool_get_pages(spool, chg); 6776 if (gbl_reserve < 0) 6777 goto out_uncharge_cgroup; 6778 6779 /* 6780 * Check enough hugepages are available for the reservation. 6781 * Hand the pages back to the subpool if there are not 6782 */ 6783 if (hugetlb_acct_memory(h, gbl_reserve) < 0) 6784 goto out_put_pages; 6785 6786 /* 6787 * Account for the reservations made. Shared mappings record regions 6788 * that have reservations as they are shared by multiple VMAs. 6789 * When the last VMA disappears, the region map says how much 6790 * the reservation was and the page cache tells how much of 6791 * the reservation was consumed. Private mappings are per-VMA and 6792 * only the consumed reservations are tracked. When the VMA 6793 * disappears, the original reservation is the VMA size and the 6794 * consumed reservations are stored in the map. Hence, nothing 6795 * else has to be done for private mappings here 6796 */ 6797 if (!vma || vma->vm_flags & VM_MAYSHARE) { 6798 add = region_add(resv_map, from, to, regions_needed, h, h_cg); 6799 6800 if (unlikely(add < 0)) { 6801 hugetlb_acct_memory(h, -gbl_reserve); 6802 goto out_put_pages; 6803 } else if (unlikely(chg > add)) { 6804 /* 6805 * pages in this range were added to the reserve 6806 * map between region_chg and region_add. This 6807 * indicates a race with alloc_hugetlb_folio. Adjust 6808 * the subpool and reserve counts modified above 6809 * based on the difference. 6810 */ 6811 long rsv_adjust; 6812 6813 /* 6814 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the 6815 * reference to h_cg->css. See comment below for detail. 
6816 */ 6817 hugetlb_cgroup_uncharge_cgroup_rsvd( 6818 hstate_index(h), 6819 (chg - add) * pages_per_huge_page(h), h_cg); 6820 6821 rsv_adjust = hugepage_subpool_put_pages(spool, 6822 chg - add); 6823 hugetlb_acct_memory(h, -rsv_adjust); 6824 } else if (h_cg) { 6825 /* 6826 * The file_regions will hold their own reference to 6827 * h_cg->css. So we should release the reference held 6828 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are 6829 * done. 6830 */ 6831 hugetlb_cgroup_put_rsvd_cgroup(h_cg); 6832 } 6833 } 6834 return true; 6835 6836 out_put_pages: 6837 /* put back original number of pages, chg */ 6838 (void)hugepage_subpool_put_pages(spool, chg); 6839 out_uncharge_cgroup: 6840 hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h), 6841 chg * pages_per_huge_page(h), h_cg); 6842 out_err: 6843 hugetlb_vma_lock_free(vma); 6844 if (!vma || vma->vm_flags & VM_MAYSHARE) 6845 /* Only call region_abort if the region_chg succeeded but the 6846 * region_add failed or didn't run. 6847 */ 6848 if (chg >= 0 && add < 0) 6849 region_abort(resv_map, from, to, regions_needed); 6850 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) 6851 kref_put(&resv_map->refs, resv_map_release); 6852 return false; 6853 } 6854 6855 long hugetlb_unreserve_pages(struct inode *inode, long start, long end, 6856 long freed) 6857 { 6858 struct hstate *h = hstate_inode(inode); 6859 struct resv_map *resv_map = inode_resv_map(inode); 6860 long chg = 0; 6861 struct hugepage_subpool *spool = subpool_inode(inode); 6862 long gbl_reserve; 6863 6864 /* 6865 * Since this routine can be called in the evict inode path for all 6866 * hugetlbfs inodes, resv_map could be NULL. 6867 */ 6868 if (resv_map) { 6869 chg = region_del(resv_map, start, end); 6870 /* 6871 * region_del() can fail in the rare case where a region 6872 * must be split and another region descriptor can not be 6873 * allocated. If end == LONG_MAX, it will not fail. 6874 */ 6875 if (chg < 0) 6876 return chg; 6877 } 6878 6879 spin_lock(&inode->i_lock); 6880 inode->i_blocks -= (blocks_per_huge_page(h) * freed); 6881 spin_unlock(&inode->i_lock); 6882 6883 /* 6884 * If the subpool has a minimum size, the number of global 6885 * reservations to be released may be adjusted. 6886 * 6887 * Note that !resv_map implies freed == 0. So (chg - freed) 6888 * won't go negative. 6889 */ 6890 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed)); 6891 hugetlb_acct_memory(h, -gbl_reserve); 6892 6893 return 0; 6894 } 6895 6896 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE 6897 static unsigned long page_table_shareable(struct vm_area_struct *svma, 6898 struct vm_area_struct *vma, 6899 unsigned long addr, pgoff_t idx) 6900 { 6901 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) + 6902 svma->vm_start; 6903 unsigned long sbase = saddr & PUD_MASK; 6904 unsigned long s_end = sbase + PUD_SIZE; 6905 6906 /* Allow segments to share if only one is marked locked */ 6907 unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED_MASK; 6908 unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED_MASK; 6909 6910 /* 6911 * match the virtual addresses, permission and the alignment of the 6912 * page table page. 6913 * 6914 * Also, vma_lock (vm_private_data) is required for sharing. 
6915 */ 6916 if (pmd_index(addr) != pmd_index(saddr) || 6917 vm_flags != svm_flags || 6918 !range_in_vma(svma, sbase, s_end) || 6919 !svma->vm_private_data) 6920 return 0; 6921 6922 return saddr; 6923 } 6924 6925 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr) 6926 { 6927 unsigned long start = addr & PUD_MASK; 6928 unsigned long end = start + PUD_SIZE; 6929 6930 #ifdef CONFIG_USERFAULTFD 6931 if (uffd_disable_huge_pmd_share(vma)) 6932 return false; 6933 #endif 6934 /* 6935 * check on proper vm_flags and page table alignment 6936 */ 6937 if (!(vma->vm_flags & VM_MAYSHARE)) 6938 return false; 6939 if (!vma->vm_private_data) /* vma lock required for sharing */ 6940 return false; 6941 if (!range_in_vma(vma, start, end)) 6942 return false; 6943 return true; 6944 } 6945 6946 /* 6947 * Determine if start,end range within vma could be mapped by shared pmd. 6948 * If yes, adjust start and end to cover range associated with possible 6949 * shared pmd mappings. 6950 */ 6951 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, 6952 unsigned long *start, unsigned long *end) 6953 { 6954 unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE), 6955 v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE); 6956 6957 /* 6958 * vma needs to span at least one aligned PUD size, and the range 6959 * must be at least partially within in. 6960 */ 6961 if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) || 6962 (*end <= v_start) || (*start >= v_end)) 6963 return; 6964 6965 /* Extend the range to be PUD aligned for a worst case scenario */ 6966 if (*start > v_start) 6967 *start = ALIGN_DOWN(*start, PUD_SIZE); 6968 6969 if (*end < v_end) 6970 *end = ALIGN(*end, PUD_SIZE); 6971 } 6972 6973 /* 6974 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc() 6975 * and returns the corresponding pte. While this is not necessary for the 6976 * !shared pmd case because we can allocate the pmd later as well, it makes the 6977 * code much cleaner. pmd allocation is essential for the shared case because 6978 * pud has to be populated inside the same i_mmap_rwsem section - otherwise 6979 * racing tasks could either miss the sharing (see huge_pte_offset) or select a 6980 * bad pmd for sharing. 6981 */ 6982 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, 6983 unsigned long addr, pud_t *pud) 6984 { 6985 struct address_space *mapping = vma->vm_file->f_mapping; 6986 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + 6987 vma->vm_pgoff; 6988 struct vm_area_struct *svma; 6989 unsigned long saddr; 6990 pte_t *spte = NULL; 6991 pte_t *pte; 6992 6993 i_mmap_lock_read(mapping); 6994 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { 6995 if (svma == vma) 6996 continue; 6997 6998 saddr = page_table_shareable(svma, vma, addr, idx); 6999 if (saddr) { 7000 spte = hugetlb_walk(svma, saddr, 7001 vma_mmu_pagesize(svma)); 7002 if (spte) { 7003 get_page(virt_to_page(spte)); 7004 break; 7005 } 7006 } 7007 } 7008 7009 if (!spte) 7010 goto out; 7011 7012 spin_lock(&mm->page_table_lock); 7013 if (pud_none(*pud)) { 7014 pud_populate(mm, pud, 7015 (pmd_t *)((unsigned long)spte & PAGE_MASK)); 7016 mm_inc_nr_pmds(mm); 7017 } else { 7018 put_page(virt_to_page(spte)); 7019 } 7020 spin_unlock(&mm->page_table_lock); 7021 out: 7022 pte = (pte_t *)pmd_alloc(mm, pud, addr); 7023 i_mmap_unlock_read(mapping); 7024 return pte; 7025 } 7026 7027 /* 7028 * unmap huge page backed by shared pte. 7029 * 7030 * Hugetlb pte page is ref counted at the time of mapping. 
If pte is shared 7031 * indicated by page_count > 1, unmap is achieved by clearing pud and 7032 * decrementing the ref count. If count == 1, the pte page is not shared. 7033 * 7034 * Called with page table lock held. 7035 * 7036 * returns: 1 successfully unmapped a shared pte page 7037 * 0 the underlying pte page is not shared, or it is the last user 7038 */ 7039 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, 7040 unsigned long addr, pte_t *ptep) 7041 { 7042 pgd_t *pgd = pgd_offset(mm, addr); 7043 p4d_t *p4d = p4d_offset(pgd, addr); 7044 pud_t *pud = pud_offset(p4d, addr); 7045 7046 i_mmap_assert_write_locked(vma->vm_file->f_mapping); 7047 hugetlb_vma_assert_locked(vma); 7048 BUG_ON(page_count(virt_to_page(ptep)) == 0); 7049 if (page_count(virt_to_page(ptep)) == 1) 7050 return 0; 7051 7052 pud_clear(pud); 7053 put_page(virt_to_page(ptep)); 7054 mm_dec_nr_pmds(mm); 7055 return 1; 7056 } 7057 7058 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ 7059 7060 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, 7061 unsigned long addr, pud_t *pud) 7062 { 7063 return NULL; 7064 } 7065 7066 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, 7067 unsigned long addr, pte_t *ptep) 7068 { 7069 return 0; 7070 } 7071 7072 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, 7073 unsigned long *start, unsigned long *end) 7074 { 7075 } 7076 7077 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr) 7078 { 7079 return false; 7080 } 7081 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ 7082 7083 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB 7084 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, 7085 unsigned long addr, unsigned long sz) 7086 { 7087 pgd_t *pgd; 7088 p4d_t *p4d; 7089 pud_t *pud; 7090 pte_t *pte = NULL; 7091 7092 pgd = pgd_offset(mm, addr); 7093 p4d = p4d_alloc(mm, pgd, addr); 7094 if (!p4d) 7095 return NULL; 7096 pud = pud_alloc(mm, p4d, addr); 7097 if (pud) { 7098 if (sz == PUD_SIZE) { 7099 pte = (pte_t *)pud; 7100 } else { 7101 BUG_ON(sz != PMD_SIZE); 7102 if (want_pmd_share(vma, addr) && pud_none(*pud)) 7103 pte = huge_pmd_share(mm, vma, addr, pud); 7104 else 7105 pte = (pte_t *)pmd_alloc(mm, pud, addr); 7106 } 7107 } 7108 7109 if (pte) { 7110 pte_t pteval = ptep_get_lockless(pte); 7111 7112 BUG_ON(pte_present(pteval) && !pte_huge(pteval)); 7113 } 7114 7115 return pte; 7116 } 7117 7118 /* 7119 * huge_pte_offset() - Walk the page table to resolve the hugepage 7120 * entry at address @addr 7121 * 7122 * Return: Pointer to page table entry (PUD or PMD) for 7123 * address @addr, or NULL if a !p*d_present() entry is encountered and the 7124 * size @sz doesn't match the hugepage size at this level of the page 7125 * table. 
7126 */ 7127 pte_t *huge_pte_offset(struct mm_struct *mm, 7128 unsigned long addr, unsigned long sz) 7129 { 7130 pgd_t *pgd; 7131 p4d_t *p4d; 7132 pud_t *pud; 7133 pmd_t *pmd; 7134 7135 pgd = pgd_offset(mm, addr); 7136 if (!pgd_present(*pgd)) 7137 return NULL; 7138 p4d = p4d_offset(pgd, addr); 7139 if (!p4d_present(*p4d)) 7140 return NULL; 7141 7142 pud = pud_offset(p4d, addr); 7143 if (sz == PUD_SIZE) 7144 /* must be pud huge, non-present or none */ 7145 return (pte_t *)pud; 7146 if (!pud_present(*pud)) 7147 return NULL; 7148 /* must have a valid entry and size to go further */ 7149 7150 pmd = pmd_offset(pud, addr); 7151 /* must be pmd huge, non-present or none */ 7152 return (pte_t *)pmd; 7153 } 7154 7155 /* 7156 * Return a mask that can be used to update an address to the last huge 7157 * page in a page table page mapping size. Used to skip non-present 7158 * page table entries when linearly scanning address ranges. Architectures 7159 * with unique huge page to page table relationships can define their own 7160 * version of this routine. 7161 */ 7162 unsigned long hugetlb_mask_last_page(struct hstate *h) 7163 { 7164 unsigned long hp_size = huge_page_size(h); 7165 7166 if (hp_size == PUD_SIZE) 7167 return P4D_SIZE - PUD_SIZE; 7168 else if (hp_size == PMD_SIZE) 7169 return PUD_SIZE - PMD_SIZE; 7170 else 7171 return 0UL; 7172 } 7173 7174 #else 7175 7176 /* See description above. Architectures can provide their own version. */ 7177 __weak unsigned long hugetlb_mask_last_page(struct hstate *h) 7178 { 7179 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE 7180 if (huge_page_size(h) == PMD_SIZE) 7181 return PUD_SIZE - PMD_SIZE; 7182 #endif 7183 return 0UL; 7184 } 7185 7186 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */ 7187 7188 /* 7189 * These functions are overwritable if your architecture needs its own 7190 * behavior. 
7191 */ 7192 bool isolate_hugetlb(struct folio *folio, struct list_head *list) 7193 { 7194 bool ret = true; 7195 7196 spin_lock_irq(&hugetlb_lock); 7197 if (!folio_test_hugetlb(folio) || 7198 !folio_test_hugetlb_migratable(folio) || 7199 !folio_try_get(folio)) { 7200 ret = false; 7201 goto unlock; 7202 } 7203 folio_clear_hugetlb_migratable(folio); 7204 list_move_tail(&folio->lru, list); 7205 unlock: 7206 spin_unlock_irq(&hugetlb_lock); 7207 return ret; 7208 } 7209 7210 int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison) 7211 { 7212 int ret = 0; 7213 7214 *hugetlb = false; 7215 spin_lock_irq(&hugetlb_lock); 7216 if (folio_test_hugetlb(folio)) { 7217 *hugetlb = true; 7218 if (folio_test_hugetlb_freed(folio)) 7219 ret = 0; 7220 else if (folio_test_hugetlb_migratable(folio) || unpoison) 7221 ret = folio_try_get(folio); 7222 else 7223 ret = -EBUSY; 7224 } 7225 spin_unlock_irq(&hugetlb_lock); 7226 return ret; 7227 } 7228 7229 int get_huge_page_for_hwpoison(unsigned long pfn, int flags, 7230 bool *migratable_cleared) 7231 { 7232 int ret; 7233 7234 spin_lock_irq(&hugetlb_lock); 7235 ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared); 7236 spin_unlock_irq(&hugetlb_lock); 7237 return ret; 7238 } 7239 7240 void folio_putback_active_hugetlb(struct folio *folio) 7241 { 7242 spin_lock_irq(&hugetlb_lock); 7243 folio_set_hugetlb_migratable(folio); 7244 list_move_tail(&folio->lru, &(folio_hstate(folio))->hugepage_activelist); 7245 spin_unlock_irq(&hugetlb_lock); 7246 folio_put(folio); 7247 } 7248 7249 void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason) 7250 { 7251 struct hstate *h = folio_hstate(old_folio); 7252 7253 hugetlb_cgroup_migrate(old_folio, new_folio); 7254 set_page_owner_migrate_reason(&new_folio->page, reason); 7255 7256 /* 7257 * transfer temporary state of the new hugetlb folio. This is 7258 * reverse to other transitions because the newpage is going to 7259 * be final while the old one will be freed so it takes over 7260 * the temporary status. 7261 * 7262 * Also note that we have to transfer the per-node surplus state 7263 * here as well otherwise the global surplus count will not match 7264 * the per-node's. 7265 */ 7266 if (folio_test_hugetlb_temporary(new_folio)) { 7267 int old_nid = folio_nid(old_folio); 7268 int new_nid = folio_nid(new_folio); 7269 7270 folio_set_hugetlb_temporary(old_folio); 7271 folio_clear_hugetlb_temporary(new_folio); 7272 7273 7274 /* 7275 * There is no need to transfer the per-node surplus state 7276 * when we do not cross the node. 7277 */ 7278 if (new_nid == old_nid) 7279 return; 7280 spin_lock_irq(&hugetlb_lock); 7281 if (h->surplus_huge_pages_node[old_nid]) { 7282 h->surplus_huge_pages_node[old_nid]--; 7283 h->surplus_huge_pages_node[new_nid]++; 7284 } 7285 spin_unlock_irq(&hugetlb_lock); 7286 } 7287 } 7288 7289 static void hugetlb_unshare_pmds(struct vm_area_struct *vma, 7290 unsigned long start, 7291 unsigned long end) 7292 { 7293 struct hstate *h = hstate_vma(vma); 7294 unsigned long sz = huge_page_size(h); 7295 struct mm_struct *mm = vma->vm_mm; 7296 struct mmu_notifier_range range; 7297 unsigned long address; 7298 spinlock_t *ptl; 7299 pte_t *ptep; 7300 7301 if (!(vma->vm_flags & VM_MAYSHARE)) 7302 return; 7303 7304 if (start >= end) 7305 return; 7306 7307 flush_cache_range(vma, start, end); 7308 /* 7309 * No need to call adjust_range_if_pmd_sharing_possible(), because 7310 * we have already done the PUD_SIZE alignment. 
7311 */ 7312 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, 7313 start, end); 7314 mmu_notifier_invalidate_range_start(&range); 7315 hugetlb_vma_lock_write(vma); 7316 i_mmap_lock_write(vma->vm_file->f_mapping); 7317 for (address = start; address < end; address += PUD_SIZE) { 7318 ptep = hugetlb_walk(vma, address, sz); 7319 if (!ptep) 7320 continue; 7321 ptl = huge_pte_lock(h, mm, ptep); 7322 huge_pmd_unshare(mm, vma, address, ptep); 7323 spin_unlock(ptl); 7324 } 7325 flush_hugetlb_tlb_range(vma, start, end); 7326 i_mmap_unlock_write(vma->vm_file->f_mapping); 7327 hugetlb_vma_unlock_write(vma); 7328 /* 7329 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs(), see 7330 * Documentation/mm/mmu_notifier.rst. 7331 */ 7332 mmu_notifier_invalidate_range_end(&range); 7333 } 7334 7335 /* 7336 * This function will unconditionally remove all the shared pmd pgtable entries 7337 * within the specific vma for a hugetlbfs memory range. 7338 */ 7339 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) 7340 { 7341 hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE), 7342 ALIGN_DOWN(vma->vm_end, PUD_SIZE)); 7343 } 7344 7345 #ifdef CONFIG_CMA 7346 static bool cma_reserve_called __initdata; 7347 7348 static int __init cmdline_parse_hugetlb_cma(char *p) 7349 { 7350 int nid, count = 0; 7351 unsigned long tmp; 7352 char *s = p; 7353 7354 while (*s) { 7355 if (sscanf(s, "%lu%n", &tmp, &count) != 1) 7356 break; 7357 7358 if (s[count] == ':') { 7359 if (tmp >= MAX_NUMNODES) 7360 break; 7361 nid = array_index_nospec(tmp, MAX_NUMNODES); 7362 7363 s += count + 1; 7364 tmp = memparse(s, &s); 7365 hugetlb_cma_size_in_node[nid] = tmp; 7366 hugetlb_cma_size += tmp; 7367 7368 /* 7369 * Skip the separator if have one, otherwise 7370 * break the parsing. 7371 */ 7372 if (*s == ',') 7373 s++; 7374 else 7375 break; 7376 } else { 7377 hugetlb_cma_size = memparse(p, &p); 7378 break; 7379 } 7380 } 7381 7382 return 0; 7383 } 7384 7385 early_param("hugetlb_cma", cmdline_parse_hugetlb_cma); 7386 7387 void __init hugetlb_cma_reserve(int order) 7388 { 7389 unsigned long size, reserved, per_node; 7390 bool node_specific_cma_alloc = false; 7391 int nid; 7392 7393 cma_reserve_called = true; 7394 7395 if (!hugetlb_cma_size) 7396 return; 7397 7398 for (nid = 0; nid < MAX_NUMNODES; nid++) { 7399 if (hugetlb_cma_size_in_node[nid] == 0) 7400 continue; 7401 7402 if (!node_online(nid)) { 7403 pr_warn("hugetlb_cma: invalid node %d specified\n", nid); 7404 hugetlb_cma_size -= hugetlb_cma_size_in_node[nid]; 7405 hugetlb_cma_size_in_node[nid] = 0; 7406 continue; 7407 } 7408 7409 if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) { 7410 pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n", 7411 nid, (PAGE_SIZE << order) / SZ_1M); 7412 hugetlb_cma_size -= hugetlb_cma_size_in_node[nid]; 7413 hugetlb_cma_size_in_node[nid] = 0; 7414 } else { 7415 node_specific_cma_alloc = true; 7416 } 7417 } 7418 7419 /* Validate the CMA size again in case some invalid nodes specified. */ 7420 if (!hugetlb_cma_size) 7421 return; 7422 7423 if (hugetlb_cma_size < (PAGE_SIZE << order)) { 7424 pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n", 7425 (PAGE_SIZE << order) / SZ_1M); 7426 hugetlb_cma_size = 0; 7427 return; 7428 } 7429 7430 if (!node_specific_cma_alloc) { 7431 /* 7432 * If 3 GB area is requested on a machine with 4 numa nodes, 7433 * let's allocate 1 GB on first three nodes and ignore the last one. 
7434 */ 7435 per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes); 7436 pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n", 7437 hugetlb_cma_size / SZ_1M, per_node / SZ_1M); 7438 } 7439 7440 reserved = 0; 7441 for_each_online_node(nid) { 7442 int res; 7443 char name[CMA_MAX_NAME]; 7444 7445 if (node_specific_cma_alloc) { 7446 if (hugetlb_cma_size_in_node[nid] == 0) 7447 continue; 7448 7449 size = hugetlb_cma_size_in_node[nid]; 7450 } else { 7451 size = min(per_node, hugetlb_cma_size - reserved); 7452 } 7453 7454 size = round_up(size, PAGE_SIZE << order); 7455 7456 snprintf(name, sizeof(name), "hugetlb%d", nid); 7457 /* 7458 * Note that 'order per bit' is based on smallest size that 7459 * may be returned to CMA allocator in the case of 7460 * huge page demotion. 7461 */ 7462 res = cma_declare_contiguous_nid(0, size, 0, 7463 PAGE_SIZE << HUGETLB_PAGE_ORDER, 7464 0, false, name, 7465 &hugetlb_cma[nid], nid); 7466 if (res) { 7467 pr_warn("hugetlb_cma: reservation failed: err %d, node %d", 7468 res, nid); 7469 continue; 7470 } 7471 7472 reserved += size; 7473 pr_info("hugetlb_cma: reserved %lu MiB on node %d\n", 7474 size / SZ_1M, nid); 7475 7476 if (reserved >= hugetlb_cma_size) 7477 break; 7478 } 7479 7480 if (!reserved) 7481 /* 7482 * hugetlb_cma_size is used to determine if allocations from 7483 * cma are possible. Set to zero if no cma regions are set up. 7484 */ 7485 hugetlb_cma_size = 0; 7486 } 7487 7488 static void __init hugetlb_cma_check(void) 7489 { 7490 if (!hugetlb_cma_size || cma_reserve_called) 7491 return; 7492 7493 pr_warn("hugetlb_cma: the option isn't supported by current arch\n"); 7494 } 7495 7496 #endif /* CONFIG_CMA */ 7497
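/*
 * Illustrative summary of the boot-time syntax handled by
 * cmdline_parse_hugetlb_cma() above: either a single global size, or
 * per-node "<nid>:<size>" pairs separated by commas. Sizes are parsed
 * with memparse(), so K/M/G suffixes work.
 *
 *   hugetlb_cma=2G          reserve a 2 GiB area split across online nodes
 *   hugetlb_cma=0:1G,1:1G   reserve 1 GiB on node 0 and 1 GiB on node 1
 *
 * The reservation itself is performed by hugetlb_cma_reserve() at boot.
 */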