// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>
#include <linux/numa.h>
#include <linux/llist.h>
#include <linux/cma.h>
#include <linux/migrate.h>
#include <linux/nospec.h>
#include <linux/delayacct.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/page_owner.h>
#include "internal.h"
#include "hugetlb_vmemmap.h"

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

#ifdef CONFIG_CMA
static struct cma *hugetlb_cma[MAX_NUMNODES];
static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
static bool hugetlb_cma_page(struct page *page, unsigned int order)
{
	return cma_pages_valid(hugetlb_cma[page_to_nid(page)], page,
				1 << order);
}
#else
static bool hugetlb_cma_page(struct page *page, unsigned int order)
{
	return false;
}
#endif
static unsigned long hugetlb_cma_size __initdata;

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static bool __initdata parsed_valid_hugepagesz = true;
static bool __initdata parsed_default_hugepagesz;
static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);

static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
	if (spool->count)
		return false;
	if (spool->max_hpages != -1)
		return spool->used_hpages == 0;
	if (spool->min_hpages != -1)
		return spool->rsv_hpages == spool->min_hpages;

	return true;
}

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
						unsigned long irq_flags)
{
	spin_unlock_irqrestore(&spool->lock, irq_flags);

	/* If no pages are used, and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool */
	if (subpool_is_free(spool)) {
		if (spool->min_hpages != -1)
			hugetlb_acct_memory(spool->hstate,
						-spool->min_hpages);
		kfree(spool);
	}
}

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = max_hpages;
	spool->hstate = h;
	spool->min_hpages = min_hpages;

	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	unsigned long flags;

	spin_lock_irqsave(&spool->lock, flags);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool, flags);
}

/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return ret;

	spin_lock_irq(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;
			goto unlock_ret;
		}
	}

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->rsv_hpages) {
		if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those already taken on
			 * behalf of subpool.  Return difference.
			 */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;	/* reserves already accounted for */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock_irq(&spool->lock);
	return ret;
}
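/*
 * For illustration (hypothetical values): a subpool created with
 * max_hpages == -1 and min_hpages == 10 starts with rsv_hpages == 10.
 * hugepage_subpool_get_pages(spool, 3) is fully covered by the reserve:
 * it returns 0 and rsv_hpages drops to 7.  A later call with delta == 12
 * is only partially covered: it returns 12 - 7 == 5, the number of pages
 * the global pool must additionally provide, and rsv_hpages drops to 0.
 */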
/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;
	unsigned long flags;

	if (!spool)
		return delta;

	spin_lock_irqsave(&spool->lock, flags);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	 /* minimum size accounting */
	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
	unlock_or_release_subpool(spool, flags);

	return ret;
}

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}

/* Helper that removes a struct file_region from the resv_map cache and returns
 * it for use.
 */
static struct file_region *
get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
{
	struct file_region *nrg = NULL;

	VM_BUG_ON(resv->region_cache_count <= 0);

	resv->region_cache_count--;
	nrg = list_first_entry(&resv->region_cache, struct file_region, link);
	list_del(&nrg->link);

	nrg->from = from;
	nrg->to = to;

	return nrg;
}

static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
					      struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	nrg->reservation_counter = rg->reservation_counter;
	nrg->css = rg->css;
	if (rg->css)
		css_get(rg->css);
#endif
}

/* Helper that records hugetlb_cgroup uncharge info. */
static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
						struct hstate *h,
						struct resv_map *resv,
						struct file_region *nrg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (h_cg) {
		nrg->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		nrg->css = &h_cg->css;
		/*
		 * The caller will hold exactly one h_cg->css reference for the
		 * whole contiguous reservation region. But this area might be
		 * scattered when there are already some file_regions residing
		 * in it. As a result, many file_regions may share only one css
		 * reference. In order to ensure that one file_region must hold
		 * exactly one h_cg->css reference, we should do css_get for
		 * each file_region and leave the reference held by caller
		 * untouched.
		 */
		css_get(&h_cg->css);
		if (!resv->pages_per_hpage)
			resv->pages_per_hpage = pages_per_huge_page(h);
		/* pages_per_hpage should be the same for all entries in
		 * a resv_map.
		 */
		VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
	} else {
		nrg->reservation_counter = NULL;
		nrg->css = NULL;
	}
#endif
}

static void put_uncharge_info(struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (rg->css)
		css_put(rg->css);
#endif
}

static bool has_same_uncharge_info(struct file_region *rg,
				   struct file_region *org)
{
#ifdef CONFIG_CGROUP_HUGETLB
	return rg->reservation_counter == org->reservation_counter &&
	       rg->css == org->css;

#else
	return true;
#endif
}

static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
{
	struct file_region *nrg = NULL, *prg = NULL;

	prg = list_prev_entry(rg, link);
	if (&prg->link != &resv->regions && prg->to == rg->from &&
	    has_same_uncharge_info(prg, rg)) {
		prg->to = rg->to;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);

		rg = prg;
	}

	nrg = list_next_entry(rg, link);
	if (&nrg->link != &resv->regions && nrg->from == rg->to &&
	    has_same_uncharge_info(nrg, rg)) {
		nrg->from = rg->from;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);
	}
}

static inline long
hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
		     long to, struct hstate *h, struct hugetlb_cgroup *cg,
		     long *regions_needed)
{
	struct file_region *nrg;

	if (!regions_needed) {
		nrg = get_file_region_entry_from_cache(map, from, to);
		record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
		list_add(&nrg->link, rg);
		coalesce_file_region(map, nrg);
	} else
		*regions_needed += 1;

	return to - from;
}

/*
 * Must be called with resv->lock held.
 *
 * Calling this with regions_needed != NULL will count the number of pages
 * to be added but will not modify the linked list. And regions_needed will
 * indicate the number of file_regions needed in the cache to carry out the
 * addition of regions for this range.
 */
static long add_reservation_in_range(struct resv_map *resv, long f, long t,
				     struct hugetlb_cgroup *h_cg,
				     struct hstate *h, long *regions_needed)
{
	long add = 0;
	struct list_head *head = &resv->regions;
	long last_accounted_offset = f;
	struct file_region *iter, *trg = NULL;
	struct list_head *rg = NULL;

	if (regions_needed)
		*regions_needed = 0;

	/* In this loop, we essentially handle an entry for the range
	 * [last_accounted_offset, iter->from), at every iteration, with some
	 * bounds checking.
	 */
	list_for_each_entry_safe(iter, trg, head, link) {
		/* Skip irrelevant regions that start before our range. */
		if (iter->from < f) {
			/* If this region ends after the last accounted offset,
			 * then we need to update last_accounted_offset.
			 */
			if (iter->to > last_accounted_offset)
				last_accounted_offset = iter->to;
			continue;
		}

		/* When we find a region that starts beyond our range, we've
		 * finished.
		 */
		if (iter->from >= t) {
			rg = iter->link.prev;
			break;
		}

		/* Add an entry for last_accounted_offset -> iter->from, and
		 * update last_accounted_offset.
		 */
		if (iter->from > last_accounted_offset)
			add += hugetlb_resv_map_add(resv, iter->link.prev,
						    last_accounted_offset,
						    iter->from, h, h_cg,
						    regions_needed);

		last_accounted_offset = iter->to;
	}

	/* Handle the case where our range extends beyond
	 * last_accounted_offset.
	 */
	if (!rg)
		rg = head->prev;
	if (last_accounted_offset < t)
		add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
					    t, h, h_cg, regions_needed);

	return add;
}

/* Must be called with resv->lock acquired. Will drop lock to allocate entries.
 */
static int allocate_file_region_entries(struct resv_map *resv,
					int regions_needed)
	__must_hold(&resv->lock)
{
	struct list_head allocated_regions;
	int to_allocate = 0, i = 0;
	struct file_region *trg = NULL, *rg = NULL;

	VM_BUG_ON(regions_needed < 0);

	INIT_LIST_HEAD(&allocated_regions);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * the number of in progress add operations plus regions_needed.
	 *
	 * This is a while loop because when we drop the lock, some other call
	 * to region_add or region_del may have consumed some region_entries,
	 * so we keep looping here until we finally have enough entries for
	 * (adds_in_progress + regions_needed).
	 */
	while (resv->region_cache_count <
	       (resv->adds_in_progress + regions_needed)) {
		to_allocate = resv->adds_in_progress + regions_needed -
			      resv->region_cache_count;

		/* At this point, we should have enough entries in the cache
		 * for all the existing adds_in_progress. We should only be
		 * needing to allocate for regions_needed.
		 */
		VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);

		spin_unlock(&resv->lock);
		for (i = 0; i < to_allocate; i++) {
			trg = kmalloc(sizeof(*trg), GFP_KERNEL);
			if (!trg)
				goto out_of_memory;
			list_add(&trg->link, &allocated_regions);
		}

		spin_lock(&resv->lock);

		list_splice(&allocated_regions, &resv->region_cache);
		resv->region_cache_count += to_allocate;
	}

	return 0;

out_of_memory:
	list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
		list_del(&rg->link);
		kfree(rg);
	}
	return -ENOMEM;
}
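/*
 * For illustration (hypothetical contents): if the reserve map already
 * holds the single region [2, 4) and add_reservation_in_range() is called
 * with f == 0 and t == 6, it adds (or, when regions_needed != NULL, merely
 * counts) entries for [0, 2) and [4, 6) and returns 4, the number of pages
 * in [f, t) that were not yet represented.
 */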
/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  Regions will be taken from the cache to fill in this range.
 * Sufficient regions should exist in the cache due to the previous
 * call to region_chg with the same range, but in some cases the cache will not
 * have sufficient entries due to races with other code doing region_add or
 * region_del.  The extra needed entries will be allocated.
 *
 * regions_needed is the out value provided by a previous call to region_chg.
 *
 * Return the number of new huge pages added to the map.  This number is greater
 * than or equal to zero.  If file_region entries needed to be allocated for
 * this operation and we were not able to allocate, it returns -ENOMEM.
 * region_add of regions of length 1 never allocates file_regions and cannot
 * fail; region_chg will always allocate at least 1 entry and a region_add for
 * 1 page will only require at most 1 entry.
 */
static long region_add(struct resv_map *resv, long f, long t,
		       long in_regions_needed, struct hstate *h,
		       struct hugetlb_cgroup *h_cg)
{
	long add = 0, actual_regions_needed = 0;

	spin_lock(&resv->lock);
retry:

	/* Count how many regions are actually needed to execute this add. */
	add_reservation_in_range(resv, f, t, NULL, NULL,
				 &actual_regions_needed);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * this add operation. Note that actual_regions_needed may be greater
	 * than in_regions_needed, as the resv_map may have been modified since
	 * the region_chg call. In this case, we need to make sure that we
	 * allocate extra entries, such that we have enough for all the
	 * existing adds_in_progress, plus the excess needed for this
	 * operation.
	 */
	if (actual_regions_needed > in_regions_needed &&
	    resv->region_cache_count <
		    resv->adds_in_progress +
			    (actual_regions_needed - in_regions_needed)) {
		/* region_add operation of range 1 should never need to
		 * allocate file_region entries.
		 */
		VM_BUG_ON(t - f <= 1);

		if (allocate_file_region_entries(
			    resv, actual_regions_needed - in_regions_needed)) {
			return -ENOMEM;
		}

		goto retry;
	}

	add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);

	resv->adds_in_progress -= in_regions_needed;

	spin_unlock(&resv->lock);
	return add;
}

/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  A number of new file_region structures is added to the cache as a
 * placeholder, for the subsequent region_add call to use. At least 1
 * file_region structure is added.
 *
 * out_regions_needed is the number of regions added to the
 * resv->adds_in_progress.  This value needs to be provided to a follow up call
 * to region_add or region_abort for proper accounting.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t).  This number is greater or equal to
 * zero.  -ENOMEM is returned if a new file_region structure or cache entry
 * is needed and can not be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t,
		       long *out_regions_needed)
{
	long chg = 0;

	spin_lock(&resv->lock);

	/* Count how many hugepages in this range are NOT represented. */
	chg = add_reservation_in_range(resv, f, t, NULL, NULL,
				       out_regions_needed);

	if (*out_regions_needed == 0)
		*out_regions_needed = 1;

	if (allocate_file_region_entries(resv, *out_regions_needed))
		return -ENOMEM;

	resv->adds_in_progress += *out_regions_needed;

	spin_unlock(&resv->lock);
	return chg;
}
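/*
 * Illustrative calling sequence, simplified from how callers such as
 * hugetlb_reserve_pages() use these helpers.  The reservation is made in
 * two steps so that file_region allocation happens before any counters
 * are committed:
 *
 *	chg = region_chg(resv, f, t, &regions_needed);
 *	if (chg < 0)
 *		return error;
 *	... charge cgroups / adjust global counters based on chg ...
 *	on failure:  region_abort(resv, f, t, regions_needed);
 *	on success:  region_add(resv, f, t, regions_needed, h, h_cg);
 */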
/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter. regions_needed
 * is the value returned by the region_chg call, it is used to decrement
 * the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t,
			 long regions_needed)
{
	spin_lock(&resv->lock);
	VM_BUG_ON(!resv->region_cache_count);
	resv->adds_in_progress -= regions_needed;
	spin_unlock(&resv->lock);
}

/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	struct file_region *nrg = NULL;
	long del = 0;

retry:
	spin_lock(&resv->lock);
	list_for_each_entry_safe(rg, trg, head, link) {
		/*
		 * Skip regions before the range to be deleted.  file_region
		 * ranges are normally of the form [from, to).  However, there
		 * may be a "placeholder" entry in the map which is of the form
		 * (from, to) with from == to.  Check for placeholder entries
		 * at the beginning of the range to be deleted.
		 */
		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
			continue;

		if (rg->from >= t)
			break;

		if (f > rg->from && t < rg->to) { /* Must split region */
			/*
			 * Check for an entry in the cache before dropping
			 * lock and attempting allocation.
			 */
			if (!nrg &&
			    resv->region_cache_count > resv->adds_in_progress) {
				nrg = list_first_entry(&resv->region_cache,
						       struct file_region,
						       link);
				list_del(&nrg->link);
				resv->region_cache_count--;
			}

			if (!nrg) {
				spin_unlock(&resv->lock);
				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
				if (!nrg)
					return -ENOMEM;
				goto retry;
			}

			del += t - f;
			hugetlb_cgroup_uncharge_file_region(
				resv, rg, t - f, false);

			/* New entry for end of split region */
			nrg->from = t;
			nrg->to = rg->to;

			copy_hugetlb_cgroup_uncharge_info(nrg, rg);

			INIT_LIST_HEAD(&nrg->link);

			/* Original entry is trimmed */
			rg->to = f;

			list_add(&nrg->link, &rg->link);
			nrg = NULL;
			break;
		}

		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
			del += rg->to - rg->from;
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - rg->from, true);
			list_del(&rg->link);
			kfree(rg);
			continue;
		}

		if (f <= rg->from) {	/* Trim beginning of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    t - rg->from, false);

			del += t - rg->from;
			rg->from = t;
		} else {		/* Trim end of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - f, false);

			del += rg->to - f;
			rg->to = f;
		}
	}

	spin_unlock(&resv->lock);
	kfree(nrg);
	return del;
}

/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was freed
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode)
{
	struct hugepage_subpool *spool = subpool_inode(inode);
	long rsv_adjust;
	bool reserved = false;

	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
	if (rsv_adjust > 0) {
		struct hstate *h = hstate_inode(inode);

		if (!hugetlb_acct_memory(h, 1))
			reserved = true;
	} else if (!rsv_adjust) {
		reserved = true;
	}

	if (!reserved)
		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
}

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	spin_unlock(&resv->lock);

	return chg;
}
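/*
 * For illustration (hypothetical contents): with a reserve map holding
 * the regions [0, 2) and [5, 8), region_count(resv, 0, 10) returns 5
 * (2 pages from the first region plus 3 from the second), while
 * region_del(resv, 6, 7) must split the second region into [5, 6) and
 * [7, 8) and returns 1.
 */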
/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}
EXPORT_SYMBOL_GPL(linear_hugepage_index);

/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->pagesize)
		return vma->vm_ops->pagesize(vma);
	return PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific 'strong'
 * version of this symbol is required.
 */
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK     (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
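/*
 * Illustration of the encoding used below (example values, not additional
 * functionality): for the mapping that owns a MAP_PRIVATE reservation,
 * vma->vm_private_data ends up holding the resv_map pointer with flag
 * bits such as HPAGE_RESV_OWNER set in its low bits, i.e.
 *
 *	vm_private_data == (unsigned long)resv_map | HPAGE_RESV_OWNER
 *
 * set_vma_resv_map() and set_vma_resv_flags() below update the pointer
 * part and the flag part of this word independently.
 */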
/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file, this region map represents the backing file
 * pages which have ever had a reservation assigned, and this persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it, this region map represents those offsets which have consumed
 * reservation ie. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
					unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

static void
resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
					  struct hugetlb_cgroup *h_cg,
					  struct hstate *h)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (!h_cg || !h) {
		resv_map->reservation_counter = NULL;
		resv_map->pages_per_hpage = 0;
		resv_map->css = NULL;
	} else {
		resv_map->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		resv_map->pages_per_hpage = pages_per_huge_page(h);
		resv_map->css = &h_cg->css;
	}
#endif
}

struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

	if (!resv_map || !rg) {
		kfree(resv_map);
		kfree(rg);
		return NULL;
	}

	kref_init(&resv_map->refs);
	spin_lock_init(&resv_map->lock);
	INIT_LIST_HEAD(&resv_map->regions);

	resv_map->adds_in_progress = 0;
	/*
	 * Initialize these to 0. On shared mappings, 0's here indicate these
	 * fields don't do cgroup accounting. On private mappings, these will be
	 * re-initialized to the proper values, to indicate that hugetlb cgroup
	 * reservations are to be un-charged from here.
	 */
	resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);

	INIT_LIST_HEAD(&resv_map->region_cache);
	list_add(&rg->link, &resv_map->region_cache);
	resv_map->region_cache_count = 1;

	return resv_map;
}

void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
	struct list_head *head = &resv_map->region_cache;
	struct file_region *rg, *trg;

	/* Clear out any active regions before we release the map. */
	region_del(resv_map, 0, LONG_MAX);

	/* ... and any entries left in the cache */
	list_for_each_entry_safe(rg, trg, head, link) {
		list_del(&rg->link);
		kfree(rg);
	}

	VM_BUG_ON(resv_map->adds_in_progress);

	kfree(resv_map);
}

static inline struct resv_map *inode_resv_map(struct inode *inode)
{
	/*
	 * At inode evict time, i_mapping may not point to the original
	 * address space within the inode.  This original address space
	 * contains the pointer to the resv_map.  So, always use the
	 * address space embedded within the inode.
	 * The VERY common case is inode->mapping == &inode->i_data but,
	 * this may not be true for device special inodes.
	 */
	return (struct resv_map *)(&inode->i_data)->private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (vma->vm_flags & VM_MAYSHARE) {
		struct address_space *mapping = vma->vm_file->f_mapping;
		struct inode *inode = mapping->host;

		return inode_resv_map(inode);

	} else {
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	}
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

	return (get_vma_private_data(vma) & flag) != 0;
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (!(vma->vm_flags & VM_MAYSHARE))
		vma->vm_private_data = (void *)0;
}

/*
 * Reset and decrement one ref on hugepage private reservation.
 * Called with mm->mmap_sem writer semaphore held.
 * This function should be only used by move_vma() and operate on
 * same sized vma. It should never come here with last ref on the
 * reservation.
 */
void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	/*
	 * Clear the old hugetlb private page reservation.
	 * It has already been transferred to new_vma.
	 *
	 * During a mremap() operation of a hugetlb vma we call move_vma()
	 * which copies vma into new_vma and unmaps vma. After the copy
	 * operation both new_vma and vma share a reference to the resv_map
	 * struct, and at that point vma is about to be unmapped. We don't
	 * want to return the reservation to the pool at unmap of vma because
	 * the reservation still lives on in new_vma, so simply decrement the
	 * ref here and remove the resv_map reference from this vma.
	 */
	struct resv_map *reservations = vma_resv_map(vma);

	if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
		kref_put(&reservations->refs, resv_map_release);
	}

	reset_vma_resv_huge_pages(vma);
}

/* Returns true if the VMA has associated reserve pages */
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{
	if (vma->vm_flags & VM_NORESERVE) {
		/*
		 * This address is already reserved by another process
		 * (chg == 0), so we should decrement the reserved count.
		 * Without decrementing, the reserve count remains after
		 * releasing the inode, because this allocated page will go
		 * into the page cache and is regarded as coming from the
		 * reserved pool in the releasing step. Currently, we don't
		 * have any other solution to deal with this situation
		 * properly, so add a work-around here.
		 */
		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
			return true;
		else
			return false;
	}

	/* Shared mappings always use reserves */
	if (vma->vm_flags & VM_MAYSHARE) {
		/*
		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
		 * be a region map for all pages.  The only situation where
		 * there is no region map is if a hole was punched via
		 * fallocate.  In this case, there really are no reserves to
		 * use.  This situation is indicated if chg != 0.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	/*
	 * Only the process that called mmap() has reserves for
	 * private mappings.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Like the shared case above, a hole punch or truncate
		 * could have been performed on the private mapping.
		 * Examine the value of chg to determine if reserves
		 * actually exist or were previously consumed.
		 * Very Subtle - The value of chg comes from a previous
		 * call to vma_needs_reserves().  The reserve map for
		 * private mappings has different (opposite) semantics
		 * than that of shared mappings.  vma_needs_reserves()
		 * has already taken this difference in semantics into
		 * account.  Therefore, the meaning of chg is the same
		 * as in the shared case above.  Code could easily be
		 * combined, but keeping it separate draws attention to
		 * subtle differences.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	return false;
}

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);

	lockdep_assert_held(&hugetlb_lock);
	VM_BUG_ON_PAGE(page_count(page), page);

	list_move(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
	SetHPageFreed(page);
}

static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
{
	struct page *page;
	bool pin = !!(current->flags & PF_MEMALLOC_PIN);

	lockdep_assert_held(&hugetlb_lock);
	list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
		if (pin && !is_longterm_pinnable_page(page))
			continue;

		if (PageHWPoison(page))
			continue;

		list_move(&page->lru, &h->hugepage_activelist);
		set_page_refcounted(page);
		ClearHPageFreed(page);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		return page;
	}

	return NULL;
}

static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
							nodemask_t *nmask)
{
	unsigned int cpuset_mems_cookie;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	int node = NUMA_NO_NODE;

	zonelist = node_zonelist(nid, gfp_mask);

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
		struct page *page;

		if (!cpuset_zone_allowed(zone, gfp_mask))
			continue;
		/*
		 * no need to ask again on the same node. Pool is node rather than
		 * zone aware
		 */
		if (zone_to_nid(zone) == node)
			continue;
		node = zone_to_nid(zone);

		page = dequeue_huge_page_node_exact(h, node);
		if (page)
			return page;
	}
	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;

	return NULL;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{
	struct page *page = NULL;
	struct mempolicy *mpol;
	gfp_t gfp_mask;
	nodemask_t *nodemask;
	int nid;

	/*
	 * A child process with MAP_PRIVATE mappings created by their parent
	 * has no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed
	 */
	if (!vma_has_reserves(vma, chg) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	gfp_mask = htlb_alloc_mask(h);
	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);

	if (mpol_is_preferred_many(mpol)) {
		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);

		/* Fallback to all nodes if page==NULL */
		nodemask = NULL;
	}

	if (!page)
		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);

	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
		SetHPageRestoreReserve(page);
		h->resv_huge_pages--;
	}

	mpol_cond_put(mpol);
	return page;

err:
	return NULL;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node_in(nid, *nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}
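/*
 * For illustration (hypothetical mask): with nodes_allowed = {0, 2} and
 * next_nid_to_alloc currently 0, successive calls to
 * hstate_next_node_to_alloc() return 0, 2, 0, 2, ... so that persistent
 * huge pages are spread across the allowed nodes in round-robin order.
 */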
/*
 * helper for remove_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)

/* used to demote non-gigantic_huge pages as well */
static void __destroy_compound_gigantic_page(struct page *page,
					unsigned int order, bool demote)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	atomic_set(compound_mapcount_ptr(page), 0);
	atomic_set(compound_pincount_ptr(page), 0);

	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		p->mapping = NULL;
		clear_compound_head(p);
		if (!demote)
			set_page_refcounted(p);
	}

	set_compound_order(page, 0);
#ifdef CONFIG_64BIT
	page[1].compound_nr = 0;
#endif
	__ClearPageHead(page);
}

static void destroy_compound_hugetlb_page_for_demote(struct page *page,
					unsigned int order)
{
	__destroy_compound_gigantic_page(page, order, true);
}

#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static void destroy_compound_gigantic_page(struct page *page,
					unsigned int order)
{
	__destroy_compound_gigantic_page(page, order, false);
}

static void free_gigantic_page(struct page *page, unsigned int order)
{
	/*
	 * If the page isn't allocated using the cma allocator,
	 * cma_release() returns false.
	 */
#ifdef CONFIG_CMA
	if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
		return;
#endif

	free_contig_range(page_to_pfn(page), 1 << order);
}

#ifdef CONFIG_CONTIG_ALLOC
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask)
{
	unsigned long nr_pages = pages_per_huge_page(h);
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

#ifdef CONFIG_CMA
	{
		struct page *page;
		int node;

		if (hugetlb_cma[nid]) {
			page = cma_alloc(hugetlb_cma[nid], nr_pages,
					huge_page_order(h), true);
			if (page)
				return page;
		}

		if (!(gfp_mask & __GFP_THISNODE)) {
			for_each_node_mask(node, *nodemask) {
				if (node == nid || !hugetlb_cma[node])
					continue;

				page = cma_alloc(hugetlb_cma[node], nr_pages,
						huge_page_order(h), true);
				if (page)
					return page;
			}
		}
	}
#endif

	return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
}

#else /* !CONFIG_CONTIG_ALLOC */
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
#endif /* CONFIG_CONTIG_ALLOC */

#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
static inline void free_gigantic_page(struct page *page, unsigned int order) { }
static inline void destroy_compound_gigantic_page(struct page *page,
						unsigned int order) { }
#endif

/*
 * Remove hugetlb page from lists, and update dtor so that page appears
 * as just a compound page.
 *
 * A reference is held on the page, except in the case of demote.
 *
 * Must be called with hugetlb lock held.
 */
static void __remove_hugetlb_page(struct hstate *h, struct page *page,
							bool adjust_surplus,
							bool demote)
{
	int nid = page_to_nid(page);

	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page_rsvd(page), page);

	lockdep_assert_held(&hugetlb_lock);
	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	list_del(&page->lru);

	if (HPageFreed(page)) {
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
	}
	if (adjust_surplus) {
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	}

	/*
	 * Very subtle
	 *
	 * For non-gigantic pages set the destructor to the normal compound
	 * page dtor.  This is needed in case someone takes an additional
	 * temporary ref to the page, and freeing is delayed until they drop
	 * their reference.
	 *
	 * For gigantic pages set the destructor to the null dtor.  This
	 * destructor will never be called.  Before freeing the gigantic
	 * page destroy_compound_gigantic_page will turn the compound page
	 * into a simple group of pages.  After this the destructor does not
	 * apply.
	 *
	 * This handles the case where more than one ref is held when and
	 * after update_and_free_page is called.
	 *
	 * In the case of demote we do not ref count the page as it will soon
	 * be turned into a page of smaller size.
	 */
	if (!demote)
		set_page_refcounted(page);
	if (hstate_is_gigantic(h))
		set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
	else
		set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[nid]--;
}

static void remove_hugetlb_page(struct hstate *h, struct page *page,
							bool adjust_surplus)
{
	__remove_hugetlb_page(h, page, adjust_surplus, false);
}

static void remove_hugetlb_page_for_demote(struct hstate *h, struct page *page,
							bool adjust_surplus)
{
	__remove_hugetlb_page(h, page, adjust_surplus, true);
}

static void add_hugetlb_page(struct hstate *h, struct page *page,
			     bool adjust_surplus)
{
	int zeroed;
	int nid = page_to_nid(page);

	VM_BUG_ON_PAGE(!HPageVmemmapOptimized(page), page);

	lockdep_assert_held(&hugetlb_lock);

	INIT_LIST_HEAD(&page->lru);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;

	if (adjust_surplus) {
		h->surplus_huge_pages++;
		h->surplus_huge_pages_node[nid]++;
	}

	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
	set_page_private(page, 0);
	SetHPageVmemmapOptimized(page);

	/*
	 * This page is about to be managed by the hugetlb allocator and
	 * should have no users.  Drop our reference, and check for others
	 * just in case.
	 */
	zeroed = put_page_testzero(page);
	if (!zeroed)
		/*
		 * It is VERY unlikely someone else has taken a ref on
		 * the page.  In this case, we simply return as the
		 * hugetlb destructor (free_huge_page) will be called
		 * when this other ref is dropped.
		 */
		return;

	arch_clear_hugepage_flags(page);
	enqueue_huge_page(h, page);
}

static void __update_and_free_page(struct hstate *h, struct page *page)
{
	int i;
	struct page *subpage = page;

	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	if (hugetlb_vmemmap_alloc(h, page)) {
		spin_lock_irq(&hugetlb_lock);
		/*
		 * If we cannot allocate vmemmap pages, just refuse to free the
		 * page and put the page back on the hugetlb free list and treat
		 * as a surplus page.
		 */
		add_hugetlb_page(h, page, true);
		spin_unlock_irq(&hugetlb_lock);
		return;
	}

	for (i = 0; i < pages_per_huge_page(h);
	     i++, subpage = mem_map_next(subpage, page, i)) {
		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_private |
				1 << PG_writeback);
	}

	/*
	 * Non-gigantic pages demoted from CMA allocated gigantic pages
	 * need to be given back to CMA in free_gigantic_page.
	 */
	if (hstate_is_gigantic(h) ||
	    hugetlb_cma_page(page, huge_page_order(h))) {
		destroy_compound_gigantic_page(page, huge_page_order(h));
		free_gigantic_page(page, huge_page_order(h));
	} else {
		__free_pages(page, huge_page_order(h));
	}
}

/*
 * Because update_and_free_page() can be called under any context, we cannot
 * use GFP_KERNEL to allocate vmemmap pages.  However, we can defer the
 * actual freeing in a workqueue to prevent from using GFP_ATOMIC to allocate
 * the vmemmap pages.
 *
 * free_hpage_workfn() locklessly retrieves the linked list of pages to be
 * freed and frees them one-by-one. As the page->mapping pointer is going
 * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
 * structure of a lockless linked list of huge pages to be freed.
 */
static LLIST_HEAD(hpage_freelist);

static void free_hpage_workfn(struct work_struct *work)
{
	struct llist_node *node;

	node = llist_del_all(&hpage_freelist);

	while (node) {
		struct page *page;
		struct hstate *h;

		page = container_of((struct address_space **)node,
				     struct page, mapping);
		node = node->next;
		page->mapping = NULL;
		/*
		 * The VM_BUG_ON_PAGE(!PageHuge(page), page) in page_hstate()
		 * is going to trigger because a previous call to
		 * remove_hugetlb_page() will set_compound_page_dtor(page,
		 * NULL_COMPOUND_DTOR), so do not use page_hstate() directly.
		 */
		h = size_to_hstate(page_size(page));

		__update_and_free_page(h, page);

		cond_resched();
	}
}
static DECLARE_WORK(free_hpage_work, free_hpage_workfn);

static inline void flush_free_hpage_work(struct hstate *h)
{
	if (hugetlb_optimize_vmemmap_pages(h))
		flush_work(&free_hpage_work);
}

static void update_and_free_page(struct hstate *h, struct page *page,
				 bool atomic)
{
	if (!HPageVmemmapOptimized(page) || !atomic) {
		__update_and_free_page(h, page);
		return;
	}

	/*
	 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
	 *
	 * Only call schedule_work() if hpage_freelist is previously
	 * empty. Otherwise, schedule_work() had been called but the workfn
	 * hasn't retrieved the list yet.
	 */
	if (llist_add((struct llist_node *)&page->mapping, &hpage_freelist))
		schedule_work(&free_hpage_work);
}

static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
{
	struct page *page, *t_page;

	list_for_each_entry_safe(page, t_page, list, lru) {
		update_and_free_page(h, page, false);
		cond_resched();
	}
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

void free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct hugepage_subpool *spool = hugetlb_page_subpool(page);
	bool restore_reserve;
	unsigned long flags;

	VM_BUG_ON_PAGE(page_count(page), page);
	VM_BUG_ON_PAGE(page_mapcount(page), page);

	hugetlb_set_page_subpool(page, NULL);
	if (PageAnon(page))
		__ClearPageAnonExclusive(page);
	page->mapping = NULL;
	restore_reserve = HPageRestoreReserve(page);
	ClearHPageRestoreReserve(page);

	/*
	 * If HPageRestoreReserve was set on page, page allocation consumed a
	 * reservation.  If the page was associated with a subpool, there
	 * would have been a page reserved in the subpool before allocation
	 * via hugepage_subpool_get_pages().  Since we are 'restoring' the
	 * reservation, do not call hugepage_subpool_put_pages() as this will
	 * remove the reserved page from the subpool.
	 */
	if (!restore_reserve) {
		/*
		 * A return code of zero implies that the subpool will be
		 * under its minimum size if the reservation is not restored
		 * after page is free.  Therefore, force restore_reserve
		 * operation.
		 */
		if (hugepage_subpool_put_pages(spool, 1) == 0)
			restore_reserve = true;
	}

	spin_lock_irqsave(&hugetlb_lock, flags);
	ClearHPageMigratable(page);
	hugetlb_cgroup_uncharge_page(hstate_index(h),
				     pages_per_huge_page(h), page);
	hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
					  pages_per_huge_page(h), page);
	if (restore_reserve)
		h->resv_huge_pages++;

	if (HPageTemporary(page)) {
		remove_hugetlb_page(h, page, false);
		spin_unlock_irqrestore(&hugetlb_lock, flags);
		update_and_free_page(h, page, true);
	} else if (h->surplus_huge_pages_node[nid]) {
		/* remove the page from active list */
		remove_hugetlb_page(h, page, true);
		spin_unlock_irqrestore(&hugetlb_lock, flags);
		update_and_free_page(h, page, true);
	} else {
		arch_clear_hugepage_flags(page);
		enqueue_huge_page(h, page);
		spin_unlock_irqrestore(&hugetlb_lock, flags);
	}
}

/*
 * Must be called with the hugetlb lock held
 */
static void __prep_account_new_huge_page(struct hstate *h, int nid)
{
	lockdep_assert_held(&hugetlb_lock);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
}

static void __prep_new_huge_page(struct hstate *h, struct page *page)
{
	hugetlb_vmemmap_free(h, page);
	INIT_LIST_HEAD(&page->lru);
	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
	hugetlb_set_page_subpool(page, NULL);
	set_hugetlb_cgroup(page, NULL);
	set_hugetlb_cgroup_rsvd(page, NULL);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	__prep_new_huge_page(h, page);
	spin_lock_irq(&hugetlb_lock);
	__prep_account_new_huge_page(h, nid);
	spin_unlock_irq(&hugetlb_lock);
}

static bool __prep_compound_gigantic_page(struct page *page, unsigned int order,
								bool demote)
{
	int i, j;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	/* we rely on prep_new_huge_page to set the destructor */
	set_compound_order(page, order);
	__ClearPageReserved(page);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		/*
		 * For gigantic hugepages allocated through bootmem at
		 * boot, it's safer to be consistent with the not-gigantic
		 * hugepages and clear the PG_reserved bit from all tail pages
		 * too.  Otherwise drivers using get_user_pages() to access tail
		 * pages may get the reference counting wrong if they see
		 * PG_reserved set on a tail page (despite the head page not
		 * having PG_reserved set).  Enforcing this consistency between
		 * head and tail pages allows drivers to optimize away a check
		 * on the head page when they need to know if put_page() is
		 * needed after get_user_pages().
		 */
		__ClearPageReserved(p);
		/*
		 * Subtle and very unlikely
		 *
		 * Gigantic 'page allocators' such as memblock or cma will
		 * return a set of pages with each page ref counted.  We need
		 * to turn this set of pages into a compound page with tail
		 * page ref counts set to zero.  Code such as speculative page
		 * cache adding could take a ref on a 'to be' tail page.
		 * We need to respect any increased ref count, and only set
		 * the ref count to zero if count is currently 1.  If count
		 * is not 1, we return an error.  An error return indicates
		 * the set of pages can not be converted to a gigantic page.
		 * The caller who allocated the pages should then discard the
		 * pages using the appropriate free interface.
		 *
		 * In the case of demote, the ref count will be zero.
		 */
		if (!demote) {
			if (!page_ref_freeze(p, 1)) {
				pr_warn("HugeTLB page can not be used due to unexpected inflated ref count\n");
				goto out_error;
			}
		} else {
			VM_BUG_ON_PAGE(page_count(p), p);
		}
		set_compound_head(p, page);
	}
	atomic_set(compound_mapcount_ptr(page), -1);
	atomic_set(compound_pincount_ptr(page), 0);
	return true;

out_error:
	/* undo tail page modifications made above */
	p = page + 1;
	for (j = 1; j < i; j++, p = mem_map_next(p, page, j)) {
		clear_compound_head(p);
		set_page_refcounted(p);
	}
	/* need to clear PG_reserved on remaining tail pages */
	for (; j < nr_pages; j++, p = mem_map_next(p, page, j))
		__ClearPageReserved(p);
	set_compound_order(page, 0);
#ifdef CONFIG_64BIT
	page[1].compound_nr = 0;
#endif
	__ClearPageHead(page);
	return false;
}

static bool prep_compound_gigantic_page(struct page *page, unsigned int order)
{
	return __prep_compound_gigantic_page(page, order, false);
}

static bool prep_compound_gigantic_page_for_demote(struct page *page,
							unsigned int order)
{
	return __prep_compound_gigantic_page(page, order, true);
}

/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages.  See the PageTransHuge() documentation for more
 * details.
 */
int PageHuge(struct page *page)
{
	if (!PageCompound(page))
		return 0;

	page = compound_head(page);
	return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
}
EXPORT_SYMBOL_GPL(PageHuge);

/*
 * PageHeadHuge() only returns true for hugetlbfs head page, but not for
 * normal or transparent huge pages.
 */
int PageHeadHuge(struct page *page_head)
{
	if (!PageHead(page_head))
		return 0;

	return page_head[1].compound_dtor == HUGETLB_PAGE_DTOR;
}
EXPORT_SYMBOL_GPL(PageHeadHuge);

/*
 * Find and lock address space (mapping) in write mode.
 *
 * Upon entry, the page is locked which means that page_mapping() is
 * stable.  Due to locking order, we can only trylock_write.  If we can
 * not get the lock, simply return NULL to caller.
1872 */ 1873 struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage) 1874 { 1875 struct address_space *mapping = page_mapping(hpage); 1876 1877 if (!mapping) 1878 return mapping; 1879 1880 if (i_mmap_trylock_write(mapping)) 1881 return mapping; 1882 1883 return NULL; 1884 } 1885 1886 pgoff_t hugetlb_basepage_index(struct page *page) 1887 { 1888 struct page *page_head = compound_head(page); 1889 pgoff_t index = page_index(page_head); 1890 unsigned long compound_idx; 1891 1892 if (compound_order(page_head) >= MAX_ORDER) 1893 compound_idx = page_to_pfn(page) - page_to_pfn(page_head); 1894 else 1895 compound_idx = page - page_head; 1896 1897 return (index << compound_order(page_head)) + compound_idx; 1898 } 1899 1900 static struct page *alloc_buddy_huge_page(struct hstate *h, 1901 gfp_t gfp_mask, int nid, nodemask_t *nmask, 1902 nodemask_t *node_alloc_noretry) 1903 { 1904 int order = huge_page_order(h); 1905 struct page *page; 1906 bool alloc_try_hard = true; 1907 1908 /* 1909 * By default we always try hard to allocate the page with 1910 * __GFP_RETRY_MAYFAIL flag. However, if we are allocating pages in 1911 * a loop (to adjust global huge page counts) and previous allocation 1912 * failed, do not continue to try hard on the same node. Use the 1913 * node_alloc_noretry bitmap to manage this state information. 1914 */ 1915 if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry)) 1916 alloc_try_hard = false; 1917 gfp_mask |= __GFP_COMP|__GFP_NOWARN; 1918 if (alloc_try_hard) 1919 gfp_mask |= __GFP_RETRY_MAYFAIL; 1920 if (nid == NUMA_NO_NODE) 1921 nid = numa_mem_id(); 1922 page = __alloc_pages(gfp_mask, order, nid, nmask); 1923 if (page) 1924 __count_vm_event(HTLB_BUDDY_PGALLOC); 1925 else 1926 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL); 1927 1928 /* 1929 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page this 1930 * indicates an overall state change. Clear bit so that we resume 1931 * normal 'try hard' allocations. 1932 */ 1933 if (node_alloc_noretry && page && !alloc_try_hard) 1934 node_clear(nid, *node_alloc_noretry); 1935 1936 /* 1937 * If we tried hard to get a page but failed, set bit so that 1938 * subsequent attempts will not try as hard until there is an 1939 * overall state change. 1940 */ 1941 if (node_alloc_noretry && !page && alloc_try_hard) 1942 node_set(nid, *node_alloc_noretry); 1943 1944 return page; 1945 } 1946 1947 /* 1948 * Common helper to allocate a fresh hugetlb page. All specific allocators 1949 * should use this function to get new hugetlb pages 1950 */ 1951 static struct page *alloc_fresh_huge_page(struct hstate *h, 1952 gfp_t gfp_mask, int nid, nodemask_t *nmask, 1953 nodemask_t *node_alloc_noretry) 1954 { 1955 struct page *page; 1956 bool retry = false; 1957 1958 retry: 1959 if (hstate_is_gigantic(h)) 1960 page = alloc_gigantic_page(h, gfp_mask, nid, nmask); 1961 else 1962 page = alloc_buddy_huge_page(h, gfp_mask, 1963 nid, nmask, node_alloc_noretry); 1964 if (!page) 1965 return NULL; 1966 1967 if (hstate_is_gigantic(h)) { 1968 if (!prep_compound_gigantic_page(page, huge_page_order(h))) { 1969 /* 1970 * Rare failure to convert pages to compound page. 1971 * Free pages and try again - ONCE! 1972 */ 1973 free_gigantic_page(page, huge_page_order(h)); 1974 if (!retry) { 1975 retry = true; 1976 goto retry; 1977 } 1978 return NULL; 1979 } 1980 } 1981 prep_new_huge_page(h, page, page_to_nid(page)); 1982 1983 return page; 1984 } 1985 1986 /* 1987 * Allocates a fresh page to the hugetlb allocator pool in the node interleaved 1988 * manner. 
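 *
 * "Node interleaved" means each call starts from h->next_nid_to_alloc
 * (via for_each_node_mask_to_alloc()), so repeated pool growth spreads
 * new pages roughly evenly across the allowed nodes.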
1989  */
1990 static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1991 					nodemask_t *node_alloc_noretry)
1992 {
1993 	struct page *page;
1994 	int nr_nodes, node;
1995 	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
1996 
1997 	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1998 		page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
1999 						node_alloc_noretry);
2000 		if (page)
2001 			break;
2002 	}
2003 
2004 	if (!page)
2005 		return 0;
2006 
2007 	put_page(page); /* free it into the hugepage allocator */
2008 
2009 	return 1;
2010 }
2011 
2012 /*
2013  * Remove huge page from pool from next node to free. Attempt to keep
2014  * persistent huge pages more or less balanced over allowed nodes.
2015  * This routine only 'removes' the hugetlb page. The caller must make
2016  * an additional call to free the page to low level allocators.
2017  * Called with hugetlb_lock locked.
2018  */
2019 static struct page *remove_pool_huge_page(struct hstate *h,
2020 						nodemask_t *nodes_allowed,
2021 						bool acct_surplus)
2022 {
2023 	int nr_nodes, node;
2024 	struct page *page = NULL;
2025 
2026 	lockdep_assert_held(&hugetlb_lock);
2027 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2028 		/*
2029 		 * If we're returning unused surplus pages, only examine
2030 		 * nodes with surplus pages.
2031 		 */
2032 		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
2033 		    !list_empty(&h->hugepage_freelists[node])) {
2034 			page = list_entry(h->hugepage_freelists[node].next,
2035 					  struct page, lru);
2036 			remove_hugetlb_page(h, page, acct_surplus);
2037 			break;
2038 		}
2039 	}
2040 
2041 	return page;
2042 }
2043 
2044 /*
2045  * Dissolve a given free hugepage into free buddy pages. This function does
2046  * nothing for in-use hugepages and non-hugepages.
2047  * This function returns values as follows:
2048  *
2049  *  -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages
2050  *           when the system is under memory pressure and the feature of
2051  *           freeing unused vmemmap pages associated with each hugetlb page
2052  *           is enabled.
2053  *  -EBUSY:  failed to dissolve free hugepages or the hugepage is in-use
2054  *           (allocated or reserved.)
2055  *       0:  successfully dissolved free hugepages or the page is not a
2056  *           hugepage (considered as already dissolved)
2057  */
2058 int dissolve_free_huge_page(struct page *page)
2059 {
2060 	int rc = -EBUSY;
2061 
2062 retry:
2063 	/* Not to disrupt normal path by vainly holding hugetlb_lock */
2064 	if (!PageHuge(page))
2065 		return 0;
2066 
2067 	spin_lock_irq(&hugetlb_lock);
2068 	if (!PageHuge(page)) {
2069 		rc = 0;
2070 		goto out;
2071 	}
2072 
2073 	if (!page_count(page)) {
2074 		struct page *head = compound_head(page);
2075 		struct hstate *h = page_hstate(head);
2076 		if (h->free_huge_pages - h->resv_huge_pages == 0)
2077 			goto out;
2078 
2079 		/*
2080 		 * We should make sure that the page is already on the free list
2081 		 * when it is dissolved.
2082 		 */
2083 		if (unlikely(!HPageFreed(head))) {
2084 			spin_unlock_irq(&hugetlb_lock);
2085 			cond_resched();
2086 
2087 			/*
2088 			 * Theoretically, we should return -EBUSY when we
2089 			 * encounter this race. In fact, we have a chance
2090 			 * to successfully dissolve the page if we do a
2091 			 * retry, because the race window is quite small.
2092 			 * If we seize this opportunity, it is an optimization
2093 			 * for increasing the success rate of dissolving the page.
2094 			 */
2095 			goto retry;
2096 		}
2097 
2098 		remove_hugetlb_page(h, head, false);
2099 		h->max_huge_pages--;
2100 		spin_unlock_irq(&hugetlb_lock);
2101 
2102 		/*
2103 		 * Normally update_and_free_page will allocate required vmemmap
2104 		 * before freeing the page. update_and_free_page will fail to
2105 		 * free the page if it can not allocate required vmemmap. We
2106 		 * need to adjust max_huge_pages if the page is not freed.
2107 		 * Attempt to allocate vmemmap here so that we can take
2108 		 * appropriate action on failure.
2109 		 */
2110 		rc = hugetlb_vmemmap_alloc(h, head);
2111 		if (!rc) {
2112 			/*
2113 			 * Move PageHWPoison flag from head page to the raw
2114 			 * error page, which makes the subpages other than
2115 			 * the error page reusable.
2116 			 */
2117 			if (PageHWPoison(head) && page != head) {
2118 				SetPageHWPoison(page);
2119 				ClearPageHWPoison(head);
2120 			}
2121 			update_and_free_page(h, head, false);
2122 		} else {
2123 			spin_lock_irq(&hugetlb_lock);
2124 			add_hugetlb_page(h, head, false);
2125 			h->max_huge_pages++;
2126 			spin_unlock_irq(&hugetlb_lock);
2127 		}
2128 
2129 		return rc;
2130 	}
2131 out:
2132 	spin_unlock_irq(&hugetlb_lock);
2133 	return rc;
2134 }
2135 
2136 /*
2137  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
2138  * make specified memory blocks removable from the system.
2139  * Note that this will dissolve a free gigantic hugepage completely, if any
2140  * part of it lies within the given range.
2141  * Also note that if dissolve_free_huge_page() returns with an error, all
2142  * free hugepages that were dissolved before that error are lost.
2143  */
2144 int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
2145 {
2146 	unsigned long pfn;
2147 	struct page *page;
2148 	int rc = 0;
2149 	unsigned int order;
2150 	struct hstate *h;
2151 
2152 	if (!hugepages_supported())
2153 		return rc;
2154 
2155 	order = huge_page_order(&default_hstate);
2156 	for_each_hstate(h)
2157 		order = min(order, huge_page_order(h));
2158 
2159 	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
2160 		page = pfn_to_page(pfn);
2161 		rc = dissolve_free_huge_page(page);
2162 		if (rc)
2163 			break;
2164 	}
2165 
2166 	return rc;
2167 }
2168 
2169 /*
2170  * Allocates a fresh surplus page from the page allocator.
2171  */
2172 static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
2173 						int nid, nodemask_t *nmask, bool zero_ref)
2174 {
2175 	struct page *page = NULL;
2176 	bool retry = false;
2177 
2178 	if (hstate_is_gigantic(h))
2179 		return NULL;
2180 
2181 	spin_lock_irq(&hugetlb_lock);
2182 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
2183 		goto out_unlock;
2184 	spin_unlock_irq(&hugetlb_lock);
2185 
2186 retry:
2187 	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
2188 	if (!page)
2189 		return NULL;
2190 
2191 	spin_lock_irq(&hugetlb_lock);
2192 	/*
2193 	 * We could have raced with the pool size change.
2194 	 * Double check that and simply deallocate the new page
2195 	 * if we would end up overcommitting the surpluses. Abuse
2196 	 * the temporary page flag to work around the nasty free_huge_page
2197 	 * codeflow.
2198 	 */
2199 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
2200 		SetHPageTemporary(page);
2201 		spin_unlock_irq(&hugetlb_lock);
2202 		put_page(page);
2203 		return NULL;
2204 	}
2205 
2206 	if (zero_ref) {
2207 		/*
2208 		 * Caller requires a page with zero ref count.
2209 		 * We will drop ref count here. If someone else is holding
2210 		 * a ref, the page will be freed when they drop it. Abuse
2211 		 * the temporary page flag to accomplish this.
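		 * With HPageTemporary set, the eventual free_huge_page()
		 * from that other ref holder releases the page back to the
		 * low level allocator instead of returning it to the
		 * hugetlb free lists.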
2212 */ 2213 SetHPageTemporary(page); 2214 if (!put_page_testzero(page)) { 2215 /* 2216 * Unexpected inflated ref count on freshly allocated 2217 * huge. Retry once. 2218 */ 2219 pr_info("HugeTLB unexpected inflated ref count on freshly allocated page\n"); 2220 spin_unlock_irq(&hugetlb_lock); 2221 if (retry) 2222 return NULL; 2223 2224 retry = true; 2225 goto retry; 2226 } 2227 ClearHPageTemporary(page); 2228 } 2229 2230 h->surplus_huge_pages++; 2231 h->surplus_huge_pages_node[page_to_nid(page)]++; 2232 2233 out_unlock: 2234 spin_unlock_irq(&hugetlb_lock); 2235 2236 return page; 2237 } 2238 2239 static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask, 2240 int nid, nodemask_t *nmask) 2241 { 2242 struct page *page; 2243 2244 if (hstate_is_gigantic(h)) 2245 return NULL; 2246 2247 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL); 2248 if (!page) 2249 return NULL; 2250 2251 /* 2252 * We do not account these pages as surplus because they are only 2253 * temporary and will be released properly on the last reference 2254 */ 2255 SetHPageTemporary(page); 2256 2257 return page; 2258 } 2259 2260 /* 2261 * Use the VMA's mpolicy to allocate a huge page from the buddy. 2262 */ 2263 static 2264 struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h, 2265 struct vm_area_struct *vma, unsigned long addr) 2266 { 2267 struct page *page = NULL; 2268 struct mempolicy *mpol; 2269 gfp_t gfp_mask = htlb_alloc_mask(h); 2270 int nid; 2271 nodemask_t *nodemask; 2272 2273 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask); 2274 if (mpol_is_preferred_many(mpol)) { 2275 gfp_t gfp = gfp_mask | __GFP_NOWARN; 2276 2277 gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); 2278 page = alloc_surplus_huge_page(h, gfp, nid, nodemask, false); 2279 2280 /* Fallback to all nodes if page==NULL */ 2281 nodemask = NULL; 2282 } 2283 2284 if (!page) 2285 page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask, false); 2286 mpol_cond_put(mpol); 2287 return page; 2288 } 2289 2290 /* page migration callback function */ 2291 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, 2292 nodemask_t *nmask, gfp_t gfp_mask) 2293 { 2294 spin_lock_irq(&hugetlb_lock); 2295 if (h->free_huge_pages - h->resv_huge_pages > 0) { 2296 struct page *page; 2297 2298 page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask); 2299 if (page) { 2300 spin_unlock_irq(&hugetlb_lock); 2301 return page; 2302 } 2303 } 2304 spin_unlock_irq(&hugetlb_lock); 2305 2306 return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask); 2307 } 2308 2309 /* mempolicy aware migration callback */ 2310 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, 2311 unsigned long address) 2312 { 2313 struct mempolicy *mpol; 2314 nodemask_t *nodemask; 2315 struct page *page; 2316 gfp_t gfp_mask; 2317 int node; 2318 2319 gfp_mask = htlb_alloc_mask(h); 2320 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask); 2321 page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask); 2322 mpol_cond_put(mpol); 2323 2324 return page; 2325 } 2326 2327 /* 2328 * Increase the hugetlb pool such that it can accommodate a reservation 2329 * of size 'delta'. 
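 *
 * Roughly: with hugetlb_lock held, compute the shortfall
 * (resv_huge_pages + delta - free_huge_pages), drop the lock to allocate
 * that many surplus pages, then retake the lock, recompute the shortfall
 * (the counters may have changed meanwhile) and either retry, enqueue the
 * new pages, or free everything back on allocation failure.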
2330 */ 2331 static int gather_surplus_pages(struct hstate *h, long delta) 2332 __must_hold(&hugetlb_lock) 2333 { 2334 struct list_head surplus_list; 2335 struct page *page, *tmp; 2336 int ret; 2337 long i; 2338 long needed, allocated; 2339 bool alloc_ok = true; 2340 2341 lockdep_assert_held(&hugetlb_lock); 2342 needed = (h->resv_huge_pages + delta) - h->free_huge_pages; 2343 if (needed <= 0) { 2344 h->resv_huge_pages += delta; 2345 return 0; 2346 } 2347 2348 allocated = 0; 2349 INIT_LIST_HEAD(&surplus_list); 2350 2351 ret = -ENOMEM; 2352 retry: 2353 spin_unlock_irq(&hugetlb_lock); 2354 for (i = 0; i < needed; i++) { 2355 page = alloc_surplus_huge_page(h, htlb_alloc_mask(h), 2356 NUMA_NO_NODE, NULL, true); 2357 if (!page) { 2358 alloc_ok = false; 2359 break; 2360 } 2361 list_add(&page->lru, &surplus_list); 2362 cond_resched(); 2363 } 2364 allocated += i; 2365 2366 /* 2367 * After retaking hugetlb_lock, we need to recalculate 'needed' 2368 * because either resv_huge_pages or free_huge_pages may have changed. 2369 */ 2370 spin_lock_irq(&hugetlb_lock); 2371 needed = (h->resv_huge_pages + delta) - 2372 (h->free_huge_pages + allocated); 2373 if (needed > 0) { 2374 if (alloc_ok) 2375 goto retry; 2376 /* 2377 * We were not able to allocate enough pages to 2378 * satisfy the entire reservation so we free what 2379 * we've allocated so far. 2380 */ 2381 goto free; 2382 } 2383 /* 2384 * The surplus_list now contains _at_least_ the number of extra pages 2385 * needed to accommodate the reservation. Add the appropriate number 2386 * of pages to the hugetlb pool and free the extras back to the buddy 2387 * allocator. Commit the entire reservation here to prevent another 2388 * process from stealing the pages as they are added to the pool but 2389 * before they are reserved. 2390 */ 2391 needed += allocated; 2392 h->resv_huge_pages += delta; 2393 ret = 0; 2394 2395 /* Free the needed pages to the hugetlb pool */ 2396 list_for_each_entry_safe(page, tmp, &surplus_list, lru) { 2397 if ((--needed) < 0) 2398 break; 2399 /* Add the page to the hugetlb allocator */ 2400 enqueue_huge_page(h, page); 2401 } 2402 free: 2403 spin_unlock_irq(&hugetlb_lock); 2404 2405 /* 2406 * Free unnecessary surplus pages to the buddy allocator. 2407 * Pages have no ref count, call free_huge_page directly. 2408 */ 2409 list_for_each_entry_safe(page, tmp, &surplus_list, lru) 2410 free_huge_page(page); 2411 spin_lock_irq(&hugetlb_lock); 2412 2413 return ret; 2414 } 2415 2416 /* 2417 * This routine has two main purposes: 2418 * 1) Decrement the reservation count (resv_huge_pages) by the value passed 2419 * in unused_resv_pages. This corresponds to the prior adjustments made 2420 * to the associated reservation map. 2421 * 2) Free any unused surplus pages that may have been allocated to satisfy 2422 * the reservation. As many as unused_resv_pages may be freed. 2423 */ 2424 static void return_unused_surplus_pages(struct hstate *h, 2425 unsigned long unused_resv_pages) 2426 { 2427 unsigned long nr_pages; 2428 struct page *page; 2429 LIST_HEAD(page_list); 2430 2431 lockdep_assert_held(&hugetlb_lock); 2432 /* Uncommit the reservation */ 2433 h->resv_huge_pages -= unused_resv_pages; 2434 2435 /* Cannot return gigantic pages currently */ 2436 if (hstate_is_gigantic(h)) 2437 goto out; 2438 2439 /* 2440 * Part (or even all) of the reservation could have been backed 2441 * by pre-allocated pages. Only free surplus pages. 
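 * The number of pages freed below is therefore capped by both
 * unused_resv_pages and the current surplus count.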
2442 */ 2443 nr_pages = min(unused_resv_pages, h->surplus_huge_pages); 2444 2445 /* 2446 * We want to release as many surplus pages as possible, spread 2447 * evenly across all nodes with memory. Iterate across these nodes 2448 * until we can no longer free unreserved surplus pages. This occurs 2449 * when the nodes with surplus pages have no free pages. 2450 * remove_pool_huge_page() will balance the freed pages across the 2451 * on-line nodes with memory and will handle the hstate accounting. 2452 */ 2453 while (nr_pages--) { 2454 page = remove_pool_huge_page(h, &node_states[N_MEMORY], 1); 2455 if (!page) 2456 goto out; 2457 2458 list_add(&page->lru, &page_list); 2459 } 2460 2461 out: 2462 spin_unlock_irq(&hugetlb_lock); 2463 update_and_free_pages_bulk(h, &page_list); 2464 spin_lock_irq(&hugetlb_lock); 2465 } 2466 2467 2468 /* 2469 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation 2470 * are used by the huge page allocation routines to manage reservations. 2471 * 2472 * vma_needs_reservation is called to determine if the huge page at addr 2473 * within the vma has an associated reservation. If a reservation is 2474 * needed, the value 1 is returned. The caller is then responsible for 2475 * managing the global reservation and subpool usage counts. After 2476 * the huge page has been allocated, vma_commit_reservation is called 2477 * to add the page to the reservation map. If the page allocation fails, 2478 * the reservation must be ended instead of committed. vma_end_reservation 2479 * is called in such cases. 2480 * 2481 * In the normal case, vma_commit_reservation returns the same value 2482 * as the preceding vma_needs_reservation call. The only time this 2483 * is not the case is if a reserve map was changed between calls. It 2484 * is the responsibility of the caller to notice the difference and 2485 * take appropriate action. 2486 * 2487 * vma_add_reservation is used in error paths where a reservation must 2488 * be restored when a newly allocated huge page must be freed. It is 2489 * to be called after calling vma_needs_reservation to determine if a 2490 * reservation exists. 2491 * 2492 * vma_del_reservation is used in error paths where an entry in the reserve 2493 * map was created during huge page allocation and must be removed. It is to 2494 * be called after calling vma_needs_reservation to determine if a reservation 2495 * exists. 2496 */ 2497 enum vma_resv_mode { 2498 VMA_NEEDS_RESV, 2499 VMA_COMMIT_RESV, 2500 VMA_END_RESV, 2501 VMA_ADD_RESV, 2502 VMA_DEL_RESV, 2503 }; 2504 static long __vma_reservation_common(struct hstate *h, 2505 struct vm_area_struct *vma, unsigned long addr, 2506 enum vma_resv_mode mode) 2507 { 2508 struct resv_map *resv; 2509 pgoff_t idx; 2510 long ret; 2511 long dummy_out_regions_needed; 2512 2513 resv = vma_resv_map(vma); 2514 if (!resv) 2515 return 1; 2516 2517 idx = vma_hugecache_offset(h, vma, addr); 2518 switch (mode) { 2519 case VMA_NEEDS_RESV: 2520 ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed); 2521 /* We assume that vma_reservation_* routines always operate on 2522 * 1 page, and that adding to resv map a 1 page entry can only 2523 * ever require 1 region. 2524 */ 2525 VM_BUG_ON(dummy_out_regions_needed != 1); 2526 break; 2527 case VMA_COMMIT_RESV: 2528 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); 2529 /* region_add calls of range 1 should never fail. 
*/ 2530 VM_BUG_ON(ret < 0); 2531 break; 2532 case VMA_END_RESV: 2533 region_abort(resv, idx, idx + 1, 1); 2534 ret = 0; 2535 break; 2536 case VMA_ADD_RESV: 2537 if (vma->vm_flags & VM_MAYSHARE) { 2538 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); 2539 /* region_add calls of range 1 should never fail. */ 2540 VM_BUG_ON(ret < 0); 2541 } else { 2542 region_abort(resv, idx, idx + 1, 1); 2543 ret = region_del(resv, idx, idx + 1); 2544 } 2545 break; 2546 case VMA_DEL_RESV: 2547 if (vma->vm_flags & VM_MAYSHARE) { 2548 region_abort(resv, idx, idx + 1, 1); 2549 ret = region_del(resv, idx, idx + 1); 2550 } else { 2551 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL); 2552 /* region_add calls of range 1 should never fail. */ 2553 VM_BUG_ON(ret < 0); 2554 } 2555 break; 2556 default: 2557 BUG(); 2558 } 2559 2560 if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV) 2561 return ret; 2562 /* 2563 * We know private mapping must have HPAGE_RESV_OWNER set. 2564 * 2565 * In most cases, reserves always exist for private mappings. 2566 * However, a file associated with mapping could have been 2567 * hole punched or truncated after reserves were consumed. 2568 * As subsequent fault on such a range will not use reserves. 2569 * Subtle - The reserve map for private mappings has the 2570 * opposite meaning than that of shared mappings. If NO 2571 * entry is in the reserve map, it means a reservation exists. 2572 * If an entry exists in the reserve map, it means the 2573 * reservation has already been consumed. As a result, the 2574 * return value of this routine is the opposite of the 2575 * value returned from reserve map manipulation routines above. 2576 */ 2577 if (ret > 0) 2578 return 0; 2579 if (ret == 0) 2580 return 1; 2581 return ret; 2582 } 2583 2584 static long vma_needs_reservation(struct hstate *h, 2585 struct vm_area_struct *vma, unsigned long addr) 2586 { 2587 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); 2588 } 2589 2590 static long vma_commit_reservation(struct hstate *h, 2591 struct vm_area_struct *vma, unsigned long addr) 2592 { 2593 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); 2594 } 2595 2596 static void vma_end_reservation(struct hstate *h, 2597 struct vm_area_struct *vma, unsigned long addr) 2598 { 2599 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); 2600 } 2601 2602 static long vma_add_reservation(struct hstate *h, 2603 struct vm_area_struct *vma, unsigned long addr) 2604 { 2605 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); 2606 } 2607 2608 static long vma_del_reservation(struct hstate *h, 2609 struct vm_area_struct *vma, unsigned long addr) 2610 { 2611 return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV); 2612 } 2613 2614 /* 2615 * This routine is called to restore reservation information on error paths. 2616 * It should ONLY be called for pages allocated via alloc_huge_page(), and 2617 * the hugetlb mutex should remain held when calling this routine. 2618 * 2619 * It handles two specific cases: 2620 * 1) A reservation was in place and the page consumed the reservation. 2621 * HPageRestoreReserve is set in the page. 2622 * 2) No reservation was in place for the page, so HPageRestoreReserve is 2623 * not set. However, alloc_huge_page always updates the reserve map. 2624 * 2625 * In case 1, free_huge_page later in the error path will increment the 2626 * global reserve count. But, free_huge_page does not have enough context 2627 * to adjust the reservation map. 
This case deals primarily with private 2628 * mappings. Adjust the reserve map here to be consistent with global 2629 * reserve count adjustments to be made by free_huge_page. Make sure the 2630 * reserve map indicates there is a reservation present. 2631 * 2632 * In case 2, simply undo reserve map modifications done by alloc_huge_page. 2633 */ 2634 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, 2635 unsigned long address, struct page *page) 2636 { 2637 long rc = vma_needs_reservation(h, vma, address); 2638 2639 if (HPageRestoreReserve(page)) { 2640 if (unlikely(rc < 0)) 2641 /* 2642 * Rare out of memory condition in reserve map 2643 * manipulation. Clear HPageRestoreReserve so that 2644 * global reserve count will not be incremented 2645 * by free_huge_page. This will make it appear 2646 * as though the reservation for this page was 2647 * consumed. This may prevent the task from 2648 * faulting in the page at a later time. This 2649 * is better than inconsistent global huge page 2650 * accounting of reserve counts. 2651 */ 2652 ClearHPageRestoreReserve(page); 2653 else if (rc) 2654 (void)vma_add_reservation(h, vma, address); 2655 else 2656 vma_end_reservation(h, vma, address); 2657 } else { 2658 if (!rc) { 2659 /* 2660 * This indicates there is an entry in the reserve map 2661 * not added by alloc_huge_page. We know it was added 2662 * before the alloc_huge_page call, otherwise 2663 * HPageRestoreReserve would be set on the page. 2664 * Remove the entry so that a subsequent allocation 2665 * does not consume a reservation. 2666 */ 2667 rc = vma_del_reservation(h, vma, address); 2668 if (rc < 0) 2669 /* 2670 * VERY rare out of memory condition. Since 2671 * we can not delete the entry, set 2672 * HPageRestoreReserve so that the reserve 2673 * count will be incremented when the page 2674 * is freed. This reserve will be consumed 2675 * on a subsequent allocation. 2676 */ 2677 SetHPageRestoreReserve(page); 2678 } else if (rc < 0) { 2679 /* 2680 * Rare out of memory condition from 2681 * vma_needs_reservation call. Memory allocation is 2682 * only attempted if a new entry is needed. Therefore, 2683 * this implies there is not an entry in the 2684 * reserve map. 2685 * 2686 * For shared mappings, no entry in the map indicates 2687 * no reservation. We are done. 2688 */ 2689 if (!(vma->vm_flags & VM_MAYSHARE)) 2690 /* 2691 * For private mappings, no entry indicates 2692 * a reservation is present. Since we can 2693 * not add an entry, set SetHPageRestoreReserve 2694 * on the page so reserve count will be 2695 * incremented when freed. This reserve will 2696 * be consumed on a subsequent allocation. 2697 */ 2698 SetHPageRestoreReserve(page); 2699 } else 2700 /* 2701 * No reservation present, do nothing 2702 */ 2703 vma_end_reservation(h, vma, address); 2704 } 2705 } 2706 2707 /* 2708 * alloc_and_dissolve_huge_page - Allocate a new page and dissolve the old one 2709 * @h: struct hstate old page belongs to 2710 * @old_page: Old page to dissolve 2711 * @list: List to isolate the page in case we need to 2712 * Returns 0 on success, otherwise negated error. 2713 */ 2714 static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page, 2715 struct list_head *list) 2716 { 2717 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; 2718 int nid = page_to_nid(old_page); 2719 bool alloc_retry = false; 2720 struct page *new_page; 2721 int ret = 0; 2722 2723 /* 2724 * Before dissolving the page, we need to allocate a new one for the 2725 * pool to remain stable. 
Here, we allocate the page and 'prep' it 2726 * by doing everything but actually updating counters and adding to 2727 * the pool. This simplifies and let us do most of the processing 2728 * under the lock. 2729 */ 2730 alloc_retry: 2731 new_page = alloc_buddy_huge_page(h, gfp_mask, nid, NULL, NULL); 2732 if (!new_page) 2733 return -ENOMEM; 2734 /* 2735 * If all goes well, this page will be directly added to the free 2736 * list in the pool. For this the ref count needs to be zero. 2737 * Attempt to drop now, and retry once if needed. It is VERY 2738 * unlikely there is another ref on the page. 2739 * 2740 * If someone else has a reference to the page, it will be freed 2741 * when they drop their ref. Abuse temporary page flag to accomplish 2742 * this. Retry once if there is an inflated ref count. 2743 */ 2744 SetHPageTemporary(new_page); 2745 if (!put_page_testzero(new_page)) { 2746 if (alloc_retry) 2747 return -EBUSY; 2748 2749 alloc_retry = true; 2750 goto alloc_retry; 2751 } 2752 ClearHPageTemporary(new_page); 2753 2754 __prep_new_huge_page(h, new_page); 2755 2756 retry: 2757 spin_lock_irq(&hugetlb_lock); 2758 if (!PageHuge(old_page)) { 2759 /* 2760 * Freed from under us. Drop new_page too. 2761 */ 2762 goto free_new; 2763 } else if (page_count(old_page)) { 2764 /* 2765 * Someone has grabbed the page, try to isolate it here. 2766 * Fail with -EBUSY if not possible. 2767 */ 2768 spin_unlock_irq(&hugetlb_lock); 2769 ret = isolate_hugetlb(old_page, list); 2770 spin_lock_irq(&hugetlb_lock); 2771 goto free_new; 2772 } else if (!HPageFreed(old_page)) { 2773 /* 2774 * Page's refcount is 0 but it has not been enqueued in the 2775 * freelist yet. Race window is small, so we can succeed here if 2776 * we retry. 2777 */ 2778 spin_unlock_irq(&hugetlb_lock); 2779 cond_resched(); 2780 goto retry; 2781 } else { 2782 /* 2783 * Ok, old_page is still a genuine free hugepage. Remove it from 2784 * the freelist and decrease the counters. These will be 2785 * incremented again when calling __prep_account_new_huge_page() 2786 * and enqueue_huge_page() for new_page. The counters will remain 2787 * stable since this happens under the lock. 2788 */ 2789 remove_hugetlb_page(h, old_page, false); 2790 2791 /* 2792 * Ref count on new page is already zero as it was dropped 2793 * earlier. It can be directly added to the pool free list. 2794 */ 2795 __prep_account_new_huge_page(h, nid); 2796 enqueue_huge_page(h, new_page); 2797 2798 /* 2799 * Pages have been replaced, we can safely free the old one. 2800 */ 2801 spin_unlock_irq(&hugetlb_lock); 2802 update_and_free_page(h, old_page, false); 2803 } 2804 2805 return ret; 2806 2807 free_new: 2808 spin_unlock_irq(&hugetlb_lock); 2809 /* Page has a zero ref count, but needs a ref to be freed */ 2810 set_page_refcounted(new_page); 2811 update_and_free_page(h, new_page, false); 2812 2813 return ret; 2814 } 2815 2816 int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list) 2817 { 2818 struct hstate *h; 2819 struct page *head; 2820 int ret = -EBUSY; 2821 2822 /* 2823 * The page might have been dissolved from under our feet, so make sure 2824 * to carefully check the state under the lock. 2825 * Return success when racing as if we dissolved the page ourselves. 
2826 */ 2827 spin_lock_irq(&hugetlb_lock); 2828 if (PageHuge(page)) { 2829 head = compound_head(page); 2830 h = page_hstate(head); 2831 } else { 2832 spin_unlock_irq(&hugetlb_lock); 2833 return 0; 2834 } 2835 spin_unlock_irq(&hugetlb_lock); 2836 2837 /* 2838 * Fence off gigantic pages as there is a cyclic dependency between 2839 * alloc_contig_range and them. Return -ENOMEM as this has the effect 2840 * of bailing out right away without further retrying. 2841 */ 2842 if (hstate_is_gigantic(h)) 2843 return -ENOMEM; 2844 2845 if (page_count(head) && !isolate_hugetlb(head, list)) 2846 ret = 0; 2847 else if (!page_count(head)) 2848 ret = alloc_and_dissolve_huge_page(h, head, list); 2849 2850 return ret; 2851 } 2852 2853 struct page *alloc_huge_page(struct vm_area_struct *vma, 2854 unsigned long addr, int avoid_reserve) 2855 { 2856 struct hugepage_subpool *spool = subpool_vma(vma); 2857 struct hstate *h = hstate_vma(vma); 2858 struct page *page; 2859 long map_chg, map_commit; 2860 long gbl_chg; 2861 int ret, idx; 2862 struct hugetlb_cgroup *h_cg; 2863 bool deferred_reserve; 2864 2865 idx = hstate_index(h); 2866 /* 2867 * Examine the region/reserve map to determine if the process 2868 * has a reservation for the page to be allocated. A return 2869 * code of zero indicates a reservation exists (no change). 2870 */ 2871 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); 2872 if (map_chg < 0) 2873 return ERR_PTR(-ENOMEM); 2874 2875 /* 2876 * Processes that did not create the mapping will have no 2877 * reserves as indicated by the region/reserve map. Check 2878 * that the allocation will not exceed the subpool limit. 2879 * Allocations for MAP_NORESERVE mappings also need to be 2880 * checked against any subpool limit. 2881 */ 2882 if (map_chg || avoid_reserve) { 2883 gbl_chg = hugepage_subpool_get_pages(spool, 1); 2884 if (gbl_chg < 0) { 2885 vma_end_reservation(h, vma, addr); 2886 return ERR_PTR(-ENOSPC); 2887 } 2888 2889 /* 2890 * Even though there was no reservation in the region/reserve 2891 * map, there could be reservations associated with the 2892 * subpool that can be used. This would be indicated if the 2893 * return value of hugepage_subpool_get_pages() is zero. 2894 * However, if avoid_reserve is specified we still avoid even 2895 * the subpool reservations. 2896 */ 2897 if (avoid_reserve) 2898 gbl_chg = 1; 2899 } 2900 2901 /* If this allocation is not consuming a reservation, charge it now. 2902 */ 2903 deferred_reserve = map_chg || avoid_reserve; 2904 if (deferred_reserve) { 2905 ret = hugetlb_cgroup_charge_cgroup_rsvd( 2906 idx, pages_per_huge_page(h), &h_cg); 2907 if (ret) 2908 goto out_subpool_put; 2909 } 2910 2911 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); 2912 if (ret) 2913 goto out_uncharge_cgroup_reservation; 2914 2915 spin_lock_irq(&hugetlb_lock); 2916 /* 2917 * glb_chg is passed to indicate whether or not a page must be taken 2918 * from the global free pool (global change). gbl_chg == 0 indicates 2919 * a reservation exists for the allocation. 
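 * gbl_chg == 1 means a page must come out of the global free count; the
 * dequeue below then only succeeds if free pages remain after existing
 * reservations are honored.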
2920 */ 2921 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg); 2922 if (!page) { 2923 spin_unlock_irq(&hugetlb_lock); 2924 page = alloc_buddy_huge_page_with_mpol(h, vma, addr); 2925 if (!page) 2926 goto out_uncharge_cgroup; 2927 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) { 2928 SetHPageRestoreReserve(page); 2929 h->resv_huge_pages--; 2930 } 2931 spin_lock_irq(&hugetlb_lock); 2932 list_add(&page->lru, &h->hugepage_activelist); 2933 /* Fall through */ 2934 } 2935 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page); 2936 /* If allocation is not consuming a reservation, also store the 2937 * hugetlb_cgroup pointer on the page. 2938 */ 2939 if (deferred_reserve) { 2940 hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h), 2941 h_cg, page); 2942 } 2943 2944 spin_unlock_irq(&hugetlb_lock); 2945 2946 hugetlb_set_page_subpool(page, spool); 2947 2948 map_commit = vma_commit_reservation(h, vma, addr); 2949 if (unlikely(map_chg > map_commit)) { 2950 /* 2951 * The page was added to the reservation map between 2952 * vma_needs_reservation and vma_commit_reservation. 2953 * This indicates a race with hugetlb_reserve_pages. 2954 * Adjust for the subpool count incremented above AND 2955 * in hugetlb_reserve_pages for the same page. Also, 2956 * the reservation count added in hugetlb_reserve_pages 2957 * no longer applies. 2958 */ 2959 long rsv_adjust; 2960 2961 rsv_adjust = hugepage_subpool_put_pages(spool, 1); 2962 hugetlb_acct_memory(h, -rsv_adjust); 2963 if (deferred_reserve) 2964 hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h), 2965 pages_per_huge_page(h), page); 2966 } 2967 return page; 2968 2969 out_uncharge_cgroup: 2970 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); 2971 out_uncharge_cgroup_reservation: 2972 if (deferred_reserve) 2973 hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h), 2974 h_cg); 2975 out_subpool_put: 2976 if (map_chg || avoid_reserve) 2977 hugepage_subpool_put_pages(spool, 1); 2978 vma_end_reservation(h, vma, addr); 2979 return ERR_PTR(-ENOSPC); 2980 } 2981 2982 int alloc_bootmem_huge_page(struct hstate *h, int nid) 2983 __attribute__ ((weak, alias("__alloc_bootmem_huge_page"))); 2984 int __alloc_bootmem_huge_page(struct hstate *h, int nid) 2985 { 2986 struct huge_bootmem_page *m = NULL; /* initialize for clang */ 2987 int nr_nodes, node; 2988 2989 /* do node specific alloc */ 2990 if (nid != NUMA_NO_NODE) { 2991 m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h), 2992 0, MEMBLOCK_ALLOC_ACCESSIBLE, nid); 2993 if (!m) 2994 return 0; 2995 goto found; 2996 } 2997 /* allocate from next node when distributing huge pages */ 2998 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) { 2999 m = memblock_alloc_try_nid_raw( 3000 huge_page_size(h), huge_page_size(h), 3001 0, MEMBLOCK_ALLOC_ACCESSIBLE, node); 3002 /* 3003 * Use the beginning of the huge page to store the 3004 * huge_bootmem_page struct (until gather_bootmem 3005 * puts them into the mem_map). 3006 */ 3007 if (!m) 3008 return 0; 3009 goto found; 3010 } 3011 3012 found: 3013 /* Put them into a private list first because mem_map is not up yet */ 3014 INIT_LIST_HEAD(&m->list); 3015 list_add(&m->list, &huge_boot_pages); 3016 m->hstate = h; 3017 return 1; 3018 } 3019 3020 /* 3021 * Put bootmem huge pages into the standard lists after mem_map is up. 3022 * Note: This only applies to gigantic (order > MAX_ORDER) pages. 
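 *
 * Each entry on huge_boot_pages is a struct huge_bootmem_page that
 * __alloc_bootmem_huge_page() placed at the start of its bootmem block,
 * so virt_to_page() on the entry yields the head page of the future
 * gigantic hugepage.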
3023 */ 3024 static void __init gather_bootmem_prealloc(void) 3025 { 3026 struct huge_bootmem_page *m; 3027 3028 list_for_each_entry(m, &huge_boot_pages, list) { 3029 struct page *page = virt_to_page(m); 3030 struct hstate *h = m->hstate; 3031 3032 VM_BUG_ON(!hstate_is_gigantic(h)); 3033 WARN_ON(page_count(page) != 1); 3034 if (prep_compound_gigantic_page(page, huge_page_order(h))) { 3035 WARN_ON(PageReserved(page)); 3036 prep_new_huge_page(h, page, page_to_nid(page)); 3037 put_page(page); /* add to the hugepage allocator */ 3038 } else { 3039 /* VERY unlikely inflated ref count on a tail page */ 3040 free_gigantic_page(page, huge_page_order(h)); 3041 } 3042 3043 /* 3044 * We need to restore the 'stolen' pages to totalram_pages 3045 * in order to fix confusing memory reports from free(1) and 3046 * other side-effects, like CommitLimit going negative. 3047 */ 3048 adjust_managed_page_count(page, pages_per_huge_page(h)); 3049 cond_resched(); 3050 } 3051 } 3052 static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid) 3053 { 3054 unsigned long i; 3055 char buf[32]; 3056 3057 for (i = 0; i < h->max_huge_pages_node[nid]; ++i) { 3058 if (hstate_is_gigantic(h)) { 3059 if (!alloc_bootmem_huge_page(h, nid)) 3060 break; 3061 } else { 3062 struct page *page; 3063 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; 3064 3065 page = alloc_fresh_huge_page(h, gfp_mask, nid, 3066 &node_states[N_MEMORY], NULL); 3067 if (!page) 3068 break; 3069 put_page(page); /* free it into the hugepage allocator */ 3070 } 3071 cond_resched(); 3072 } 3073 if (i == h->max_huge_pages_node[nid]) 3074 return; 3075 3076 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3077 pr_warn("HugeTLB: allocating %u of page size %s failed node%d. Only allocated %lu hugepages.\n", 3078 h->max_huge_pages_node[nid], buf, nid, i); 3079 h->max_huge_pages -= (h->max_huge_pages_node[nid] - i); 3080 h->max_huge_pages_node[nid] = i; 3081 } 3082 3083 static void __init hugetlb_hstate_alloc_pages(struct hstate *h) 3084 { 3085 unsigned long i; 3086 nodemask_t *node_alloc_noretry; 3087 bool node_specific_alloc = false; 3088 3089 /* skip gigantic hugepages allocation if hugetlb_cma enabled */ 3090 if (hstate_is_gigantic(h) && hugetlb_cma_size) { 3091 pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n"); 3092 return; 3093 } 3094 3095 /* do node specific alloc */ 3096 for_each_online_node(i) { 3097 if (h->max_huge_pages_node[i] > 0) { 3098 hugetlb_hstate_alloc_pages_onenode(h, i); 3099 node_specific_alloc = true; 3100 } 3101 } 3102 3103 if (node_specific_alloc) 3104 return; 3105 3106 /* below will do all node balanced alloc */ 3107 if (!hstate_is_gigantic(h)) { 3108 /* 3109 * Bit mask controlling how hard we retry per-node allocations. 3110 * Ignore errors as lower level routines can deal with 3111 * node_alloc_noretry == NULL. If this kmalloc fails at boot 3112 * time, we are likely in bigger trouble. 
3113 */ 3114 node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry), 3115 GFP_KERNEL); 3116 } else { 3117 /* allocations done at boot time */ 3118 node_alloc_noretry = NULL; 3119 } 3120 3121 /* bit mask controlling how hard we retry per-node allocations */ 3122 if (node_alloc_noretry) 3123 nodes_clear(*node_alloc_noretry); 3124 3125 for (i = 0; i < h->max_huge_pages; ++i) { 3126 if (hstate_is_gigantic(h)) { 3127 if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE)) 3128 break; 3129 } else if (!alloc_pool_huge_page(h, 3130 &node_states[N_MEMORY], 3131 node_alloc_noretry)) 3132 break; 3133 cond_resched(); 3134 } 3135 if (i < h->max_huge_pages) { 3136 char buf[32]; 3137 3138 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3139 pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n", 3140 h->max_huge_pages, buf, i); 3141 h->max_huge_pages = i; 3142 } 3143 kfree(node_alloc_noretry); 3144 } 3145 3146 static void __init hugetlb_init_hstates(void) 3147 { 3148 struct hstate *h, *h2; 3149 3150 for_each_hstate(h) { 3151 /* oversize hugepages were init'ed in early boot */ 3152 if (!hstate_is_gigantic(h)) 3153 hugetlb_hstate_alloc_pages(h); 3154 3155 /* 3156 * Set demote order for each hstate. Note that 3157 * h->demote_order is initially 0. 3158 * - We can not demote gigantic pages if runtime freeing 3159 * is not supported, so skip this. 3160 * - If CMA allocation is possible, we can not demote 3161 * HUGETLB_PAGE_ORDER or smaller size pages. 3162 */ 3163 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 3164 continue; 3165 if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER) 3166 continue; 3167 for_each_hstate(h2) { 3168 if (h2 == h) 3169 continue; 3170 if (h2->order < h->order && 3171 h2->order > h->demote_order) 3172 h->demote_order = h2->order; 3173 } 3174 } 3175 } 3176 3177 static void __init report_hugepages(void) 3178 { 3179 struct hstate *h; 3180 3181 for_each_hstate(h) { 3182 char buf[32]; 3183 3184 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); 3185 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n", 3186 buf, h->free_huge_pages); 3187 } 3188 } 3189 3190 #ifdef CONFIG_HIGHMEM 3191 static void try_to_free_low(struct hstate *h, unsigned long count, 3192 nodemask_t *nodes_allowed) 3193 { 3194 int i; 3195 LIST_HEAD(page_list); 3196 3197 lockdep_assert_held(&hugetlb_lock); 3198 if (hstate_is_gigantic(h)) 3199 return; 3200 3201 /* 3202 * Collect pages to be freed on a list, and free after dropping lock 3203 */ 3204 for_each_node_mask(i, *nodes_allowed) { 3205 struct page *page, *next; 3206 struct list_head *freel = &h->hugepage_freelists[i]; 3207 list_for_each_entry_safe(page, next, freel, lru) { 3208 if (count >= h->nr_huge_pages) 3209 goto out; 3210 if (PageHighMem(page)) 3211 continue; 3212 remove_hugetlb_page(h, page, false); 3213 list_add(&page->lru, &page_list); 3214 } 3215 } 3216 3217 out: 3218 spin_unlock_irq(&hugetlb_lock); 3219 update_and_free_pages_bulk(h, &page_list); 3220 spin_lock_irq(&hugetlb_lock); 3221 } 3222 #else 3223 static inline void try_to_free_low(struct hstate *h, unsigned long count, 3224 nodemask_t *nodes_allowed) 3225 { 3226 } 3227 #endif 3228 3229 /* 3230 * Increment or decrement surplus_huge_pages. Keep node-specific counters 3231 * balanced by operating on them in a round-robin fashion. 3232 * Returns 1 if an adjustment was made. 
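 *
 * delta == -1 converts one surplus huge page into a persistent one (used
 * while growing the pool); delta == 1 does the reverse while shrinking it.
 * 0 is returned if no node in nodes_allowed can absorb the adjustment.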
3233 */ 3234 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, 3235 int delta) 3236 { 3237 int nr_nodes, node; 3238 3239 lockdep_assert_held(&hugetlb_lock); 3240 VM_BUG_ON(delta != -1 && delta != 1); 3241 3242 if (delta < 0) { 3243 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { 3244 if (h->surplus_huge_pages_node[node]) 3245 goto found; 3246 } 3247 } else { 3248 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 3249 if (h->surplus_huge_pages_node[node] < 3250 h->nr_huge_pages_node[node]) 3251 goto found; 3252 } 3253 } 3254 return 0; 3255 3256 found: 3257 h->surplus_huge_pages += delta; 3258 h->surplus_huge_pages_node[node] += delta; 3259 return 1; 3260 } 3261 3262 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) 3263 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, 3264 nodemask_t *nodes_allowed) 3265 { 3266 unsigned long min_count, ret; 3267 struct page *page; 3268 LIST_HEAD(page_list); 3269 NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL); 3270 3271 /* 3272 * Bit mask controlling how hard we retry per-node allocations. 3273 * If we can not allocate the bit mask, do not attempt to allocate 3274 * the requested huge pages. 3275 */ 3276 if (node_alloc_noretry) 3277 nodes_clear(*node_alloc_noretry); 3278 else 3279 return -ENOMEM; 3280 3281 /* 3282 * resize_lock mutex prevents concurrent adjustments to number of 3283 * pages in hstate via the proc/sysfs interfaces. 3284 */ 3285 mutex_lock(&h->resize_lock); 3286 flush_free_hpage_work(h); 3287 spin_lock_irq(&hugetlb_lock); 3288 3289 /* 3290 * Check for a node specific request. 3291 * Changing node specific huge page count may require a corresponding 3292 * change to the global count. In any case, the passed node mask 3293 * (nodes_allowed) will restrict alloc/free to the specified node. 3294 */ 3295 if (nid != NUMA_NO_NODE) { 3296 unsigned long old_count = count; 3297 3298 count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; 3299 /* 3300 * User may have specified a large count value which caused the 3301 * above calculation to overflow. In this case, they wanted 3302 * to allocate as many huge pages as possible. Set count to 3303 * largest possible value to align with their intention. 3304 */ 3305 if (count < old_count) 3306 count = ULONG_MAX; 3307 } 3308 3309 /* 3310 * Gigantic pages runtime allocation depend on the capability for large 3311 * page range allocation. 3312 * If the system does not provide this feature, return an error when 3313 * the user tries to allocate gigantic pages but let the user free the 3314 * boottime allocated gigantic pages. 3315 */ 3316 if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) { 3317 if (count > persistent_huge_pages(h)) { 3318 spin_unlock_irq(&hugetlb_lock); 3319 mutex_unlock(&h->resize_lock); 3320 NODEMASK_FREE(node_alloc_noretry); 3321 return -EINVAL; 3322 } 3323 /* Fall through to decrease pool */ 3324 } 3325 3326 /* 3327 * Increase the pool size 3328 * First take pages out of surplus state. Then make up the 3329 * remaining difference by allocating fresh huge pages. 3330 * 3331 * We might race with alloc_surplus_huge_page() here and be unable 3332 * to convert a surplus huge page to a normal huge page. That is 3333 * not critical, though, it just means the overall size of the 3334 * pool might be one hugepage larger than it needs to be, but 3335 * within all the constraints specified by the sysctls. 
3336 */ 3337 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { 3338 if (!adjust_pool_surplus(h, nodes_allowed, -1)) 3339 break; 3340 } 3341 3342 while (count > persistent_huge_pages(h)) { 3343 /* 3344 * If this allocation races such that we no longer need the 3345 * page, free_huge_page will handle it by freeing the page 3346 * and reducing the surplus. 3347 */ 3348 spin_unlock_irq(&hugetlb_lock); 3349 3350 /* yield cpu to avoid soft lockup */ 3351 cond_resched(); 3352 3353 ret = alloc_pool_huge_page(h, nodes_allowed, 3354 node_alloc_noretry); 3355 spin_lock_irq(&hugetlb_lock); 3356 if (!ret) 3357 goto out; 3358 3359 /* Bail for signals. Probably ctrl-c from user */ 3360 if (signal_pending(current)) 3361 goto out; 3362 } 3363 3364 /* 3365 * Decrease the pool size 3366 * First return free pages to the buddy allocator (being careful 3367 * to keep enough around to satisfy reservations). Then place 3368 * pages into surplus state as needed so the pool will shrink 3369 * to the desired size as pages become free. 3370 * 3371 * By placing pages into the surplus state independent of the 3372 * overcommit value, we are allowing the surplus pool size to 3373 * exceed overcommit. There are few sane options here. Since 3374 * alloc_surplus_huge_page() is checking the global counter, 3375 * though, we'll note that we're not allowed to exceed surplus 3376 * and won't grow the pool anywhere else. Not until one of the 3377 * sysctls are changed, or the surplus pages go out of use. 3378 */ 3379 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; 3380 min_count = max(count, min_count); 3381 try_to_free_low(h, min_count, nodes_allowed); 3382 3383 /* 3384 * Collect pages to be removed on list without dropping lock 3385 */ 3386 while (min_count < persistent_huge_pages(h)) { 3387 page = remove_pool_huge_page(h, nodes_allowed, 0); 3388 if (!page) 3389 break; 3390 3391 list_add(&page->lru, &page_list); 3392 } 3393 /* free the pages after dropping lock */ 3394 spin_unlock_irq(&hugetlb_lock); 3395 update_and_free_pages_bulk(h, &page_list); 3396 flush_free_hpage_work(h); 3397 spin_lock_irq(&hugetlb_lock); 3398 3399 while (count < persistent_huge_pages(h)) { 3400 if (!adjust_pool_surplus(h, nodes_allowed, 1)) 3401 break; 3402 } 3403 out: 3404 h->max_huge_pages = persistent_huge_pages(h); 3405 spin_unlock_irq(&hugetlb_lock); 3406 mutex_unlock(&h->resize_lock); 3407 3408 NODEMASK_FREE(node_alloc_noretry); 3409 3410 return 0; 3411 } 3412 3413 static int demote_free_huge_page(struct hstate *h, struct page *page) 3414 { 3415 int i, nid = page_to_nid(page); 3416 struct hstate *target_hstate; 3417 int rc = 0; 3418 3419 target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order); 3420 3421 remove_hugetlb_page_for_demote(h, page, false); 3422 spin_unlock_irq(&hugetlb_lock); 3423 3424 rc = hugetlb_vmemmap_alloc(h, page); 3425 if (rc) { 3426 /* Allocation of vmemmmap failed, we can not demote page */ 3427 spin_lock_irq(&hugetlb_lock); 3428 set_page_refcounted(page); 3429 add_hugetlb_page(h, page, false); 3430 return rc; 3431 } 3432 3433 /* 3434 * Use destroy_compound_hugetlb_page_for_demote for all huge page 3435 * sizes as it will not ref count pages. 3436 */ 3437 destroy_compound_hugetlb_page_for_demote(page, huge_page_order(h)); 3438 3439 /* 3440 * Taking target hstate mutex synchronizes with set_max_huge_pages. 3441 * Without the mutex, pages added to target hstate could be marked 3442 * as surplus. 3443 * 3444 * Note that we already hold h->resize_lock. 
To prevent deadlock, 3445 * use the convention of always taking larger size hstate mutex first. 3446 */ 3447 mutex_lock(&target_hstate->resize_lock); 3448 for (i = 0; i < pages_per_huge_page(h); 3449 i += pages_per_huge_page(target_hstate)) { 3450 if (hstate_is_gigantic(target_hstate)) 3451 prep_compound_gigantic_page_for_demote(page + i, 3452 target_hstate->order); 3453 else 3454 prep_compound_page(page + i, target_hstate->order); 3455 set_page_private(page + i, 0); 3456 set_page_refcounted(page + i); 3457 prep_new_huge_page(target_hstate, page + i, nid); 3458 put_page(page + i); 3459 } 3460 mutex_unlock(&target_hstate->resize_lock); 3461 3462 spin_lock_irq(&hugetlb_lock); 3463 3464 /* 3465 * Not absolutely necessary, but for consistency update max_huge_pages 3466 * based on pool changes for the demoted page. 3467 */ 3468 h->max_huge_pages--; 3469 target_hstate->max_huge_pages += pages_per_huge_page(h); 3470 3471 return rc; 3472 } 3473 3474 static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed) 3475 __must_hold(&hugetlb_lock) 3476 { 3477 int nr_nodes, node; 3478 struct page *page; 3479 3480 lockdep_assert_held(&hugetlb_lock); 3481 3482 /* We should never get here if no demote order */ 3483 if (!h->demote_order) { 3484 pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n"); 3485 return -EINVAL; /* internal error */ 3486 } 3487 3488 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { 3489 list_for_each_entry(page, &h->hugepage_freelists[node], lru) { 3490 if (PageHWPoison(page)) 3491 continue; 3492 3493 return demote_free_huge_page(h, page); 3494 } 3495 } 3496 3497 /* 3498 * Only way to get here is if all pages on free lists are poisoned. 3499 * Return -EBUSY so that caller will not retry. 3500 */ 3501 return -EBUSY; 3502 } 3503 3504 #define HSTATE_ATTR_RO(_name) \ 3505 static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 3506 3507 #define HSTATE_ATTR_WO(_name) \ 3508 static struct kobj_attribute _name##_attr = __ATTR_WO(_name) 3509 3510 #define HSTATE_ATTR(_name) \ 3511 static struct kobj_attribute _name##_attr = __ATTR_RW(_name) 3512 3513 static struct kobject *hugepages_kobj; 3514 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 3515 3516 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp); 3517 3518 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp) 3519 { 3520 int i; 3521 3522 for (i = 0; i < HUGE_MAX_HSTATE; i++) 3523 if (hstate_kobjs[i] == kobj) { 3524 if (nidp) 3525 *nidp = NUMA_NO_NODE; 3526 return &hstates[i]; 3527 } 3528 3529 return kobj_to_node_hstate(kobj, nidp); 3530 } 3531 3532 static ssize_t nr_hugepages_show_common(struct kobject *kobj, 3533 struct kobj_attribute *attr, char *buf) 3534 { 3535 struct hstate *h; 3536 unsigned long nr_huge_pages; 3537 int nid; 3538 3539 h = kobj_to_hstate(kobj, &nid); 3540 if (nid == NUMA_NO_NODE) 3541 nr_huge_pages = h->nr_huge_pages; 3542 else 3543 nr_huge_pages = h->nr_huge_pages_node[nid]; 3544 3545 return sysfs_emit(buf, "%lu\n", nr_huge_pages); 3546 } 3547 3548 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy, 3549 struct hstate *h, int nid, 3550 unsigned long count, size_t len) 3551 { 3552 int err; 3553 nodemask_t nodes_allowed, *n_mask; 3554 3555 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) 3556 return -EINVAL; 3557 3558 if (nid == NUMA_NO_NODE) { 3559 /* 3560 * global hstate attribute 3561 */ 3562 if (!(obey_mempolicy && 3563 init_nodemask_of_mempolicy(&nodes_allowed))) 3564 n_mask = 
&node_states[N_MEMORY]; 3565 else 3566 n_mask = &nodes_allowed; 3567 } else { 3568 /* 3569 * Node specific request. count adjustment happens in 3570 * set_max_huge_pages() after acquiring hugetlb_lock. 3571 */ 3572 init_nodemask_of_node(&nodes_allowed, nid); 3573 n_mask = &nodes_allowed; 3574 } 3575 3576 err = set_max_huge_pages(h, count, nid, n_mask); 3577 3578 return err ? err : len; 3579 } 3580 3581 static ssize_t nr_hugepages_store_common(bool obey_mempolicy, 3582 struct kobject *kobj, const char *buf, 3583 size_t len) 3584 { 3585 struct hstate *h; 3586 unsigned long count; 3587 int nid; 3588 int err; 3589 3590 err = kstrtoul(buf, 10, &count); 3591 if (err) 3592 return err; 3593 3594 h = kobj_to_hstate(kobj, &nid); 3595 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len); 3596 } 3597 3598 static ssize_t nr_hugepages_show(struct kobject *kobj, 3599 struct kobj_attribute *attr, char *buf) 3600 { 3601 return nr_hugepages_show_common(kobj, attr, buf); 3602 } 3603 3604 static ssize_t nr_hugepages_store(struct kobject *kobj, 3605 struct kobj_attribute *attr, const char *buf, size_t len) 3606 { 3607 return nr_hugepages_store_common(false, kobj, buf, len); 3608 } 3609 HSTATE_ATTR(nr_hugepages); 3610 3611 #ifdef CONFIG_NUMA 3612 3613 /* 3614 * hstate attribute for optionally mempolicy-based constraint on persistent 3615 * huge page alloc/free. 3616 */ 3617 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj, 3618 struct kobj_attribute *attr, 3619 char *buf) 3620 { 3621 return nr_hugepages_show_common(kobj, attr, buf); 3622 } 3623 3624 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj, 3625 struct kobj_attribute *attr, const char *buf, size_t len) 3626 { 3627 return nr_hugepages_store_common(true, kobj, buf, len); 3628 } 3629 HSTATE_ATTR(nr_hugepages_mempolicy); 3630 #endif 3631 3632 3633 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj, 3634 struct kobj_attribute *attr, char *buf) 3635 { 3636 struct hstate *h = kobj_to_hstate(kobj, NULL); 3637 return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages); 3638 } 3639 3640 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj, 3641 struct kobj_attribute *attr, const char *buf, size_t count) 3642 { 3643 int err; 3644 unsigned long input; 3645 struct hstate *h = kobj_to_hstate(kobj, NULL); 3646 3647 if (hstate_is_gigantic(h)) 3648 return -EINVAL; 3649 3650 err = kstrtoul(buf, 10, &input); 3651 if (err) 3652 return err; 3653 3654 spin_lock_irq(&hugetlb_lock); 3655 h->nr_overcommit_huge_pages = input; 3656 spin_unlock_irq(&hugetlb_lock); 3657 3658 return count; 3659 } 3660 HSTATE_ATTR(nr_overcommit_hugepages); 3661 3662 static ssize_t free_hugepages_show(struct kobject *kobj, 3663 struct kobj_attribute *attr, char *buf) 3664 { 3665 struct hstate *h; 3666 unsigned long free_huge_pages; 3667 int nid; 3668 3669 h = kobj_to_hstate(kobj, &nid); 3670 if (nid == NUMA_NO_NODE) 3671 free_huge_pages = h->free_huge_pages; 3672 else 3673 free_huge_pages = h->free_huge_pages_node[nid]; 3674 3675 return sysfs_emit(buf, "%lu\n", free_huge_pages); 3676 } 3677 HSTATE_ATTR_RO(free_hugepages); 3678 3679 static ssize_t resv_hugepages_show(struct kobject *kobj, 3680 struct kobj_attribute *attr, char *buf) 3681 { 3682 struct hstate *h = kobj_to_hstate(kobj, NULL); 3683 return sysfs_emit(buf, "%lu\n", h->resv_huge_pages); 3684 } 3685 HSTATE_ATTR_RO(resv_hugepages); 3686 3687 static ssize_t surplus_hugepages_show(struct kobject *kobj, 3688 struct kobj_attribute *attr, char *buf) 3689 { 3690 struct 
hstate *h; 3691 unsigned long surplus_huge_pages; 3692 int nid; 3693 3694 h = kobj_to_hstate(kobj, &nid); 3695 if (nid == NUMA_NO_NODE) 3696 surplus_huge_pages = h->surplus_huge_pages; 3697 else 3698 surplus_huge_pages = h->surplus_huge_pages_node[nid]; 3699 3700 return sysfs_emit(buf, "%lu\n", surplus_huge_pages); 3701 } 3702 HSTATE_ATTR_RO(surplus_hugepages); 3703 3704 static ssize_t demote_store(struct kobject *kobj, 3705 struct kobj_attribute *attr, const char *buf, size_t len) 3706 { 3707 unsigned long nr_demote; 3708 unsigned long nr_available; 3709 nodemask_t nodes_allowed, *n_mask; 3710 struct hstate *h; 3711 int err = 0; 3712 int nid; 3713 3714 err = kstrtoul(buf, 10, &nr_demote); 3715 if (err) 3716 return err; 3717 h = kobj_to_hstate(kobj, &nid); 3718 3719 if (nid != NUMA_NO_NODE) { 3720 init_nodemask_of_node(&nodes_allowed, nid); 3721 n_mask = &nodes_allowed; 3722 } else { 3723 n_mask = &node_states[N_MEMORY]; 3724 } 3725 3726 /* Synchronize with other sysfs operations modifying huge pages */ 3727 mutex_lock(&h->resize_lock); 3728 spin_lock_irq(&hugetlb_lock); 3729 3730 while (nr_demote) { 3731 /* 3732 * Check for available pages to demote each time thorough the 3733 * loop as demote_pool_huge_page will drop hugetlb_lock. 3734 */ 3735 if (nid != NUMA_NO_NODE) 3736 nr_available = h->free_huge_pages_node[nid]; 3737 else 3738 nr_available = h->free_huge_pages; 3739 nr_available -= h->resv_huge_pages; 3740 if (!nr_available) 3741 break; 3742 3743 err = demote_pool_huge_page(h, n_mask); 3744 if (err) 3745 break; 3746 3747 nr_demote--; 3748 } 3749 3750 spin_unlock_irq(&hugetlb_lock); 3751 mutex_unlock(&h->resize_lock); 3752 3753 if (err) 3754 return err; 3755 return len; 3756 } 3757 HSTATE_ATTR_WO(demote); 3758 3759 static ssize_t demote_size_show(struct kobject *kobj, 3760 struct kobj_attribute *attr, char *buf) 3761 { 3762 int nid; 3763 struct hstate *h = kobj_to_hstate(kobj, &nid); 3764 unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K; 3765 3766 return sysfs_emit(buf, "%lukB\n", demote_size); 3767 } 3768 3769 static ssize_t demote_size_store(struct kobject *kobj, 3770 struct kobj_attribute *attr, 3771 const char *buf, size_t count) 3772 { 3773 struct hstate *h, *demote_hstate; 3774 unsigned long demote_size; 3775 unsigned int demote_order; 3776 int nid; 3777 3778 demote_size = (unsigned long)memparse(buf, NULL); 3779 3780 demote_hstate = size_to_hstate(demote_size); 3781 if (!demote_hstate) 3782 return -EINVAL; 3783 demote_order = demote_hstate->order; 3784 if (demote_order < HUGETLB_PAGE_ORDER) 3785 return -EINVAL; 3786 3787 /* demote order must be smaller than hstate order */ 3788 h = kobj_to_hstate(kobj, &nid); 3789 if (demote_order >= h->order) 3790 return -EINVAL; 3791 3792 /* resize_lock synchronizes access to demote size and writes */ 3793 mutex_lock(&h->resize_lock); 3794 h->demote_order = demote_order; 3795 mutex_unlock(&h->resize_lock); 3796 3797 return count; 3798 } 3799 HSTATE_ATTR(demote_size); 3800 3801 static struct attribute *hstate_attrs[] = { 3802 &nr_hugepages_attr.attr, 3803 &nr_overcommit_hugepages_attr.attr, 3804 &free_hugepages_attr.attr, 3805 &resv_hugepages_attr.attr, 3806 &surplus_hugepages_attr.attr, 3807 #ifdef CONFIG_NUMA 3808 &nr_hugepages_mempolicy_attr.attr, 3809 #endif 3810 NULL, 3811 }; 3812 3813 static const struct attribute_group hstate_attr_group = { 3814 .attrs = hstate_attrs, 3815 }; 3816 3817 static struct attribute *hstate_demote_attrs[] = { 3818 &demote_size_attr.attr, 3819 &demote_attr.attr, 3820 NULL, 3821 }; 3822 3823 
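/*
 * Illustrative use of the demote interface above (assuming a 1GB hstate on
 * x86_64; actual sizes and sysfs paths are architecture dependent):
 *
 *   echo 2M > /sys/kernel/mm/hugepages/hugepages-1048576kB/demote_size
 *   echo 1  > /sys/kernel/mm/hugepages/hugepages-1048576kB/demote
 *
 * This demotes one free (unreserved) 1GB huge page into 512 free 2MB huge
 * pages via demote_size_store() and demote_store() above.
 */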
static const struct attribute_group hstate_demote_attr_group = { 3824 .attrs = hstate_demote_attrs, 3825 }; 3826 3827 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, 3828 struct kobject **hstate_kobjs, 3829 const struct attribute_group *hstate_attr_group) 3830 { 3831 int retval; 3832 int hi = hstate_index(h); 3833 3834 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); 3835 if (!hstate_kobjs[hi]) 3836 return -ENOMEM; 3837 3838 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group); 3839 if (retval) { 3840 kobject_put(hstate_kobjs[hi]); 3841 hstate_kobjs[hi] = NULL; 3842 } 3843 3844 if (h->demote_order) { 3845 if (sysfs_create_group(hstate_kobjs[hi], 3846 &hstate_demote_attr_group)) 3847 pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name); 3848 } 3849 3850 return retval; 3851 } 3852 3853 static void __init hugetlb_sysfs_init(void) 3854 { 3855 struct hstate *h; 3856 int err; 3857 3858 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj); 3859 if (!hugepages_kobj) 3860 return; 3861 3862 for_each_hstate(h) { 3863 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, 3864 hstate_kobjs, &hstate_attr_group); 3865 if (err) 3866 pr_err("HugeTLB: Unable to add hstate %s", h->name); 3867 } 3868 } 3869 3870 #ifdef CONFIG_NUMA 3871 3872 /* 3873 * node_hstate/s - associate per node hstate attributes, via their kobjects, 3874 * with node devices in node_devices[] using a parallel array. The array 3875 * index of a node device or _hstate == node id. 3876 * This is here to avoid any static dependency of the node device driver, in 3877 * the base kernel, on the hugetlb module. 3878 */ 3879 struct node_hstate { 3880 struct kobject *hugepages_kobj; 3881 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE]; 3882 }; 3883 static struct node_hstate node_hstates[MAX_NUMNODES]; 3884 3885 /* 3886 * A subset of global hstate attributes for node devices 3887 */ 3888 static struct attribute *per_node_hstate_attrs[] = { 3889 &nr_hugepages_attr.attr, 3890 &free_hugepages_attr.attr, 3891 &surplus_hugepages_attr.attr, 3892 NULL, 3893 }; 3894 3895 static const struct attribute_group per_node_hstate_attr_group = { 3896 .attrs = per_node_hstate_attrs, 3897 }; 3898 3899 /* 3900 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj. 3901 * Returns node id via non-NULL nidp. 3902 */ 3903 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 3904 { 3905 int nid; 3906 3907 for (nid = 0; nid < nr_node_ids; nid++) { 3908 struct node_hstate *nhs = &node_hstates[nid]; 3909 int i; 3910 for (i = 0; i < HUGE_MAX_HSTATE; i++) 3911 if (nhs->hstate_kobjs[i] == kobj) { 3912 if (nidp) 3913 *nidp = nid; 3914 return &hstates[i]; 3915 } 3916 } 3917 3918 BUG(); 3919 return NULL; 3920 } 3921 3922 /* 3923 * Unregister hstate attributes from a single node device. 3924 * No-op if no hstate attributes attached. 3925 */ 3926 static void hugetlb_unregister_node(struct node *node) 3927 { 3928 struct hstate *h; 3929 struct node_hstate *nhs = &node_hstates[node->dev.id]; 3930 3931 if (!nhs->hugepages_kobj) 3932 return; /* no hstate attributes */ 3933 3934 for_each_hstate(h) { 3935 int idx = hstate_index(h); 3936 if (nhs->hstate_kobjs[idx]) { 3937 kobject_put(nhs->hstate_kobjs[idx]); 3938 nhs->hstate_kobjs[idx] = NULL; 3939 } 3940 } 3941 3942 kobject_put(nhs->hugepages_kobj); 3943 nhs->hugepages_kobj = NULL; 3944 } 3945 3946 3947 /* 3948 * Register hstate attributes for a single node device. 3949 * No-op if attributes already registered. 
3950 */ 3951 static void hugetlb_register_node(struct node *node) 3952 { 3953 struct hstate *h; 3954 struct node_hstate *nhs = &node_hstates[node->dev.id]; 3955 int err; 3956 3957 if (nhs->hugepages_kobj) 3958 return; /* already allocated */ 3959 3960 nhs->hugepages_kobj = kobject_create_and_add("hugepages", 3961 &node->dev.kobj); 3962 if (!nhs->hugepages_kobj) 3963 return; 3964 3965 for_each_hstate(h) { 3966 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, 3967 nhs->hstate_kobjs, 3968 &per_node_hstate_attr_group); 3969 if (err) { 3970 pr_err("HugeTLB: Unable to add hstate %s for node %d\n", 3971 h->name, node->dev.id); 3972 hugetlb_unregister_node(node); 3973 break; 3974 } 3975 } 3976 } 3977 3978 /* 3979 * hugetlb init time: register hstate attributes for all registered node 3980 * devices of nodes that have memory. All on-line nodes should have 3981 * registered their associated device by this time. 3982 */ 3983 static void __init hugetlb_register_all_nodes(void) 3984 { 3985 int nid; 3986 3987 for_each_node_state(nid, N_MEMORY) { 3988 struct node *node = node_devices[nid]; 3989 if (node->dev.id == nid) 3990 hugetlb_register_node(node); 3991 } 3992 3993 /* 3994 * Let the node device driver know we're here so it can 3995 * [un]register hstate attributes on node hotplug. 3996 */ 3997 register_hugetlbfs_with_node(hugetlb_register_node, 3998 hugetlb_unregister_node); 3999 } 4000 #else /* !CONFIG_NUMA */ 4001 4002 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp) 4003 { 4004 BUG(); 4005 if (nidp) 4006 *nidp = -1; 4007 return NULL; 4008 } 4009 4010 static void hugetlb_register_all_nodes(void) { } 4011 4012 #endif 4013 4014 static int __init hugetlb_init(void) 4015 { 4016 int i; 4017 4018 BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE < 4019 __NR_HPAGEFLAGS); 4020 4021 if (!hugepages_supported()) { 4022 if (hugetlb_max_hstate || default_hstate_max_huge_pages) 4023 pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n"); 4024 return 0; 4025 } 4026 4027 /* 4028 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists. Some 4029 * architectures depend on setup being done here. 4030 */ 4031 hugetlb_add_hstate(HUGETLB_PAGE_ORDER); 4032 if (!parsed_default_hugepagesz) { 4033 /* 4034 * If we did not parse a default huge page size, set 4035 * default_hstate_idx to HPAGE_SIZE hstate. And, if the 4036 * number of huge pages for this default size was implicitly 4037 * specified, set that here as well. 4038 * Note that the implicit setting will overwrite an explicit 4039 * setting. A warning will be printed in this case. 
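 *
 * For example, on an architecture where HPAGE_SIZE is 2MB, booting with
 * "hugepages=512 hugepagesz=2M hugepages=128" (and no default_hugepagesz=)
 * reaches this point with both counts set: the implicit 512 is applied to
 * the 2MB default hstate and the explicit 128 is warned about and ignored.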
4040 */ 4041 default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE)); 4042 if (default_hstate_max_huge_pages) { 4043 if (default_hstate.max_huge_pages) { 4044 char buf[32]; 4045 4046 string_get_size(huge_page_size(&default_hstate), 4047 1, STRING_UNITS_2, buf, 32); 4048 pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n", 4049 default_hstate.max_huge_pages, buf); 4050 pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n", 4051 default_hstate_max_huge_pages); 4052 } 4053 default_hstate.max_huge_pages = 4054 default_hstate_max_huge_pages; 4055 4056 for_each_online_node(i) 4057 default_hstate.max_huge_pages_node[i] = 4058 default_hugepages_in_node[i]; 4059 } 4060 } 4061 4062 hugetlb_cma_check(); 4063 hugetlb_init_hstates(); 4064 gather_bootmem_prealloc(); 4065 report_hugepages(); 4066 4067 hugetlb_sysfs_init(); 4068 hugetlb_register_all_nodes(); 4069 hugetlb_cgroup_file_init(); 4070 4071 #ifdef CONFIG_SMP 4072 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus()); 4073 #else 4074 num_fault_mutexes = 1; 4075 #endif 4076 hugetlb_fault_mutex_table = 4077 kmalloc_array(num_fault_mutexes, sizeof(struct mutex), 4078 GFP_KERNEL); 4079 BUG_ON(!hugetlb_fault_mutex_table); 4080 4081 for (i = 0; i < num_fault_mutexes; i++) 4082 mutex_init(&hugetlb_fault_mutex_table[i]); 4083 return 0; 4084 } 4085 subsys_initcall(hugetlb_init); 4086 4087 /* Overwritten by architectures with more huge page sizes */ 4088 bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size) 4089 { 4090 return size == HPAGE_SIZE; 4091 } 4092 4093 void __init hugetlb_add_hstate(unsigned int order) 4094 { 4095 struct hstate *h; 4096 unsigned long i; 4097 4098 if (size_to_hstate(PAGE_SIZE << order)) { 4099 return; 4100 } 4101 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE); 4102 BUG_ON(order == 0); 4103 h = &hstates[hugetlb_max_hstate++]; 4104 mutex_init(&h->resize_lock); 4105 h->order = order; 4106 h->mask = ~(huge_page_size(h) - 1); 4107 for (i = 0; i < MAX_NUMNODES; ++i) 4108 INIT_LIST_HEAD(&h->hugepage_freelists[i]); 4109 INIT_LIST_HEAD(&h->hugepage_activelist); 4110 h->next_nid_to_alloc = first_memory_node; 4111 h->next_nid_to_free = first_memory_node; 4112 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", 4113 huge_page_size(h)/1024); 4114 hugetlb_vmemmap_init(h); 4115 4116 parsed_hstate = h; 4117 } 4118 4119 bool __init __weak hugetlb_node_alloc_supported(void) 4120 { 4121 return true; 4122 } 4123 4124 static void __init hugepages_clear_pages_in_node(void) 4125 { 4126 if (!hugetlb_max_hstate) { 4127 default_hstate_max_huge_pages = 0; 4128 memset(default_hugepages_in_node, 0, 4129 MAX_NUMNODES * sizeof(unsigned int)); 4130 } else { 4131 parsed_hstate->max_huge_pages = 0; 4132 memset(parsed_hstate->max_huge_pages_node, 0, 4133 MAX_NUMNODES * sizeof(unsigned int)); 4134 } 4135 } 4136 4137 /* 4138 * hugepages command line processing 4139 * hugepages normally follows a valid hugepagsz or default_hugepagsz 4140 * specification. If not, ignore the hugepages value. hugepages can also 4141 * be the first huge page command line option in which case it implicitly 4142 * specifies the number of huge pages for the default size. 
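 *
 * Illustrative forms accepted below: "hugepages=512" requests 512 pages of
 * the preceding (or default) size, while the node format
 * "hugepages=0:256,1:256" distributes the request across nodes 0 and 1.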
4143 */ 4144 static int __init hugepages_setup(char *s) 4145 { 4146 unsigned long *mhp; 4147 static unsigned long *last_mhp; 4148 int node = NUMA_NO_NODE; 4149 int count; 4150 unsigned long tmp; 4151 char *p = s; 4152 4153 if (!parsed_valid_hugepagesz) { 4154 pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s); 4155 parsed_valid_hugepagesz = true; 4156 return 1; 4157 } 4158 4159 /* 4160 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter 4161 * yet, so this hugepages= parameter goes to the "default hstate". 4162 * Otherwise, it goes with the previously parsed hugepagesz or 4163 * default_hugepagesz. 4164 */ 4165 else if (!hugetlb_max_hstate) 4166 mhp = &default_hstate_max_huge_pages; 4167 else 4168 mhp = &parsed_hstate->max_huge_pages; 4169 4170 if (mhp == last_mhp) { 4171 pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s); 4172 return 1; 4173 } 4174 4175 while (*p) { 4176 count = 0; 4177 if (sscanf(p, "%lu%n", &tmp, &count) != 1) 4178 goto invalid; 4179 /* Parameter is node format */ 4180 if (p[count] == ':') { 4181 if (!hugetlb_node_alloc_supported()) { 4182 pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n"); 4183 return 1; 4184 } 4185 if (tmp >= MAX_NUMNODES || !node_online(tmp)) 4186 goto invalid; 4187 node = array_index_nospec(tmp, MAX_NUMNODES); 4188 p += count + 1; 4189 /* Parse hugepages */ 4190 if (sscanf(p, "%lu%n", &tmp, &count) != 1) 4191 goto invalid; 4192 if (!hugetlb_max_hstate) 4193 default_hugepages_in_node[node] = tmp; 4194 else 4195 parsed_hstate->max_huge_pages_node[node] = tmp; 4196 *mhp += tmp; 4197 /* Go to parse next node*/ 4198 if (p[count] == ',') 4199 p += count + 1; 4200 else 4201 break; 4202 } else { 4203 if (p != s) 4204 goto invalid; 4205 *mhp = tmp; 4206 break; 4207 } 4208 } 4209 4210 /* 4211 * Global state is always initialized later in hugetlb_init. 4212 * But we need to allocate gigantic hstates here early to still 4213 * use the bootmem allocator. 4214 */ 4215 if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate)) 4216 hugetlb_hstate_alloc_pages(parsed_hstate); 4217 4218 last_mhp = mhp; 4219 4220 return 1; 4221 4222 invalid: 4223 pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p); 4224 hugepages_clear_pages_in_node(); 4225 return 1; 4226 } 4227 __setup("hugepages=", hugepages_setup); 4228 4229 /* 4230 * hugepagesz command line processing 4231 * A specific huge page size can only be specified once with hugepagesz. 4232 * hugepagesz is followed by hugepages on the command line. The global 4233 * variable 'parsed_valid_hugepagesz' is used to determine if prior 4234 * hugepagesz argument was valid. 4235 */ 4236 static int __init hugepagesz_setup(char *s) 4237 { 4238 unsigned long size; 4239 struct hstate *h; 4240 4241 parsed_valid_hugepagesz = false; 4242 size = (unsigned long)memparse(s, NULL); 4243 4244 if (!arch_hugetlb_valid_size(size)) { 4245 pr_err("HugeTLB: unsupported hugepagesz=%s\n", s); 4246 return 1; 4247 } 4248 4249 h = size_to_hstate(size); 4250 if (h) { 4251 /* 4252 * hstate for this size already exists. This is normally 4253 * an error, but is allowed if the existing hstate is the 4254 * default hstate. More specifically, it is only allowed if 4255 * the number of huge pages for the default hstate was not 4256 * previously specified. 
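 *
 * For example, "default_hugepagesz=1G hugepagesz=1G hugepages=2" is
 * accepted: the repeated 1G size matches the default hstate and no page
 * count has been applied to it yet, so the trailing hugepages=2 simply
 * targets that hstate. Repeating any other size is rejected below.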
4257 */ 4258 if (!parsed_default_hugepagesz || h != &default_hstate || 4259 default_hstate.max_huge_pages) { 4260 pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s); 4261 return 1; 4262 } 4263 4264 /* 4265 * No need to call hugetlb_add_hstate() as hstate already 4266 * exists. But, do set parsed_hstate so that a following 4267 * hugepages= parameter will be applied to this hstate. 4268 */ 4269 parsed_hstate = h; 4270 parsed_valid_hugepagesz = true; 4271 return 1; 4272 } 4273 4274 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); 4275 parsed_valid_hugepagesz = true; 4276 return 1; 4277 } 4278 __setup("hugepagesz=", hugepagesz_setup); 4279 4280 /* 4281 * default_hugepagesz command line input 4282 * Only one instance of default_hugepagesz allowed on command line. 4283 */ 4284 static int __init default_hugepagesz_setup(char *s) 4285 { 4286 unsigned long size; 4287 int i; 4288 4289 parsed_valid_hugepagesz = false; 4290 if (parsed_default_hugepagesz) { 4291 pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s); 4292 return 1; 4293 } 4294 4295 size = (unsigned long)memparse(s, NULL); 4296 4297 if (!arch_hugetlb_valid_size(size)) { 4298 pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s); 4299 return 1; 4300 } 4301 4302 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT); 4303 parsed_valid_hugepagesz = true; 4304 parsed_default_hugepagesz = true; 4305 default_hstate_idx = hstate_index(size_to_hstate(size)); 4306 4307 /* 4308 * The number of default huge pages (for this size) could have been 4309 * specified as the first hugetlb parameter: hugepages=X. If so, 4310 * then default_hstate_max_huge_pages is set. If the default huge 4311 * page size is gigantic (>= MAX_ORDER), then the pages must be 4312 * allocated here from bootmem allocator. 4313 */ 4314 if (default_hstate_max_huge_pages) { 4315 default_hstate.max_huge_pages = default_hstate_max_huge_pages; 4316 for_each_online_node(i) 4317 default_hstate.max_huge_pages_node[i] = 4318 default_hugepages_in_node[i]; 4319 if (hstate_is_gigantic(&default_hstate)) 4320 hugetlb_hstate_alloc_pages(&default_hstate); 4321 default_hstate_max_huge_pages = 0; 4322 } 4323 4324 return 1; 4325 } 4326 __setup("default_hugepagesz=", default_hugepagesz_setup); 4327 4328 static unsigned int allowed_mems_nr(struct hstate *h) 4329 { 4330 int node; 4331 unsigned int nr = 0; 4332 nodemask_t *mpol_allowed; 4333 unsigned int *array = h->free_huge_pages_node; 4334 gfp_t gfp_mask = htlb_alloc_mask(h); 4335 4336 mpol_allowed = policy_nodemask_current(gfp_mask); 4337 4338 for_each_node_mask(node, cpuset_current_mems_allowed) { 4339 if (!mpol_allowed || node_isset(node, *mpol_allowed)) 4340 nr += array[node]; 4341 } 4342 4343 return nr; 4344 } 4345 4346 #ifdef CONFIG_SYSCTL 4347 static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write, 4348 void *buffer, size_t *length, 4349 loff_t *ppos, unsigned long *out) 4350 { 4351 struct ctl_table dup_table; 4352 4353 /* 4354 * In order to avoid races with __do_proc_doulongvec_minmax(), we 4355 * can duplicate the @table and alter the duplicate of it. 
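 *
 * The on-stack duplicate has its ->data pointed at the caller's local
 * variable (@out), so concurrent handlers never modify the shared
 * ctl_table entry itself.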
4356 */ 4357 dup_table = *table; 4358 dup_table.data = out; 4359 4360 return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos); 4361 } 4362 4363 static int hugetlb_sysctl_handler_common(bool obey_mempolicy, 4364 struct ctl_table *table, int write, 4365 void *buffer, size_t *length, loff_t *ppos) 4366 { 4367 struct hstate *h = &default_hstate; 4368 unsigned long tmp = h->max_huge_pages; 4369 int ret; 4370 4371 if (!hugepages_supported()) 4372 return -EOPNOTSUPP; 4373 4374 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, 4375 &tmp); 4376 if (ret) 4377 goto out; 4378 4379 if (write) 4380 ret = __nr_hugepages_store_common(obey_mempolicy, h, 4381 NUMA_NO_NODE, tmp, *length); 4382 out: 4383 return ret; 4384 } 4385 4386 int hugetlb_sysctl_handler(struct ctl_table *table, int write, 4387 void *buffer, size_t *length, loff_t *ppos) 4388 { 4389 4390 return hugetlb_sysctl_handler_common(false, table, write, 4391 buffer, length, ppos); 4392 } 4393 4394 #ifdef CONFIG_NUMA 4395 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write, 4396 void *buffer, size_t *length, loff_t *ppos) 4397 { 4398 return hugetlb_sysctl_handler_common(true, table, write, 4399 buffer, length, ppos); 4400 } 4401 #endif /* CONFIG_NUMA */ 4402 4403 int hugetlb_overcommit_handler(struct ctl_table *table, int write, 4404 void *buffer, size_t *length, loff_t *ppos) 4405 { 4406 struct hstate *h = &default_hstate; 4407 unsigned long tmp; 4408 int ret; 4409 4410 if (!hugepages_supported()) 4411 return -EOPNOTSUPP; 4412 4413 tmp = h->nr_overcommit_huge_pages; 4414 4415 if (write && hstate_is_gigantic(h)) 4416 return -EINVAL; 4417 4418 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos, 4419 &tmp); 4420 if (ret) 4421 goto out; 4422 4423 if (write) { 4424 spin_lock_irq(&hugetlb_lock); 4425 h->nr_overcommit_huge_pages = tmp; 4426 spin_unlock_irq(&hugetlb_lock); 4427 } 4428 out: 4429 return ret; 4430 } 4431 4432 #endif /* CONFIG_SYSCTL */ 4433 4434 void hugetlb_report_meminfo(struct seq_file *m) 4435 { 4436 struct hstate *h; 4437 unsigned long total = 0; 4438 4439 if (!hugepages_supported()) 4440 return; 4441 4442 for_each_hstate(h) { 4443 unsigned long count = h->nr_huge_pages; 4444 4445 total += huge_page_size(h) * count; 4446 4447 if (h == &default_hstate) 4448 seq_printf(m, 4449 "HugePages_Total: %5lu\n" 4450 "HugePages_Free: %5lu\n" 4451 "HugePages_Rsvd: %5lu\n" 4452 "HugePages_Surp: %5lu\n" 4453 "Hugepagesize: %8lu kB\n", 4454 count, 4455 h->free_huge_pages, 4456 h->resv_huge_pages, 4457 h->surplus_huge_pages, 4458 huge_page_size(h) / SZ_1K); 4459 } 4460 4461 seq_printf(m, "Hugetlb: %8lu kB\n", total / SZ_1K); 4462 } 4463 4464 int hugetlb_report_node_meminfo(char *buf, int len, int nid) 4465 { 4466 struct hstate *h = &default_hstate; 4467 4468 if (!hugepages_supported()) 4469 return 0; 4470 4471 return sysfs_emit_at(buf, len, 4472 "Node %d HugePages_Total: %5u\n" 4473 "Node %d HugePages_Free: %5u\n" 4474 "Node %d HugePages_Surp: %5u\n", 4475 nid, h->nr_huge_pages_node[nid], 4476 nid, h->free_huge_pages_node[nid], 4477 nid, h->surplus_huge_pages_node[nid]); 4478 } 4479 4480 void hugetlb_show_meminfo_node(int nid) 4481 { 4482 struct hstate *h; 4483 4484 if (!hugepages_supported()) 4485 return; 4486 4487 for_each_hstate(h) 4488 printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n", 4489 nid, 4490 h->nr_huge_pages_node[nid], 4491 h->free_huge_pages_node[nid], 4492 h->surplus_huge_pages_node[nid], 4493 huge_page_size(h) / 
SZ_1K);
4494 }
4495
4496 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
4497 {
4498 seq_printf(m, "HugetlbPages:\t%8lu kB\n",
4499 atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
4500 }
4501
4502 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
4503 unsigned long hugetlb_total_pages(void)
4504 {
4505 struct hstate *h;
4506 unsigned long nr_total_pages = 0;
4507
4508 for_each_hstate(h)
4509 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
4510 return nr_total_pages;
4511 }
4512
4513 static int hugetlb_acct_memory(struct hstate *h, long delta)
4514 {
4515 int ret = -ENOMEM;
4516
4517 if (!delta)
4518 return 0;
4519
4520 spin_lock_irq(&hugetlb_lock);
4521 /*
4522 * When cpuset is configured, it breaks the strict hugetlb page
4523 * reservation as the accounting is done on a global variable. Such
4524 * reservation is completely rubbish in the presence of cpuset because
4525 * the reservation is not checked against page availability for the
4526 * current cpuset. An application can still potentially be OOM'ed by the
4527 * kernel for lack of a free htlb page in the cpuset that the task is in.
4528 * Attempting to enforce strict accounting with cpuset is almost
4529 * impossible (or too ugly) because cpuset is so fluid that
4530 * tasks or memory nodes can be dynamically moved between cpusets.
4531 *
4532 * The change of semantics for shared hugetlb mapping with cpuset is
4533 * undesirable. However, in order to preserve some of the semantics,
4534 * we fall back to checking against current free page availability as
4535 * a best attempt and hopefully to minimize the impact of changing
4536 * semantics that cpuset has.
4537 *
4538 * Apart from cpuset, we also have the memory policy mechanism, which
4539 * determines from which node the kernel will allocate memory
4540 * in a NUMA system. So, similar to cpuset, we should also consider
4541 * the memory policy of the current task, similar to the description
4542 * above.
4543 */
4544 if (delta > 0) {
4545 if (gather_surplus_pages(h, delta) < 0)
4546 goto out;
4547
4548 if (delta > allowed_mems_nr(h)) {
4549 return_unused_surplus_pages(h, delta);
4550 goto out;
4551 }
4552 }
4553
4554 ret = 0;
4555 if (delta < 0)
4556 return_unused_surplus_pages(h, (unsigned long) -delta);
4557
4558 out:
4559 spin_unlock_irq(&hugetlb_lock);
4560 return ret;
4561 }
4562
4563 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
4564 {
4565 struct resv_map *resv = vma_resv_map(vma);
4566
4567 /*
4568 * This new VMA should share its sibling's reservation map if present.
4569 * The VMA will only ever have a valid reservation map pointer where
4570 * it is being copied for another still existing VMA. As that VMA
4571 * has a reference to the reservation map it cannot disappear until
4572 * after this open call completes. It is therefore safe to take a
4573 * new reference here without additional locking.
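 *
 * (A new VMA reaches this path when, e.g., an existing hugetlb VMA is
 * duplicated at fork() or split by a partial munmap()/mprotect().)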
4574 */
4575 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
4576 resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
4577 kref_get(&resv->refs);
4578 }
4579 }
4580
4581 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
4582 {
4583 struct hstate *h = hstate_vma(vma);
4584 struct resv_map *resv = vma_resv_map(vma);
4585 struct hugepage_subpool *spool = subpool_vma(vma);
4586 unsigned long reserve, start, end;
4587 long gbl_reserve;
4588
4589 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4590 return;
4591
4592 start = vma_hugecache_offset(h, vma, vma->vm_start);
4593 end = vma_hugecache_offset(h, vma, vma->vm_end);
4594
4595 reserve = (end - start) - region_count(resv, start, end);
4596 hugetlb_cgroup_uncharge_counter(resv, start, end);
4597 if (reserve) {
4598 /*
4599 * Decrement reserve counts. The global reserve count may be
4600 * adjusted if the subpool has a minimum size.
4601 */
4602 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
4603 hugetlb_acct_memory(h, -gbl_reserve);
4604 }
4605
4606 kref_put(&resv->refs, resv_map_release);
4607 }
4608
4609 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
4610 {
4611 if (addr & ~(huge_page_mask(hstate_vma(vma))))
4612 return -EINVAL;
4613 return 0;
4614 }
4615
4616 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
4617 {
4618 return huge_page_size(hstate_vma(vma));
4619 }
4620
4621 /*
4622 * We cannot handle pagefaults against hugetlb pages at all. They cause
4623 * handle_mm_fault() to try to instantiate regular-sized pages in the
4624 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
4625 * this far.
4626 */
4627 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
4628 {
4629 BUG();
4630 return 0;
4631 }
4632
4633 /*
4634 * When a new function is introduced to vm_operations_struct and added
4635 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
4636 * This is because under the System V memory model, mappings created via
4637 * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
4638 * and their original vm_ops are overwritten with shm_vm_ops.
4639 */ 4640 const struct vm_operations_struct hugetlb_vm_ops = { 4641 .fault = hugetlb_vm_op_fault, 4642 .open = hugetlb_vm_op_open, 4643 .close = hugetlb_vm_op_close, 4644 .may_split = hugetlb_vm_op_split, 4645 .pagesize = hugetlb_vm_op_pagesize, 4646 }; 4647 4648 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, 4649 int writable) 4650 { 4651 pte_t entry; 4652 unsigned int shift = huge_page_shift(hstate_vma(vma)); 4653 4654 if (writable) { 4655 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page, 4656 vma->vm_page_prot))); 4657 } else { 4658 entry = huge_pte_wrprotect(mk_huge_pte(page, 4659 vma->vm_page_prot)); 4660 } 4661 entry = pte_mkyoung(entry); 4662 entry = arch_make_huge_pte(entry, shift, vma->vm_flags); 4663 4664 return entry; 4665 } 4666 4667 static void set_huge_ptep_writable(struct vm_area_struct *vma, 4668 unsigned long address, pte_t *ptep) 4669 { 4670 pte_t entry; 4671 4672 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep))); 4673 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) 4674 update_mmu_cache(vma, address, ptep); 4675 } 4676 4677 bool is_hugetlb_entry_migration(pte_t pte) 4678 { 4679 swp_entry_t swp; 4680 4681 if (huge_pte_none(pte) || pte_present(pte)) 4682 return false; 4683 swp = pte_to_swp_entry(pte); 4684 if (is_migration_entry(swp)) 4685 return true; 4686 else 4687 return false; 4688 } 4689 4690 static bool is_hugetlb_entry_hwpoisoned(pte_t pte) 4691 { 4692 swp_entry_t swp; 4693 4694 if (huge_pte_none(pte) || pte_present(pte)) 4695 return false; 4696 swp = pte_to_swp_entry(pte); 4697 if (is_hwpoison_entry(swp)) 4698 return true; 4699 else 4700 return false; 4701 } 4702 4703 static void 4704 hugetlb_install_page(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr, 4705 struct page *new_page) 4706 { 4707 __SetPageUptodate(new_page); 4708 hugepage_add_new_anon_rmap(new_page, vma, addr); 4709 set_huge_pte_at(vma->vm_mm, addr, ptep, make_huge_pte(vma, new_page, 1)); 4710 hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm); 4711 ClearHPageRestoreReserve(new_page); 4712 SetHPageMigratable(new_page); 4713 } 4714 4715 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 4716 struct vm_area_struct *dst_vma, 4717 struct vm_area_struct *src_vma) 4718 { 4719 pte_t *src_pte, *dst_pte, entry, dst_entry; 4720 struct page *ptepage; 4721 unsigned long addr; 4722 bool cow = is_cow_mapping(src_vma->vm_flags); 4723 struct hstate *h = hstate_vma(src_vma); 4724 unsigned long sz = huge_page_size(h); 4725 unsigned long npages = pages_per_huge_page(h); 4726 struct address_space *mapping = src_vma->vm_file->f_mapping; 4727 struct mmu_notifier_range range; 4728 unsigned long last_addr_mask; 4729 int ret = 0; 4730 4731 if (cow) { 4732 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src_vma, src, 4733 src_vma->vm_start, 4734 src_vma->vm_end); 4735 mmu_notifier_invalidate_range_start(&range); 4736 mmap_assert_write_locked(src); 4737 raw_write_seqcount_begin(&src->write_protect_seq); 4738 } else { 4739 /* 4740 * For shared mappings i_mmap_rwsem must be held to call 4741 * huge_pte_alloc, otherwise the returned ptep could go 4742 * away if part of a shared pmd and another thread calls 4743 * huge_pmd_unshare. 
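 *
 * (huge_pmd_unshare() drops the reference on the shared PMD page, after
 * which the page may be freed, leaving the ptep pointing at freed memory.)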
4744 */ 4745 i_mmap_lock_read(mapping); 4746 } 4747 4748 last_addr_mask = hugetlb_mask_last_page(h); 4749 for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) { 4750 spinlock_t *src_ptl, *dst_ptl; 4751 src_pte = huge_pte_offset(src, addr, sz); 4752 if (!src_pte) { 4753 addr |= last_addr_mask; 4754 continue; 4755 } 4756 dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz); 4757 if (!dst_pte) { 4758 ret = -ENOMEM; 4759 break; 4760 } 4761 4762 /* 4763 * If the pagetables are shared don't copy or take references. 4764 * dst_pte == src_pte is the common case of src/dest sharing. 4765 * 4766 * However, src could have 'unshared' and dst shares with 4767 * another vma. If dst_pte !none, this implies sharing. 4768 * Check here before taking page table lock, and once again 4769 * after taking the lock below. 4770 */ 4771 dst_entry = huge_ptep_get(dst_pte); 4772 if ((dst_pte == src_pte) || !huge_pte_none(dst_entry)) { 4773 addr |= last_addr_mask; 4774 continue; 4775 } 4776 4777 dst_ptl = huge_pte_lock(h, dst, dst_pte); 4778 src_ptl = huge_pte_lockptr(h, src, src_pte); 4779 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 4780 entry = huge_ptep_get(src_pte); 4781 dst_entry = huge_ptep_get(dst_pte); 4782 again: 4783 if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) { 4784 /* 4785 * Skip if src entry none. Also, skip in the 4786 * unlikely case dst entry !none as this implies 4787 * sharing with another vma. 4788 */ 4789 ; 4790 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) { 4791 bool uffd_wp = huge_pte_uffd_wp(entry); 4792 4793 if (!userfaultfd_wp(dst_vma) && uffd_wp) 4794 entry = huge_pte_clear_uffd_wp(entry); 4795 set_huge_pte_at(dst, addr, dst_pte, entry); 4796 } else if (unlikely(is_hugetlb_entry_migration(entry))) { 4797 swp_entry_t swp_entry = pte_to_swp_entry(entry); 4798 bool uffd_wp = huge_pte_uffd_wp(entry); 4799 4800 if (!is_readable_migration_entry(swp_entry) && cow) { 4801 /* 4802 * COW mappings require pages in both 4803 * parent and child to be set to read. 4804 */ 4805 swp_entry = make_readable_migration_entry( 4806 swp_offset(swp_entry)); 4807 entry = swp_entry_to_pte(swp_entry); 4808 if (userfaultfd_wp(src_vma) && uffd_wp) 4809 entry = huge_pte_mkuffd_wp(entry); 4810 set_huge_pte_at(src, addr, src_pte, entry); 4811 } 4812 if (!userfaultfd_wp(dst_vma) && uffd_wp) 4813 entry = huge_pte_clear_uffd_wp(entry); 4814 set_huge_pte_at(dst, addr, dst_pte, entry); 4815 } else if (unlikely(is_pte_marker(entry))) { 4816 /* 4817 * We copy the pte marker only if the dst vma has 4818 * uffd-wp enabled. 4819 */ 4820 if (userfaultfd_wp(dst_vma)) 4821 set_huge_pte_at(dst, addr, dst_pte, entry); 4822 } else { 4823 entry = huge_ptep_get(src_pte); 4824 ptepage = pte_page(entry); 4825 get_page(ptepage); 4826 4827 /* 4828 * Failing to duplicate the anon rmap is a rare case 4829 * where we see pinned hugetlb pages while they're 4830 * prone to COW. We need to do the COW earlier during 4831 * fork. 4832 * 4833 * When pre-allocating the page or copying data, we 4834 * need to be without the pgtable locks since we could 4835 * sleep during the process. 
4836 */ 4837 if (!PageAnon(ptepage)) { 4838 page_dup_file_rmap(ptepage, true); 4839 } else if (page_try_dup_anon_rmap(ptepage, true, 4840 src_vma)) { 4841 pte_t src_pte_old = entry; 4842 struct page *new; 4843 4844 spin_unlock(src_ptl); 4845 spin_unlock(dst_ptl); 4846 /* Do not use reserve as it's private owned */ 4847 new = alloc_huge_page(dst_vma, addr, 1); 4848 if (IS_ERR(new)) { 4849 put_page(ptepage); 4850 ret = PTR_ERR(new); 4851 break; 4852 } 4853 copy_user_huge_page(new, ptepage, addr, dst_vma, 4854 npages); 4855 put_page(ptepage); 4856 4857 /* Install the new huge page if src pte stable */ 4858 dst_ptl = huge_pte_lock(h, dst, dst_pte); 4859 src_ptl = huge_pte_lockptr(h, src, src_pte); 4860 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 4861 entry = huge_ptep_get(src_pte); 4862 if (!pte_same(src_pte_old, entry)) { 4863 restore_reserve_on_error(h, dst_vma, addr, 4864 new); 4865 put_page(new); 4866 /* dst_entry won't change as in child */ 4867 goto again; 4868 } 4869 hugetlb_install_page(dst_vma, dst_pte, addr, new); 4870 spin_unlock(src_ptl); 4871 spin_unlock(dst_ptl); 4872 continue; 4873 } 4874 4875 if (cow) { 4876 /* 4877 * No need to notify as we are downgrading page 4878 * table protection not changing it to point 4879 * to a new page. 4880 * 4881 * See Documentation/mm/mmu_notifier.rst 4882 */ 4883 huge_ptep_set_wrprotect(src, addr, src_pte); 4884 entry = huge_pte_wrprotect(entry); 4885 } 4886 4887 set_huge_pte_at(dst, addr, dst_pte, entry); 4888 hugetlb_count_add(npages, dst); 4889 } 4890 spin_unlock(src_ptl); 4891 spin_unlock(dst_ptl); 4892 } 4893 4894 if (cow) { 4895 raw_write_seqcount_end(&src->write_protect_seq); 4896 mmu_notifier_invalidate_range_end(&range); 4897 } else { 4898 i_mmap_unlock_read(mapping); 4899 } 4900 4901 return ret; 4902 } 4903 4904 static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr, 4905 unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte) 4906 { 4907 struct hstate *h = hstate_vma(vma); 4908 struct mm_struct *mm = vma->vm_mm; 4909 spinlock_t *src_ptl, *dst_ptl; 4910 pte_t pte; 4911 4912 dst_ptl = huge_pte_lock(h, mm, dst_pte); 4913 src_ptl = huge_pte_lockptr(h, mm, src_pte); 4914 4915 /* 4916 * We don't have to worry about the ordering of src and dst ptlocks 4917 * because exclusive mmap_sem (or the i_mmap_lock) prevents deadlock. 4918 */ 4919 if (src_ptl != dst_ptl) 4920 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 4921 4922 pte = huge_ptep_get_and_clear(mm, old_addr, src_pte); 4923 set_huge_pte_at(mm, new_addr, dst_pte, pte); 4924 4925 if (src_ptl != dst_ptl) 4926 spin_unlock(src_ptl); 4927 spin_unlock(dst_ptl); 4928 } 4929 4930 int move_hugetlb_page_tables(struct vm_area_struct *vma, 4931 struct vm_area_struct *new_vma, 4932 unsigned long old_addr, unsigned long new_addr, 4933 unsigned long len) 4934 { 4935 struct hstate *h = hstate_vma(vma); 4936 struct address_space *mapping = vma->vm_file->f_mapping; 4937 unsigned long sz = huge_page_size(h); 4938 struct mm_struct *mm = vma->vm_mm; 4939 unsigned long old_end = old_addr + len; 4940 unsigned long last_addr_mask; 4941 pte_t *src_pte, *dst_pte; 4942 struct mmu_notifier_range range; 4943 bool shared_pmd = false; 4944 4945 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, old_addr, 4946 old_end); 4947 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 4948 /* 4949 * In case of shared PMDs, we should cover the maximum possible 4950 * range. 
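 *
 * adjust_range_if_pmd_sharing_possible() above may have widened the range
 * to PUD_SIZE aligned boundaries, so that a later huge_pmd_unshare(),
 * which clears an entire PUD entry, is fully covered by the cache flush
 * and the mmu notifier invalidation.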
4951 */ 4952 flush_cache_range(vma, range.start, range.end); 4953 4954 mmu_notifier_invalidate_range_start(&range); 4955 last_addr_mask = hugetlb_mask_last_page(h); 4956 /* Prevent race with file truncation */ 4957 i_mmap_lock_write(mapping); 4958 for (; old_addr < old_end; old_addr += sz, new_addr += sz) { 4959 src_pte = huge_pte_offset(mm, old_addr, sz); 4960 if (!src_pte) { 4961 old_addr |= last_addr_mask; 4962 new_addr |= last_addr_mask; 4963 continue; 4964 } 4965 if (huge_pte_none(huge_ptep_get(src_pte))) 4966 continue; 4967 4968 if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) { 4969 shared_pmd = true; 4970 old_addr |= last_addr_mask; 4971 new_addr |= last_addr_mask; 4972 continue; 4973 } 4974 4975 dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz); 4976 if (!dst_pte) 4977 break; 4978 4979 move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte); 4980 } 4981 4982 if (shared_pmd) 4983 flush_tlb_range(vma, range.start, range.end); 4984 else 4985 flush_tlb_range(vma, old_end - len, old_end); 4986 mmu_notifier_invalidate_range_end(&range); 4987 i_mmap_unlock_write(mapping); 4988 4989 return len + old_addr - old_end; 4990 } 4991 4992 static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, 4993 unsigned long start, unsigned long end, 4994 struct page *ref_page, zap_flags_t zap_flags) 4995 { 4996 struct mm_struct *mm = vma->vm_mm; 4997 unsigned long address; 4998 pte_t *ptep; 4999 pte_t pte; 5000 spinlock_t *ptl; 5001 struct page *page; 5002 struct hstate *h = hstate_vma(vma); 5003 unsigned long sz = huge_page_size(h); 5004 struct mmu_notifier_range range; 5005 unsigned long last_addr_mask; 5006 bool force_flush = false; 5007 5008 WARN_ON(!is_vm_hugetlb_page(vma)); 5009 BUG_ON(start & ~huge_page_mask(h)); 5010 BUG_ON(end & ~huge_page_mask(h)); 5011 5012 /* 5013 * This is a hugetlb vma, all the pte entries should point 5014 * to huge page. 5015 */ 5016 tlb_change_page_size(tlb, sz); 5017 tlb_start_vma(tlb, vma); 5018 5019 /* 5020 * If sharing possible, alert mmu notifiers of worst case. 5021 */ 5022 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start, 5023 end); 5024 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 5025 mmu_notifier_invalidate_range_start(&range); 5026 last_addr_mask = hugetlb_mask_last_page(h); 5027 address = start; 5028 for (; address < end; address += sz) { 5029 ptep = huge_pte_offset(mm, address, sz); 5030 if (!ptep) { 5031 address |= last_addr_mask; 5032 continue; 5033 } 5034 5035 ptl = huge_pte_lock(h, mm, ptep); 5036 if (huge_pmd_unshare(mm, vma, address, ptep)) { 5037 spin_unlock(ptl); 5038 tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE); 5039 force_flush = true; 5040 address |= last_addr_mask; 5041 continue; 5042 } 5043 5044 pte = huge_ptep_get(ptep); 5045 if (huge_pte_none(pte)) { 5046 spin_unlock(ptl); 5047 continue; 5048 } 5049 5050 /* 5051 * Migrating hugepage or HWPoisoned hugepage is already 5052 * unmapped and its refcount is dropped, so just clear pte here. 5053 */ 5054 if (unlikely(!pte_present(pte))) { 5055 /* 5056 * If the pte was wr-protected by uffd-wp in any of the 5057 * swap forms, meanwhile the caller does not want to 5058 * drop the uffd-wp bit in this zap, then replace the 5059 * pte with a marker. 
5060 */ 5061 if (pte_swp_uffd_wp_any(pte) && 5062 !(zap_flags & ZAP_FLAG_DROP_MARKER)) 5063 set_huge_pte_at(mm, address, ptep, 5064 make_pte_marker(PTE_MARKER_UFFD_WP)); 5065 else 5066 huge_pte_clear(mm, address, ptep, sz); 5067 spin_unlock(ptl); 5068 continue; 5069 } 5070 5071 page = pte_page(pte); 5072 /* 5073 * If a reference page is supplied, it is because a specific 5074 * page is being unmapped, not a range. Ensure the page we 5075 * are about to unmap is the actual page of interest. 5076 */ 5077 if (ref_page) { 5078 if (page != ref_page) { 5079 spin_unlock(ptl); 5080 continue; 5081 } 5082 /* 5083 * Mark the VMA as having unmapped its page so that 5084 * future faults in this VMA will fail rather than 5085 * looking like data was lost 5086 */ 5087 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); 5088 } 5089 5090 pte = huge_ptep_get_and_clear(mm, address, ptep); 5091 tlb_remove_huge_tlb_entry(h, tlb, ptep, address); 5092 if (huge_pte_dirty(pte)) 5093 set_page_dirty(page); 5094 /* Leave a uffd-wp pte marker if needed */ 5095 if (huge_pte_uffd_wp(pte) && 5096 !(zap_flags & ZAP_FLAG_DROP_MARKER)) 5097 set_huge_pte_at(mm, address, ptep, 5098 make_pte_marker(PTE_MARKER_UFFD_WP)); 5099 hugetlb_count_sub(pages_per_huge_page(h), mm); 5100 page_remove_rmap(page, vma, true); 5101 5102 spin_unlock(ptl); 5103 tlb_remove_page_size(tlb, page, huge_page_size(h)); 5104 /* 5105 * Bail out after unmapping reference page if supplied 5106 */ 5107 if (ref_page) 5108 break; 5109 } 5110 mmu_notifier_invalidate_range_end(&range); 5111 tlb_end_vma(tlb, vma); 5112 5113 /* 5114 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We 5115 * could defer the flush until now, since by holding i_mmap_rwsem we 5116 * guaranteed that the last refernece would not be dropped. But we must 5117 * do the flushing before we return, as otherwise i_mmap_rwsem will be 5118 * dropped and the last reference to the shared PMDs page might be 5119 * dropped as well. 5120 * 5121 * In theory we could defer the freeing of the PMD pages as well, but 5122 * huge_pmd_unshare() relies on the exact page_count for the PMD page to 5123 * detect sharing, so we cannot defer the release of the page either. 5124 * Instead, do flush now. 5125 */ 5126 if (force_flush) 5127 tlb_flush_mmu_tlbonly(tlb); 5128 } 5129 5130 void __unmap_hugepage_range_final(struct mmu_gather *tlb, 5131 struct vm_area_struct *vma, unsigned long start, 5132 unsigned long end, struct page *ref_page, 5133 zap_flags_t zap_flags) 5134 { 5135 __unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags); 5136 5137 /* 5138 * Clear this flag so that x86's huge_pmd_share page_table_shareable 5139 * test will fail on a vma being torn down, and not grab a page table 5140 * on its way out. We're lucky that the flag has such an appropriate 5141 * name, and can in fact be safely cleared here. We could clear it 5142 * before the __unmap_hugepage_range above, but all that's necessary 5143 * is to clear it before releasing the i_mmap_rwsem. This works 5144 * because in the context this is called, the VMA is about to be 5145 * destroyed and the i_mmap_rwsem is held. 
5146 */ 5147 vma->vm_flags &= ~VM_MAYSHARE; 5148 } 5149 5150 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 5151 unsigned long end, struct page *ref_page, 5152 zap_flags_t zap_flags) 5153 { 5154 struct mmu_gather tlb; 5155 5156 tlb_gather_mmu(&tlb, vma->vm_mm); 5157 __unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags); 5158 tlb_finish_mmu(&tlb); 5159 } 5160 5161 /* 5162 * This is called when the original mapper is failing to COW a MAP_PRIVATE 5163 * mapping it owns the reserve page for. The intention is to unmap the page 5164 * from other VMAs and let the children be SIGKILLed if they are faulting the 5165 * same region. 5166 */ 5167 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, 5168 struct page *page, unsigned long address) 5169 { 5170 struct hstate *h = hstate_vma(vma); 5171 struct vm_area_struct *iter_vma; 5172 struct address_space *mapping; 5173 pgoff_t pgoff; 5174 5175 /* 5176 * vm_pgoff is in PAGE_SIZE units, hence the different calculation 5177 * from page cache lookup which is in HPAGE_SIZE units. 5178 */ 5179 address = address & huge_page_mask(h); 5180 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + 5181 vma->vm_pgoff; 5182 mapping = vma->vm_file->f_mapping; 5183 5184 /* 5185 * Take the mapping lock for the duration of the table walk. As 5186 * this mapping should be shared between all the VMAs, 5187 * __unmap_hugepage_range() is called as the lock is already held 5188 */ 5189 i_mmap_lock_write(mapping); 5190 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) { 5191 /* Do not unmap the current VMA */ 5192 if (iter_vma == vma) 5193 continue; 5194 5195 /* 5196 * Shared VMAs have their own reserves and do not affect 5197 * MAP_PRIVATE accounting but it is possible that a shared 5198 * VMA is using the same page so check and skip such VMAs. 5199 */ 5200 if (iter_vma->vm_flags & VM_MAYSHARE) 5201 continue; 5202 5203 /* 5204 * Unmap the page from other VMAs without their own reserves. 5205 * They get marked to be SIGKILLed if they fault in these 5206 * areas. This is because a future no-page fault on this VMA 5207 * could insert a zeroed page instead of the data existing 5208 * from the time of fork. This would look like data corruption 5209 */ 5210 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER)) 5211 unmap_hugepage_range(iter_vma, address, 5212 address + huge_page_size(h), page, 0); 5213 } 5214 i_mmap_unlock_write(mapping); 5215 } 5216 5217 /* 5218 * hugetlb_wp() should be called with page lock of the original hugepage held. 5219 * Called with hugetlb_fault_mutex_table held and pte_page locked so we 5220 * cannot race with other handlers or page migration. 5221 * Keep the pte_same checks anyway to make transition from the mutex easier. 
5222 */ 5223 static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma, 5224 unsigned long address, pte_t *ptep, unsigned int flags, 5225 struct page *pagecache_page, spinlock_t *ptl) 5226 { 5227 const bool unshare = flags & FAULT_FLAG_UNSHARE; 5228 pte_t pte; 5229 struct hstate *h = hstate_vma(vma); 5230 struct page *old_page, *new_page; 5231 int outside_reserve = 0; 5232 vm_fault_t ret = 0; 5233 unsigned long haddr = address & huge_page_mask(h); 5234 struct mmu_notifier_range range; 5235 5236 VM_BUG_ON(unshare && (flags & FOLL_WRITE)); 5237 VM_BUG_ON(!unshare && !(flags & FOLL_WRITE)); 5238 5239 pte = huge_ptep_get(ptep); 5240 old_page = pte_page(pte); 5241 5242 delayacct_wpcopy_start(); 5243 5244 retry_avoidcopy: 5245 /* 5246 * If no-one else is actually using this page, we're the exclusive 5247 * owner and can reuse this page. 5248 */ 5249 if (page_mapcount(old_page) == 1 && PageAnon(old_page)) { 5250 if (!PageAnonExclusive(old_page)) 5251 page_move_anon_rmap(old_page, vma); 5252 if (likely(!unshare)) 5253 set_huge_ptep_writable(vma, haddr, ptep); 5254 5255 delayacct_wpcopy_end(); 5256 return 0; 5257 } 5258 VM_BUG_ON_PAGE(PageAnon(old_page) && PageAnonExclusive(old_page), 5259 old_page); 5260 5261 /* 5262 * If the process that created a MAP_PRIVATE mapping is about to 5263 * perform a COW due to a shared page count, attempt to satisfy 5264 * the allocation without using the existing reserves. The pagecache 5265 * page is used to determine if the reserve at this address was 5266 * consumed or not. If reserves were used, a partial faulted mapping 5267 * at the time of fork() could consume its reserves on COW instead 5268 * of the full address range. 5269 */ 5270 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && 5271 old_page != pagecache_page) 5272 outside_reserve = 1; 5273 5274 get_page(old_page); 5275 5276 /* 5277 * Drop page table lock as buddy allocator may be called. It will 5278 * be acquired again before returning to the caller, as expected. 5279 */ 5280 spin_unlock(ptl); 5281 new_page = alloc_huge_page(vma, haddr, outside_reserve); 5282 5283 if (IS_ERR(new_page)) { 5284 /* 5285 * If a process owning a MAP_PRIVATE mapping fails to COW, 5286 * it is due to references held by a child and an insufficient 5287 * huge page pool. To guarantee the original mappers 5288 * reliability, unmap the page from child processes. The child 5289 * may get SIGKILLed if it later faults. 5290 */ 5291 if (outside_reserve) { 5292 struct address_space *mapping = vma->vm_file->f_mapping; 5293 pgoff_t idx; 5294 u32 hash; 5295 5296 put_page(old_page); 5297 BUG_ON(huge_pte_none(pte)); 5298 /* 5299 * Drop hugetlb_fault_mutex and i_mmap_rwsem before 5300 * unmapping. unmapping needs to hold i_mmap_rwsem 5301 * in write mode. Dropping i_mmap_rwsem in read mode 5302 * here is OK as COW mappings do not interact with 5303 * PMD sharing. 5304 * 5305 * Reacquire both after unmap operation. 5306 */ 5307 idx = vma_hugecache_offset(h, vma, haddr); 5308 hash = hugetlb_fault_mutex_hash(mapping, idx); 5309 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 5310 i_mmap_unlock_read(mapping); 5311 5312 unmap_ref_private(mm, vma, old_page, haddr); 5313 5314 i_mmap_lock_read(mapping); 5315 mutex_lock(&hugetlb_fault_mutex_table[hash]); 5316 spin_lock(ptl); 5317 ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); 5318 if (likely(ptep && 5319 pte_same(huge_ptep_get(ptep), pte))) 5320 goto retry_avoidcopy; 5321 /* 5322 * race occurs while re-acquiring page table 5323 * lock, and our job is done. 
5324 */ 5325 delayacct_wpcopy_end(); 5326 return 0; 5327 } 5328 5329 ret = vmf_error(PTR_ERR(new_page)); 5330 goto out_release_old; 5331 } 5332 5333 /* 5334 * When the original hugepage is shared one, it does not have 5335 * anon_vma prepared. 5336 */ 5337 if (unlikely(anon_vma_prepare(vma))) { 5338 ret = VM_FAULT_OOM; 5339 goto out_release_all; 5340 } 5341 5342 copy_user_huge_page(new_page, old_page, address, vma, 5343 pages_per_huge_page(h)); 5344 __SetPageUptodate(new_page); 5345 5346 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr, 5347 haddr + huge_page_size(h)); 5348 mmu_notifier_invalidate_range_start(&range); 5349 5350 /* 5351 * Retake the page table lock to check for racing updates 5352 * before the page tables are altered 5353 */ 5354 spin_lock(ptl); 5355 ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); 5356 if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) { 5357 ClearHPageRestoreReserve(new_page); 5358 5359 /* Break COW or unshare */ 5360 huge_ptep_clear_flush(vma, haddr, ptep); 5361 mmu_notifier_invalidate_range(mm, range.start, range.end); 5362 page_remove_rmap(old_page, vma, true); 5363 hugepage_add_new_anon_rmap(new_page, vma, haddr); 5364 set_huge_pte_at(mm, haddr, ptep, 5365 make_huge_pte(vma, new_page, !unshare)); 5366 SetHPageMigratable(new_page); 5367 /* Make the old page be freed below */ 5368 new_page = old_page; 5369 } 5370 spin_unlock(ptl); 5371 mmu_notifier_invalidate_range_end(&range); 5372 out_release_all: 5373 /* 5374 * No restore in case of successful pagetable update (Break COW or 5375 * unshare) 5376 */ 5377 if (new_page != old_page) 5378 restore_reserve_on_error(h, vma, haddr, new_page); 5379 put_page(new_page); 5380 out_release_old: 5381 put_page(old_page); 5382 5383 spin_lock(ptl); /* Caller expects lock to be held */ 5384 5385 delayacct_wpcopy_end(); 5386 return ret; 5387 } 5388 5389 /* Return the pagecache page at a given address within a VMA */ 5390 static struct page *hugetlbfs_pagecache_page(struct hstate *h, 5391 struct vm_area_struct *vma, unsigned long address) 5392 { 5393 struct address_space *mapping; 5394 pgoff_t idx; 5395 5396 mapping = vma->vm_file->f_mapping; 5397 idx = vma_hugecache_offset(h, vma, address); 5398 5399 return find_lock_page(mapping, idx); 5400 } 5401 5402 /* 5403 * Return whether there is a pagecache page to back given address within VMA. 5404 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page. 5405 */ 5406 static bool hugetlbfs_pagecache_present(struct hstate *h, 5407 struct vm_area_struct *vma, unsigned long address) 5408 { 5409 struct address_space *mapping; 5410 pgoff_t idx; 5411 struct page *page; 5412 5413 mapping = vma->vm_file->f_mapping; 5414 idx = vma_hugecache_offset(h, vma, address); 5415 5416 page = find_get_page(mapping, idx); 5417 if (page) 5418 put_page(page); 5419 return page != NULL; 5420 } 5421 5422 int huge_add_to_page_cache(struct page *page, struct address_space *mapping, 5423 pgoff_t idx) 5424 { 5425 struct folio *folio = page_folio(page); 5426 struct inode *inode = mapping->host; 5427 struct hstate *h = hstate_inode(inode); 5428 int err; 5429 5430 __folio_set_locked(folio); 5431 err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL); 5432 5433 if (unlikely(err)) { 5434 __folio_clear_locked(folio); 5435 return err; 5436 } 5437 ClearHPageRestoreReserve(page); 5438 5439 /* 5440 * mark folio dirty so that it will not be removed from cache/file 5441 * by non-hugetlbfs specific code paths. 
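 *
 * (A clean folio could otherwise be dropped by generic invalidation
 * paths, e.g. invalidate_mapping_pages(), which skip dirty folios.)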
5442 */ 5443 folio_mark_dirty(folio); 5444 5445 spin_lock(&inode->i_lock); 5446 inode->i_blocks += blocks_per_huge_page(h); 5447 spin_unlock(&inode->i_lock); 5448 return 0; 5449 } 5450 5451 static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma, 5452 struct address_space *mapping, 5453 pgoff_t idx, 5454 unsigned int flags, 5455 unsigned long haddr, 5456 unsigned long addr, 5457 unsigned long reason) 5458 { 5459 vm_fault_t ret; 5460 u32 hash; 5461 struct vm_fault vmf = { 5462 .vma = vma, 5463 .address = haddr, 5464 .real_address = addr, 5465 .flags = flags, 5466 5467 /* 5468 * Hard to debug if it ends up being 5469 * used by a callee that assumes 5470 * something about the other 5471 * uninitialized fields... same as in 5472 * memory.c 5473 */ 5474 }; 5475 5476 /* 5477 * hugetlb_fault_mutex and i_mmap_rwsem must be 5478 * dropped before handling userfault. Reacquire 5479 * after handling fault to make calling code simpler. 5480 */ 5481 hash = hugetlb_fault_mutex_hash(mapping, idx); 5482 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 5483 i_mmap_unlock_read(mapping); 5484 ret = handle_userfault(&vmf, reason); 5485 i_mmap_lock_read(mapping); 5486 mutex_lock(&hugetlb_fault_mutex_table[hash]); 5487 5488 return ret; 5489 } 5490 5491 static vm_fault_t hugetlb_no_page(struct mm_struct *mm, 5492 struct vm_area_struct *vma, 5493 struct address_space *mapping, pgoff_t idx, 5494 unsigned long address, pte_t *ptep, 5495 pte_t old_pte, unsigned int flags) 5496 { 5497 struct hstate *h = hstate_vma(vma); 5498 vm_fault_t ret = VM_FAULT_SIGBUS; 5499 int anon_rmap = 0; 5500 unsigned long size; 5501 struct page *page; 5502 pte_t new_pte; 5503 spinlock_t *ptl; 5504 unsigned long haddr = address & huge_page_mask(h); 5505 bool new_page, new_pagecache_page = false; 5506 5507 /* 5508 * Currently, we are forced to kill the process in the event the 5509 * original mapper has unmapped pages from the child due to a failed 5510 * COW/unsharing. Warn that such a situation has occurred as it may not 5511 * be obvious. 5512 */ 5513 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { 5514 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n", 5515 current->pid); 5516 return ret; 5517 } 5518 5519 /* 5520 * We can not race with truncation due to holding i_mmap_rwsem. 5521 * i_size is modified when holding i_mmap_rwsem, so check here 5522 * once for faults beyond end of file. 5523 */ 5524 size = i_size_read(mapping->host) >> huge_page_shift(h); 5525 if (idx >= size) 5526 goto out; 5527 5528 retry: 5529 new_page = false; 5530 page = find_lock_page(mapping, idx); 5531 if (!page) { 5532 /* Check for page in userfault range */ 5533 if (userfaultfd_missing(vma)) { 5534 ret = hugetlb_handle_userfault(vma, mapping, idx, 5535 flags, haddr, address, 5536 VM_UFFD_MISSING); 5537 goto out; 5538 } 5539 5540 page = alloc_huge_page(vma, haddr, 0); 5541 if (IS_ERR(page)) { 5542 /* 5543 * Returning error will result in faulting task being 5544 * sent SIGBUS. The hugetlb fault mutex prevents two 5545 * tasks from racing to fault in the same page which 5546 * could result in false unable to allocate errors. 5547 * Page migration does not take the fault mutex, but 5548 * does a clear then write of pte's under page table 5549 * lock. Page fault code could race with migration, 5550 * notice the clear pte and try to allocate a page 5551 * here. Before returning error, get ptl and make 5552 * sure there really is no pte entry. 
5553 */ 5554 ptl = huge_pte_lock(h, mm, ptep); 5555 ret = 0; 5556 if (huge_pte_none(huge_ptep_get(ptep))) 5557 ret = vmf_error(PTR_ERR(page)); 5558 spin_unlock(ptl); 5559 goto out; 5560 } 5561 clear_huge_page(page, address, pages_per_huge_page(h)); 5562 __SetPageUptodate(page); 5563 new_page = true; 5564 5565 if (vma->vm_flags & VM_MAYSHARE) { 5566 int err = huge_add_to_page_cache(page, mapping, idx); 5567 if (err) { 5568 put_page(page); 5569 if (err == -EEXIST) 5570 goto retry; 5571 goto out; 5572 } 5573 new_pagecache_page = true; 5574 } else { 5575 lock_page(page); 5576 if (unlikely(anon_vma_prepare(vma))) { 5577 ret = VM_FAULT_OOM; 5578 goto backout_unlocked; 5579 } 5580 anon_rmap = 1; 5581 } 5582 } else { 5583 /* 5584 * If a memory error occurs between mmap() and fault, some processes 5585 * don't have a hwpoisoned swap entry for the errored virtual address. 5586 * So we need to block hugepage faults with the PG_hwpoison bit check. 5587 */ 5588 if (unlikely(PageHWPoison(page))) { 5589 ret = VM_FAULT_HWPOISON_LARGE | 5590 VM_FAULT_SET_HINDEX(hstate_index(h)); 5591 goto backout_unlocked; 5592 } 5593 5594 /* Check for page in userfault range. */ 5595 if (userfaultfd_minor(vma)) { 5596 unlock_page(page); 5597 put_page(page); 5598 ret = hugetlb_handle_userfault(vma, mapping, idx, 5599 flags, haddr, address, 5600 VM_UFFD_MINOR); 5601 goto out; 5602 } 5603 } 5604 5605 /* 5606 * If we are going to COW a private mapping later, we examine the 5607 * pending reservations for this page now. This will ensure that 5608 * any allocations necessary to record that reservation occur outside 5609 * the spinlock. 5610 */ 5611 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { 5612 if (vma_needs_reservation(h, vma, haddr) < 0) { 5613 ret = VM_FAULT_OOM; 5614 goto backout_unlocked; 5615 } 5616 /* Just decrements count, does not deallocate */ 5617 vma_end_reservation(h, vma, haddr); 5618 } 5619 5620 ptl = huge_pte_lock(h, mm, ptep); 5621 ret = 0; 5622 /* If pte changed from under us, retry */ 5623 if (!pte_same(huge_ptep_get(ptep), old_pte)) 5624 goto backout; 5625 5626 if (anon_rmap) { 5627 ClearHPageRestoreReserve(page); 5628 hugepage_add_new_anon_rmap(page, vma, haddr); 5629 } else 5630 page_dup_file_rmap(page, true); 5631 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) 5632 && (vma->vm_flags & VM_SHARED))); 5633 /* 5634 * If this pte was previously wr-protected, keep it wr-protected even 5635 * if populated. 5636 */ 5637 if (unlikely(pte_marker_uffd_wp(old_pte))) 5638 new_pte = huge_pte_wrprotect(huge_pte_mkuffd_wp(new_pte)); 5639 set_huge_pte_at(mm, haddr, ptep, new_pte); 5640 5641 hugetlb_count_add(pages_per_huge_page(h), mm); 5642 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { 5643 /* Optimization, do the COW without a second fault */ 5644 ret = hugetlb_wp(mm, vma, address, ptep, flags, page, ptl); 5645 } 5646 5647 spin_unlock(ptl); 5648 5649 /* 5650 * Only set HPageMigratable in newly allocated pages. Existing pages 5651 * found in the pagecache may not have HPageMigratable set if they have 5652 * been isolated for migration. 
5653 */ 5654 if (new_page) 5655 SetHPageMigratable(page); 5656 5657 unlock_page(page); 5658 out: 5659 return ret; 5660 5661 backout: 5662 spin_unlock(ptl); 5663 backout_unlocked: 5664 unlock_page(page); 5665 /* restore reserve for newly allocated pages not in page cache */ 5666 if (new_page && !new_pagecache_page) 5667 restore_reserve_on_error(h, vma, haddr, page); 5668 put_page(page); 5669 goto out; 5670 } 5671 5672 #ifdef CONFIG_SMP 5673 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx) 5674 { 5675 unsigned long key[2]; 5676 u32 hash; 5677 5678 key[0] = (unsigned long) mapping; 5679 key[1] = idx; 5680 5681 hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0); 5682 5683 return hash & (num_fault_mutexes - 1); 5684 } 5685 #else 5686 /* 5687 * For uniprocessor systems we always use a single mutex, so just 5688 * return 0 and avoid the hashing overhead. 5689 */ 5690 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx) 5691 { 5692 return 0; 5693 } 5694 #endif 5695 5696 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, 5697 unsigned long address, unsigned int flags) 5698 { 5699 pte_t *ptep, entry; 5700 spinlock_t *ptl; 5701 vm_fault_t ret; 5702 u32 hash; 5703 pgoff_t idx; 5704 struct page *page = NULL; 5705 struct page *pagecache_page = NULL; 5706 struct hstate *h = hstate_vma(vma); 5707 struct address_space *mapping; 5708 int need_wait_lock = 0; 5709 unsigned long haddr = address & huge_page_mask(h); 5710 5711 ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); 5712 if (ptep) { 5713 /* 5714 * Since we hold no locks, ptep could be stale. That is 5715 * OK as we are only making decisions based on content and 5716 * not actually modifying content here. 5717 */ 5718 entry = huge_ptep_get(ptep); 5719 if (unlikely(is_hugetlb_entry_migration(entry))) { 5720 migration_entry_wait_huge(vma, ptep); 5721 return 0; 5722 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) 5723 return VM_FAULT_HWPOISON_LARGE | 5724 VM_FAULT_SET_HINDEX(hstate_index(h)); 5725 } 5726 5727 /* 5728 * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold 5729 * until finished with ptep. This serves two purposes: 5730 * 1) It prevents huge_pmd_unshare from being called elsewhere 5731 * and making the ptep no longer valid. 5732 * 2) It synchronizes us with i_size modifications during truncation. 5733 * 5734 * ptep could have already been assigned via huge_pte_offset. That 5735 * is OK, as huge_pte_alloc will return the same value unless 5736 * something has changed. 5737 */ 5738 mapping = vma->vm_file->f_mapping; 5739 i_mmap_lock_read(mapping); 5740 ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h)); 5741 if (!ptep) { 5742 i_mmap_unlock_read(mapping); 5743 return VM_FAULT_OOM; 5744 } 5745 5746 /* 5747 * Serialize hugepage allocation and instantiation, so that we don't 5748 * get spurious allocation failures if two CPUs race to instantiate 5749 * the same page in the page cache. 
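 * As a rough illustration of the keying: hugetlb_fault_mutex_hash() above
 * hashes the (mapping, index) pair into a slot of hugetlb_fault_mutex_table[],
 * so two faults on the same index of the same file contend on one mutex,
 * while faults on different pages usually proceed in parallel (barring hash
 * collisions).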
5750 */ 5751 idx = vma_hugecache_offset(h, vma, haddr); 5752 hash = hugetlb_fault_mutex_hash(mapping, idx); 5753 mutex_lock(&hugetlb_fault_mutex_table[hash]); 5754 5755 entry = huge_ptep_get(ptep); 5756 /* PTE markers should be handled the same way as a none pte */ 5757 if (huge_pte_none_mostly(entry)) { 5758 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, 5759 entry, flags); 5760 goto out_mutex; 5761 } 5762 5763 ret = 0; 5764 5765 /* 5766 * entry could be a migration/hwpoison entry at this point, so this 5767 * check prevents the kernel from going below assuming that we have 5768 * an active hugepage in pagecache. This goto defers to the 2nd page 5769 * fault, where the is_hugetlb_entry_(migration|hwpoisoned) checks will 5770 * properly handle it. 5771 */ 5772 if (!pte_present(entry)) 5773 goto out_mutex; 5774 5775 /* 5776 * If we are going to COW/unshare the mapping later, we examine the 5777 * pending reservations for this page now. This will ensure that any 5778 * allocations necessary to record that reservation occur outside the 5779 * spinlock. For private mappings, we also look up the pagecache 5780 * page now, as it is used to determine if a reservation has been 5781 * consumed. 5782 */ 5783 if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) && 5784 !huge_pte_write(entry)) { 5785 if (vma_needs_reservation(h, vma, haddr) < 0) { 5786 ret = VM_FAULT_OOM; 5787 goto out_mutex; 5788 } 5789 /* Just decrements count, does not deallocate */ 5790 vma_end_reservation(h, vma, haddr); 5791 5792 if (!(vma->vm_flags & VM_MAYSHARE)) 5793 pagecache_page = hugetlbfs_pagecache_page(h, 5794 vma, haddr); 5795 } 5796 5797 ptl = huge_pte_lock(h, mm, ptep); 5798 5799 /* Check for a racing update before calling hugetlb_wp() */ 5800 if (unlikely(!pte_same(entry, huge_ptep_get(ptep)))) 5801 goto out_ptl; 5802 5803 /* Handle userfault-wp first, before trying to lock more pages */ 5804 if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(ptep)) && 5805 (flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) { 5806 struct vm_fault vmf = { 5807 .vma = vma, 5808 .address = haddr, 5809 .real_address = address, 5810 .flags = flags, 5811 }; 5812 5813 spin_unlock(ptl); 5814 if (pagecache_page) { 5815 unlock_page(pagecache_page); 5816 put_page(pagecache_page); 5817 } 5818 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 5819 i_mmap_unlock_read(mapping); 5820 return handle_userfault(&vmf, VM_UFFD_WP); 5821 } 5822 5823 /* 5824 * hugetlb_wp() requires page locks of pte_page(entry) and 5825 * pagecache_page, so here we need to take the former one 5826 * when page != pagecache_page or !pagecache_page. 
5827 */ 5828 page = pte_page(entry); 5829 if (page != pagecache_page) 5830 if (!trylock_page(page)) { 5831 need_wait_lock = 1; 5832 goto out_ptl; 5833 } 5834 5835 get_page(page); 5836 5837 if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) { 5838 if (!huge_pte_write(entry)) { 5839 ret = hugetlb_wp(mm, vma, address, ptep, flags, 5840 pagecache_page, ptl); 5841 goto out_put_page; 5842 } else if (likely(flags & FAULT_FLAG_WRITE)) { 5843 entry = huge_pte_mkdirty(entry); 5844 } 5845 } 5846 entry = pte_mkyoung(entry); 5847 if (huge_ptep_set_access_flags(vma, haddr, ptep, entry, 5848 flags & FAULT_FLAG_WRITE)) 5849 update_mmu_cache(vma, haddr, ptep); 5850 out_put_page: 5851 if (page != pagecache_page) 5852 unlock_page(page); 5853 put_page(page); 5854 out_ptl: 5855 spin_unlock(ptl); 5856 5857 if (pagecache_page) { 5858 unlock_page(pagecache_page); 5859 put_page(pagecache_page); 5860 } 5861 out_mutex: 5862 mutex_unlock(&hugetlb_fault_mutex_table[hash]); 5863 i_mmap_unlock_read(mapping); 5864 /* 5865 * Generally it's safe to hold refcount during waiting page lock. But 5866 * here we just wait to defer the next page fault to avoid busy loop and 5867 * the page is not used after unlocked before returning from the current 5868 * page fault. So we are safe from accessing freed page, even if we wait 5869 * here without taking refcount. 5870 */ 5871 if (need_wait_lock) 5872 wait_on_page_locked(page); 5873 return ret; 5874 } 5875 5876 #ifdef CONFIG_USERFAULTFD 5877 /* 5878 * Used by userfaultfd UFFDIO_COPY. Based on mcopy_atomic_pte with 5879 * modifications for huge pages. 5880 */ 5881 int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, 5882 pte_t *dst_pte, 5883 struct vm_area_struct *dst_vma, 5884 unsigned long dst_addr, 5885 unsigned long src_addr, 5886 enum mcopy_atomic_mode mode, 5887 struct page **pagep, 5888 bool wp_copy) 5889 { 5890 bool is_continue = (mode == MCOPY_ATOMIC_CONTINUE); 5891 struct hstate *h = hstate_vma(dst_vma); 5892 struct address_space *mapping = dst_vma->vm_file->f_mapping; 5893 pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr); 5894 unsigned long size; 5895 int vm_shared = dst_vma->vm_flags & VM_SHARED; 5896 pte_t _dst_pte; 5897 spinlock_t *ptl; 5898 int ret = -ENOMEM; 5899 struct page *page; 5900 int writable; 5901 bool page_in_pagecache = false; 5902 5903 if (is_continue) { 5904 ret = -EFAULT; 5905 page = find_lock_page(mapping, idx); 5906 if (!page) 5907 goto out; 5908 page_in_pagecache = true; 5909 } else if (!*pagep) { 5910 /* If a page already exists, then it's UFFDIO_COPY for 5911 * a non-missing case. Return -EEXIST. 5912 */ 5913 if (vm_shared && 5914 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { 5915 ret = -EEXIST; 5916 goto out; 5917 } 5918 5919 page = alloc_huge_page(dst_vma, dst_addr, 0); 5920 if (IS_ERR(page)) { 5921 ret = -ENOMEM; 5922 goto out; 5923 } 5924 5925 ret = copy_huge_page_from_user(page, 5926 (const void __user *) src_addr, 5927 pages_per_huge_page(h), false); 5928 5929 /* fallback to copy_from_user outside mmap_lock */ 5930 if (unlikely(ret)) { 5931 ret = -ENOENT; 5932 /* Free the allocated page which may have 5933 * consumed a reservation. 5934 */ 5935 restore_reserve_on_error(h, dst_vma, dst_addr, page); 5936 put_page(page); 5937 5938 /* Allocate a temporary page to hold the copied 5939 * contents. 
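 * (The -ENOENT return tells the userfaultfd caller to perform the copy into
 * *pagep outside mmap_lock and then call this function again with *pagep
 * filled in; that retry is handled by the "} else {" branch below, which
 * copies *pagep into a freshly allocated huge page.)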
5940 */ 5941 page = alloc_huge_page_vma(h, dst_vma, dst_addr); 5942 if (!page) { 5943 ret = -ENOMEM; 5944 goto out; 5945 } 5946 *pagep = page; 5947 /* Set the outparam pagep and return to the caller to 5948 * copy the contents outside the lock. Don't free the 5949 * page. 5950 */ 5951 goto out; 5952 } 5953 } else { 5954 if (vm_shared && 5955 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { 5956 put_page(*pagep); 5957 ret = -EEXIST; 5958 *pagep = NULL; 5959 goto out; 5960 } 5961 5962 page = alloc_huge_page(dst_vma, dst_addr, 0); 5963 if (IS_ERR(page)) { 5964 put_page(*pagep); 5965 ret = -ENOMEM; 5966 *pagep = NULL; 5967 goto out; 5968 } 5969 copy_user_huge_page(page, *pagep, dst_addr, dst_vma, 5970 pages_per_huge_page(h)); 5971 put_page(*pagep); 5972 *pagep = NULL; 5973 } 5974 5975 /* 5976 * The memory barrier inside __SetPageUptodate makes sure that 5977 * preceding stores to the page contents become visible before 5978 * the set_pte_at() write. 5979 */ 5980 __SetPageUptodate(page); 5981 5982 /* Add shared, newly allocated pages to the page cache. */ 5983 if (vm_shared && !is_continue) { 5984 size = i_size_read(mapping->host) >> huge_page_shift(h); 5985 ret = -EFAULT; 5986 if (idx >= size) 5987 goto out_release_nounlock; 5988 5989 /* 5990 * Serialization between remove_inode_hugepages() and 5991 * huge_add_to_page_cache() below happens through the 5992 * hugetlb_fault_mutex_table that must be held here by 5993 * the caller. 5994 */ 5995 ret = huge_add_to_page_cache(page, mapping, idx); 5996 if (ret) 5997 goto out_release_nounlock; 5998 page_in_pagecache = true; 5999 } 6000 6001 ptl = huge_pte_lockptr(h, dst_mm, dst_pte); 6002 spin_lock(ptl); 6003 6004 /* 6005 * Recheck the i_size after holding PT lock to make sure not 6006 * to leave any page mapped (as page_mapped()) beyond the end 6007 * of the i_size (remove_inode_hugepages() is strict about 6008 * enforcing that). If we bail out here, we'll also leave a 6009 * page in the radix tree in the vm_shared case beyond the end 6010 * of the i_size, but remove_inode_hugepages() will take care 6011 * of it as soon as we drop the hugetlb_fault_mutex_table. 6012 */ 6013 size = i_size_read(mapping->host) >> huge_page_shift(h); 6014 ret = -EFAULT; 6015 if (idx >= size) 6016 goto out_release_unlock; 6017 6018 ret = -EEXIST; 6019 /* 6020 * We allow overwriting a pte marker: consider the case where both MISSING|WP 6021 * are registered; we first wr-protect a none pte which has no page cache 6022 * page backing it, then access the page. 6023 */ 6024 if (!huge_pte_none_mostly(huge_ptep_get(dst_pte))) 6025 goto out_release_unlock; 6026 6027 if (vm_shared) { 6028 page_dup_file_rmap(page, true); 6029 } else { 6030 ClearHPageRestoreReserve(page); 6031 hugepage_add_new_anon_rmap(page, dst_vma, dst_addr); 6032 } 6033 6034 /* 6035 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY 6036 * with wp flag set, don't set pte write bit. 6037 */ 6038 if (wp_copy || (is_continue && !vm_shared)) 6039 writable = 0; 6040 else 6041 writable = dst_vma->vm_flags & VM_WRITE; 6042 6043 _dst_pte = make_huge_pte(dst_vma, page, writable); 6044 /* 6045 * Always mark UFFDIO_COPY page dirty; note that this may not be 6046 * extremely important for hugetlbfs for now since swapping is not 6047 * supported, but we should still be clear that this page cannot be 6048 * thrown away at will, even if the write bit is not set. 
6049 */ 6050 _dst_pte = huge_pte_mkdirty(_dst_pte); 6051 _dst_pte = pte_mkyoung(_dst_pte); 6052 6053 if (wp_copy) 6054 _dst_pte = huge_pte_mkuffd_wp(_dst_pte); 6055 6056 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); 6057 6058 hugetlb_count_add(pages_per_huge_page(h), dst_mm); 6059 6060 /* No need to invalidate - it was non-present before */ 6061 update_mmu_cache(dst_vma, dst_addr, dst_pte); 6062 6063 spin_unlock(ptl); 6064 if (!is_continue) 6065 SetHPageMigratable(page); 6066 if (vm_shared || is_continue) 6067 unlock_page(page); 6068 ret = 0; 6069 out: 6070 return ret; 6071 out_release_unlock: 6072 spin_unlock(ptl); 6073 if (vm_shared || is_continue) 6074 unlock_page(page); 6075 out_release_nounlock: 6076 if (!page_in_pagecache) 6077 restore_reserve_on_error(h, dst_vma, dst_addr, page); 6078 put_page(page); 6079 goto out; 6080 } 6081 #endif /* CONFIG_USERFAULTFD */ 6082 6083 static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma, 6084 int refs, struct page **pages, 6085 struct vm_area_struct **vmas) 6086 { 6087 int nr; 6088 6089 for (nr = 0; nr < refs; nr++) { 6090 if (likely(pages)) 6091 pages[nr] = mem_map_offset(page, nr); 6092 if (vmas) 6093 vmas[nr] = vma; 6094 } 6095 } 6096 6097 static inline bool __follow_hugetlb_must_fault(unsigned int flags, pte_t *pte, 6098 bool *unshare) 6099 { 6100 pte_t pteval = huge_ptep_get(pte); 6101 6102 *unshare = false; 6103 if (is_swap_pte(pteval)) 6104 return true; 6105 if (huge_pte_write(pteval)) 6106 return false; 6107 if (flags & FOLL_WRITE) 6108 return true; 6109 if (gup_must_unshare(flags, pte_page(pteval))) { 6110 *unshare = true; 6111 return true; 6112 } 6113 return false; 6114 } 6115 6116 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, 6117 struct page **pages, struct vm_area_struct **vmas, 6118 unsigned long *position, unsigned long *nr_pages, 6119 long i, unsigned int flags, int *locked) 6120 { 6121 unsigned long pfn_offset; 6122 unsigned long vaddr = *position; 6123 unsigned long remainder = *nr_pages; 6124 struct hstate *h = hstate_vma(vma); 6125 int err = -EFAULT, refs; 6126 6127 while (vaddr < vma->vm_end && remainder) { 6128 pte_t *pte; 6129 spinlock_t *ptl = NULL; 6130 bool unshare = false; 6131 int absent; 6132 struct page *page; 6133 6134 /* 6135 * If we have a pending SIGKILL, don't keep faulting pages and 6136 * potentially allocating memory. 6137 */ 6138 if (fatal_signal_pending(current)) { 6139 remainder = 0; 6140 break; 6141 } 6142 6143 /* 6144 * Some archs (sparc64, sh*) have multiple pte_ts to 6145 * each hugepage. We have to make sure we get the 6146 * first, for the page indexing below to work. 6147 * 6148 * Note that page table lock is not held when pte is null. 6149 */ 6150 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h), 6151 huge_page_size(h)); 6152 if (pte) 6153 ptl = huge_pte_lock(h, mm, pte); 6154 absent = !pte || huge_pte_none(huge_ptep_get(pte)); 6155 6156 /* 6157 * When coredumping, it suits get_dump_page if we just return 6158 * an error where there's an empty slot with no huge pagecache 6159 * to back it. This way, we avoid allocating a hugepage, and 6160 * the sparse dumpfile avoids allocating disk blocks, but its 6161 * huge holes still show up with zeroes where they need to be. 
6162 */ 6163 if (absent && (flags & FOLL_DUMP) && 6164 !hugetlbfs_pagecache_present(h, vma, vaddr)) { 6165 if (pte) 6166 spin_unlock(ptl); 6167 remainder = 0; 6168 break; 6169 } 6170 6171 /* 6172 * We need to call hugetlb_fault for both hugepages under migration 6173 * (in which case hugetlb_fault waits for the migration) and 6174 * hwpoisoned hugepages (in which case we need to prevent the 6175 * caller from accessing them). To do this, we use 6176 * is_swap_pte here instead of is_hugetlb_entry_migration and 6177 * is_hugetlb_entry_hwpoisoned, because it simply covers 6178 * both cases and because we can't follow correct pages 6179 * directly from any kind of swap entry. 6180 */ 6181 if (absent || 6182 __follow_hugetlb_must_fault(flags, pte, &unshare)) { 6183 vm_fault_t ret; 6184 unsigned int fault_flags = 0; 6185 6186 if (pte) 6187 spin_unlock(ptl); 6188 if (flags & FOLL_WRITE) 6189 fault_flags |= FAULT_FLAG_WRITE; 6190 else if (unshare) 6191 fault_flags |= FAULT_FLAG_UNSHARE; 6192 if (locked) 6193 fault_flags |= FAULT_FLAG_ALLOW_RETRY | 6194 FAULT_FLAG_KILLABLE; 6195 if (flags & FOLL_NOWAIT) 6196 fault_flags |= FAULT_FLAG_ALLOW_RETRY | 6197 FAULT_FLAG_RETRY_NOWAIT; 6198 if (flags & FOLL_TRIED) { 6199 /* 6200 * Note: FAULT_FLAG_ALLOW_RETRY and 6201 * FAULT_FLAG_TRIED can co-exist 6202 */ 6203 fault_flags |= FAULT_FLAG_TRIED; 6204 } 6205 ret = hugetlb_fault(mm, vma, vaddr, fault_flags); 6206 if (ret & VM_FAULT_ERROR) { 6207 err = vm_fault_to_errno(ret, flags); 6208 remainder = 0; 6209 break; 6210 } 6211 if (ret & VM_FAULT_RETRY) { 6212 if (locked && 6213 !(fault_flags & FAULT_FLAG_RETRY_NOWAIT)) 6214 *locked = 0; 6215 *nr_pages = 0; 6216 /* 6217 * VM_FAULT_RETRY must not return an 6218 * error, it will return zero 6219 * instead. 6220 * 6221 * No need to update "position" as the 6222 * caller will not check it after 6223 * *nr_pages is set to 0. 6224 */ 6225 return i; 6226 } 6227 continue; 6228 } 6229 6230 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT; 6231 page = pte_page(huge_ptep_get(pte)); 6232 6233 VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) && 6234 !PageAnonExclusive(page), page); 6235 6236 /* 6237 * If subpage information is not requested, update counters 6238 * and skip the same_page loop below. 6239 */ 6240 if (!pages && !vmas && !pfn_offset && 6241 (vaddr + huge_page_size(h) < vma->vm_end) && 6242 (remainder >= pages_per_huge_page(h))) { 6243 vaddr += huge_page_size(h); 6244 remainder -= pages_per_huge_page(h); 6245 i += pages_per_huge_page(h); 6246 spin_unlock(ptl); 6247 continue; 6248 } 6249 6250 /* vaddr may not be aligned to PAGE_SIZE */ 6251 refs = min3(pages_per_huge_page(h) - pfn_offset, remainder, 6252 (vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT); 6253 6254 if (pages || vmas) 6255 record_subpages_vmas(mem_map_offset(page, pfn_offset), 6256 vma, refs, 6257 likely(pages) ? pages + i : NULL, 6258 vmas ? vmas + i : NULL); 6259 6260 if (pages) { 6261 /* 6262 * try_grab_folio() should always succeed here, 6263 * because: a) we hold the ptl lock, and b) we've just 6264 * checked that the huge page is present in the page 6265 * tables. If the huge page is present, then the tail 6266 * pages must also be present. The ptl prevents the 6267 * head page and tail pages from being rearranged in 6268 * any way. 
So this page must be available at this 6269 * point, unless the page refcount overflowed: 6270 */ 6271 if (WARN_ON_ONCE(!try_grab_folio(pages[i], refs, 6272 flags))) { 6273 spin_unlock(ptl); 6274 remainder = 0; 6275 err = -ENOMEM; 6276 break; 6277 } 6278 } 6279 6280 vaddr += (refs << PAGE_SHIFT); 6281 remainder -= refs; 6282 i += refs; 6283 6284 spin_unlock(ptl); 6285 } 6286 *nr_pages = remainder; 6287 /* 6288 * setting position is actually required only if remainder is 6289 * not zero but it's faster not to add a "if (remainder)" 6290 * branch. 6291 */ 6292 *position = vaddr; 6293 6294 return i ? i : err; 6295 } 6296 6297 unsigned long hugetlb_change_protection(struct vm_area_struct *vma, 6298 unsigned long address, unsigned long end, 6299 pgprot_t newprot, unsigned long cp_flags) 6300 { 6301 struct mm_struct *mm = vma->vm_mm; 6302 unsigned long start = address; 6303 pte_t *ptep; 6304 pte_t pte; 6305 struct hstate *h = hstate_vma(vma); 6306 unsigned long pages = 0, psize = huge_page_size(h); 6307 bool shared_pmd = false; 6308 struct mmu_notifier_range range; 6309 unsigned long last_addr_mask; 6310 bool uffd_wp = cp_flags & MM_CP_UFFD_WP; 6311 bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE; 6312 6313 /* 6314 * In the case of shared PMDs, the area to flush could be beyond 6315 * start/end. Set range.start/range.end to cover the maximum possible 6316 * range if PMD sharing is possible. 6317 */ 6318 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 6319 0, vma, mm, start, end); 6320 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); 6321 6322 BUG_ON(address >= end); 6323 flush_cache_range(vma, range.start, range.end); 6324 6325 mmu_notifier_invalidate_range_start(&range); 6326 last_addr_mask = hugetlb_mask_last_page(h); 6327 i_mmap_lock_write(vma->vm_file->f_mapping); 6328 for (; address < end; address += psize) { 6329 spinlock_t *ptl; 6330 ptep = huge_pte_offset(mm, address, psize); 6331 if (!ptep) { 6332 address |= last_addr_mask; 6333 continue; 6334 } 6335 ptl = huge_pte_lock(h, mm, ptep); 6336 if (huge_pmd_unshare(mm, vma, address, ptep)) { 6337 /* 6338 * When uffd-wp is enabled on the vma, unshare 6339 * shouldn't happen at all. Warn about it if it 6340 * happened due to some reason. 6341 */ 6342 WARN_ON_ONCE(uffd_wp || uffd_wp_resolve); 6343 pages++; 6344 spin_unlock(ptl); 6345 shared_pmd = true; 6346 address |= last_addr_mask; 6347 continue; 6348 } 6349 pte = huge_ptep_get(ptep); 6350 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) { 6351 spin_unlock(ptl); 6352 continue; 6353 } 6354 if (unlikely(is_hugetlb_entry_migration(pte))) { 6355 swp_entry_t entry = pte_to_swp_entry(pte); 6356 struct page *page = pfn_swap_entry_to_page(entry); 6357 6358 if (!is_readable_migration_entry(entry)) { 6359 pte_t newpte; 6360 6361 if (PageAnon(page)) 6362 entry = make_readable_exclusive_migration_entry( 6363 swp_offset(entry)); 6364 else 6365 entry = make_readable_migration_entry( 6366 swp_offset(entry)); 6367 newpte = swp_entry_to_pte(entry); 6368 if (uffd_wp) 6369 newpte = pte_swp_mkuffd_wp(newpte); 6370 else if (uffd_wp_resolve) 6371 newpte = pte_swp_clear_uffd_wp(newpte); 6372 set_huge_pte_at(mm, address, ptep, newpte); 6373 pages++; 6374 } 6375 spin_unlock(ptl); 6376 continue; 6377 } 6378 if (unlikely(pte_marker_uffd_wp(pte))) { 6379 /* 6380 * This is changing a non-present pte into a none pte, 6381 * no need for huge_ptep_modify_prot_start/commit(). 
6382 */ 6383 if (uffd_wp_resolve) 6384 huge_pte_clear(mm, address, ptep, psize); 6385 } 6386 if (!huge_pte_none(pte)) { 6387 pte_t old_pte; 6388 unsigned int shift = huge_page_shift(hstate_vma(vma)); 6389 6390 old_pte = huge_ptep_modify_prot_start(vma, address, ptep); 6391 pte = huge_pte_modify(old_pte, newprot); 6392 pte = arch_make_huge_pte(pte, shift, vma->vm_flags); 6393 if (uffd_wp) 6394 pte = huge_pte_mkuffd_wp(huge_pte_wrprotect(pte)); 6395 else if (uffd_wp_resolve) 6396 pte = huge_pte_clear_uffd_wp(pte); 6397 huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte); 6398 pages++; 6399 } else { 6400 /* None pte */ 6401 if (unlikely(uffd_wp)) 6402 /* Safe to modify directly (none->non-present). */ 6403 set_huge_pte_at(mm, address, ptep, 6404 make_pte_marker(PTE_MARKER_UFFD_WP)); 6405 } 6406 spin_unlock(ptl); 6407 } 6408 /* 6409 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare 6410 * may have cleared our pud entry and done put_page on the page table: 6411 * once we release i_mmap_rwsem, another task can do the final put_page 6412 * and that page table be reused and filled with junk. If we actually 6413 * did unshare a page of pmds, flush the range corresponding to the pud. 6414 */ 6415 if (shared_pmd) 6416 flush_hugetlb_tlb_range(vma, range.start, range.end); 6417 else 6418 flush_hugetlb_tlb_range(vma, start, end); 6419 /* 6420 * No need to call mmu_notifier_invalidate_range() we are downgrading 6421 * page table protection not changing it to point to a new page. 6422 * 6423 * See Documentation/mm/mmu_notifier.rst 6424 */ 6425 i_mmap_unlock_write(vma->vm_file->f_mapping); 6426 mmu_notifier_invalidate_range_end(&range); 6427 6428 return pages << h->order; 6429 } 6430 6431 /* Return true if reservation was successful, false otherwise. */ 6432 bool hugetlb_reserve_pages(struct inode *inode, 6433 long from, long to, 6434 struct vm_area_struct *vma, 6435 vm_flags_t vm_flags) 6436 { 6437 long chg, add = -1; 6438 struct hstate *h = hstate_inode(inode); 6439 struct hugepage_subpool *spool = subpool_inode(inode); 6440 struct resv_map *resv_map; 6441 struct hugetlb_cgroup *h_cg = NULL; 6442 long gbl_reserve, regions_needed = 0; 6443 6444 /* This should never happen */ 6445 if (from > to) { 6446 VM_WARN(1, "%s called with a negative range\n", __func__); 6447 return false; 6448 } 6449 6450 /* 6451 * Only apply hugepage reservation if asked. At fault time, an 6452 * attempt will be made for VM_NORESERVE to allocate a page 6453 * without using reserves 6454 */ 6455 if (vm_flags & VM_NORESERVE) 6456 return true; 6457 6458 /* 6459 * Shared mappings base their reservation on the number of pages that 6460 * are already allocated on behalf of the file. Private mappings need 6461 * to reserve the full area even if read-only as mprotect() may be 6462 * called to make the mapping read-write. Assume !vma is a shm mapping 6463 */ 6464 if (!vma || vma->vm_flags & VM_MAYSHARE) { 6465 /* 6466 * resv_map can not be NULL as hugetlb_reserve_pages is only 6467 * called for inodes for which resv_maps were created (see 6468 * hugetlbfs_get_inode). 6469 */ 6470 resv_map = inode_resv_map(inode); 6471 6472 chg = region_chg(resv_map, from, to, ®ions_needed); 6473 6474 } else { 6475 /* Private mapping. 
*/ 6476 resv_map = resv_map_alloc(); 6477 if (!resv_map) 6478 return false; 6479 6480 chg = to - from; 6481 6482 set_vma_resv_map(vma, resv_map); 6483 set_vma_resv_flags(vma, HPAGE_RESV_OWNER); 6484 } 6485 6486 if (chg < 0) 6487 goto out_err; 6488 6489 if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h), 6490 chg * pages_per_huge_page(h), &h_cg) < 0) 6491 goto out_err; 6492 6493 if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) { 6494 /* For private mappings, the hugetlb_cgroup uncharge info hangs 6495 * of the resv_map. 6496 */ 6497 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h); 6498 } 6499 6500 /* 6501 * There must be enough pages in the subpool for the mapping. If 6502 * the subpool has a minimum size, there may be some global 6503 * reservations already in place (gbl_reserve). 6504 */ 6505 gbl_reserve = hugepage_subpool_get_pages(spool, chg); 6506 if (gbl_reserve < 0) 6507 goto out_uncharge_cgroup; 6508 6509 /* 6510 * Check enough hugepages are available for the reservation. 6511 * Hand the pages back to the subpool if there are not 6512 */ 6513 if (hugetlb_acct_memory(h, gbl_reserve) < 0) 6514 goto out_put_pages; 6515 6516 /* 6517 * Account for the reservations made. Shared mappings record regions 6518 * that have reservations as they are shared by multiple VMAs. 6519 * When the last VMA disappears, the region map says how much 6520 * the reservation was and the page cache tells how much of 6521 * the reservation was consumed. Private mappings are per-VMA and 6522 * only the consumed reservations are tracked. When the VMA 6523 * disappears, the original reservation is the VMA size and the 6524 * consumed reservations are stored in the map. Hence, nothing 6525 * else has to be done for private mappings here 6526 */ 6527 if (!vma || vma->vm_flags & VM_MAYSHARE) { 6528 add = region_add(resv_map, from, to, regions_needed, h, h_cg); 6529 6530 if (unlikely(add < 0)) { 6531 hugetlb_acct_memory(h, -gbl_reserve); 6532 goto out_put_pages; 6533 } else if (unlikely(chg > add)) { 6534 /* 6535 * pages in this range were added to the reserve 6536 * map between region_chg and region_add. This 6537 * indicates a race with alloc_huge_page. Adjust 6538 * the subpool and reserve counts modified above 6539 * based on the difference. 6540 */ 6541 long rsv_adjust; 6542 6543 /* 6544 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the 6545 * reference to h_cg->css. See comment below for detail. 6546 */ 6547 hugetlb_cgroup_uncharge_cgroup_rsvd( 6548 hstate_index(h), 6549 (chg - add) * pages_per_huge_page(h), h_cg); 6550 6551 rsv_adjust = hugepage_subpool_put_pages(spool, 6552 chg - add); 6553 hugetlb_acct_memory(h, -rsv_adjust); 6554 } else if (h_cg) { 6555 /* 6556 * The file_regions will hold their own reference to 6557 * h_cg->css. So we should release the reference held 6558 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are 6559 * done. 6560 */ 6561 hugetlb_cgroup_put_rsvd_cgroup(h_cg); 6562 } 6563 } 6564 return true; 6565 6566 out_put_pages: 6567 /* put back original number of pages, chg */ 6568 (void)hugepage_subpool_put_pages(spool, chg); 6569 out_uncharge_cgroup: 6570 hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h), 6571 chg * pages_per_huge_page(h), h_cg); 6572 out_err: 6573 if (!vma || vma->vm_flags & VM_MAYSHARE) 6574 /* Only call region_abort if the region_chg succeeded but the 6575 * region_add failed or didn't run. 
6576 */ 6577 if (chg >= 0 && add < 0) 6578 region_abort(resv_map, from, to, regions_needed); 6579 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) 6580 kref_put(&resv_map->refs, resv_map_release); 6581 return false; 6582 } 6583 6584 long hugetlb_unreserve_pages(struct inode *inode, long start, long end, 6585 long freed) 6586 { 6587 struct hstate *h = hstate_inode(inode); 6588 struct resv_map *resv_map = inode_resv_map(inode); 6589 long chg = 0; 6590 struct hugepage_subpool *spool = subpool_inode(inode); 6591 long gbl_reserve; 6592 6593 /* 6594 * Since this routine can be called in the evict inode path for all 6595 * hugetlbfs inodes, resv_map could be NULL. 6596 */ 6597 if (resv_map) { 6598 chg = region_del(resv_map, start, end); 6599 /* 6600 * region_del() can fail in the rare case where a region 6601 * must be split and another region descriptor can not be 6602 * allocated. If end == LONG_MAX, it will not fail. 6603 */ 6604 if (chg < 0) 6605 return chg; 6606 } 6607 6608 spin_lock(&inode->i_lock); 6609 inode->i_blocks -= (blocks_per_huge_page(h) * freed); 6610 spin_unlock(&inode->i_lock); 6611 6612 /* 6613 * If the subpool has a minimum size, the number of global 6614 * reservations to be released may be adjusted. 6615 * 6616 * Note that !resv_map implies freed == 0. So (chg - freed) 6617 * won't go negative. 6618 */ 6619 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed)); 6620 hugetlb_acct_memory(h, -gbl_reserve); 6621 6622 return 0; 6623 } 6624 6625 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE 6626 static unsigned long page_table_shareable(struct vm_area_struct *svma, 6627 struct vm_area_struct *vma, 6628 unsigned long addr, pgoff_t idx) 6629 { 6630 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) + 6631 svma->vm_start; 6632 unsigned long sbase = saddr & PUD_MASK; 6633 unsigned long s_end = sbase + PUD_SIZE; 6634 6635 /* Allow segments to share if only one is marked locked */ 6636 unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; 6637 unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK; 6638 6639 /* 6640 * match the virtual addresses, permission and the alignment of the 6641 * page table page. 6642 */ 6643 if (pmd_index(addr) != pmd_index(saddr) || 6644 vm_flags != svm_flags || 6645 !range_in_vma(svma, sbase, s_end)) 6646 return 0; 6647 6648 return saddr; 6649 } 6650 6651 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr) 6652 { 6653 unsigned long base = addr & PUD_MASK; 6654 unsigned long end = base + PUD_SIZE; 6655 6656 /* 6657 * check on proper vm_flags and page table alignment 6658 */ 6659 if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end)) 6660 return true; 6661 return false; 6662 } 6663 6664 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr) 6665 { 6666 #ifdef CONFIG_USERFAULTFD 6667 if (uffd_disable_huge_pmd_share(vma)) 6668 return false; 6669 #endif 6670 return vma_shareable(vma, addr); 6671 } 6672 6673 /* 6674 * Determine if start,end range within vma could be mapped by shared pmd. 6675 * If yes, adjust start and end to cover range associated with possible 6676 * shared pmd mappings. 6677 */ 6678 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, 6679 unsigned long *start, unsigned long *end) 6680 { 6681 unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE), 6682 v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE); 6683 6684 /* 6685 * vma needs to span at least one aligned PUD size, and the range 6686 * must be at least partially within in. 
6687 */ 6688 if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) || 6689 (*end <= v_start) || (*start >= v_end)) 6690 return; 6691 6692 /* Extend the range to be PUD aligned for a worst case scenario */ 6693 if (*start > v_start) 6694 *start = ALIGN_DOWN(*start, PUD_SIZE); 6695 6696 if (*end < v_end) 6697 *end = ALIGN(*end, PUD_SIZE); 6698 } 6699 6700 /* 6701 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc() 6702 * and returns the corresponding pte. While this is not necessary for the 6703 * !shared pmd case because we can allocate the pmd later as well, it makes the 6704 * code much cleaner. 6705 * 6706 * This routine must be called with i_mmap_rwsem held in at least read mode if 6707 * sharing is possible. For hugetlbfs, this prevents removal of any page 6708 * table entries associated with the address space. This is important as we 6709 * are setting up sharing based on existing page table entries (mappings). 6710 */ 6711 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, 6712 unsigned long addr, pud_t *pud) 6713 { 6714 struct address_space *mapping = vma->vm_file->f_mapping; 6715 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + 6716 vma->vm_pgoff; 6717 struct vm_area_struct *svma; 6718 unsigned long saddr; 6719 pte_t *spte = NULL; 6720 pte_t *pte; 6721 spinlock_t *ptl; 6722 6723 i_mmap_assert_locked(mapping); 6724 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { 6725 if (svma == vma) 6726 continue; 6727 6728 saddr = page_table_shareable(svma, vma, addr, idx); 6729 if (saddr) { 6730 spte = huge_pte_offset(svma->vm_mm, saddr, 6731 vma_mmu_pagesize(svma)); 6732 if (spte) { 6733 get_page(virt_to_page(spte)); 6734 break; 6735 } 6736 } 6737 } 6738 6739 if (!spte) 6740 goto out; 6741 6742 ptl = huge_pte_lock(hstate_vma(vma), mm, spte); 6743 if (pud_none(*pud)) { 6744 pud_populate(mm, pud, 6745 (pmd_t *)((unsigned long)spte & PAGE_MASK)); 6746 mm_inc_nr_pmds(mm); 6747 } else { 6748 put_page(virt_to_page(spte)); 6749 } 6750 spin_unlock(ptl); 6751 out: 6752 pte = (pte_t *)pmd_alloc(mm, pud, addr); 6753 return pte; 6754 } 6755 6756 /* 6757 * unmap huge page backed by shared pte. 6758 * 6759 * Hugetlb pte page is ref counted at the time of mapping. If pte is shared 6760 * indicated by page_count > 1, unmap is achieved by clearing pud and 6761 * decrementing the ref count. If count == 1, the pte page is not shared. 6762 * 6763 * Called with page table lock held and i_mmap_rwsem held in write mode. 
6764 * 6765 * returns: 1 successfully unmapped a shared pte page 6766 * 0 the underlying pte page is not shared, or it is the last user 6767 */ 6768 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, 6769 unsigned long addr, pte_t *ptep) 6770 { 6771 pgd_t *pgd = pgd_offset(mm, addr); 6772 p4d_t *p4d = p4d_offset(pgd, addr); 6773 pud_t *pud = pud_offset(p4d, addr); 6774 6775 i_mmap_assert_write_locked(vma->vm_file->f_mapping); 6776 BUG_ON(page_count(virt_to_page(ptep)) == 0); 6777 if (page_count(virt_to_page(ptep)) == 1) 6778 return 0; 6779 6780 pud_clear(pud); 6781 put_page(virt_to_page(ptep)); 6782 mm_dec_nr_pmds(mm); 6783 return 1; 6784 } 6785 6786 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ 6787 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, 6788 unsigned long addr, pud_t *pud) 6789 { 6790 return NULL; 6791 } 6792 6793 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, 6794 unsigned long addr, pte_t *ptep) 6795 { 6796 return 0; 6797 } 6798 6799 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, 6800 unsigned long *start, unsigned long *end) 6801 { 6802 } 6803 6804 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr) 6805 { 6806 return false; 6807 } 6808 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */ 6809 6810 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB 6811 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, 6812 unsigned long addr, unsigned long sz) 6813 { 6814 pgd_t *pgd; 6815 p4d_t *p4d; 6816 pud_t *pud; 6817 pte_t *pte = NULL; 6818 6819 pgd = pgd_offset(mm, addr); 6820 p4d = p4d_alloc(mm, pgd, addr); 6821 if (!p4d) 6822 return NULL; 6823 pud = pud_alloc(mm, p4d, addr); 6824 if (pud) { 6825 if (sz == PUD_SIZE) { 6826 pte = (pte_t *)pud; 6827 } else { 6828 BUG_ON(sz != PMD_SIZE); 6829 if (want_pmd_share(vma, addr) && pud_none(*pud)) 6830 pte = huge_pmd_share(mm, vma, addr, pud); 6831 else 6832 pte = (pte_t *)pmd_alloc(mm, pud, addr); 6833 } 6834 } 6835 BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte)); 6836 6837 return pte; 6838 } 6839 6840 /* 6841 * huge_pte_offset() - Walk the page table to resolve the hugepage 6842 * entry at address @addr 6843 * 6844 * Return: Pointer to page table entry (PUD or PMD) for 6845 * address @addr, or NULL if a !p*d_present() entry is encountered and the 6846 * size @sz doesn't match the hugepage size at this level of the page 6847 * table. 6848 */ 6849 pte_t *huge_pte_offset(struct mm_struct *mm, 6850 unsigned long addr, unsigned long sz) 6851 { 6852 pgd_t *pgd; 6853 p4d_t *p4d; 6854 pud_t *pud; 6855 pmd_t *pmd; 6856 6857 pgd = pgd_offset(mm, addr); 6858 if (!pgd_present(*pgd)) 6859 return NULL; 6860 p4d = p4d_offset(pgd, addr); 6861 if (!p4d_present(*p4d)) 6862 return NULL; 6863 6864 pud = pud_offset(p4d, addr); 6865 if (sz == PUD_SIZE) 6866 /* must be pud huge, non-present or none */ 6867 return (pte_t *)pud; 6868 if (!pud_present(*pud)) 6869 return NULL; 6870 /* must have a valid entry and size to go further */ 6871 6872 pmd = pmd_offset(pud, addr); 6873 /* must be pmd huge, non-present or none */ 6874 return (pte_t *)pmd; 6875 } 6876 6877 /* 6878 * Return a mask that can be used to update an address to the last huge 6879 * page in a page table page mapping size. Used to skip non-present 6880 * page table entries when linearly scanning address ranges. Architectures 6881 * with unique huge page to page table relationships can define their own 6882 * version of this routine. 
6883 */ 6884 unsigned long hugetlb_mask_last_page(struct hstate *h) 6885 { 6886 unsigned long hp_size = huge_page_size(h); 6887 6888 if (hp_size == PUD_SIZE) 6889 return P4D_SIZE - PUD_SIZE; 6890 else if (hp_size == PMD_SIZE) 6891 return PUD_SIZE - PMD_SIZE; 6892 else 6893 return 0UL; 6894 } 6895 6896 #else 6897 6898 /* See description above. Architectures can provide their own version. */ 6899 __weak unsigned long hugetlb_mask_last_page(struct hstate *h) 6900 { 6901 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE 6902 if (huge_page_size(h) == PMD_SIZE) 6903 return PUD_SIZE - PMD_SIZE; 6904 #endif 6905 return 0UL; 6906 } 6907 6908 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */ 6909 6910 /* 6911 * These functions are overwritable if your architecture needs its own 6912 * behavior. 6913 */ 6914 struct page * __weak 6915 follow_huge_addr(struct mm_struct *mm, unsigned long address, 6916 int write) 6917 { 6918 return ERR_PTR(-EINVAL); 6919 } 6920 6921 struct page * __weak 6922 follow_huge_pd(struct vm_area_struct *vma, 6923 unsigned long address, hugepd_t hpd, int flags, int pdshift) 6924 { 6925 WARN(1, "hugepd follow called with no support for hugepage directory format\n"); 6926 return NULL; 6927 } 6928 6929 struct page * __weak 6930 follow_huge_pmd(struct mm_struct *mm, unsigned long address, 6931 pmd_t *pmd, int flags) 6932 { 6933 struct page *page = NULL; 6934 spinlock_t *ptl; 6935 pte_t pte; 6936 6937 /* 6938 * FOLL_PIN is not supported for follow_page(). Ordinary GUP goes via 6939 * follow_hugetlb_page(). 6940 */ 6941 if (WARN_ON_ONCE(flags & FOLL_PIN)) 6942 return NULL; 6943 6944 retry: 6945 ptl = pmd_lockptr(mm, pmd); 6946 spin_lock(ptl); 6947 /* 6948 * make sure that the address range covered by this pmd is not 6949 * unmapped from other threads. 6950 */ 6951 if (!pmd_huge(*pmd)) 6952 goto out; 6953 pte = huge_ptep_get((pte_t *)pmd); 6954 if (pte_present(pte)) { 6955 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT); 6956 /* 6957 * try_grab_page() should always succeed here, because: a) we 6958 * hold the pmd (ptl) lock, and b) we've just checked that the 6959 * huge pmd (head) page is present in the page tables. The ptl 6960 * prevents the head page and tail pages from being rearranged 6961 * in any way. So this page must be available at this point, 6962 * unless the page refcount overflowed: 6963 */ 6964 if (WARN_ON_ONCE(!try_grab_page(page, flags))) { 6965 page = NULL; 6966 goto out; 6967 } 6968 } else { 6969 if (is_hugetlb_entry_migration(pte)) { 6970 spin_unlock(ptl); 6971 __migration_entry_wait_huge((pte_t *)pmd, ptl); 6972 goto retry; 6973 } 6974 /* 6975 * hwpoisoned entry is treated as no_page_table in 6976 * follow_page_mask(). 
6977 */ 6978 } 6979 out: 6980 spin_unlock(ptl); 6981 return page; 6982 } 6983 6984 struct page * __weak 6985 follow_huge_pud(struct mm_struct *mm, unsigned long address, 6986 pud_t *pud, int flags) 6987 { 6988 if (flags & (FOLL_GET | FOLL_PIN)) 6989 return NULL; 6990 6991 return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT); 6992 } 6993 6994 struct page * __weak 6995 follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags) 6996 { 6997 if (flags & (FOLL_GET | FOLL_PIN)) 6998 return NULL; 6999 7000 return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT); 7001 } 7002 7003 int isolate_hugetlb(struct page *page, struct list_head *list) 7004 { 7005 int ret = 0; 7006 7007 spin_lock_irq(&hugetlb_lock); 7008 if (!PageHeadHuge(page) || 7009 !HPageMigratable(page) || 7010 !get_page_unless_zero(page)) { 7011 ret = -EBUSY; 7012 goto unlock; 7013 } 7014 ClearHPageMigratable(page); 7015 list_move_tail(&page->lru, list); 7016 unlock: 7017 spin_unlock_irq(&hugetlb_lock); 7018 return ret; 7019 } 7020 7021 int get_hwpoison_huge_page(struct page *page, bool *hugetlb) 7022 { 7023 int ret = 0; 7024 7025 *hugetlb = false; 7026 spin_lock_irq(&hugetlb_lock); 7027 if (PageHeadHuge(page)) { 7028 *hugetlb = true; 7029 if (HPageFreed(page)) 7030 ret = 0; 7031 else if (HPageMigratable(page)) 7032 ret = get_page_unless_zero(page); 7033 else 7034 ret = -EBUSY; 7035 } 7036 spin_unlock_irq(&hugetlb_lock); 7037 return ret; 7038 } 7039 7040 int get_huge_page_for_hwpoison(unsigned long pfn, int flags) 7041 { 7042 int ret; 7043 7044 spin_lock_irq(&hugetlb_lock); 7045 ret = __get_huge_page_for_hwpoison(pfn, flags); 7046 spin_unlock_irq(&hugetlb_lock); 7047 return ret; 7048 } 7049 7050 void putback_active_hugepage(struct page *page) 7051 { 7052 spin_lock_irq(&hugetlb_lock); 7053 SetHPageMigratable(page); 7054 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist); 7055 spin_unlock_irq(&hugetlb_lock); 7056 put_page(page); 7057 } 7058 7059 void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason) 7060 { 7061 struct hstate *h = page_hstate(oldpage); 7062 7063 hugetlb_cgroup_migrate(oldpage, newpage); 7064 set_page_owner_migrate_reason(newpage, reason); 7065 7066 /* 7067 * transfer temporary state of the new huge page. This is 7068 * reverse to other transitions because the newpage is going to 7069 * be final while the old one will be freed so it takes over 7070 * the temporary status. 7071 * 7072 * Also note that we have to transfer the per-node surplus state 7073 * here as well otherwise the global surplus count will not match 7074 * the per-node's. 7075 */ 7076 if (HPageTemporary(newpage)) { 7077 int old_nid = page_to_nid(oldpage); 7078 int new_nid = page_to_nid(newpage); 7079 7080 SetHPageTemporary(oldpage); 7081 ClearHPageTemporary(newpage); 7082 7083 /* 7084 * There is no need to transfer the per-node surplus state 7085 * when we do not cross the node. 7086 */ 7087 if (new_nid == old_nid) 7088 return; 7089 spin_lock_irq(&hugetlb_lock); 7090 if (h->surplus_huge_pages_node[old_nid]) { 7091 h->surplus_huge_pages_node[old_nid]--; 7092 h->surplus_huge_pages_node[new_nid]++; 7093 } 7094 spin_unlock_irq(&hugetlb_lock); 7095 } 7096 } 7097 7098 /* 7099 * This function will unconditionally remove all the shared pmd pgtable entries 7100 * within the specific vma for a hugetlbfs memory range. 
7101 */ 7102 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) 7103 { 7104 struct hstate *h = hstate_vma(vma); 7105 unsigned long sz = huge_page_size(h); 7106 struct mm_struct *mm = vma->vm_mm; 7107 struct mmu_notifier_range range; 7108 unsigned long address, start, end; 7109 spinlock_t *ptl; 7110 pte_t *ptep; 7111 7112 if (!(vma->vm_flags & VM_MAYSHARE)) 7113 return; 7114 7115 start = ALIGN(vma->vm_start, PUD_SIZE); 7116 end = ALIGN_DOWN(vma->vm_end, PUD_SIZE); 7117 7118 if (start >= end) 7119 return; 7120 7121 flush_cache_range(vma, start, end); 7122 /* 7123 * No need to call adjust_range_if_pmd_sharing_possible(), because 7124 * we have already done the PUD_SIZE alignment. 7125 */ 7126 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, 7127 start, end); 7128 mmu_notifier_invalidate_range_start(&range); 7129 i_mmap_lock_write(vma->vm_file->f_mapping); 7130 for (address = start; address < end; address += PUD_SIZE) { 7131 ptep = huge_pte_offset(mm, address, sz); 7132 if (!ptep) 7133 continue; 7134 ptl = huge_pte_lock(h, mm, ptep); 7135 huge_pmd_unshare(mm, vma, address, ptep); 7136 spin_unlock(ptl); 7137 } 7138 flush_hugetlb_tlb_range(vma, start, end); 7139 i_mmap_unlock_write(vma->vm_file->f_mapping); 7140 /* 7141 * No need to call mmu_notifier_invalidate_range(), see 7142 * Documentation/mm/mmu_notifier.rst. 7143 */ 7144 mmu_notifier_invalidate_range_end(&range); 7145 } 7146 7147 #ifdef CONFIG_CMA 7148 static bool cma_reserve_called __initdata; 7149 7150 static int __init cmdline_parse_hugetlb_cma(char *p) 7151 { 7152 int nid, count = 0; 7153 unsigned long tmp; 7154 char *s = p; 7155 7156 while (*s) { 7157 if (sscanf(s, "%lu%n", &tmp, &count) != 1) 7158 break; 7159 7160 if (s[count] == ':') { 7161 if (tmp >= MAX_NUMNODES) 7162 break; 7163 nid = array_index_nospec(tmp, MAX_NUMNODES); 7164 7165 s += count + 1; 7166 tmp = memparse(s, &s); 7167 hugetlb_cma_size_in_node[nid] = tmp; 7168 hugetlb_cma_size += tmp; 7169 7170 /* 7171 * Skip the separator if have one, otherwise 7172 * break the parsing. 7173 */ 7174 if (*s == ',') 7175 s++; 7176 else 7177 break; 7178 } else { 7179 hugetlb_cma_size = memparse(p, &p); 7180 break; 7181 } 7182 } 7183 7184 return 0; 7185 } 7186 7187 early_param("hugetlb_cma", cmdline_parse_hugetlb_cma); 7188 7189 void __init hugetlb_cma_reserve(int order) 7190 { 7191 unsigned long size, reserved, per_node; 7192 bool node_specific_cma_alloc = false; 7193 int nid; 7194 7195 cma_reserve_called = true; 7196 7197 if (!hugetlb_cma_size) 7198 return; 7199 7200 for (nid = 0; nid < MAX_NUMNODES; nid++) { 7201 if (hugetlb_cma_size_in_node[nid] == 0) 7202 continue; 7203 7204 if (!node_online(nid)) { 7205 pr_warn("hugetlb_cma: invalid node %d specified\n", nid); 7206 hugetlb_cma_size -= hugetlb_cma_size_in_node[nid]; 7207 hugetlb_cma_size_in_node[nid] = 0; 7208 continue; 7209 } 7210 7211 if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) { 7212 pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n", 7213 nid, (PAGE_SIZE << order) / SZ_1M); 7214 hugetlb_cma_size -= hugetlb_cma_size_in_node[nid]; 7215 hugetlb_cma_size_in_node[nid] = 0; 7216 } else { 7217 node_specific_cma_alloc = true; 7218 } 7219 } 7220 7221 /* Validate the CMA size again in case some invalid nodes specified. 
*/ 7222 if (!hugetlb_cma_size) 7223 return; 7224 7225 if (hugetlb_cma_size < (PAGE_SIZE << order)) { 7226 pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n", 7227 (PAGE_SIZE << order) / SZ_1M); 7228 hugetlb_cma_size = 0; 7229 return; 7230 } 7231 7232 if (!node_specific_cma_alloc) { 7233 /* 7234 * If 3 GB area is requested on a machine with 4 numa nodes, 7235 * let's allocate 1 GB on first three nodes and ignore the last one. 7236 */ 7237 per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes); 7238 pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n", 7239 hugetlb_cma_size / SZ_1M, per_node / SZ_1M); 7240 } 7241 7242 reserved = 0; 7243 for_each_online_node(nid) { 7244 int res; 7245 char name[CMA_MAX_NAME]; 7246 7247 if (node_specific_cma_alloc) { 7248 if (hugetlb_cma_size_in_node[nid] == 0) 7249 continue; 7250 7251 size = hugetlb_cma_size_in_node[nid]; 7252 } else { 7253 size = min(per_node, hugetlb_cma_size - reserved); 7254 } 7255 7256 size = round_up(size, PAGE_SIZE << order); 7257 7258 snprintf(name, sizeof(name), "hugetlb%d", nid); 7259 /* 7260 * Note that 'order per bit' is based on smallest size that 7261 * may be returned to CMA allocator in the case of 7262 * huge page demotion. 7263 */ 7264 res = cma_declare_contiguous_nid(0, size, 0, 7265 PAGE_SIZE << HUGETLB_PAGE_ORDER, 7266 0, false, name, 7267 &hugetlb_cma[nid], nid); 7268 if (res) { 7269 pr_warn("hugetlb_cma: reservation failed: err %d, node %d", 7270 res, nid); 7271 continue; 7272 } 7273 7274 reserved += size; 7275 pr_info("hugetlb_cma: reserved %lu MiB on node %d\n", 7276 size / SZ_1M, nid); 7277 7278 if (reserved >= hugetlb_cma_size) 7279 break; 7280 } 7281 7282 if (!reserved) 7283 /* 7284 * hugetlb_cma_size is used to determine if allocations from 7285 * cma are possible. Set to zero if no cma regions are set up. 7286 */ 7287 hugetlb_cma_size = 0; 7288 } 7289 7290 void __init hugetlb_cma_check(void) 7291 { 7292 if (!hugetlb_cma_size || cma_reserve_called) 7293 return; 7294 7295 pr_warn("hugetlb_cma: the option isn't supported by current arch\n"); 7296 } 7297 7298 #endif /* CONFIG_CMA */ 7299