Lines matching defs:h (uses of the per-pool descriptor struct hstate *h in the kernel's hugetlb code; the number at the start of each line is that line's number in the source file)

6 #include <linux/list.h>
7 #include <linux/init.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/compiler.h>
17 #include <linux/cpuset.h>
18 #include <linux/mutex.h>
19 #include <linux/memblock.h>
20 #include <linux/sysfs.h>
21 #include <linux/slab.h>
22 #include <linux/sched/mm.h>
23 #include <linux/mmdebug.h>
24 #include <linux/sched/signal.h>
25 #include <linux/rmap.h>
26 #include <linux/string_helpers.h>
27 #include <linux/swap.h>
28 #include <linux/swapops.h>
29 #include <linux/jhash.h>
30 #include <linux/numa.h>
31 #include <linux/llist.h>
32 #include <linux/cma.h>
33 #include <linux/migrate.h>
34 #include <linux/nospec.h>
35 #include <linux/delayacct.h>
36 #include <linux/memory.h>
37 #include <linux/mm_inline.h>
39 #include <asm/page.h>
40 #include <asm/pgalloc.h>
41 #include <asm/tlb.h>
43 #include <linux/io.h>
44 #include <linux/hugetlb.h>
45 #include <linux/hugetlb_cgroup.h>
46 #include <linux/node.h>
47 #include <linux/page_owner.h>
48 #include "internal.h"
49 #include "hugetlb_vmemmap.h"
94 static int hugetlb_acct_memory(struct hstate *h, long delta);
130 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
142 spool->hstate = h;
145 if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
466 struct hstate *h,
473 &h_cg->rsvd_hugepage[hstate_index(h)];
487 resv->pages_per_hpage = pages_per_huge_page(h);
491 VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
548 long to, struct hstate *h, struct hugetlb_cgroup *cg,
555 record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
574 struct hstate *h, long *regions_needed)
614 iter->from, h, h_cg,
627 t, h, h_cg, regions_needed);
706 long in_regions_needed, struct hstate *h,
744 add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);
942 struct hstate *h = hstate_inode(inode);
944 if (!hugetlb_acct_memory(h, 1))
989 static pgoff_t vma_hugecache_offset(struct hstate *h,
992 return ((address - vma->vm_start) >> huge_page_shift(h)) +
993 (vma->vm_pgoff >> huge_page_order(h));
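Worked example for the offset computation at lines 992-993 (illustrative numbers): on a 2 MB hstate with 4 KB base pages, huge_page_shift(h) is 21 and huge_page_order(h) is 9; for vma->vm_pgoff == 0 and an address 4 MB past vma->vm_start, the result is (4 MB >> 21) + (0 >> 9) = 2, i.e. the third huge-page-sized slot of the mapping.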
1073 struct hstate *h)
1076 if (!h_cg || !h) {
1082 &h_cg->rsvd_hugepage[hstate_index(h)];
1083 resv_map->pages_per_hpage = pages_per_huge_page(h);
1317 static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
1324 list_move(&folio->lru, &h->hugepage_freelists[nid]);
1325 h->free_huge_pages++;
1326 h->free_huge_pages_node[nid]++;
1330 static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
1337 list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
1344 list_move(&folio->lru, &h->hugepage_activelist);
1347 h->free_huge_pages--;
1348 h->free_huge_pages_node[nid]--;
1355 static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
1381 folio = dequeue_hugetlb_folio_node_exact(h, node);
1391 static unsigned long available_huge_pages(struct hstate *h)
1393 return h->free_huge_pages - h->resv_huge_pages;
1396 static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
1412 if (!vma_has_reserves(vma, chg) && !available_huge_pages(h))
1416 if (avoid_reserve && !available_huge_pages(h))
1419 gfp_mask = htlb_alloc_mask(h);
1423 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
1431 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
1436 h->resv_huge_pages--;
1449 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
1474 static int hstate_next_node_to_alloc(struct hstate *h,
1481 nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
1482 h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
1493 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
1499 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
1500 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
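The enqueue/dequeue matches above (lines 1317-1348) show the bookkeeping invariant that the global and per-node free counters move in lockstep with the list operation. A minimal sketch of that shape, assuming the hugetlb lock is already held and skipping folio-flag updates (sketch_enqueue is hypothetical, not the file's code):

static void sketch_enqueue(struct hstate *h, struct folio *folio, int nid)
{
	/* one folio entered the per-node free list: bump both counters */
	list_move(&folio->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
}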
1571 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
1575 unsigned long nr_pages = pages_per_huge_page(h);
1585 huge_page_order(h), true);
1596 huge_page_order(h), true);
1609 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
1617 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
1628 static inline void __clear_hugetlb_destructor(struct hstate *h,
1646 static void __remove_hugetlb_folio(struct hstate *h, struct folio *folio,
1656 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
1662 h->free_huge_pages--;
1663 h->free_huge_pages_node[nid]--;
1666 h->surplus_huge_pages--;
1667 h->surplus_huge_pages_node[nid]--;
1676 __clear_hugetlb_destructor(h, folio);
1685 h->nr_huge_pages--;
1686 h->nr_huge_pages_node[nid]--;
1689 static void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
1692 __remove_hugetlb_folio(h, folio, adjust_surplus, false);
1695 static void remove_hugetlb_folio_for_demote(struct hstate *h, struct folio *folio,
1698 __remove_hugetlb_folio(h, folio, adjust_surplus, true);
1701 static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
1712 h->nr_huge_pages++;
1713 h->nr_huge_pages_node[nid]++;
1716 h->surplus_huge_pages++;
1717 h->surplus_huge_pages_node[nid]++;
1744 enqueue_hugetlb_folio(h, folio);
1747 static void __update_and_free_hugetlb_folio(struct hstate *h,
1750 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
1760 if (hugetlb_vmemmap_restore(h, &folio->page)) {
1767 add_hugetlb_folio(h, folio, true);
1778 __clear_hugetlb_destructor(h, folio);
1793 if (hstate_is_gigantic(h) ||
1794 hugetlb_cma_folio(folio, huge_page_order(h))) {
1795 destroy_compound_gigantic_folio(folio, huge_page_order(h));
1796 free_gigantic_folio(folio, huge_page_order(h));
1799 __free_pages(&folio->page, huge_page_order(h));
1824 struct hstate *h;
1836 h = size_to_hstate(page_size(page));
1838 __update_and_free_hugetlb_folio(h, page_folio(page));
1845 static inline void flush_free_hpage_work(struct hstate *h)
1847 if (hugetlb_vmemmap_optimizable(h))
1851 static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
1855 __update_and_free_hugetlb_folio(h, folio);
1870 static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
1877 update_and_free_hugetlb_folio(h, folio, false);
1884 struct hstate *h;
1886 for_each_hstate(h) {
1887 if (huge_page_size(h) == size)
1888 return h;
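Lines 1884-1888 are a size-to-hstate lookup; the same for_each_hstate() scan is the standard way this file visits every registered pool. Spelled out in full as a hedged sketch (sketch_size_to_hstate is a stand-in name; the loop mirrors what the matches show):

static struct hstate *sketch_size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h)		/* walk every registered pool */
		if (huge_page_size(h) == size)
			return h;
	return NULL;			/* no pool manages pages of this size */
}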
1899 struct hstate *h = folio_hstate(folio);
1936 hugetlb_cgroup_uncharge_folio(hstate_index(h),
1937 pages_per_huge_page(h), folio);
1938 hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
1939 pages_per_huge_page(h), folio);
1941 h->resv_huge_pages++;
1944 remove_hugetlb_folio(h, folio, false);
1946 update_and_free_hugetlb_folio(h, folio, true);
1947 } else if (h->surplus_huge_pages_node[nid]) {
1949 remove_hugetlb_folio(h, folio, true);
1951 update_and_free_hugetlb_folio(h, folio, true);
1954 enqueue_hugetlb_folio(h, folio);
1962 static void __prep_account_new_huge_page(struct hstate *h, int nid)
1965 h->nr_huge_pages++;
1966 h->nr_huge_pages_node[nid]++;
1969 static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
1971 hugetlb_vmemmap_optimize(h, &folio->page);
1979 static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid)
1981 __prep_new_hugetlb_folio(h, folio);
1983 __prep_account_new_huge_page(h, nid);
2110 static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
2114 int order = huge_page_order(h);
2180 static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
2188 if (hstate_is_gigantic(h))
2189 folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);
2191 folio = alloc_buddy_hugetlb_folio(h, gfp_mask,
2195 if (hstate_is_gigantic(h)) {
2196 if (!prep_compound_gigantic_folio(folio, huge_page_order(h))) {
2201 free_gigantic_folio(folio, huge_page_order(h));
2209 prep_new_hugetlb_folio(h, folio, folio_nid(folio));
2218 static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
2223 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2225 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2226 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, node,
2244 static struct page *remove_pool_huge_page(struct hstate *h,
2253 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2258 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
2259 !list_empty(&h->hugepage_freelists[node])) {
2260 page = list_entry(h->hugepage_freelists[node].next,
2263 remove_hugetlb_folio(h, folio, acct_surplus);
2302 struct hstate *h = folio_hstate(folio);
2303 if (!available_huge_pages(h))
2325 remove_hugetlb_folio(h, folio, false);
2326 h->max_huge_pages--;
2337 rc = hugetlb_vmemmap_restore(h, &folio->page);
2339 update_and_free_hugetlb_folio(h, folio, false);
2342 add_hugetlb_folio(h, folio, false);
2343 h->max_huge_pages++;
2368 struct hstate *h;
2374 for_each_hstate(h)
2375 order = min(order, huge_page_order(h));
2390 static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
2395 if (hstate_is_gigantic(h))
2399 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
2403 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
2415 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
2422 h->surplus_huge_pages++;
2423 h->surplus_huge_pages_node[folio_nid(folio)]++;
2431 static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask,
2436 if (hstate_is_gigantic(h))
2439 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
2458 struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
2463 gfp_t gfp_mask = htlb_alloc_mask(h);
2472 folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask);
2479 folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
2485 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
2489 if (available_huge_pages(h)) {
2492 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
2501 return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask);
2505 struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma,
2514 gfp_mask = htlb_alloc_mask(h);
2516 folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask);
2543 static int gather_surplus_pages(struct hstate *h, long delta)
2553 nodemask_t *mbind_nodemask = policy_mbind_nodemask(htlb_alloc_mask(h));
2556 needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
2558 h->resv_huge_pages += delta;
2571 folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
2591 needed = (h->resv_huge_pages + delta) -
2592 (h->free_huge_pages + allocated);
2612 h->resv_huge_pages += delta;
2620 enqueue_hugetlb_folio(h, folio);
2644 static void return_unused_surplus_pages(struct hstate *h,
2653 h->resv_huge_pages -= unused_resv_pages;
2655 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
2662 nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
2673 page = remove_pool_huge_page(h, &node_states[N_MEMORY], 1);
2682 update_and_free_pages_bulk(h, &page_list);
2723 static long __vma_reservation_common(struct hstate *h,
2736 idx = vma_hugecache_offset(h, vma, addr);
2803 static long vma_needs_reservation(struct hstate *h,
2806 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
2809 static long vma_commit_reservation(struct hstate *h,
2812 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
2815 static void vma_end_reservation(struct hstate *h,
2818 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
2821 static long vma_add_reservation(struct hstate *h,
2824 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
2827 static long vma_del_reservation(struct hstate *h,
2830 return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
2853 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
2856 long rc = vma_needs_reservation(h, vma, address);
2873 (void)vma_add_reservation(h, vma, address);
2875 vma_end_reservation(h, vma, address);
2886 rc = vma_del_reservation(h, vma, address);
2922 vma_end_reservation(h, vma, address);
2929 * @h: struct hstate old page belongs to
2934 static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
2937 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2949 new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, NULL, NULL);
2952 __prep_new_hugetlb_folio(h, new_folio);
2990 remove_hugetlb_folio(h, old_folio, false);
2996 __prep_account_new_huge_page(h, nid);
2997 enqueue_hugetlb_folio(h, new_folio);
3003 update_and_free_hugetlb_folio(h, old_folio, false);
3012 update_and_free_hugetlb_folio(h, new_folio, false);
3019 struct hstate *h;
3030 h = folio_hstate(folio);
3042 if (hstate_is_gigantic(h))
3048 ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);
3057 struct hstate *h = hstate_vma(vma);
3065 idx = hstate_index(h);
3071 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
3085 vma_end_reservation(h, vma, addr);
3106 idx, pages_per_huge_page(h), &h_cg);
3111 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
3121 folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg);
3124 folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
3130 h->resv_huge_pages--;
3132 list_add(&folio->lru, &h->hugepage_activelist);
3137 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
3142 hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
3150 map_commit = vma_commit_reservation(h, vma, addr);
3164 hugetlb_acct_memory(h, -rsv_adjust);
3167 hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
3168 pages_per_huge_page(h), folio);
3175 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
3178 hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
3183 vma_end_reservation(h, vma, addr);
3187 int alloc_bootmem_huge_page(struct hstate *h, int nid)
3189 int __alloc_bootmem_huge_page(struct hstate *h, int nid)
3196 m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h),
3203 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
3205 huge_page_size(h), huge_page_size(h),
3221 m->hstate = h;
3236 struct hstate *h = m->hstate;
3238 VM_BUG_ON(!hstate_is_gigantic(h));
3240 if (prep_compound_gigantic_folio(folio, huge_page_order(h))) {
3242 prep_new_hugetlb_folio(h, folio, folio_nid(folio));
3246 free_gigantic_folio(folio, huge_page_order(h));
3254 adjust_managed_page_count(page, pages_per_huge_page(h));
3258 static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
3263 for (i = 0; i < h->max_huge_pages_node[nid]; ++i) {
3264 if (hstate_is_gigantic(h)) {
3265 if (!alloc_bootmem_huge_page(h, nid))
3269 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
3271 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid,
3279 if (i == h->max_huge_pages_node[nid])
3282 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3284 h->max_huge_pages_node[nid], buf, nid, i);
3285 h->max_huge_pages -= (h->max_huge_pages_node[nid] - i);
3286 h->max_huge_pages_node[nid] = i;
3289 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
3296 if (hstate_is_gigantic(h) && hugetlb_cma_size) {
3303 if (h->max_huge_pages_node[i] > 0) {
3304 hugetlb_hstate_alloc_pages_onenode(h, i);
3313 if (!hstate_is_gigantic(h)) {
3331 for (i = 0; i < h->max_huge_pages; ++i) {
3332 if (hstate_is_gigantic(h)) {
3333 if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
3335 } else if (!alloc_pool_huge_page(h,
3341 if (i < h->max_huge_pages) {
3344 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3346 h->max_huge_pages, buf, i);
3347 h->max_huge_pages = i;
3354 struct hstate *h, *h2;
3356 for_each_hstate(h) {
3358 if (!hstate_is_gigantic(h))
3359 hugetlb_hstate_alloc_pages(h);
3363 * h->demote_order is initially 0.
3369 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
3371 if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER)
3374 if (h2 == h)
3376 if (h2->order < h->order &&
3377 h2->order > h->demote_order)
3378 h->demote_order = h2->order;
3385 struct hstate *h;
3387 for_each_hstate(h) {
3390 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3392 buf, h->free_huge_pages);
3394 hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf);
3399 static void try_to_free_low(struct hstate *h, unsigned long count,
3406 if (hstate_is_gigantic(h))
3414 struct list_head *freel = &h->hugepage_freelists[i];
3416 if (count >= h->nr_huge_pages)
3420 remove_hugetlb_folio(h, page_folio(page), false);
3427 update_and_free_pages_bulk(h, &page_list);
3431 static inline void try_to_free_low(struct hstate *h, unsigned long count,
3442 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
3451 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
3452 if (h->surplus_huge_pages_node[node])
3456 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3457 if (h->surplus_huge_pages_node[node] <
3458 h->nr_huge_pages_node[node])
3465 h->surplus_huge_pages += delta;
3466 h->surplus_huge_pages_node[node] += delta;
3470 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
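Worked example for the macro at line 3470 (illustrative numbers): with h->nr_huge_pages == 10 and h->surplus_huge_pages == 3, persistent_huge_pages(h) is 7; the comparisons in set_max_huge_pages() below are made against that persistent count, so surplus pages never count toward the administrator's target.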
3471 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
3493 mutex_lock(&h->resize_lock);
3494 flush_free_hpage_work(h);
3506 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
3524 if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
3525 if (count > persistent_huge_pages(h)) {
3527 mutex_unlock(&h->resize_lock);
3545 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
3546 if (!adjust_pool_surplus(h, nodes_allowed, -1))
3550 while (count > persistent_huge_pages(h)) {
3561 ret = alloc_pool_huge_page(h, nodes_allowed,
3587 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
3589 try_to_free_low(h, min_count, nodes_allowed);
3594 while (min_count < persistent_huge_pages(h)) {
3595 page = remove_pool_huge_page(h, nodes_allowed, 0);
3603 update_and_free_pages_bulk(h, &page_list);
3604 flush_free_hpage_work(h);
3607 while (count < persistent_huge_pages(h)) {
3608 if (!adjust_pool_surplus(h, nodes_allowed, 1))
3612 h->max_huge_pages = persistent_huge_pages(h);
3614 mutex_unlock(&h->resize_lock);
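Lines 3493-3614 show the serialization pattern for resizing a pool: take h->resize_lock, flush deferred frees, adjust the pool, drop the lock. A minimal sketch of that shape, with the actual grow/shrink work elided (sketch_resize_section is a hypothetical wrapper):

static void sketch_resize_section(struct hstate *h)
{
	mutex_lock(&h->resize_lock);	/* one resize per hstate at a time */
	flush_free_hpage_work(h);	/* settle frees deferred for vmemmap handling */

	/* ... grow or shrink the pool while the lock is held ... */

	mutex_unlock(&h->resize_lock);
}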
3621 static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio)
3629 target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order);
3631 remove_hugetlb_folio_for_demote(h, folio, false);
3634 rc = hugetlb_vmemmap_restore(h, &folio->page);
3639 add_hugetlb_folio(h, folio, false);
3647 destroy_compound_hugetlb_folio_for_demote(folio, huge_page_order(h));
3654 * Note that we already hold h->resize_lock. To prevent deadlock,
3658 for (i = 0; i < pages_per_huge_page(h);
3679 h->max_huge_pages--;
3681 pages_per_huge_page(h) / pages_per_huge_page(target_hstate);
3686 static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
3695 if (!h->demote_order) {
3700 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3701 list_for_each_entry(folio, &h->hugepage_freelists[node], lru) {
3704 return demote_free_hugetlb_folio(h, folio);
3746 struct hstate *h;
3750 h = kobj_to_hstate(kobj, &nid);
3752 nr_huge_pages = h->nr_huge_pages;
3754 nr_huge_pages = h->nr_huge_pages_node[nid];
3760 struct hstate *h, int nid,
3766 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
3787 err = set_max_huge_pages(h, count, nid, n_mask);
3796 struct hstate *h;
3805 h = kobj_to_hstate(kobj, &nid);
3806 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
3847 struct hstate *h = kobj_to_hstate(kobj, NULL);
3848 return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages);
3856 struct hstate *h = kobj_to_hstate(kobj, NULL);
3858 if (hstate_is_gigantic(h))
3866 h->nr_overcommit_huge_pages = input;
3876 struct hstate *h;
3880 h = kobj_to_hstate(kobj, &nid);
3882 free_huge_pages = h->free_huge_pages;
3884 free_huge_pages = h->free_huge_pages_node[nid];
3893 struct hstate *h = kobj_to_hstate(kobj, NULL);
3894 return sysfs_emit(buf, "%lu\n", h->resv_huge_pages);
3901 struct hstate *h;
3905 h = kobj_to_hstate(kobj, &nid);
3907 surplus_huge_pages = h->surplus_huge_pages;
3909 surplus_huge_pages = h->surplus_huge_pages_node[nid];
3921 struct hstate *h;
3928 h = kobj_to_hstate(kobj, &nid);
3938 mutex_lock(&h->resize_lock);
3947 nr_available = h->free_huge_pages_node[nid];
3949 nr_available = h->free_huge_pages;
3950 nr_available -= h->resv_huge_pages;
3954 err = demote_pool_huge_page(h, n_mask);
3962 mutex_unlock(&h->resize_lock);
3973 struct hstate *h = kobj_to_hstate(kobj, NULL);
3974 unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K;
3983 struct hstate *h, *demote_hstate;
3997 h = kobj_to_hstate(kobj, NULL);
3998 if (demote_order >= h->order)
4002 mutex_lock(&h->resize_lock);
4003 h->demote_order = demote_order;
4004 mutex_unlock(&h->resize_lock);
4036 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
4041 int hi = hstate_index(h);
4043 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
4054 if (h->demote_order) {
4058 pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name);
4128 struct hstate *h;
4134 for_each_hstate(h) {
4135 int idx = hstate_index(h);
4140 if (h->demote_order)
4158 struct hstate *h;
4173 for_each_hstate(h) {
4174 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
4179 h->name, node->dev.id);
4222 struct hstate *h;
4229 for_each_hstate(h) {
4230 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
4233 pr_err("HugeTLB: Unable to add hstate %s", h->name);
4329 struct hstate *h;
4337 h = &hstates[hugetlb_max_hstate++];
4338 __mutex_init(&h->resize_lock, "resize mutex", &h->resize_key);
4339 h->order = order;
4340 h->mask = ~(huge_page_size(h) - 1);
4342 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
4343 INIT_LIST_HEAD(&h->hugepage_activelist);
4344 h->next_nid_to_alloc = first_memory_node;
4345 h->next_nid_to_free = first_memory_node;
4346 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
4347 huge_page_size(h)/SZ_1K);
4349 parsed_hstate = h;
4472 struct hstate *h;
4482 h = size_to_hstate(size);
4483 if (h) {
4491 if (!parsed_default_hugepagesz || h != &default_hstate ||
4502 parsed_hstate = h;
4561 static unsigned int allowed_mems_nr(struct hstate *h)
4566 unsigned int *array = h->free_huge_pages_node;
4567 gfp_t gfp_mask = htlb_alloc_mask(h);
4599 struct hstate *h = &default_hstate;
4600 unsigned long tmp = h->max_huge_pages;
4612 ret = __nr_hugepages_store_common(obey_mempolicy, h,
4638 struct hstate *h = &default_hstate;
4645 tmp = h->nr_overcommit_huge_pages;
4647 if (write && hstate_is_gigantic(h))
4657 h->nr_overcommit_huge_pages = tmp;
4706 struct hstate *h;
4712 for_each_hstate(h) {
4713 unsigned long count = h->nr_huge_pages;
4715 total += huge_page_size(h) * count;
4717 if (h == &default_hstate)
4725 h->free_huge_pages,
4726 h->resv_huge_pages,
4727 h->surplus_huge_pages,
4728 huge_page_size(h) / SZ_1K);
4736 struct hstate *h = &default_hstate;
4745 nid, h->nr_huge_pages_node[nid],
4746 nid, h->free_huge_pages_node[nid],
4747 nid, h->surplus_huge_pages_node[nid]);
4752 struct hstate *h;
4757 for_each_hstate(h)
4760 h->nr_huge_pages_node[nid],
4761 h->free_huge_pages_node[nid],
4762 h->surplus_huge_pages_node[nid],
4763 huge_page_size(h) / SZ_1K);
4775 struct hstate *h;
4778 for_each_hstate(h)
4779 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
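Worked example for the accumulation at line 4779 (illustrative numbers): a 2 MB hstate with nr_huge_pages == 512 contributes 512 * 512 == 262144 base pages to nr_total_pages, i.e. 1 GB of huge page memory expressed in 4 KB units.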
4783 static int hugetlb_acct_memory(struct hstate *h, long delta)
4815 if (gather_surplus_pages(h, delta) < 0)
4818 if (delta > allowed_mems_nr(h)) {
4819 return_unused_surplus_pages(h, delta);
4826 return_unused_surplus_pages(h, (unsigned long) -delta);
4873 struct hstate *h = hstate_vma(vma);
4885 start = vma_hugecache_offset(h, vma, vma->vm_start);
4886 end = vma_hugecache_offset(h, vma, vma->vm_end);
4896 hugetlb_acct_memory(h, -gbl_reserve);
5038 struct hstate *h = hstate_vma(src_vma);
5039 unsigned long sz = huge_page_size(h);
5040 unsigned long npages = pages_per_huge_page(h);
5062 last_addr_mask = hugetlb_mask_last_page(h);
5089 dst_ptl = huge_pte_lock(h, dst, dst_pte);
5090 src_ptl = huge_pte_lockptr(h, src, src_pte);
5170 dst_ptl = huge_pte_lock(h, dst, dst_pte);
5171 src_ptl = huge_pte_lockptr(h, src, src_pte);
5175 restore_reserve_on_error(h, dst_vma, addr,
5224 struct hstate *h = hstate_vma(vma);
5229 dst_ptl = huge_pte_lock(h, mm, dst_pte);
5230 src_ptl = huge_pte_lockptr(h, mm, src_pte);
5252 struct hstate *h = hstate_vma(vma);
5254 unsigned long sz = huge_page_size(h);
5272 last_addr_mask = hugetlb_mask_last_page(h);
5321 struct hstate *h = hstate_vma(vma);
5322 unsigned long sz = huge_page_size(h);
5327 BUG_ON(start & ~huge_page_mask(h));
5328 BUG_ON(end & ~huge_page_mask(h));
5337 last_addr_mask = hugetlb_mask_last_page(h);
5346 ptl = huge_pte_lock(h, mm, ptep);
5403 tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
5412 hugetlb_count_sub(pages_per_huge_page(h), mm);
5416 tlb_remove_page_size(tlb, page, huge_page_size(h));
5509 struct hstate *h = hstate_vma(vma);
5518 address = address & huge_page_mask(h);
5551 address + huge_page_size(h), page, 0);
5568 struct hstate *h = hstate_vma(vma);
5573 unsigned long haddr = address & huge_page_mask(h);
5666 idx = vma_hugecache_offset(h, vma, haddr);
5676 ptep = hugetlb_walk(vma, haddr, huge_page_size(h));
5708 haddr + huge_page_size(h));
5716 ptep = hugetlb_walk(vma, haddr, huge_page_size(h));
5726 set_huge_pte_at(mm, haddr, ptep, newpte, huge_page_size(h));
5739 restore_reserve_on_error(h, vma, haddr, new_folio);
5753 static bool hugetlbfs_pagecache_present(struct hstate *h,
5757 pgoff_t idx = vma_hugecache_offset(h, vma, address);
5771 struct hstate *h = hstate_inode(inode);
5790 inode->i_blocks += blocks_per_huge_page(h);
5834 static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm,
5840 ptl = huge_pte_lock(h, mm, ptep);
5853 struct hstate *h = hstate_vma(vma);
5860 unsigned long haddr = address & huge_page_mask(h);
5883 size = i_size_read(mapping->host) >> huge_page_shift(h);
5905 if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) {
5929 if (hugetlb_pte_stable(h, mm, ptep, old_pte))
5935 clear_huge_page(&folio->page, address, pages_per_huge_page(h));
5949 restore_reserve_on_error(h, vma, haddr, folio);
5970 VM_FAULT_SET_HINDEX(hstate_index(h));
5979 if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) {
5996 if (vma_needs_reservation(h, vma, haddr) < 0) {
6001 vma_end_reservation(h, vma, haddr);
6004 ptl = huge_pte_lock(h, mm, ptep);
6022 set_huge_pte_at(mm, haddr, ptep, new_pte, huge_page_size(h));
6024 hugetlb_count_add(pages_per_huge_page(h), mm);
6050 restore_reserve_on_error(h, vma, haddr, folio);
6091 struct hstate *h = hstate_vma(vma);
6094 unsigned long haddr = address & huge_page_mask(h);
6108 idx = vma_hugecache_offset(h, vma, haddr);
6118 ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));
6170 VM_FAULT_SET_HINDEX(hstate_index(h));
6183 if (vma_needs_reservation(h, vma, haddr) < 0) {
6188 vma_end_reservation(h, vma, haddr);
6195 ptl = huge_pte_lock(h, mm, ptep);
6289 struct hstate *h = hstate_vma(dst_vma);
6291 pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
6302 ptl = huge_pte_lock(h, dst_mm, dst_pte);
6312 huge_page_size(h));
6332 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
6352 restore_reserve_on_error(h, dst_vma, dst_addr, folio);
6358 folio = alloc_hugetlb_folio_vma(h, dst_vma, dst_addr);
6372 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
6404 size = i_size_read(mapping->host) >> huge_page_shift(h);
6421 ptl = huge_pte_lock(h, dst_mm, dst_pte);
6463 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, huge_page_size(h));
6465 hugetlb_count_add(pages_per_huge_page(h), dst_mm);
6484 restore_reserve_on_error(h, dst_vma, dst_addr, folio);
6494 struct hstate *h = hstate_vma(vma);
6496 unsigned long haddr = address & huge_page_mask(h);
6503 pte = hugetlb_walk(vma, haddr, huge_page_size(h));
6507 ptl = huge_pte_lock(h, mm, pte);
6525 page = nth_page(page, ((address & ~huge_page_mask(h)) >> PAGE_SHIFT));
6543 *page_mask = (1U << huge_page_order(h)) - 1;
6555 !hugetlbfs_pagecache_present(h, vma, address))
6569 struct hstate *h = hstate_vma(vma);
6570 long pages = 0, psize = huge_page_size(h);
6592 last_addr_mask = hugetlb_mask_last_page(h);
6611 ptl = huge_pte_lock(h, mm, ptep);
6705 return pages > 0 ? (pages << h->order) : pages;
6715 struct hstate *h = hstate_inode(inode);
6771 if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
6772 chg * pages_per_huge_page(h), &h_cg) < 0)
6779 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
6795 if (hugetlb_acct_memory(h, gbl_reserve) < 0)
6810 add = region_add(resv_map, from, to, regions_needed, h, h_cg);
6813 hugetlb_acct_memory(h, -gbl_reserve);
6830 hstate_index(h),
6831 (chg - add) * pages_per_huge_page(h), h_cg);
6835 hugetlb_acct_memory(h, -rsv_adjust);
6852 hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
6853 chg * pages_per_huge_page(h), h_cg);
6872 struct hstate *h = hstate_inode(inode);
6894 inode->i_blocks -= (blocks_per_huge_page(h) * freed);
6905 hugetlb_acct_memory(h, -gbl_reserve);
7174 unsigned long hugetlb_mask_last_page(struct hstate *h)
7176 unsigned long hp_size = huge_page_size(h);
7189 __weak unsigned long hugetlb_mask_last_page(struct hstate *h)
7192 if (huge_page_size(h) == PMD_SIZE)
7263 struct hstate *h = folio_hstate(old_folio);
7293 if (h->surplus_huge_pages_node[old_nid]) {
7294 h->surplus_huge_pages_node[old_nid]--;
7295 h->surplus_huge_pages_node[new_nid]++;
7305 struct hstate *h = hstate_vma(vma);
7306 unsigned long sz = huge_page_size(h);
7333 ptl = huge_pte_lock(h, mm, ptep);
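The fault, copy and migration matches from line 5038 onward repeat one locking idiom: locate the huge PTE for the address, then take its page-table lock with huge_pte_lock(h, mm, ptep) before reading or updating the entry. A hedged sketch of that idiom with the walk and the update elided (sketch_with_huge_ptl is a made-up name):

static void sketch_with_huge_ptl(struct hstate *h, struct mm_struct *mm,
				 pte_t *ptep)
{
	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);	/* lock covering this huge PTE */

	/* ... inspect or modify *ptep while ptl is held ... */

	spin_unlock(ptl);
}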