--- hugetlb.c (b65d4adbc0f0d4619f61ee9d8126bc5005b78802)
+++ hugetlb.c (ad2fa3717b74994a22519dbe045757135db00dbb)
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Generic hugetlb support.
  * (C) Nadia Yvette Chambers, April 2004
  */
 #include <linux/list.h>
 #include <linux/init.h>
 #include <linux/mm.h>

--- 1362 unchanged lines hidden ---

         set_page_refcounted(page);
         set_compound_page_dtor(page, NULL_COMPOUND_DTOR);

         h->nr_huge_pages--;
         h->nr_huge_pages_node[nid]--;
 }

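+/*
+ * Roughly the inverse of remove_hugetlb_page(): reinitialize the page as
+ * a hugetlb page, restore the pool and (optionally) surplus counters, and
+ * put it back on the free list. Caller must hold hugetlb_lock.
+ */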
+static void add_hugetlb_page(struct hstate *h, struct page *page,
+                             bool adjust_surplus)
+{
+        int zeroed;
+        int nid = page_to_nid(page);
+
+        VM_BUG_ON_PAGE(!HPageVmemmapOptimized(page), page);
+
+        lockdep_assert_held(&hugetlb_lock);
+
+        INIT_LIST_HEAD(&page->lru);
+        h->nr_huge_pages++;
+        h->nr_huge_pages_node[nid]++;
+
+        if (adjust_surplus) {
+                h->surplus_huge_pages++;
+                h->surplus_huge_pages_node[nid]++;
+        }
+
+        set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
+        set_page_private(page, 0);
+        SetHPageVmemmapOptimized(page);
+
+        /*
+         * This page is now managed by the hugetlb allocator and has
+         * no users -- drop the last reference.
+         */
+        zeroed = put_page_testzero(page);
+        VM_BUG_ON_PAGE(!zeroed, page);
+        arch_clear_hugepage_flags(page);
+        enqueue_huge_page(h, page);
+}
+
 static void __update_and_free_page(struct hstate *h, struct page *page)
 {
         int i;
         struct page *subpage = page;

         if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
                 return;

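+        /*
+         * If this page had its vmemmap optimized (freed), the struct
+         * pages must be reallocated before the page can be released to
+         * the buddy allocator; that allocation can fail under memory
+         * pressure.
+         */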
+        if (alloc_huge_page_vmemmap(h, page)) {
+                spin_lock_irq(&hugetlb_lock);
+                /*
+                 * If we cannot allocate vmemmap pages, refuse to free the
+                 * page; put it back on the hugetlb free list and treat it
+                 * as a surplus page.
+                 */
+                add_hugetlb_page(h, page, true);
+                spin_unlock_irq(&hugetlb_lock);
+                return;
+        }
+
         for (i = 0; i < pages_per_huge_page(h);
              i++, subpage = mem_map_next(subpage, page, i)) {
                 subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
                                 1 << PG_referenced | 1 << PG_dirty |
                                 1 << PG_active | 1 << PG_private |
                                 1 << PG_writeback);
         }
         if (hstate_is_gigantic(h)) {

--- 50 unchanged lines hidden ---

 {
         if (free_vmemmap_pages_per_hpage(h))
                 flush_work(&free_hpage_work);
 }

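+/*
+ * "atomic" indicates that the caller cannot sleep. Freeing is immediate
+ * unless the page is vmemmap-optimized and the caller is atomic, in which
+ * case the work is deferred to a workqueue so that the vmemmap pages can
+ * be allocated with GFP_KERNEL instead of GFP_ATOMIC.
+ */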
 static void update_and_free_page(struct hstate *h, struct page *page,
                                  bool atomic)
 {
-        if (!free_vmemmap_pages_per_hpage(h) || !atomic) {
+        if (!HPageVmemmapOptimized(page) || !atomic) {
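+                /*
+                 * The page either has its vmemmap intact (nothing to
+                 * reallocate) or the caller may sleep, so it can be
+                 * freed synchronously.
+                 */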
                 __update_and_free_page(h, page);
                 return;
         }

         /*
          * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
          *
          * Only call schedule_work() if hpage_freelist is previously

--- 339 unchanged lines hidden ---

         return page;
 }

 /*
  * Dissolve a given free hugepage into free buddy pages. This function does
  * nothing for in-use hugepages and non-hugepages.
  * This function returns values like below:
  *
+ * -ENOMEM: failed to allocate vmemmap pages needed to free the hugepage
+ *          when the system is under memory pressure and the freeing of
+ *          unused vmemmap pages associated with each hugetlb page is
+ *          enabled.
  * -EBUSY:  failed to dissolve free hugepages or the hugepage is in-use
  *          (allocated or reserved).
  *       0: successfully dissolved free hugepages or the page is not a
  *          hugepage (considered as already dissolved)
  */
 int dissolve_free_huge_page(struct page *page)
 {
         int rc = -EBUSY;

 retry:
         /* Not to disrupt the normal path by vainly holding hugetlb_lock */
         if (!PageHuge(page))

--- 25 unchanged lines hidden ---

                  * to successfully dissolve the page if we do a
                  * retry, because the race window is quite small.
                  * If we seize this opportunity, it is an optimization
                  * for increasing the success rate of dissolving the page.
                  */
                 goto retry;
         }

-                /*
-                 * Move PageHWPoison flag from head page to the raw error page,
-                 * which makes any subpages rather than the error page reusable.
-                 */
-                if (PageHWPoison(head) && page != head) {
-                        SetPageHWPoison(page);
-                        ClearPageHWPoison(head);
-                }
                 remove_hugetlb_page(h, head, false);
                 h->max_huge_pages--;
                 spin_unlock_irq(&hugetlb_lock);
-                update_and_free_page(h, head, false);
-                return 0;
+
+                /*
+                 * Normally update_and_free_page will allocate the required
+                 * vmemmap before freeing the page. update_and_free_page will
+                 * fail to free the page if it cannot allocate the required
+                 * vmemmap. We need to adjust max_huge_pages if the page is
+                 * not freed. Attempt to allocate vmemmap here so that we can
+                 * take appropriate action on failure.
+                 */
+                rc = alloc_huge_page_vmemmap(h, head);
+                if (!rc) {
+                        /*
+                         * Move PageHWPoison flag from head page to the raw
+                         * error page, which makes any subpages rather than
+                         * the error page reusable.
+                         */
+                        if (PageHWPoison(head) && page != head) {
+                                SetPageHWPoison(page);
+                                ClearPageHWPoison(head);
+                        }
+                        update_and_free_page(h, head, false);
+                } else {
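+                        /*
+                         * Vmemmap allocation failed: put the page back on
+                         * the hugetlb free list and restore max_huge_pages,
+                         * leaving the pool size unchanged.
+                         */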
+                        spin_lock_irq(&hugetlb_lock);
+                        add_hugetlb_page(h, head, false);
+                        h->max_huge_pages++;
+                        spin_unlock_irq(&hugetlb_lock);
+                }
+
+                return rc;
         }
 out:
         spin_unlock_irq(&hugetlb_lock);
         return rc;
 }

 /*
  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to

--- 4310 unchanged lines hidden ---