--- mempolicy.c	(ac79f78dab892fcdc11fda8af5cc5e80d09dca8a)
+++ mempolicy.c	(19deb7695e072deaff025e03de40c61b525bd57e)
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Simple NUMA memory policy for the Linux kernel.
  *
  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
  *
  * NUMA policy allows the user to give hints in which node(s) memory should

--- 1166 unchanged lines hidden ---

 	}
 
 	if (PageHuge(page)) {
 		return alloc_huge_page_vma(page_hstate(compound_head(page)),
 				vma, address);
 	} else if (PageTransHuge(page)) {
 		struct page *thp;
 
-		thp = alloc_pages_vma(GFP_TRANSHUGE, HPAGE_PMD_ORDER, vma,
-				address, numa_node_id());
+		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
+					 HPAGE_PMD_ORDER);
 		if (!thp)
 			return NULL;
 		prep_transhuge_page(thp);
 		return thp;
 	}
 	/*
 	 * if !vma, alloc_page_vma() will use task or system default policy
 	 */

--- 885 unchanged lines hidden ---
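The hunk above stops open-coding the THP allocation in new_page(): instead of passing HPAGE_PMD_ORDER and numa_node_id() straight to alloc_pages_vma(), it now goes through the alloc_hugepage_vma() helper, which is what threads the new hugepage flag (added in the hunks below) back into alloc_pages_vma(). The helper itself is outside this diff; around these commits it is a thin macro in include/linux/gfp.h, roughly as sketched here (alloc_page_vma() shown for contrast):

/*
 * Sketch of the out-of-diff helpers as defined in include/linux/gfp.h
 * at the time; shown only to connect the two hunks, not part of this diff.
 */
#define alloc_page_vma(gfp_mask, vma, addr)			\
	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
#define alloc_hugepage_vma(gfp_mask, vma, addr, order)		\
	alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)

So base-page callers keep hugepage == false and only the THP paths opt in to the preferred-node behaviour introduced below.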

  *	%GFP_HIGHMEM highmem/user allocations,
  *	%GFP_FS	    allocation should not call back into a file system.
  *	%GFP_ATOMIC don't sleep.
  *
  *	@order: Order of the GFP allocation.
  *	@vma:	Pointer to VMA or NULL if not available.
  *	@addr:	Virtual Address of the allocation. Must be inside the VMA.
  *	@node:	Which node to prefer for allocation (modulo policy).
+ *	@hugepage: for hugepages try only the preferred node if possible
  *
  *	This function allocates a page from the kernel page pool and applies
  *	a NUMA policy associated with the VMA or the current process.
  *	When VMA is not NULL caller must hold down_read on the mmap_sem of the
  *	mm_struct of the VMA to prevent it from going away. Should be used for
  *	all allocations for pages that will be mapped into user space. Returns
  *	NULL when no page can be allocated.
  */
 struct page *
 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
-		unsigned long addr, int node)
+		unsigned long addr, int node, bool hugepage)
 {
 	struct mempolicy *pol;
 	struct page *page;
 	int preferred_nid;
 	nodemask_t *nmask;
 
 	pol = get_vma_policy(vma, addr);
 
 	if (pol->mode == MPOL_INTERLEAVE) {
 		unsigned nid;
 
 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
 		mpol_cond_put(pol);
 		page = alloc_page_interleave(gfp, order, nid);
 		goto out;
 	}
 
+	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
+		int hpage_node = node;
+
+		/*
+		 * For hugepage allocation and non-interleave policy which
+		 * allows the current node (or other explicitly preferred
+		 * node) we only try to allocate from the current/preferred
+		 * node and don't fall back to other nodes, as the cost of
+		 * remote accesses would likely offset THP benefits.
+		 *
+		 * If the policy is interleave, or does not allow the current
+		 * node in its nodemask, we allocate the standard way.
+		 */
+		if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
+			hpage_node = pol->v.preferred_node;
+
+		nmask = policy_nodemask(gfp, pol);
+		if (!nmask || node_isset(hpage_node, *nmask)) {
+			mpol_cond_put(pol);
+			page = __alloc_pages_node(hpage_node,
+						gfp | __GFP_THISNODE, order);
+			goto out;
+		}
+	}
+
 	nmask = policy_nodemask(gfp, pol);
 	preferred_nid = policy_node(gfp, pol, node);
 	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
 	mpol_cond_put(pol);
 out:
 	return page;
 }
 EXPORT_SYMBOL(alloc_pages_vma);

--- 812 unchanged lines hidden ---
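Net effect of the new branch in alloc_pages_vma(): when the caller passes hugepage == true and the policy is non-interleave and allows the preferred node, the allocation is issued with __GFP_THISNODE pinned to that node, so it either succeeds locally or fails without spilling to remote nodes (remote accesses would likely offset the THP benefit, per the comment in the hunk). All other cases fall through to the ordinary policy-driven path. A minimal caller-side sketch of the two values of the new bool argument; demo_alloc() and the gfp choices are illustrative, not from this diff:

/* Illustrative only: contrasts hugepage == true vs. false. */
static struct page *demo_alloc(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;

	/*
	 * THP attempt, hugepage == true: under a non-interleave policy
	 * that allows the preferred node this becomes a __GFP_THISNODE
	 * allocation, which never falls back to a remote node.
	 */
	page = alloc_pages_vma(GFP_TRANSHUGE, HPAGE_PMD_ORDER, vma, addr,
			       numa_node_id(), true);
	if (page) {
		prep_transhuge_page(page);
		return page;
	}

	/*
	 * Base-page fallback, hugepage == false: ordinary policy-driven
	 * path, may be satisfied from any node the policy allows.
	 */
	return alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
			       numa_node_id(), false);
}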