sparse-vmemmap.c: f41f2ed43ca5258d70d53290d1951a21621f95c8 → ad2fa3717b74994a22519dbe045757135db00dbb
// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset

--- 26 unchanged lines hidden ---

#include <asm/tlbflush.h>

/**
 * struct vmemmap_remap_walk - walk vmemmap page table
 *
 * @remap_pte:		called for each lowest-level entry (PTE).
 * @reuse_page:		the page which is reused for the tail vmemmap pages.
 * @reuse_addr:		the virtual address of the @reuse_page page.
 * @vmemmap_pages:	the list head of the vmemmap pages that can be freed
 *			or is mapped from.
 */
struct vmemmap_remap_walk {
	void (*remap_pte)(pte_t *pte, unsigned long addr,
			  struct vmemmap_remap_walk *walk);
	struct page *reuse_page;
	unsigned long reuse_addr;
	struct list_head *vmemmap_pages;
};
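/*
 * Illustrative sketch (not part of either commit): callers build a walk on
 * the stack and hand it to vmemmap_remap_range(), which invokes @remap_pte
 * on every PTE in the range. The freeing path does roughly:
 *
 *	LIST_HEAD(vmemmap_pages);
 *	struct vmemmap_remap_walk walk = {
 *		.remap_pte	= vmemmap_remap_pte,
 *		.reuse_addr	= reuse,
 *		.vmemmap_pages	= &vmemmap_pages,
 *	};
 *
 *	vmemmap_remap_range(reuse, end, &walk);
 */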

--- 167 unchanged lines hidden ---

	 * So we need to make sure that @start and @reuse meet the above rules.
	 */
	BUG_ON(start - reuse != PAGE_SIZE);

	vmemmap_remap_range(reuse, end, &walk);
	free_vmemmap_page_list(&vmemmap_pages);
}
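/*
 * Worked example of the rule checked above (illustrative addresses, assuming
 * 4 KiB pages): if reuse == 0xffffea0004000000, then @start must be
 * reuse + PAGE_SIZE == 0xffffea0004001000, i.e. the remapped range
 * [@start, end) begins exactly one page past the page that stays mapped and
 * is reused for all of the tail vmemmap pages.
 */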
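/*
 * Undo one PTE of a previous vmemmap_remap_free(): take a fresh page off
 * walk->vmemmap_pages, fill it with a copy of the shared reuse page and
 * point the PTE at it, so the tail struct pages regain their own writable
 * backing.
 */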
static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
				struct vmemmap_remap_walk *walk)
{
	pgprot_t pgprot = PAGE_KERNEL;
	struct page *page;
	void *to;

	BUG_ON(pte_page(*pte) != walk->reuse_page);

	page = list_first_entry(walk->vmemmap_pages, struct page, lru);
	list_del(&page->lru);
	to = page_to_virt(page);
	copy_page(to, (void *)walk->reuse_addr);

	set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));
}

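/*
 * Allocate one order-0 page per PTE in [start, end), on the node that the
 * remapped struct pages belong to, and queue them on @list; on failure,
 * free whatever was already allocated and return -ENOMEM.
 */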
static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
				   gfp_t gfp_mask, struct list_head *list)
{
	unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
	int nid = page_to_nid((struct page *)start);
	struct page *page, *next;

	while (nr_pages--) {
		page = alloc_pages_node(nid, gfp_mask, 0);
		if (!page)
			goto out;
		list_add_tail(&page->lru, list);
	}

	return 0;
out:
	list_for_each_entry_safe(page, next, list, lru)
		__free_pages(page, 0);
	return -ENOMEM;
}

/**
 * vmemmap_remap_alloc - remap the vmemmap virtual address range [@start, end)
 *			 to freshly allocated pages, one page per remapped PTE.
 * @start:	start address of the vmemmap virtual address range that we
 *		want to remap.
 * @end:	end address of the vmemmap virtual address range that we want
 *		to remap.
 * @reuse:	reuse address.
 * @gfp_mask:	GFP flag for allocating vmemmap pages.
 */
int vmemmap_remap_alloc(unsigned long start, unsigned long end,
			unsigned long reuse, gfp_t gfp_mask)
{
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_restore_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};

	/* See the comment in the vmemmap_remap_free(). */
	BUG_ON(start - reuse != PAGE_SIZE);

	might_sleep_if(gfpflags_allow_blocking(gfp_mask));

	if (alloc_vmemmap_page_list(start, end, gfp_mask, &vmemmap_pages))
		return -ENOMEM;

	vmemmap_remap_range(reuse, end, &walk);

	return 0;
}

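/*
 * Illustrative caller (hypothetical: the variable names, sizes and GFP mask
 * below are assumptions modelled on the HugeTLB user of this interface, not
 * part of this file). Re-populating a vmemmap range whose tail pages were
 * previously freed by vmemmap_remap_free() could look like:
 *
 *	unsigned long vmemmap_addr  = head_vmemmap;
 *	unsigned long vmemmap_end   = vmemmap_addr + vmemmap_size;
 *	unsigned long vmemmap_reuse = vmemmap_addr - PAGE_SIZE;
 *
 *	if (vmemmap_remap_alloc(vmemmap_addr, vmemmap_end, vmemmap_reuse,
 *				GFP_KERNEL | __GFP_NORETRY | __GFP_THISNODE))
 *		return -ENOMEM;	(the vmemmap cannot be restored right now)
 *
 * Note that vmemmap_reuse sits exactly one page below vmemmap_addr, which is
 * what the BUG_ON() above enforces.
 */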
/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */

static void * __ref __earlyonly_bootmem_alloc(int node,
					      unsigned long size,

--- 225 unchanged lines hidden ---