// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */

static void * __ref __earlyonly_bootmem_alloc(int node,
                                unsigned long size,
                                unsigned long align,
                                unsigned long goal)
{
        return memblock_alloc_try_nid_raw(size, align, goal,
                                          MEMBLOCK_ALLOC_ACCESSIBLE, node);
}

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
        /* If the main allocator is up use that, fallback to bootmem. */
        if (slab_is_available()) {
                gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
                int order = get_order(size);
                static bool warned;
                struct page *page;

                page = alloc_pages_node(node, gfp_mask, order);
                if (page)
                        return page_address(page);

                if (!warned) {
                        warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
                                   "vmemmap alloc failure: order:%u", order);
                        warned = true;
                }
                return NULL;
        } else
                return __earlyonly_bootmem_alloc(node, size, size,
                                __pa(MAX_DMA_ADDRESS));
}

/* need to make sure size is all the same during early stage */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
{
        void *ptr = sparse_buffer_alloc(size);

        if (!ptr)
                ptr = vmemmap_alloc_block(size, node);
        return ptr;
}

static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
        return altmap->base_pfn + altmap->reserve + altmap->alloc
                + altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
        unsigned long allocated = altmap->alloc + altmap->align;

        if (altmap->free > allocated)
                return altmap->free - allocated;
        return 0;
}
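/*
 * Worked example of the accounting above, with made-up values rather
 * than ones taken from any real device page map: with base_pfn = 0x10000,
 * reserve = 2, alloc = 16 and align = 6, vmem_altmap_next_pfn() returns
 * 0x10000 + 2 + 16 + 6 = 0x10018, and with free = 64,
 * vmem_altmap_nr_free() reports 64 - (16 + 6) = 42 pfns still available
 * for backing the memmap.
 */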
/**
 * altmap_alloc_block_buf - allocate pages from the device page map
 * @altmap: device page map
 * @size: size (in bytes) of the allocation
 *
 * Allocations are aligned to the size of the request.
 */
void * __meminit altmap_alloc_block_buf(unsigned long size,
                struct vmem_altmap *altmap)
{
        unsigned long pfn, nr_pfns, nr_align;

        if (size & ~PAGE_MASK) {
                pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
                                __func__, size);
                return NULL;
        }

        pfn = vmem_altmap_next_pfn(altmap);
        nr_pfns = size >> PAGE_SHIFT;
        nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
        nr_align = ALIGN(pfn, nr_align) - pfn;
        if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
                return NULL;

        altmap->alloc += nr_pfns;
        altmap->align += nr_align;
        pfn += nr_align;

        pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
                        __func__, pfn, altmap->alloc, altmap->align, nr_pfns);
        return __va(__pfn_to_phys(pfn));
}

void __meminit vmemmap_verify(pte_t *pte, int node,
                                unsigned long start, unsigned long end)
{
        unsigned long pfn = pte_pfn(*pte);
        int actual_node = early_pfn_to_nid(pfn);

        if (node_distance(actual_node, node) > LOCAL_DISTANCE)
                pr_warn("[%lx-%lx] potential offnode page_structs\n",
                        start, end - 1);
}

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
        pte_t *pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte)) {
                pte_t entry;
                void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
                if (!p)
                        return NULL;
                entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
                set_pte_at(&init_mm, addr, pte, entry);
        }
        return pte;
}

static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
        void *p = vmemmap_alloc_block(size, node);

        if (!p)
                return NULL;
        memset(p, 0, size);

        return p;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
        pmd_t *pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd)) {
                void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
                if (!p)
                        return NULL;
                pmd_populate_kernel(&init_mm, pmd, p);
        }
        return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
        pud_t *pud = pud_offset(p4d, addr);
        if (pud_none(*pud)) {
                void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
                if (!p)
                        return NULL;
                pud_populate(&init_mm, pud, p);
        }
        return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
        p4d_t *p4d = p4d_offset(pgd, addr);
        if (p4d_none(*p4d)) {
                void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
                if (!p)
                        return NULL;
                p4d_populate(&init_mm, p4d, p);
        }
        return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
        pgd_t *pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd)) {
                void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
                if (!p)
                        return NULL;
                pgd_populate(&init_mm, pgd, p);
        }
        return pgd;
}
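/*
 * The *_populate() helpers above all follow the same pattern: look up
 * the table entry covering @addr at one level, and if it is still
 * empty, allocate a zeroed page for the next level down and hook it
 * up; a non-NULL return always means the entry is populated.
 * vmemmap_populate_basepages() below simply chains them, so for every
 * mapped address the walk is
 *
 *      pgd -> p4d -> pud -> pmd -> pte -> vmemmap page (PAGE_KERNEL)
 *
 * with the intermediate tables shared between neighbouring addresses.
 */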
int __meminit vmemmap_populate_basepages(unsigned long start,
                                         unsigned long end, int node)
{
        unsigned long addr = start;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        for (; addr < end; addr += PAGE_SIZE) {
                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;
                p4d = vmemmap_p4d_populate(pgd, addr, node);
                if (!p4d)
                        return -ENOMEM;
                pud = vmemmap_pud_populate(p4d, addr, node);
                if (!pud)
                        return -ENOMEM;
                pmd = vmemmap_pmd_populate(pud, addr, node);
                if (!pmd)
                        return -ENOMEM;
                pte = vmemmap_pte_populate(pmd, addr, node);
                if (!pte)
                        return -ENOMEM;
                vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
        }

        return 0;
}

struct page * __meminit __populate_section_memmap(unsigned long pfn,
                unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
        unsigned long start;
        unsigned long end;

        /*
         * The minimum granularity of memmap extensions is
         * PAGES_PER_SUBSECTION as allocations are tracked in the
         * 'subsection_map' bitmap of the section.
         */
        end = ALIGN(pfn + nr_pages, PAGES_PER_SUBSECTION);
        pfn &= PAGE_SUBSECTION_MASK;
        nr_pages = end - pfn;

        start = (unsigned long) pfn_to_page(pfn);
        end = start + nr_pages * sizeof(struct page);

        if (vmemmap_populate(start, end, nid, altmap))
                return NULL;

        return pfn_to_page(pfn);
}
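/*
 * A minimal sketch of the arch hook that __populate_section_memmap()
 * relies on, assuming an architecture that maps the memmap with base
 * pages only and ignores the altmap. This is illustrative, not code
 * from this file; real implementations often prefer huge mappings and
 * may consume the altmap:
 *
 *      int __meminit vmemmap_populate(unsigned long start, unsigned long end,
 *                                     int node, struct vmem_altmap *altmap)
 *      {
 *              return vmemmap_populate_basepages(start, end, node);
 *      }
 */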