// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
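
/*
 * For illustration only (a sketch of the generic helpers selected by
 * CONFIG_SPARSEMEM_VMEMMAP; the authoritative definitions live in
 * include/asm-generic/memory_model.h): once the vmemmap is populated,
 * the pfn <-> page conversions reduce to pointer arithmetic, roughly:
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 */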

#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */

static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_virt_alloc_try_nid_raw(size, align, goal,
					       BOOTMEM_ALLOC_ACCESSIBLE, node);
}

static void *vmemmap_buf;
static void *vmemmap_buf_end;

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up, use that; otherwise fall back to bootmem. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		static bool warned;
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		if (!warned) {
			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
				   "vmemmap alloc failure: order:%u", order);
			warned = true;
		}
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

/* Early boot allocations from the buffer must all use the same size. */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
{
	void *ptr;

	if (!vmemmap_buf)
		return vmemmap_alloc_block(size, node);

	/* Take the allocation from the buffer. */
	ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size);
	if (ptr + size > vmemmap_buf_end)
		return vmemmap_alloc_block(size, node);

	vmemmap_buf = ptr + size;

	return ptr;
}

static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}

/**
 * altmap_alloc_block_buf - allocate pages from the device page map
 * @altmap: device page map
 * @size: size (in bytes) of the allocation
 *
 * Allocations are aligned to the size of the request.
 */
void * __meminit altmap_alloc_block_buf(unsigned long size,
		struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns, nr_align;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%lu)\n",
				__func__, size);
		return NULL;
	}

	pfn = vmem_altmap_next_pfn(altmap);
	nr_pfns = size >> PAGE_SHIFT;
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;
	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return NULL;

	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	pfn += nr_align;

	pr_debug("%s: pfn: %#lx alloc: %lu align: %lu nr: %#lx\n",
			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
	return __va(__pfn_to_phys(pfn));
}

void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

int __meminit vmemmap_populate_basepages(unsigned long start,
					 unsigned long end, int node)
{
	unsigned long addr = start;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;
		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}
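
/*
 * For illustration only: an architecture that does not map the memmap
 * with huge pages can implement its vmemmap_populate() hook as a thin
 * wrapper around the helper above. A minimal sketch (not part of this
 * file; it ignores @altmap) would be:
 *
 *	int __meminit vmemmap_populate(unsigned long start, unsigned long end,
 *				       int node, struct vmem_altmap *altmap)
 *	{
 *		return vmemmap_populate_basepages(start, end, node);
 *	}
 */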

struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid,
		struct vmem_altmap *altmap)
{
	unsigned long start;
	unsigned long end;
	struct page *map;

	map = pfn_to_page(pnum * PAGES_PER_SECTION);
	start = (unsigned long)map;
	end = (unsigned long)(map + PAGES_PER_SECTION);

	if (vmemmap_populate(start, end, nid, altmap))
		return NULL;

	return map;
}

void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
	void *vmemmap_buf_start;

	size = ALIGN(size, PMD_SIZE);
	vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
						      PMD_SIZE, __pa(MAX_DMA_ADDRESS));

	if (vmemmap_buf_start) {
		vmemmap_buf = vmemmap_buf_start;
		vmemmap_buf_end = vmemmap_buf_start + size * map_count;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;

		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid, NULL);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		pr_err("%s: sparsemem memory map backing failed, some memory will not be available\n",
		       __func__);
		ms->section_mem_map = 0;
	}

	if (vmemmap_buf_start) {
		/* Free the unused remainder of the buffer. */
		memblock_free_early(__pa(vmemmap_buf),
				    vmemmap_buf_end - vmemmap_buf);
		vmemmap_buf = NULL;
		vmemmap_buf_end = NULL;
	}
}
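
/*
 * Sizing note (for illustration, assuming typical x86_64 values): with
 * 64-byte struct pages and 32768 pages per 128 MiB section, each
 * section's memmap is 32768 * 64 = 2 MiB, i.e. exactly PMD_SIZE.
 * Sizing and aligning the per-node buffer above in PMD_SIZE units thus
 * lets vmemmap_populate() back the memmap with huge page mappings on
 * architectures that support them.
 */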