// SPDX-License-Identifier: GPL-2.0
/*
 * Common EFI memory map functions.
 */

#define pr_fmt(fmt) "efi: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <asm/early_ioremap.h>
#include <linux/memblock.h>
#include <linux/slab.h>

static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
{
        return memblock_phys_alloc(size, SMP_CACHE_BYTES);
}

static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
{
        unsigned int order = get_order(size);
        struct page *p = alloc_pages(GFP_KERNEL, order);

        if (!p)
                return 0;

        return PFN_PHYS(page_to_pfn(p));
}

/**
 * efi_memmap_alloc - Allocate memory for the EFI memory map
 * @num_entries: Number of entries in the allocated map.
 *
 * Depending on whether mm_init() has already been invoked or not,
 * either memblock or "normal" page allocation is used.
 *
 * Returns the physical address of the allocated memory map on
 * success, zero on failure.
 */
phys_addr_t __init efi_memmap_alloc(unsigned int num_entries)
{
        unsigned long size = num_entries * efi.memmap.desc_size;

        if (slab_is_available())
                return __efi_memmap_alloc_late(size);

        return __efi_memmap_alloc_early(size);
}

/**
 * __efi_memmap_init - Common code for mapping the EFI memory map
 * @data: EFI memory map data
 * @late: Use early or late mapping function?
 *
 * This function takes care of figuring out which function to use to
 * map the EFI memory map in efi.memmap based on how far into the boot
 * we are.
 *
 * During bootup @late should be %false since we only have access to
 * the early_memremap*() functions as the vmalloc space isn't set up.
 * Once the kernel is fully booted we can fall back to the more robust
 * memremap*() API.
 *
 * Returns zero on success, a negative error code on failure.
 */
static int __init
__efi_memmap_init(struct efi_memory_map_data *data, bool late)
{
        struct efi_memory_map map;
        phys_addr_t phys_map;

        if (efi_enabled(EFI_PARAVIRT))
                return 0;

        phys_map = data->phys_map;

        if (late)
                map.map = memremap(phys_map, data->size, MEMREMAP_WB);
        else
                map.map = early_memremap(phys_map, data->size);

        if (!map.map) {
                pr_err("Could not map the memory map!\n");
                return -ENOMEM;
        }

        map.phys_map = data->phys_map;
        map.nr_map = data->size / data->desc_size;
        map.map_end = map.map + data->size;

        map.desc_version = data->desc_version;
        map.desc_size = data->desc_size;
        map.late = late;

        set_bit(EFI_MEMMAP, &efi.flags);

        efi.memmap = map;

        return 0;
}
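/*
 * Illustrative sketch only, not part of this file: once __efi_memmap_init()
 * has populated efi.memmap, a caller can walk the descriptors using the
 * map, map_end and desc_size fields initialised above. The helper name is
 * hypothetical; the loop mirrors the one used by efi_memmap_insert() below.
 *
 *	static u64 example_count_pages(void)
 *	{
 *		void *p;
 *		u64 pages = 0;
 *
 *		for (p = efi.memmap.map; p < efi.memmap.map_end;
 *		     p += efi.memmap.desc_size) {
 *			efi_memory_desc_t *md = p;
 *
 *			pages += md->num_pages;
 *		}
 *
 *		return pages;
 *	}
 */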
/**
 * efi_memmap_init_early - Map the EFI memory map data structure
 * @data: EFI memory map data
 *
 * Use early_memremap() to map the passed in EFI memory map and assign
 * it to efi.memmap.
 */
int __init efi_memmap_init_early(struct efi_memory_map_data *data)
{
        /* Cannot go backwards */
        WARN_ON(efi.memmap.late);

        return __efi_memmap_init(data, false);
}

void __init efi_memmap_unmap(void)
{
        if (!efi_enabled(EFI_MEMMAP))
                return;

        if (!efi.memmap.late) {
                unsigned long size;

                size = efi.memmap.desc_size * efi.memmap.nr_map;
                early_memunmap(efi.memmap.map, size);
        } else {
                memunmap(efi.memmap.map);
        }

        efi.memmap.map = NULL;
        clear_bit(EFI_MEMMAP, &efi.flags);
}

/**
 * efi_memmap_init_late - Map efi.memmap with memremap()
 * @addr: Physical address of the new EFI memory map
 * @size: Size in bytes of the new EFI memory map
 *
 * Set up a mapping of the EFI memory map using memremap(). This
 * function should only be called once the vmalloc space has been
 * set up and is therefore not suitable for calling during early EFI
 * initialisation, e.g. in efi_init(). Additionally, it expects
 * efi_memmap_init_early() to have already been called.
 *
 * The reason there are two EFI memmap initialisation functions
 * (efi_memmap_init_early() and this late version) is because the
 * early EFI memmap should be explicitly unmapped once EFI
 * initialisation is complete as the fixmap space used to map the EFI
 * memmap (via early_memremap()) is a scarce resource.
 *
 * This late mapping is intended to persist for the duration of
 * runtime so that things like efi_mem_desc_lookup() and
 * efi_mem_attributes() always work.
 *
 * Returns zero on success, a negative error code on failure.
 */
int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
{
        struct efi_memory_map_data data = {
                .phys_map = addr,
                .size = size,
        };

        /* Did we forget to unmap the early EFI memmap? */
        WARN_ON(efi.memmap.map);

        /* Were we already called? */
        WARN_ON(efi.memmap.late);

        /*
         * It makes no sense to allow callers to register different
         * values for the following fields. Copy them out of the
         * existing early EFI memmap.
         */
        data.desc_version = efi.memmap.desc_version;
        data.desc_size = efi.memmap.desc_size;

        return __efi_memmap_init(&data, true);
}

/**
 * efi_memmap_install - Install a new EFI memory map in efi.memmap
 * @addr: Physical address of the memory map
 * @nr_map: Number of entries in the memory map
 *
 * Unlike efi_memmap_init_*(), this function does not allow the caller
 * to switch from early to late mappings. It simply uses the existing
 * mapping function and installs the new memmap.
 *
 * Returns zero on success, a negative error code on failure.
 */
int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map)
{
        struct efi_memory_map_data data;

        efi_memmap_unmap();

        data.phys_map = addr;
        data.size = efi.memmap.desc_size * nr_map;
        data.desc_version = efi.memmap.desc_version;
        data.desc_size = efi.memmap.desc_size;

        return __efi_memmap_init(&data, efi.memmap.late);
}
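/*
 * Illustrative sketch only, not from this file: the early-to-late handover
 * described in the efi_memmap_init_late() kernel-doc above. The early
 * mapping must already be torn down (hence the WARN_ON()s); the physical
 * address and size are still available in efi.memmap after
 * efi_memmap_unmap(), which only clears the virtual mapping.
 *
 *	phys_addr_t phys = efi.memmap.phys_map;
 *	unsigned long size = efi.memmap.desc_size * efi.memmap.nr_map;
 *
 *	efi_memmap_unmap();
 *
 *	if (efi_memmap_init_late(phys, size))
 *		pr_err("Failed to remap the EFI memory map\n");
 */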
/**
 * efi_memmap_split_count - Count number of additional EFI memmap entries
 * @md: EFI memory descriptor to split
 * @range: Address range (start, end) to split around
 *
 * Returns the number of additional EFI memmap entries required to
 * accommodate @range.
 */
int __init efi_memmap_split_count(efi_memory_desc_t *md, struct range *range)
{
        u64 m_start, m_end;
        u64 start, end;
        int count = 0;

        start = md->phys_addr;
        end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;

        /* modifying range */
        m_start = range->start;
        m_end = range->end;

        if (m_start <= start) {
                /* split into 2 parts */
                if (start < m_end && m_end < end)
                        count++;
        }

        if (start < m_start && m_start < end) {
                /* split into 3 parts */
                if (m_end < end)
                        count += 2;
                /* split into 2 parts */
                if (end <= m_end)
                        count++;
        }

        return count;
}

/**
 * efi_memmap_insert - Insert a memory region in an EFI memmap
 * @old_memmap: The existing EFI memory map structure
 * @buf: Address of buffer to store new map
 * @mem: Memory map entry to insert
 *
 * It is suggested that you call efi_memmap_split_count() first
 * to see how large @buf needs to be.
 */
void __init efi_memmap_insert(struct efi_memory_map *old_memmap, void *buf,
                              struct efi_mem_range *mem)
{
        u64 m_start, m_end, m_attr;
        efi_memory_desc_t *md;
        u64 start, end;
        void *old, *new;

        /* modifying range */
        m_start = mem->range.start;
        m_end = mem->range.end;
        m_attr = mem->attribute;

        /*
         * The EFI memory map deals with regions in EFI_PAGE_SIZE
         * units. Ensure that the region described by 'mem' is aligned
         * correctly.
         */
        if (!IS_ALIGNED(m_start, EFI_PAGE_SIZE) ||
            !IS_ALIGNED(m_end + 1, EFI_PAGE_SIZE)) {
                WARN_ON(1);
                return;
        }

        for (old = old_memmap->map, new = buf;
             old < old_memmap->map_end;
             old += old_memmap->desc_size, new += old_memmap->desc_size) {

                /* copy original EFI memory descriptor */
                memcpy(new, old, old_memmap->desc_size);
                md = new;
                start = md->phys_addr;
                end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;

                if (m_start <= start && end <= m_end)
                        md->attribute |= m_attr;

                if (m_start <= start &&
                    (start < m_end && m_end < end)) {
                        /* first part */
                        md->attribute |= m_attr;
                        md->num_pages = (m_end - md->phys_addr + 1) >>
                                EFI_PAGE_SHIFT;
                        /* latter part */
                        new += old_memmap->desc_size;
                        memcpy(new, old, old_memmap->desc_size);
                        md = new;
                        md->phys_addr = m_end + 1;
                        md->num_pages = (end - md->phys_addr + 1) >>
                                EFI_PAGE_SHIFT;
                }

                if ((start < m_start && m_start < end) && m_end < end) {
                        /* first part */
                        md->num_pages = (m_start - md->phys_addr) >>
                                EFI_PAGE_SHIFT;
                        /* middle part */
                        new += old_memmap->desc_size;
                        memcpy(new, old, old_memmap->desc_size);
                        md = new;
                        md->attribute |= m_attr;
                        md->phys_addr = m_start;
                        md->num_pages = (m_end - m_start + 1) >>
                                EFI_PAGE_SHIFT;
                        /* last part */
                        new += old_memmap->desc_size;
                        memcpy(new, old, old_memmap->desc_size);
                        md = new;
                        md->phys_addr = m_end + 1;
                        md->num_pages = (end - m_end) >>
                                EFI_PAGE_SHIFT;
                }

                if ((start < m_start && m_start < end) &&
                    (end <= m_end)) {
                        /* first part */
                        md->num_pages = (m_start - md->phys_addr) >>
                                EFI_PAGE_SHIFT;
                        /* latter part */
                        new += old_memmap->desc_size;
                        memcpy(new, old, old_memmap->desc_size);
                        md = new;
                        md->phys_addr = m_start;
                        md->num_pages = (end - md->phys_addr + 1) >>
                                EFI_PAGE_SHIFT;
                        md->attribute |= m_attr;
                }
        }
}
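/*
 * Illustrative sketch only, not from this file: applying an attribute to a
 * range by building and installing a new map, following the suggestion in
 * the efi_memmap_insert() kernel-doc above. The variable names are
 * hypothetical; for_each_efi_memory_desc() is the efi.memmap iterator from
 * <linux/efi.h>, and how the new buffer is mapped before efi_memmap_insert()
 * writes into it depends on how far into boot the caller is.
 *
 *	struct efi_mem_range mr = {
 *		.range.start = r_start,
 *		.range.end   = r_end,
 *		.attribute   = attr,
 *	};
 *	int num_entries = efi.memmap.nr_map;
 *	efi_memory_desc_t *md;
 *	phys_addr_t new_phys;
 *	void *new_buf;
 *
 *	for_each_efi_memory_desc(md)
 *		num_entries += efi_memmap_split_count(md, &mr.range);
 *
 *	new_phys = efi_memmap_alloc(num_entries);
 *	if (!new_phys)
 *		return;
 *
 *	new_buf = memremap(new_phys, num_entries * efi.memmap.desc_size,
 *			   MEMREMAP_WB);
 *	efi_memmap_insert(&efi.memmap, new_buf, &mr);
 *	memunmap(new_buf);
 *
 *	efi_memmap_install(new_phys, num_entries);
 */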