// SPDX-License-Identifier: GPL-2.0
/*
 * Common EFI memory map functions.
 */

#define pr_fmt(fmt) "efi: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <asm/early_ioremap.h>
#include <asm/efi.h>
#include <linux/memblock.h>
#include <linux/slab.h>

static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
{
	return memblock_phys_alloc(size, SMP_CACHE_BYTES);
}

static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
{
	unsigned int order = get_order(size);
	struct page *p = alloc_pages(GFP_KERNEL, order);

	if (!p)
		return 0;

	return PFN_PHYS(page_to_pfn(p));
}

void __init __efi_memmap_free(u64 phys, unsigned long size, unsigned long flags)
{
	if (flags & EFI_MEMMAP_MEMBLOCK) {
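		/*
		 * Once the page allocator is up, a memblock allocation can
		 * only be released via memblock_free_late(), which hands the
		 * pages to the buddy allocator; earlier in boot it goes
		 * straight back to memblock.
		 */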
		if (slab_is_available())
			memblock_free_late(phys, size);
		else
			memblock_phys_free(phys, size);
	} else if (flags & EFI_MEMMAP_SLAB) {
		struct page *p = pfn_to_page(PHYS_PFN(phys));
		unsigned int order = get_order(size);

		free_pages((unsigned long) page_address(p), order);
	}
}

/**
 * efi_memmap_alloc - Allocate memory for the EFI memory map
 * @num_entries: Number of entries in the allocated map.
 * @data: efi memmap installation parameters
 *
 * Depending on whether mm_init() has already been invoked or not,
 * either memblock or "normal" page allocation is used.
 *
 * Returns zero on success, a negative error code on failure.
 */
int __init efi_memmap_alloc(unsigned int num_entries,
		struct efi_memory_map_data *data)
{
	/* Expect the allocation parameters to be zero-initialized */
	WARN_ON(data->phys_map || data->size);

	data->size = num_entries * efi.memmap.desc_size;
	data->desc_version = efi.memmap.desc_version;
	data->desc_size = efi.memmap.desc_size;
	data->flags &= ~(EFI_MEMMAP_SLAB | EFI_MEMMAP_MEMBLOCK);
	data->flags |= efi.memmap.flags & EFI_MEMMAP_LATE;

	if (slab_is_available()) {
		data->flags |= EFI_MEMMAP_SLAB;
		data->phys_map = __efi_memmap_alloc_late(data->size);
	} else {
		data->flags |= EFI_MEMMAP_MEMBLOCK;
		data->phys_map = __efi_memmap_alloc_early(data->size);
	}

	if (!data->phys_map)
		return -ENOMEM;
	return 0;
}
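
/*
 * Illustrative sketch (not part of the original source): a caller is
 * expected to pass a zero-initialized efi_memory_map_data and to size
 * the allocation from the currently installed map, e.g. when one
 * extra descriptor is needed:
 *
 *	struct efi_memory_map_data data = {};
 *	int err;
 *
 *	err = efi_memmap_alloc(efi.memmap.nr_map + 1, &data);
 *	if (err)
 *		return err;
 *
 * On success, data.phys_map holds the physical address of the new
 * buffer and data.flags records whether it came from memblock or the
 * page allocator, so __efi_memmap_free() can release it later.
 */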

/**
 * efi_memmap_install - Install a new EFI memory map in efi.memmap
 * @data: efi memmap installation parameters
 *
 * Unlike efi_memmap_init_*(), this function does not allow the caller
 * to switch from early to late mappings. It simply uses the existing
 * mapping function and installs the new memmap.
 *
 * Returns zero on success, a negative error code on failure.
 */
int __init efi_memmap_install(struct efi_memory_map_data *data)
{
	unsigned long size = efi.memmap.desc_size * efi.memmap.nr_map;
	unsigned long flags = efi.memmap.flags;
	u64 phys = efi.memmap.phys_map;
	int ret;

	efi_memmap_unmap();

	if (efi_enabled(EFI_PARAVIRT))
		return 0;

	ret = __efi_memmap_init(data);
	if (ret)
		return ret;

	__efi_memmap_free(phys, size, flags);
	return 0;
}

/**
 * efi_memmap_split_count - Count number of additional EFI memmap entries
 * @md: EFI memory descriptor to split
 * @range: Address range (start, end) to split around
 *
 * Returns the number of additional EFI memmap entries required to
 * accommodate @range.
 */
int __init efi_memmap_split_count(efi_memory_desc_t *md, struct range *range)
{
	u64 m_start, m_end;
	u64 start, end;
	int count = 0;

	start = md->phys_addr;
	end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;

	/* modifying range */
	m_start = range->start;
	m_end = range->end;

	if (m_start <= start) {
		/* split into 2 parts */
		if (start < m_end && m_end < end)
			count++;
	}

	if (start < m_start && m_start < end) {
		/* split into 3 parts */
		if (m_end < end)
			count += 2;
		/* split into 2 parts */
		if (end <= m_end)
			count++;
	}

	return count;
}
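
/*
 * Worked example for efi_memmap_split_count() (illustrative, assuming
 * 4KiB EFI pages): a descriptor at phys_addr 0x1000 with num_pages == 8
 * covers [0x1000, 0x8fff].  A range of [0x3000, 0x5fff] lies strictly
 * inside it, so the descriptor must be split into three parts and the
 * function returns 2; a range of [0x5000, 0xffff] overlaps only the
 * tail, so one extra descriptor suffices and the function returns 1.
 */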

/**
 * efi_memmap_insert - Insert a memory region in an EFI memmap
 * @old_memmap: The existing EFI memory map structure
 * @buf: Address of buffer to store new map
 * @mem: Memory map entry to insert
 *
 * It is suggested that you call efi_memmap_split_count() first
 * to see how large @buf needs to be.
 */
void __init efi_memmap_insert(struct efi_memory_map *old_memmap, void *buf,
			      struct efi_mem_range *mem)
{
	u64 m_start, m_end, m_attr;
	efi_memory_desc_t *md;
	u64 start, end;
	void *old, *new;

	/* modifying range */
	m_start = mem->range.start;
	m_end = mem->range.end;
	m_attr = mem->attribute;

	/*
	 * The EFI memory map deals with regions in EFI_PAGE_SIZE
	 * units. Ensure that the region described by 'mem' is aligned
	 * correctly.
	 */
	if (!IS_ALIGNED(m_start, EFI_PAGE_SIZE) ||
	    !IS_ALIGNED(m_end + 1, EFI_PAGE_SIZE)) {
		WARN_ON(1);
		return;
	}

	for (old = old_memmap->map, new = buf;
	     old < old_memmap->map_end;
	     old += old_memmap->desc_size, new += old_memmap->desc_size) {

		/* copy original EFI memory descriptor */
		memcpy(new, old, old_memmap->desc_size);
		md = new;
		start = md->phys_addr;
		end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;

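		/* The descriptor lies entirely inside the modifying range. */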
		if (m_start <= start && end <= m_end)
			md->attribute |= m_attr;

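		/*
		 * The modifying range covers the start of the descriptor and
		 * ends inside it: split it in two.
		 */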
		if (m_start <= start &&
		    (start < m_end && m_end < end)) {
			/* first part */
			md->attribute |= m_attr;
			md->num_pages = (m_end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
			/* latter part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_end + 1;
			md->num_pages = (end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
		}

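		/*
		 * The modifying range lies strictly inside the descriptor:
		 * split it in three.
		 */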
		if ((start < m_start && m_start < end) && m_end < end) {
			/* first part */
			md->num_pages = (m_start - md->phys_addr) >>
				EFI_PAGE_SHIFT;
			/* middle part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->attribute |= m_attr;
			md->phys_addr = m_start;
			md->num_pages = (m_end - m_start + 1) >>
				EFI_PAGE_SHIFT;
			/* last part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_end + 1;
			md->num_pages = (end - m_end) >>
				EFI_PAGE_SHIFT;
		}

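		/*
		 * The modifying range starts inside the descriptor and covers
		 * its end: split it in two.
		 */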
		if ((start < m_start && m_start < end) &&
		    (end <= m_end)) {
			/* first part */
			md->num_pages = (m_start - md->phys_addr) >>
				EFI_PAGE_SHIFT;
			/* latter part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_start;
			md->num_pages = (end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
			md->attribute |= m_attr;
		}
	}
}
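
/*
 * Illustrative end-to-end sketch (an assumption modeled on how an
 * architecture quirk might combine these helpers; not code from this
 * file): reserve a region @mr that is covered by descriptor @md of the
 * currently installed map.  Room is allocated for the existing entries
 * plus any new splits, the buffer is mapped, the new map is built, and
 * finally installed.  example_reserve() is a hypothetical name; real
 * callers pick early_memremap() or memremap() depending on how far
 * boot has progressed.
 *
 *	static int example_reserve(efi_memory_desc_t *md,
 *				   struct efi_mem_range *mr)
 *	{
 *		struct efi_memory_map_data data = {};
 *		unsigned int num_entries;
 *		void *new;
 *		int err;
 *
 *		num_entries = efi.memmap.nr_map +
 *			      efi_memmap_split_count(md, &mr->range);
 *
 *		err = efi_memmap_alloc(num_entries, &data);
 *		if (err)
 *			return err;
 *
 *		new = early_memremap(data.phys_map, data.size);
 *		if (!new)
 *			return -ENOMEM;
 *
 *		efi_memmap_insert(&efi.memmap, new, mr);
 *		early_memunmap(new, data.size);
 *
 *		return efi_memmap_install(&data);
 *	}
 */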