// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 - Google LLC
 * Author: David Brazdil <dbrazdil@google.com>
 *
 * Generates relocation information used by the kernel to convert
 * absolute addresses in hyp data from kernel VAs to hyp VAs.
 *
 * This is necessary because hyp code is linked into the same binary
 * as the kernel but executes under different memory mappings.
 * If the compiler used absolute addressing, those addresses need to
 * be converted before they are used by hyp code.
 *
 * The input of this program is the relocatable ELF object containing
 * all hyp code/data, not yet linked into vmlinux. Hyp section names
 * should have been prefixed with `.hyp` at this point.
 *
 * The output (printed to stdout) is an assembly file containing
 * an array of 32-bit integers and static relocations that instruct
 * the linker of `vmlinux` to populate the array entries with offsets
 * to positions in the kernel binary containing VAs used by hyp code.
 *
 * Note that dynamic relocations could be used for the same purpose.
 * However, those are only generated if CONFIG_RELOCATABLE=y.
 */
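
/*
 * Illustrative output for a single hyp section (the section name and
 * offsets below are made up; the real values depend on the input object):
 *
 *	.data
 *	.pushsection .hyp.reloc, "a"
 *	.global __hyp_section_.hyp.rodata
 *	.word 0
 *	.reloc 0, R_AARCH64_PREL32, __hyp_section_.hyp.rodata + 0x8
 *	.popsection
 */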

#include <elf.h>
#include <endian.h>
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

#include <generated/autoconf.h>

#define HYP_SECTION_PREFIX		".hyp"
#define HYP_RELOC_SECTION		".hyp.reloc"
#define HYP_SECTION_SYMBOL_PREFIX	"__hyp_section_"
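/*
 * Note: these names are expected to match the definitions used by the hyp
 * linker script (which provides the __hyp_section_* symbols) and by the
 * kernel code that walks .hyp.reloc at runtime.
 */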

/*
 * AArch64 relocation type constants.
 * Included in case these are not defined in the host toolchain.
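 * The numeric values are the standard AArch64 ELF relocation codes.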
 */
#ifndef R_AARCH64_ABS64
#define R_AARCH64_ABS64			257
#endif
#ifndef R_AARCH64_LD_PREL_LO19
#define R_AARCH64_LD_PREL_LO19		273
#endif
#ifndef R_AARCH64_ADR_PREL_LO21
#define R_AARCH64_ADR_PREL_LO21		274
#endif
#ifndef R_AARCH64_ADR_PREL_PG_HI21
#define R_AARCH64_ADR_PREL_PG_HI21	275
#endif
#ifndef R_AARCH64_ADR_PREL_PG_HI21_NC
#define R_AARCH64_ADR_PREL_PG_HI21_NC	276
#endif
#ifndef R_AARCH64_ADD_ABS_LO12_NC
#define R_AARCH64_ADD_ABS_LO12_NC	277
#endif
#ifndef R_AARCH64_LDST8_ABS_LO12_NC
#define R_AARCH64_LDST8_ABS_LO12_NC	278
#endif
#ifndef R_AARCH64_TSTBR14
#define R_AARCH64_TSTBR14		279
#endif
#ifndef R_AARCH64_CONDBR19
#define R_AARCH64_CONDBR19		280
#endif
#ifndef R_AARCH64_JUMP26
#define R_AARCH64_JUMP26		282
#endif
#ifndef R_AARCH64_CALL26
#define R_AARCH64_CALL26		283
#endif
#ifndef R_AARCH64_LDST16_ABS_LO12_NC
#define R_AARCH64_LDST16_ABS_LO12_NC	284
#endif
#ifndef R_AARCH64_LDST32_ABS_LO12_NC
#define R_AARCH64_LDST32_ABS_LO12_NC	285
#endif
#ifndef R_AARCH64_LDST64_ABS_LO12_NC
#define R_AARCH64_LDST64_ABS_LO12_NC	286
#endif
#ifndef R_AARCH64_MOVW_PREL_G0
#define R_AARCH64_MOVW_PREL_G0		287
#endif
#ifndef R_AARCH64_MOVW_PREL_G0_NC
#define R_AARCH64_MOVW_PREL_G0_NC	288
#endif
#ifndef R_AARCH64_MOVW_PREL_G1
#define R_AARCH64_MOVW_PREL_G1		289
#endif
#ifndef R_AARCH64_MOVW_PREL_G1_NC
#define R_AARCH64_MOVW_PREL_G1_NC	290
#endif
#ifndef R_AARCH64_MOVW_PREL_G2
#define R_AARCH64_MOVW_PREL_G2		291
#endif
#ifndef R_AARCH64_MOVW_PREL_G2_NC
#define R_AARCH64_MOVW_PREL_G2_NC	292
#endif
#ifndef R_AARCH64_MOVW_PREL_G3
#define R_AARCH64_MOVW_PREL_G3		293
#endif
#ifndef R_AARCH64_LDST128_ABS_LO12_NC
#define R_AARCH64_LDST128_ABS_LO12_NC	299
#endif

/* Global state of the processed ELF. */
static struct {
	const char	*path;
	char		*begin;
	size_t		size;
	Elf64_Ehdr	*ehdr;
	Elf64_Shdr	*sh_table;
	const char	*sh_string;
} elf;

#if defined(CONFIG_CPU_LITTLE_ENDIAN)

#define elf16toh(x)	le16toh(x)
#define elf32toh(x)	le32toh(x)
#define elf64toh(x)	le64toh(x)

#define ELFENDIAN	ELFDATA2LSB

#elif defined(CONFIG_CPU_BIG_ENDIAN)

#define elf16toh(x)	be16toh(x)
#define elf32toh(x)	be32toh(x)
#define elf64toh(x)	be64toh(x)

#define ELFENDIAN	ELFDATA2MSB

#else

#error PDP-endian sadly unsupported...

#endif
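
/*
 * The target ELF's endianness is taken from the kernel config rather than
 * detected at runtime, and every multi-byte field is read through the
 * elfNNtoh() helpers above, so the tool works regardless of the build
 * host's endianness. init_elf() cross-checks e_ident[EI_DATA] against
 * ELFENDIAN.
 */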

#define fatal_error(fmt, ...)						\
	({								\
		fprintf(stderr, "error: %s: " fmt "\n",			\
			elf.path, ## __VA_ARGS__);			\
		exit(EXIT_FAILURE);					\
		__builtin_unreachable();				\
	})

#define fatal_perror(msg)						\
	({								\
		fprintf(stderr, "error: %s: " msg ": %s\n",		\
			elf.path, strerror(errno));			\
		exit(EXIT_FAILURE);					\
		__builtin_unreachable();				\
	})

#define assert_op(lhs, rhs, fmt, op)					\
	({								\
		typeof(lhs) _lhs = (lhs);				\
		typeof(rhs) _rhs = (rhs);				\
									\
		if (!(_lhs op _rhs)) {					\
			fatal_error("assertion " #lhs " " #op " " #rhs	\
				" failed (lhs=" fmt ", rhs=" fmt	\
				", line=%d)", _lhs, _rhs, __LINE__);	\
		}							\
	})

#define assert_eq(lhs, rhs, fmt)	assert_op(lhs, rhs, fmt, ==)
#define assert_ne(lhs, rhs, fmt)	assert_op(lhs, rhs, fmt, !=)
#define assert_lt(lhs, rhs, fmt)	assert_op(lhs, rhs, fmt, <)
#define assert_ge(lhs, rhs, fmt)	assert_op(lhs, rhs, fmt, >=)

/*
 * Return a pointer of a given type at a given offset from
 * the beginning of the ELF file.
 */
#define elf_ptr(type, off) ((type *)(elf.begin + (off)))

/* Iterate over all sections in the ELF. */
#define for_each_section(var) \
	for (var = elf.sh_table; var < elf.sh_table + elf16toh(elf.ehdr->e_shnum); ++var)

/* Iterate over all Elf64_Rela relocations in a given section. */
#define for_each_rela(shdr, var)					\
	for (var = elf_ptr(Elf64_Rela, elf64toh(shdr->sh_offset));	\
	     var < elf_ptr(Elf64_Rela, elf64toh(shdr->sh_offset) + elf64toh(shdr->sh_size)); var++)
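/*
 * Note: for_each_rela() steps by sizeof(Elf64_Rela) and does not consult
 * sh_entsize; this is fine for SHT_RELA sections produced by the AArch64
 * toolchain, which use fixed-size Elf64_Rela entries.
 */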

/* True if a string starts with a given prefix. */
static inline bool starts_with(const char *str, const char *prefix)
{
	return memcmp(str, prefix, strlen(prefix)) == 0;
}

/* Returns a string containing the name of a given section. */
static inline const char *section_name(Elf64_Shdr *shdr)
{
	return elf.sh_string + elf32toh(shdr->sh_name);
}

/* Returns a pointer to the first byte of section data. */
static inline const char *section_begin(Elf64_Shdr *shdr)
{
	return elf_ptr(char, elf64toh(shdr->sh_offset));
}

/* Find a section by its offset from the beginning of the file. */
static inline Elf64_Shdr *section_by_off(Elf64_Off off)
{
	assert_ne(off, 0UL, "%lu");
	return elf_ptr(Elf64_Shdr, off);
}

/* Find a section by its index. */
static inline Elf64_Shdr *section_by_idx(uint16_t idx)
{
	assert_ne(idx, SHN_UNDEF, "%u");
	return &elf.sh_table[idx];
}

/*
 * Memory-map the given ELF file, perform sanity checks, and
 * populate global state.
 */
static void init_elf(const char *path)
{
	int fd, ret;
	struct stat stat;

	/* Store path in the global struct for error printing. */
	elf.path = path;

	/* Open the ELF file. */
	fd = open(path, O_RDONLY);
	if (fd < 0)
		fatal_perror("Could not open ELF file");

	/* Get status of ELF file to obtain its size. */
	ret = fstat(fd, &stat);
	if (ret < 0) {
		close(fd);
		fatal_perror("Could not get status of ELF file");
	}

	/* mmap() the entire ELF file read-only at an arbitrary address. */
	elf.begin = mmap(0, stat.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (elf.begin == MAP_FAILED) {
		close(fd);
		fatal_perror("Could not mmap ELF file");
	}

	/* mmap() was successful, close the FD. */
	close(fd);

	/* Get pointer to the ELF header. */
	assert_ge(stat.st_size, sizeof(*elf.ehdr), "%lu");
	elf.ehdr = elf_ptr(Elf64_Ehdr, 0);

	/* Check the ELF magic. */
	assert_eq(elf.ehdr->e_ident[EI_MAG0], ELFMAG0, "0x%x");
	assert_eq(elf.ehdr->e_ident[EI_MAG1], ELFMAG1, "0x%x");
	assert_eq(elf.ehdr->e_ident[EI_MAG2], ELFMAG2, "0x%x");
	assert_eq(elf.ehdr->e_ident[EI_MAG3], ELFMAG3, "0x%x");

	/* Sanity check that this is an ELF64 relocatable object for AArch64. */
	assert_eq(elf.ehdr->e_ident[EI_CLASS], ELFCLASS64, "%u");
	assert_eq(elf.ehdr->e_ident[EI_DATA], ELFENDIAN, "%u");
	assert_eq(elf16toh(elf.ehdr->e_type), ET_REL, "%u");
	assert_eq(elf16toh(elf.ehdr->e_machine), EM_AARCH64, "%u");

	/* Populate fields of the global struct. */
	elf.sh_table = section_by_off(elf64toh(elf.ehdr->e_shoff));
	elf.sh_string = section_begin(section_by_idx(elf16toh(elf.ehdr->e_shstrndx)));
}

/* Print the prologue of the output ASM file. */
static void emit_prologue(void)
{
	printf(".data\n"
	       ".pushsection " HYP_RELOC_SECTION ", \"a\"\n");
}

/* Print ASM statements needed as a prologue to a processed hyp section. */
static void emit_section_prologue(const char *sh_orig_name)
{
	/* Declare the hyp section symbol. */
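	/*
	 * The symbol itself is defined by the hyp linker script at the start
	 * of the relocated output section; declaring it here lets the .reloc
	 * directives emitted by emit_rela_abs64() refer to it.
	 */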
	printf(".global %s%s\n", HYP_SECTION_SYMBOL_PREFIX, sh_orig_name);
}

/*
 * Print ASM statements to create a hyp relocation entry for a given
 * R_AARCH64_ABS64 relocation.
 *
 * The linker of vmlinux will populate the position given by `rela` with
 * an absolute 64-bit kernel VA. If the kernel is relocatable, it will
 * also generate a dynamic relocation entry so that the kernel can shift
 * the address at runtime for KASLR.
 *
 * Emit a 32-bit offset from the current address to the position given
 * by `rela`. This way the kernel can iterate over all kernel VAs used
 * by hyp at runtime and convert them to hyp VAs. However, that offset
 * will not be known until linking of `vmlinux`, so emit a PREL32
 * relocation referencing a symbol that the hyp linker script put at
 * the beginning of the relocated section + the offset from `rela`.
 */
static void emit_rela_abs64(Elf64_Rela *rela, const char *sh_orig_name)
{
	/* Offset of this reloc from the beginning of HYP_RELOC_SECTION. */
	static size_t reloc_offset;

	/* Create storage for the 32-bit offset. */
	printf(".word 0\n");

	/*
	 * Create a PREL32 relocation that instructs the linker of `vmlinux`
	 * to insert the offset to position <base> + <offset>, where <base>
	 * is a symbol at the beginning of the relocated section and <offset>
	 * is `rela->r_offset`.
	 */
	printf(".reloc %lu, R_AARCH64_PREL32, %s%s + 0x%lx\n",
	       reloc_offset, HYP_SECTION_SYMBOL_PREFIX, sh_orig_name,
	       elf64toh(rela->r_offset));

	reloc_offset += 4;
}

/* Print the epilogue of the output ASM file. */
static void emit_epilogue(void)
{
	printf(".popsection\n");
}

/*
 * Iterate over all RELA relocations in a given section and emit
 * hyp relocation data for all absolute addresses in hyp code/data.
 *
 * Static relocations that generate PC-relative addressing are ignored.
 * Failure is reported for unexpected relocation types.
 */
static void emit_rela_section(Elf64_Shdr *sh_rela)
{
	Elf64_Shdr *sh_orig = &elf.sh_table[elf32toh(sh_rela->sh_info)];
	const char *sh_orig_name = section_name(sh_orig);
	Elf64_Rela *rela;

	/* Skip all non-hyp sections. */
	if (!starts_with(sh_orig_name, HYP_SECTION_PREFIX))
		return;

	emit_section_prologue(sh_orig_name);

	for_each_rela(sh_rela, rela) {
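		/*
		 * The relocation type occupies the lower 32 bits of r_info
		 * (ELF64_R_TYPE); the upper 32 bits hold the symbol index,
		 * which is not needed here.
		 */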
		uint32_t type = (uint32_t)elf64toh(rela->r_info);

		/* Check that rela points inside the relocated section. */
		assert_lt(elf64toh(rela->r_offset), elf64toh(sh_orig->sh_size), "0x%lx");

		switch (type) {
		/*
		 * Data relocations that generate absolute addressing.
		 * Emit a hyp relocation.
		 */
		case R_AARCH64_ABS64:
			emit_rela_abs64(rela, sh_orig_name);
			break;
		/* Allow relocations that generate PC-relative addressing. */
		case R_AARCH64_LD_PREL_LO19:
		case R_AARCH64_ADR_PREL_LO21:
		case R_AARCH64_ADR_PREL_PG_HI21:
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
		case R_AARCH64_LDST16_ABS_LO12_NC:
		case R_AARCH64_LDST32_ABS_LO12_NC:
		case R_AARCH64_LDST64_ABS_LO12_NC:
		case R_AARCH64_LDST128_ABS_LO12_NC:
			break;
		/* Allow relative relocations for control-flow instructions. */
		case R_AARCH64_TSTBR14:
		case R_AARCH64_CONDBR19:
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			break;
		/* Allow group relocations that create a PC-relative offset inline. */
		case R_AARCH64_MOVW_PREL_G0:
		case R_AARCH64_MOVW_PREL_G0_NC:
		case R_AARCH64_MOVW_PREL_G1:
		case R_AARCH64_MOVW_PREL_G1_NC:
		case R_AARCH64_MOVW_PREL_G2:
		case R_AARCH64_MOVW_PREL_G2_NC:
		case R_AARCH64_MOVW_PREL_G3:
			break;
		default:
			fatal_error("Unexpected RELA type %u", type);
		}
	}
}

/* Iterate over all sections and emit hyp relocation data for RELA sections. */
static void emit_all_relocs(void)
{
	Elf64_Shdr *shdr;

	for_each_section(shdr) {
		switch (elf32toh(shdr->sh_type)) {
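		/*
		 * The AArch64 ELF ABI only uses RELA (explicit addend)
		 * relocations, so a plain REL section is never expected in
		 * the input object.
		 */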
		case SHT_REL:
			fatal_error("Unexpected SHT_REL section \"%s\"",
				section_name(shdr));
		case SHT_RELA:
			emit_rela_section(shdr);
			break;
		}
	}
}

int main(int argc, const char **argv)
{
	if (argc != 2) {
		fprintf(stderr, "Usage: %s <elf_input>\n", argv[0]);
		return EXIT_FAILURE;
	}

	init_elf(argv[1]);

	emit_prologue();
	emit_all_relocs();
	emit_epilogue();

	return EXIT_SUCCESS;
}