/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This file is included twice from vdso2c.c.  It generates code for 32-bit
 * and 64-bit vDSOs.  We need both for 64-bit builds, since 32-bit vDSOs
 * are built for 32-bit userspace.
 */

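/*
 * A sketch of how vdso2c.c is expected to include this header (see
 * vdso2c.c for the authoritative macro definitions):
 *
 *	#define ELF_BITS 64
 *	#include "vdso2c.h"
 *	#undef ELF_BITS
 *
 *	#define ELF_BITS 32
 *	#include "vdso2c.h"
 *	#undef ELF_BITS
 *
 * BITSFUNC(), ELF() and INT_BITS expand according to ELF_BITS, so each
 * inclusion generates an independent copy of the functions below.
 */

/*
 * Emit LEN bytes of DATA as a C array initializer, ten "0xNN, " entries
 * per line.
 */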
static void BITSFUNC(copy)(FILE *outfile, const unsigned char *data, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (i % 10 == 0)
			fprintf(outfile, "\n\t");
		fprintf(outfile, "0x%02X, ", (int)(data)[i]);
	}
}


/*
 * Extract a section from the input data into a standalone blob.  Used to
 * capture kernel-only data that needs to persist indefinitely, e.g. the
 * exception fixup tables, but only in the kernel, i.e. the section can
 * be stripped from the final vDSO image.
 */
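/*
 * For a four-byte section extracted under the name "extable", the output
 * would look like this (byte values are made up for illustration):
 *
 *	static const unsigned char extable[4] = {
 *		0xDE, 0xAD, 0xBE, 0xEF,
 *	};
 */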
static void BITSFUNC(extract)(const unsigned char *data, size_t data_len,
			      FILE *outfile, ELF(Shdr) *sec, const char *name)
{
	unsigned long offset;
	size_t len;

	offset = (unsigned long)GET_LE(&sec->sh_offset);
	len = (size_t)GET_LE(&sec->sh_size);

	if (offset + len > data_len)
		fail("section to extract overruns input data\n");

	fprintf(outfile, "static const unsigned char %s[%zu] = {", name, len);
	BITSFUNC(copy)(outfile, data + offset, len);
	fprintf(outfile, "\n};\n\n");
}

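/*
 * Per-bitness worker: validate the vDSO produced by the linker, then either
 * write the stripped image out verbatim or generate a C file containing the
 * image as raw_data[] plus a struct vdso_image describing it.
 */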
static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
			 void *stripped_addr, size_t stripped_len,
			 FILE *outfile, const char *image_name)
{
	int found_load = 0;
	unsigned long load_size = -1;  /* Work around bogus warning */
	unsigned long mapping_size;
	ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
	unsigned long i, syms_nr;
	ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
		*alt_sec = NULL, *extable_sec = NULL;
	ELF(Dyn) *dyn = 0, *dyn_end = 0;
	const char *secstrings;
	INT_BITS syms[NSYMS] = {};

	ELF(Phdr) *pt = (ELF(Phdr) *)(raw_addr + GET_LE(&hdr->e_phoff));

	if (GET_LE(&hdr->e_type) != ET_DYN)
		fail("input is not a shared object\n");

	/*
	 * Walk the segment table.  The vDSO must be a single PT_LOAD
	 * segment at file offset 0 with memsz == filesz, so the stripped
	 * image can be mapped into userspace exactly as it is laid out
	 * on disk.
	 */
	for (i = 0; i < GET_LE(&hdr->e_phnum); i++) {
		if (GET_LE(&pt[i].p_type) == PT_LOAD) {
			if (found_load)
				fail("multiple PT_LOAD segs\n");

			if (GET_LE(&pt[i].p_offset) != 0 ||
			    GET_LE(&pt[i].p_vaddr) != 0)
				fail("PT_LOAD in wrong place\n");

			if (GET_LE(&pt[i].p_memsz) != GET_LE(&pt[i].p_filesz))
				fail("cannot handle memsz != filesz\n");

			load_size = GET_LE(&pt[i].p_memsz);
			found_load = 1;
		} else if (GET_LE(&pt[i].p_type) == PT_DYNAMIC) {
			dyn = raw_addr + GET_LE(&pt[i].p_offset);
			dyn_end = raw_addr + GET_LE(&pt[i].p_offset) +
				GET_LE(&pt[i].p_memsz);
		}
	}
	if (!found_load)
		fail("no PT_LOAD seg\n");

	if (stripped_len < load_size)
		fail("stripped input is too short\n");

	if (!dyn)
		fail("input has no PT_DYNAMIC section -- your toolchain is buggy\n");

	/*
	 * Walk the dynamic table.  The kernel never applies relocations to
	 * the vDSO, so an image containing dynamic relocations would be
	 * broken at runtime.
	 */
	for (i = 0; dyn + i < dyn_end &&
		     GET_LE(&dyn[i].d_tag) != DT_NULL; i++) {
		typeof(dyn[i].d_tag) tag = GET_LE(&dyn[i].d_tag);
		if (tag == DT_REL || tag == DT_RELSZ || tag == DT_RELA ||
		    tag == DT_RELENT || tag == DT_TEXTREL)
			fail("vdso image contains dynamic relocations\n");
	}

	/*
	 * Walk the section table to find the symbol table and the sections
	 * the generated image needs to describe (.altinstructions and
	 * __ex_table).
	 */
	secstrings_hdr = raw_addr + GET_LE(&hdr->e_shoff) +
		GET_LE(&hdr->e_shentsize)*GET_LE(&hdr->e_shstrndx);
	secstrings = raw_addr + GET_LE(&secstrings_hdr->sh_offset);
	for (i = 0; i < GET_LE(&hdr->e_shnum); i++) {
		ELF(Shdr) *sh = raw_addr + GET_LE(&hdr->e_shoff) +
			GET_LE(&hdr->e_shentsize) * i;
		if (GET_LE(&sh->sh_type) == SHT_SYMTAB)
			symtab_hdr = sh;

		if (!strcmp(secstrings + GET_LE(&sh->sh_name),
			    ".altinstructions"))
			alt_sec = sh;
		if (!strcmp(secstrings + GET_LE(&sh->sh_name), "__ex_table"))
			extable_sec = sh;
	}

	if (!symtab_hdr)
		fail("no symbol table\n");

	strtab_hdr = raw_addr + GET_LE(&hdr->e_shoff) +
		GET_LE(&hdr->e_shentsize) * GET_LE(&symtab_hdr->sh_link);

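	/*
	 * required_syms[] (see vdso2c.c) lists the symbols the kernel needs
	 * to know about; their values are collected in syms[], and the
	 * exported ones become the sym_* fields of the generated image.
	 */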
	syms_nr = GET_LE(&symtab_hdr->sh_size) / GET_LE(&symtab_hdr->sh_entsize);
	/* Walk the symbol table */
	for (i = 0; i < syms_nr; i++) {
		unsigned int k;
		ELF(Sym) *sym = raw_addr + GET_LE(&symtab_hdr->sh_offset) +
			GET_LE(&symtab_hdr->sh_entsize) * i;
		const char *sym_name = raw_addr +
				       GET_LE(&strtab_hdr->sh_offset) +
				       GET_LE(&sym->st_name);

		for (k = 0; k < NSYMS; k++) {
			if (!strcmp(sym_name, required_syms[k].name)) {
				if (syms[k]) {
					fail("duplicate symbol %s\n",
					     required_syms[k].name);
				}

				/*
				 * Careful: we use negative addresses, but
				 * st_value is unsigned, so we rely
				 * on syms[k] being a signed type of the
				 * correct width.
				 */
				syms[k] = GET_LE(&sym->st_value);
			}
		}
	}

	/*
	 * Validate mapping addresses.  The special pages (vvar and friends)
	 * sit just below the vDSO text, so each symbol value is a negative,
	 * page-aligned offset whose page must lie between vvar_start and 0.
	 */
	for (i = 0; i < sizeof(special_pages) / sizeof(special_pages[0]); i++) {
		INT_BITS symval = syms[special_pages[i]];

		if (!symval)
			continue;  /* The mapping isn't used; ignore it. */

		if (symval % 4096)
			fail("%s must be a multiple of 4096\n",
			     required_syms[i].name);
		if (symval + 4096 < syms[sym_vvar_start])
			fail("%s underruns vvar_start\n",
			     required_syms[i].name);
		if (symval + 4096 > 0)
			fail("%s is on the wrong side of the vdso text\n",
			     required_syms[i].name);
	}
	if (syms[sym_vvar_start] % 4096)
		fail("vvar_start must be a multiple of 4096\n");

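	/* No image name: emit the stripped vDSO image itself, not C source. */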
	if (!image_name) {
		fwrite(stripped_addr, stripped_len, 1, outfile);
		return;
	}

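	/* Round the image size up to a whole number of pages for the mapping. */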
	mapping_size = (stripped_len + 4095) / 4096 * 4096;

	fprintf(outfile, "/* AUTOMATICALLY GENERATED -- DO NOT EDIT */\n\n");
	fprintf(outfile, "#include <linux/linkage.h>\n");
	fprintf(outfile, "#include <asm/page_types.h>\n");
	fprintf(outfile, "#include <asm/vdso.h>\n");
	fprintf(outfile, "\n");
	fprintf(outfile,
		"static unsigned char raw_data[%lu] __ro_after_init __aligned(PAGE_SIZE) = {",
		mapping_size);
	for (i = 0; i < stripped_len; i++) {
		if (i % 10 == 0)
			fprintf(outfile, "\n\t");
		fprintf(outfile, "0x%02X, ",
			(int)((unsigned char *)stripped_addr)[i]);
	}
	fprintf(outfile, "\n};\n\n");
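	/*
	 * Emit the exception fixup table (which can be stripped from the
	 * userspace image) as a kernel-only blob, taken from the unstripped
	 * input.
	 */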
	if (extable_sec)
		BITSFUNC(extract)(raw_addr, raw_len, outfile,
				  extable_sec, "extable");

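	/*
	 * The generated descriptor looks roughly like this (field values are
	 * illustrative only):
	 *
	 *	const struct vdso_image vdso_image_64 = {
	 *		.data = raw_data,
	 *		.size = 8192,
	 *		.alt = 3000,
	 *		.alt_len = 48,
	 *		.sym_vvar_start = -16384,
	 *		.sym_vvar_page = -16384,
	 *	};
	 */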
	fprintf(outfile, "const struct vdso_image %s = {\n", image_name);
	fprintf(outfile, "\t.data = raw_data,\n");
	fprintf(outfile, "\t.size = %lu,\n", mapping_size);
	if (alt_sec) {
		fprintf(outfile, "\t.alt = %lu,\n",
			(unsigned long)GET_LE(&alt_sec->sh_offset));
		fprintf(outfile, "\t.alt_len = %lu,\n",
			(unsigned long)GET_LE(&alt_sec->sh_size));
	}
	if (extable_sec) {
		fprintf(outfile, "\t.extable_base = %lu,\n",
			(unsigned long)GET_LE(&extable_sec->sh_offset));
		fprintf(outfile, "\t.extable_len = %lu,\n",
			(unsigned long)GET_LE(&extable_sec->sh_size));
		fprintf(outfile, "\t.extable = extable,\n");
	}

	for (i = 0; i < NSYMS; i++) {
		if (required_syms[i].export && syms[i])
			fprintf(outfile, "\t.sym_%s = %" PRIi64 ",\n",
				required_syms[i].name, (int64_t)syms[i]);
	}
	fprintf(outfile, "};\n");
}