/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This file is included twice from vdso2c.c.  It generates code for 32-bit
 * and 64-bit vDSOs.  We need both for 64-bit builds, since 32-bit vDSOs
 * are built for 32-bit userspace.
 */

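/*
 * vdso2c.c provides the ELF(), GET_LE(), BITSFUNC(), etc. macros and then
 * includes this header once per word size, roughly along these lines
 * (illustrative sketch, not the exact text of vdso2c.c):
 *
 *	#define ELF_BITS 64
 *	#include "vdso2c.h"
 *	#undef ELF_BITS
 *
 *	#define ELF_BITS 32
 *	#include "vdso2c.h"
 *	#undef ELF_BITS
 *
 * Each inclusion instantiates a 64-bit or 32-bit variant of go() via
 * BITSFUNC().
 */
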
static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
			 void *stripped_addr, size_t stripped_len,
			 FILE *outfile, const char *image_name)
{
	int found_load = 0;
	unsigned long load_size = -1;  /* Work around bogus warning */
	unsigned long mapping_size;
	ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
	int i;
	unsigned long j;
	ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
		*alt_sec = NULL;
	ELF(Dyn) *dyn = NULL, *dyn_end = NULL;
	const char *secstrings;
	INT_BITS syms[NSYMS] = {};

	ELF(Phdr) *pt = (ELF(Phdr) *)(raw_addr + GET_LE(&hdr->e_phoff));

	if (GET_LE(&hdr->e_type) != ET_DYN)
		fail("input is not a shared object\n");

	/* Walk the segment table. */
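	/*
	 * The kernel maps the vDSO image as one contiguous blob, so we
	 * require exactly one PT_LOAD segment, at file offset 0 and vaddr 0,
	 * with memsz == filesz (no zero-filled tail to synthesize).  Symbol
	 * values can then be treated as plain offsets into the mapping.
	 */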
	for (i = 0; i < GET_LE(&hdr->e_phnum); i++) {
		if (GET_LE(&pt[i].p_type) == PT_LOAD) {
			if (found_load)
				fail("multiple PT_LOAD segs\n");

			if (GET_LE(&pt[i].p_offset) != 0 ||
			    GET_LE(&pt[i].p_vaddr) != 0)
				fail("PT_LOAD in wrong place\n");

			if (GET_LE(&pt[i].p_memsz) != GET_LE(&pt[i].p_filesz))
				fail("cannot handle memsz != filesz\n");

			load_size = GET_LE(&pt[i].p_memsz);
			found_load = 1;
		} else if (GET_LE(&pt[i].p_type) == PT_DYNAMIC) {
			dyn = raw_addr + GET_LE(&pt[i].p_offset);
			dyn_end = raw_addr + GET_LE(&pt[i].p_offset) +
				GET_LE(&pt[i].p_memsz);
		}
	}
	if (!found_load)
		fail("no PT_LOAD seg\n");

	if (stripped_len < load_size)
		fail("stripped input is too short\n");

	if (!dyn)
		fail("input has no PT_DYNAMIC segment -- your toolchain is buggy\n");

	/* Walk the dynamic table */
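	/*
	 * Nothing ever applies relocations to the vDSO after it is built,
	 * so any relocation-related dynamic tags mean the image would be
	 * broken at runtime.  Refuse to proceed rather than emit it.
	 */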
	for (i = 0; dyn + i < dyn_end &&
		     GET_LE(&dyn[i].d_tag) != DT_NULL; i++) {
		typeof(dyn[i].d_tag) tag = GET_LE(&dyn[i].d_tag);
		if (tag == DT_REL || tag == DT_RELSZ || tag == DT_RELA ||
		    tag == DT_RELENT || tag == DT_TEXTREL)
			fail("vdso image contains dynamic relocations\n");
	}

	/* Walk the section table */
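	/*
	 * We need the symbol table to find the exported vDSO symbols, and
	 * .altinstructions (if present) so the kernel can apply alternative
	 * instruction patching to the vDSO text at boot.
	 */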
	secstrings_hdr = raw_addr + GET_LE(&hdr->e_shoff) +
		GET_LE(&hdr->e_shentsize) * GET_LE(&hdr->e_shstrndx);
	secstrings = raw_addr + GET_LE(&secstrings_hdr->sh_offset);
	for (i = 0; i < GET_LE(&hdr->e_shnum); i++) {
		ELF(Shdr) *sh = raw_addr + GET_LE(&hdr->e_shoff) +
			GET_LE(&hdr->e_shentsize) * i;
		if (GET_LE(&sh->sh_type) == SHT_SYMTAB)
			symtab_hdr = sh;

		if (!strcmp(secstrings + GET_LE(&sh->sh_name),
			    ".altinstructions"))
			alt_sec = sh;
	}

	if (!symtab_hdr)
		fail("no symbol table\n");

	strtab_hdr = raw_addr + GET_LE(&hdr->e_shoff) +
		GET_LE(&hdr->e_shentsize) * GET_LE(&symtab_hdr->sh_link);

	/* Walk the symbol table */
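	/*
	 * Record st_value for every symbol named in required_syms[]; these
	 * become the .sym_* offsets emitted into the generated
	 * struct vdso_image below.
	 */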
	for (i = 0;
	     i < GET_LE(&symtab_hdr->sh_size) / GET_LE(&symtab_hdr->sh_entsize);
	     i++) {
		int k;
		ELF(Sym) *sym = raw_addr + GET_LE(&symtab_hdr->sh_offset) +
			GET_LE(&symtab_hdr->sh_entsize) * i;
		const char *sym_name = raw_addr +
				       GET_LE(&strtab_hdr->sh_offset) +
				       GET_LE(&sym->st_name);

		for (k = 0; k < NSYMS; k++) {
			if (!strcmp(sym_name, required_syms[k].name)) {
				if (syms[k]) {
					fail("duplicate symbol %s\n",
					     required_syms[k].name);
				}

				/*
				 * Careful: we use negative addresses, but
				 * st_value is unsigned, so we rely
				 * on syms[k] being a signed type of the
				 * correct width.
				 */
				syms[k] = GET_LE(&sym->st_value);
			}
		}
	}

	/* Validate mapping addresses. */
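	/*
	 * Each special data page symbol is a negative offset from the start
	 * of the vDSO text (which sits at 0).  Its page must be page-aligned
	 * and must lie entirely within [vvar_start, 0); anything at or above
	 * 0 would overlap the vDSO text itself.
	 */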
	for (i = 0; i < sizeof(special_pages) / sizeof(special_pages[0]); i++) {
		INT_BITS symval = syms[special_pages[i]];

		if (!symval)
			continue;  /* The mapping isn't used; ignore it. */

		if (symval % 4096)
			fail("%s must be a multiple of 4096\n",
			     required_syms[special_pages[i]].name);
		if (symval + 4096 < syms[sym_vvar_start])
			fail("%s underruns vvar_start\n",
			     required_syms[special_pages[i]].name);
		if (symval + 4096 > 0)
			fail("%s is on the wrong side of the vdso text\n",
			     required_syms[special_pages[i]].name);
	}
	if (syms[sym_vvar_start] % 4096)
		fail("vvar_start must be a multiple of 4096\n");

	if (!image_name) {
		fwrite(stripped_addr, stripped_len, 1, outfile);
		return;
	}

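	/*
	 * Round the stripped image up to a whole number of pages.  raw_data
	 * is declared with this padded, page-aligned size, so the tail of
	 * the last page is implicitly zero-filled.
	 */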
	mapping_size = (stripped_len + 4095) / 4096 * 4096;

	fprintf(outfile, "/* AUTOMATICALLY GENERATED -- DO NOT EDIT */\n\n");
	fprintf(outfile, "#include <linux/linkage.h>\n");
	fprintf(outfile, "#include <asm/page_types.h>\n");
	fprintf(outfile, "#include <asm/vdso.h>\n");
	fprintf(outfile, "\n");
	fprintf(outfile,
		"static unsigned char raw_data[%lu] __ro_after_init __aligned(PAGE_SIZE) = {",
		mapping_size);
	for (j = 0; j < stripped_len; j++) {
		if (j % 10 == 0)
			fprintf(outfile, "\n\t");
		fprintf(outfile, "0x%02X, ",
			(int)((unsigned char *)stripped_addr)[j]);
	}
	fprintf(outfile, "\n};\n\n");

	fprintf(outfile, "const struct vdso_image %s = {\n", image_name);
	fprintf(outfile, "\t.data = raw_data,\n");
	fprintf(outfile, "\t.size = %lu,\n", mapping_size);
	if (alt_sec) {
		fprintf(outfile, "\t.alt = %lu,\n",
			(unsigned long)GET_LE(&alt_sec->sh_offset));
		fprintf(outfile, "\t.alt_len = %lu,\n",
			(unsigned long)GET_LE(&alt_sec->sh_size));
	}
	for (i = 0; i < NSYMS; i++) {
		if (required_syms[i].export && syms[i])
			fprintf(outfile, "\t.sym_%s = %" PRIi64 ",\n",
				required_syms[i].name, (int64_t)syms[i]);
	}
	fprintf(outfile, "};\n");
}
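
/*
 * For reference, the generated <image>.c file produced above looks roughly
 * like the sketch below (the sizes, offsets, and the image name
 * "vdso_image_64" are only examples):
 *
 *	static unsigned char raw_data[8192] __ro_after_init
 *			__aligned(PAGE_SIZE) = {
 *		0x7F, 0x45, 0x4C, 0x46, ...
 *	};
 *
 *	const struct vdso_image vdso_image_64 = {
 *		.data = raw_data,
 *		.size = 8192,
 *		.alt = ...,
 *		.alt_len = ...,
 *		.sym_vvar_start = -16384,
 *		...
 *	};
 */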