// SPDX-License-Identifier: GPL-2.0
/*
 * s390 code for kexec_file_load system call
 *
 * Copyright IBM Corp. 2018
 *
 * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com>
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/module_signature.h>
#include <linux/verification.h>
#include <linux/vmalloc.h>
#include <asm/boot_data.h>
#include <asm/ipl.h>
#include <asm/setup.h>

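/*
 * Image formats supported by kexec_file_load() on s390.  The generic
 * kexec_file code probes these loaders in array order, so the ELF
 * loader is tried before the raw image loader.
 */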
const struct kexec_file_ops * const kexec_file_loaders[] = {
	&s390_kexec_elf_ops,
	&s390_kexec_image_ops,
	NULL,
};

#ifdef CONFIG_KEXEC_SIG
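/*
 * Verify the PKCS#7 signature appended to the kernel image.  Verification
 * is skipped (and the image accepted) when the system was not IPLed
 * securely.  The module-style signature marker and struct module_signature
 * are stripped off the end of the image before the remaining payload is
 * checked against the secondary keyring, with a fallback to the platform
 * keyring when no matching key is found there.
 */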
int s390_verify_sig(const char *kernel, unsigned long kernel_len)
{
	const unsigned long marker_len = sizeof(MODULE_SIG_STRING) - 1;
	struct module_signature *ms;
	unsigned long sig_len;
	int ret;

	/* Skip signature verification when not secure IPLed. */
	if (!ipl_secure_flag)
		return 0;

	if (marker_len > kernel_len)
		return -EKEYREJECTED;

	if (memcmp(kernel + kernel_len - marker_len, MODULE_SIG_STRING,
		   marker_len))
		return -EKEYREJECTED;
	kernel_len -= marker_len;

	ms = (void *)kernel + kernel_len - sizeof(*ms);
	kernel_len -= sizeof(*ms);

	sig_len = be32_to_cpu(ms->sig_len);
	if (sig_len >= kernel_len)
		return -EKEYREJECTED;
	kernel_len -= sig_len;

	if (ms->id_type != PKEY_ID_PKCS7)
		return -EKEYREJECTED;

	if (ms->algo != 0 ||
	    ms->hash != 0 ||
	    ms->signer_len != 0 ||
	    ms->key_id_len != 0 ||
	    ms->__pad[0] != 0 ||
	    ms->__pad[1] != 0 ||
	    ms->__pad[2] != 0) {
		return -EBADMSG;
	}

	ret = verify_pkcs7_signature(kernel, kernel_len,
				     kernel + kernel_len, sig_len,
				     VERIFY_USE_SECONDARY_KEYRING,
				     VERIFYING_MODULE_SIGNATURE,
				     NULL, NULL);
	if (ret == -ENOKEY && IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING))
		ret = verify_pkcs7_signature(kernel, kernel_len,
					     kernel + kernel_len, sig_len,
					     VERIFY_USE_PLATFORM_KEYRING,
					     VERIFYING_MODULE_SIGNATURE,
					     NULL, NULL);
	return ret;
}
#endif /* CONFIG_KEXEC_SIG */

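/*
 * Fill in the purgatory symbols describing the next kernel: its entry
 * point and image type, and for a crash kernel also the start address
 * and size of the crashkernel memory region.
 */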
static int kexec_file_update_purgatory(struct kimage *image,
				       struct s390_load_data *data)
{
	u64 entry, type;
	int ret;

	if (image->type == KEXEC_TYPE_CRASH) {
		entry = STARTUP_KDUMP_OFFSET;
		type = KEXEC_TYPE_CRASH;
	} else {
		entry = STARTUP_NORMAL_OFFSET;
		type = KEXEC_TYPE_DEFAULT;
	}

	ret = kexec_purgatory_get_set_symbol(image, "kernel_entry", &entry,
					     sizeof(entry), false);
	if (ret)
		return ret;

	ret = kexec_purgatory_get_set_symbol(image, "kernel_type", &type,
					     sizeof(type), false);
	if (ret)
		return ret;

	if (image->type == KEXEC_TYPE_CRASH) {
		u64 crash_size;

		ret = kexec_purgatory_get_set_symbol(image, "crash_start",
						     &crashk_res.start,
						     sizeof(crashk_res.start),
						     false);
		if (ret)
			return ret;

		crash_size = crashk_res.end - crashk_res.start + 1;
		ret = kexec_purgatory_get_set_symbol(image, "crash_size",
						     &crash_size,
						     sizeof(crash_size),
						     false);
	}
	return ret;
}

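/*
 * Load the purgatory page aligned behind the components added so far
 * and update its symbols afterwards.
 */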
static int kexec_file_add_purgatory(struct kimage *image,
				    struct s390_load_data *data)
{
	struct kexec_buf buf;
	int ret;

	buf.image = image;

	data->memsz = ALIGN(data->memsz, PAGE_SIZE);
	buf.mem = data->memsz;
	if (image->type == KEXEC_TYPE_CRASH)
		buf.mem += crashk_res.start;

	ret = kexec_load_purgatory(image, &buf);
	if (ret)
		return ret;
	data->memsz += buf.memsz;

	return kexec_file_update_purgatory(image, data);
}

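/*
 * Append the initrd behind the components added so far, record its
 * location and size in the parameter area of the next kernel and add
 * it as a component to the IPL report.
 */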
static int kexec_file_add_initrd(struct kimage *image,
				 struct s390_load_data *data)
{
	struct kexec_buf buf;
	int ret;

	buf.image = image;

	buf.buffer = image->initrd_buf;
	buf.bufsz = image->initrd_buf_len;

	data->memsz = ALIGN(data->memsz, PAGE_SIZE);
	buf.mem = data->memsz;
	if (image->type == KEXEC_TYPE_CRASH)
		buf.mem += crashk_res.start;
	buf.memsz = buf.bufsz;

	data->parm->initrd_start = data->memsz;
	data->parm->initrd_size = buf.memsz;
	data->memsz += buf.memsz;

	ret = kexec_add_buffer(&buf);
	if (ret)
		return ret;

	return ipl_report_add_component(data->report, &buf, 0, 0);
}

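/*
 * Build and place the IPL report: copy the certificates provided at
 * boot into the report, append the finished report behind the other
 * components and store its load address in the lowcore copy at the
 * start of the kernel image so the next kernel can locate it.
 */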
static int kexec_file_add_ipl_report(struct kimage *image,
				     struct s390_load_data *data)
{
	__u32 *lc_ipl_parmblock_ptr;
	unsigned int len, ncerts;
	struct kexec_buf buf;
	unsigned long addr;
	void *ptr, *end;
	int ret;

	buf.image = image;

	data->memsz = ALIGN(data->memsz, PAGE_SIZE);
	buf.mem = data->memsz;

	ptr = __va(ipl_cert_list_addr);
	end = ptr + ipl_cert_list_size;
	ncerts = 0;
	while (ptr < end) {
		ncerts++;
		len = *(unsigned int *)ptr;
		ptr += sizeof(len);
		ptr += len;
	}

	addr = data->memsz + data->report->size;
	addr += ncerts * sizeof(struct ipl_rb_certificate_entry);
	ptr = __va(ipl_cert_list_addr);
	while (ptr < end) {
		len = *(unsigned int *)ptr;
		ptr += sizeof(len);
		ipl_report_add_certificate(data->report, ptr, addr, len);
		addr += len;
		ptr += len;
	}

	ret = -ENOMEM;
	buf.buffer = ipl_report_finish(data->report);
	if (!buf.buffer)
		goto out;
	buf.bufsz = data->report->size;
	buf.memsz = buf.bufsz;
	image->arch.ipl_buf = buf.buffer;

	data->memsz += buf.memsz;

	lc_ipl_parmblock_ptr =
		data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr);
	*lc_ipl_parmblock_ptr = (__u32)buf.mem;

	if (image->type == KEXEC_TYPE_CRASH)
		buf.mem += crashk_res.start;

	ret = kexec_add_buffer(&buf);
out:
	return ret;
}

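/*
 * Load all components of the next kernel: the kernel image itself (via
 * the format specific add_kernel callback), the command line, an
 * optional initrd, the purgatory and the IPL report.  When the kernel
 * is loaded to address zero, a restart PSW pointing at the original
 * entry point is placed at the start of the image and image->start is
 * reset to zero.  Returns NULL on success or an ERR_PTR() on failure.
 */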
void *kexec_file_add_components(struct kimage *image,
				int (*add_kernel)(struct kimage *image,
						  struct s390_load_data *data))
{
	unsigned long max_command_line_size = LEGACY_COMMAND_LINE_SIZE;
	struct s390_load_data data = {0};
	unsigned long minsize;
	int ret;

	data.report = ipl_report_init(&ipl_block);
	if (IS_ERR(data.report))
		return data.report;

	ret = add_kernel(image, &data);
	if (ret)
		goto out;

	ret = -EINVAL;
	minsize = PARMAREA + offsetof(struct parmarea, command_line);
	if (image->kernel_buf_len < minsize)
		goto out;

	if (data.parm->max_command_line_size)
		max_command_line_size = data.parm->max_command_line_size;

	if (minsize + max_command_line_size < minsize)
		goto out;

	if (image->kernel_buf_len < minsize + max_command_line_size)
		goto out;

	if (image->cmdline_buf_len >= max_command_line_size)
		goto out;

	memcpy(data.parm->command_line, image->cmdline_buf,
	       image->cmdline_buf_len);

	if (image->type == KEXEC_TYPE_CRASH) {
		data.parm->oldmem_base = crashk_res.start;
		data.parm->oldmem_size = crashk_res.end - crashk_res.start + 1;
	}

	if (image->initrd_buf) {
		ret = kexec_file_add_initrd(image, &data);
		if (ret)
			goto out;
	}

	ret = kexec_file_add_purgatory(image, &data);
	if (ret)
		goto out;

	if (data.kernel_mem == 0) {
		unsigned long restart_psw =  0x0008000080000000UL;
		restart_psw += image->start;
		memcpy(data.kernel_buf, &restart_psw, sizeof(restart_psw));
		image->start = 0;
	}

	ret = kexec_file_add_ipl_report(image, &data);
out:
	ipl_report_free(data.report);
	return ERR_PTR(ret);
}

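/*
 * Apply the RELA relocations of one purgatory section.  Undefined,
 * common and out-of-range symbols are rejected; R_390_PLT32DBL entries
 * are resolved like R_390_PC32DBL before the fixup is handed to
 * arch_kexec_do_relocs().
 */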
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
				     Elf_Shdr *section,
				     const Elf_Shdr *relsec,
				     const Elf_Shdr *symtab)
{
	const char *strtab, *name, *shstrtab;
	const Elf_Shdr *sechdrs;
	Elf_Rela *relas;
	int i, r_type;
	int ret;

	/* String & section header string table */
	sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
	strtab = (char *)pi->ehdr + sechdrs[symtab->sh_link].sh_offset;
	shstrtab = (char *)pi->ehdr + sechdrs[pi->ehdr->e_shstrndx].sh_offset;

	relas = (void *)pi->ehdr + relsec->sh_offset;

	for (i = 0; i < relsec->sh_size / sizeof(*relas); i++) {
		const Elf_Sym *sym;	/* symbol to relocate */
		unsigned long addr;	/* final location after relocation */
		unsigned long val;	/* relocated symbol value */
		void *loc;		/* tmp location to modify */

		sym = (void *)pi->ehdr + symtab->sh_offset;
		sym += ELF64_R_SYM(relas[i].r_info);

		if (sym->st_name)
			name = strtab + sym->st_name;
		else
			name = shstrtab + sechdrs[sym->st_shndx].sh_name;

		if (sym->st_shndx == SHN_UNDEF) {
			pr_err("Undefined symbol: %s\n", name);
			return -ENOEXEC;
		}

		if (sym->st_shndx == SHN_COMMON) {
			pr_err("symbol '%s' in common section\n", name);
			return -ENOEXEC;
		}

		if (sym->st_shndx >= pi->ehdr->e_shnum &&
		    sym->st_shndx != SHN_ABS) {
			pr_err("Invalid section %d for symbol %s\n",
			       sym->st_shndx, name);
			return -ENOEXEC;
		}

		loc = pi->purgatory_buf;
		loc += section->sh_offset;
		loc += relas[i].r_offset;

		val = sym->st_value;
		if (sym->st_shndx != SHN_ABS)
			val += pi->sechdrs[sym->st_shndx].sh_addr;
		val += relas[i].r_addend;

		addr = section->sh_addr + relas[i].r_offset;

		r_type = ELF64_R_TYPE(relas[i].r_info);

		if (r_type == R_390_PLT32DBL)
			r_type = R_390_PC32DBL;

		ret = arch_kexec_do_relocs(r_type, loc, val, addr);
		if (ret) {
			pr_err("Unknown rela relocation: %d\n", r_type);
			return -ENOEXEC;
		}
	}
	return 0;
}

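/*
 * Free the IPL report buffer allocated in kexec_file_add_ipl_report()
 * and then run the common post-load cleanup.
 */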
int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	vfree(image->arch.ipl_buf);
	image->arch.ipl_buf = NULL;

	return kexec_image_post_load_cleanup_default(image);
}