// SPDX-License-Identifier: GPL-2.0
/*
 * s390 code for kexec_file_load system call
 *
 * Copyright IBM Corp. 2018
 *
 * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com>
 */

#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/verification.h>
#include <asm/boot_data.h>
#include <asm/ipl.h>
#include <asm/setup.h>

const struct kexec_file_ops * const kexec_file_loaders[] = {
	&s390_kexec_elf_ops,
	&s390_kexec_image_ops,
	NULL,
};

#ifdef CONFIG_KEXEC_VERIFY_SIG
/*
 * Module signature information block.
 *
 * The constituents of the signature section are, in order:
 *
 *	- Signer's name
 *	- Key identifier
 *	- Signature data
 *	- Information block
 */
struct module_signature {
	u8	algo;		/* Public-key crypto algorithm [0] */
	u8	hash;		/* Digest algorithm [0] */
	u8	id_type;	/* Key identifier type [PKEY_ID_PKCS7] */
	u8	signer_len;	/* Length of signer's name [0] */
	u8	key_id_len;	/* Length of key identifier [0] */
	u8	__pad[3];
	__be32	sig_len;	/* Length of signature data */
};

#define PKEY_ID_PKCS7 2

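/*
 * Verify the PKCS#7 signature appended to the kernel image.
 *
 * A signed image ends with the signature data, a struct module_signature
 * describing it and the MODULE_SIG_STRING marker, in that order.  Strip
 * these parts off the end and pass the remaining image together with the
 * signature to verify_pkcs7_signature().  Verification is skipped when the
 * system was not IPLed securely.
 */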
int s390_verify_sig(const char *kernel, unsigned long kernel_len)
{
	const unsigned long marker_len = sizeof(MODULE_SIG_STRING) - 1;
	struct module_signature *ms;
	unsigned long sig_len;

	/* Skip signature verification when not secure IPLed. */
	if (!ipl_secure_flag)
		return 0;

	if (marker_len > kernel_len)
		return -EKEYREJECTED;

	if (memcmp(kernel + kernel_len - marker_len, MODULE_SIG_STRING,
		   marker_len))
		return -EKEYREJECTED;
	kernel_len -= marker_len;

	ms = (void *)kernel + kernel_len - sizeof(*ms);
	kernel_len -= sizeof(*ms);

	sig_len = be32_to_cpu(ms->sig_len);
	if (sig_len >= kernel_len)
		return -EKEYREJECTED;
	kernel_len -= sig_len;

	if (ms->id_type != PKEY_ID_PKCS7)
		return -EKEYREJECTED;

	if (ms->algo != 0 ||
	    ms->hash != 0 ||
	    ms->signer_len != 0 ||
	    ms->key_id_len != 0 ||
	    ms->__pad[0] != 0 ||
	    ms->__pad[1] != 0 ||
	    ms->__pad[2] != 0) {
		return -EBADMSG;
	}

	return verify_pkcs7_signature(kernel, kernel_len,
				      kernel + kernel_len, sig_len,
				      VERIFY_USE_PLATFORM_KEYRING,
				      VERIFYING_MODULE_SIGNATURE,
				      NULL, NULL);
}
#endif /* CONFIG_KEXEC_VERIFY_SIG */

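/*
 * Tell the purgatory where to enter the new kernel and which type of image
 * it is by setting its kernel_entry and kernel_type symbols.  For a crash
 * kernel the location and size of the crashkernel memory area are passed
 * on via crash_start and crash_size as well.
 */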
static int kexec_file_update_purgatory(struct kimage *image,
				       struct s390_load_data *data)
{
	u64 entry, type;
	int ret;

	if (image->type == KEXEC_TYPE_CRASH) {
		entry = STARTUP_KDUMP_OFFSET;
		type = KEXEC_TYPE_CRASH;
	} else {
		entry = STARTUP_NORMAL_OFFSET;
		type = KEXEC_TYPE_DEFAULT;
	}

	ret = kexec_purgatory_get_set_symbol(image, "kernel_entry", &entry,
					     sizeof(entry), false);
	if (ret)
		return ret;

	ret = kexec_purgatory_get_set_symbol(image, "kernel_type", &type,
					     sizeof(type), false);
	if (ret)
		return ret;

	if (image->type == KEXEC_TYPE_CRASH) {
		u64 crash_size;

		ret = kexec_purgatory_get_set_symbol(image, "crash_start",
						     &crashk_res.start,
						     sizeof(crashk_res.start),
						     false);
		if (ret)
			return ret;

		crash_size = crashk_res.end - crashk_res.start + 1;
		ret = kexec_purgatory_get_set_symbol(image, "crash_size",
						     &crash_size,
						     sizeof(crash_size),
						     false);
	}
	return ret;
}

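/*
 * Load the purgatory as the next page aligned segment behind the previously
 * added segments and provide the symbol values it needs at run time.
 */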
static int kexec_file_add_purgatory(struct kimage *image,
				    struct s390_load_data *data)
{
	struct kexec_buf buf;
	int ret;

	buf.image = image;

	data->memsz = ALIGN(data->memsz, PAGE_SIZE);
	buf.mem = data->memsz;
	if (image->type == KEXEC_TYPE_CRASH)
		buf.mem += crashk_res.start;

	ret = kexec_load_purgatory(image, &buf);
	if (ret)
		return ret;
	data->memsz += buf.memsz;

	return kexec_file_update_purgatory(image, data);
}

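/*
 * Add the initrd as the next page aligned segment and record its load
 * address and size in the parameter area of the kernel image.
 */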
static int kexec_file_add_initrd(struct kimage *image,
				 struct s390_load_data *data)
{
	struct kexec_buf buf;
	int ret;

	buf.image = image;

	buf.buffer = image->initrd_buf;
	buf.bufsz = image->initrd_buf_len;

	data->memsz = ALIGN(data->memsz, PAGE_SIZE);
	buf.mem = data->memsz;
	if (image->type == KEXEC_TYPE_CRASH)
		buf.mem += crashk_res.start;
	buf.memsz = buf.bufsz;

	data->parm->initrd_start = buf.mem;
	data->parm->initrd_size = buf.memsz;
	data->memsz += buf.memsz;

	ret = kexec_add_buffer(&buf);
	if (ret)
		return ret;

	return ipl_report_add_component(data->report, &buf, 0, 0);
}

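/*
 * Add the certificates from the IPL certificate list to the IPL report,
 * place the finished report as the final segment and store its address in
 * the kernel image.
 */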
static int kexec_file_add_ipl_report(struct kimage *image,
				     struct s390_load_data *data)
{
	__u32 *lc_ipl_parmblock_ptr;
	unsigned int len, ncerts;
	struct kexec_buf buf;
	unsigned long addr;
	void *ptr, *end;

	buf.image = image;

	data->memsz = ALIGN(data->memsz, PAGE_SIZE);
	buf.mem = data->memsz;
	if (image->type == KEXEC_TYPE_CRASH)
		buf.mem += crashk_res.start;

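	/* Count the certificates in the IPL certificate list. */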
	ptr = (void *)ipl_cert_list_addr;
	end = ptr + ipl_cert_list_size;
	ncerts = 0;
	while (ptr < end) {
		ncerts++;
		len = *(unsigned int *)ptr;
		ptr += sizeof(len);
		ptr += len;
	}

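	/*
	 * Add the certificates to the report.  Their data is copied behind
	 * the report and its certificate entries when the report is
	 * finished, so compute the load address of the first certificate
	 * accordingly.
	 */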
	addr = data->memsz + data->report->size;
	addr += ncerts * sizeof(struct ipl_rb_certificate_entry);
	/* For the crash kernel the segment lives in the crashkernel area. */
	if (image->type == KEXEC_TYPE_CRASH)
		addr += crashk_res.start;
	ptr = (void *)ipl_cert_list_addr;
	while (ptr < end) {
		len = *(unsigned int *)ptr;
		ptr += sizeof(len);
		ipl_report_add_certificate(data->report, ptr, addr, len);
		addr += len;
		ptr += len;
	}

	buf.buffer = ipl_report_finish(data->report);
	buf.bufsz = data->report->size;
	buf.memsz = buf.bufsz;

	data->memsz += buf.memsz;

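	/*
	 * Store the address of the IPL report in the ipl_parmblock_ptr slot
	 * of the kernel image so that the new kernel can find it.
	 */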
	lc_ipl_parmblock_ptr =
		data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr);
	*lc_ipl_parmblock_ptr = (__u32)buf.mem;

	return kexec_add_buffer(&buf);
}

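/*
 * Common part of the kexec_file loaders: add the kernel via the add_kernel
 * callback, copy the command line and then add initrd, purgatory and the
 * IPL report.  Returns NULL on success or an ERR_PTR() on failure.
 */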
void *kexec_file_add_components(struct kimage *image,
				int (*add_kernel)(struct kimage *image,
						  struct s390_load_data *data))
{
	struct s390_load_data data = {0};
	int ret;

	data.report = ipl_report_init(&ipl_block);
	if (IS_ERR(data.report))
		return data.report;

	ret = add_kernel(image, &data);
	if (ret)
		goto out;

	if (image->cmdline_buf_len >= ARCH_COMMAND_LINE_SIZE) {
		ret = -EINVAL;
		goto out;
	}
	memcpy(data.parm->command_line, image->cmdline_buf,
	       image->cmdline_buf_len);

	if (image->type == KEXEC_TYPE_CRASH) {
		data.parm->oldmem_base = crashk_res.start;
		data.parm->oldmem_size = crashk_res.end - crashk_res.start + 1;
	}

	if (image->initrd_buf) {
		ret = kexec_file_add_initrd(image, &data);
		if (ret)
			goto out;
	}

	ret = kexec_file_add_purgatory(image, &data);
	if (ret)
		goto out;

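	/*
	 * If the kernel is loaded at address 0, write a PSW holding the
	 * original entry point into the first eight bytes of the image and
	 * enter the new kernel at address 0 instead.
	 */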
	if (data.kernel_mem == 0) {
		unsigned long restart_psw = 0x0008000080000000UL;

		restart_psw += image->start;
		memcpy(data.kernel_buf, &restart_psw, sizeof(restart_psw));
		image->start = 0;
	}

	ret = kexec_file_add_ipl_report(image, &data);
out:
	ipl_report_free(data.report);
	return ERR_PTR(ret);
}

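/*
 * Apply the RELA relocations for the purgatory.  The relocations are written
 * to the temporary purgatory buffer while the values are computed from the
 * final load addresses of the referenced sections.
 */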
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
				     Elf_Shdr *section,
				     const Elf_Shdr *relsec,
				     const Elf_Shdr *symtab)
{
	Elf_Rela *relas;
	int i, r_type;

	relas = (void *)pi->ehdr + relsec->sh_offset;

	for (i = 0; i < relsec->sh_size / sizeof(*relas); i++) {
		const Elf_Sym *sym;	/* symbol to relocate */
		unsigned long addr;	/* final location after relocation */
		unsigned long val;	/* relocated symbol value */
		void *loc;		/* tmp location to modify */

		sym = (void *)pi->ehdr + symtab->sh_offset;
		sym += ELF64_R_SYM(relas[i].r_info);

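		/* Reject undefined, common or otherwise bogus symbols. */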
		if (sym->st_shndx == SHN_UNDEF)
			return -ENOEXEC;

		if (sym->st_shndx == SHN_COMMON)
			return -ENOEXEC;

		if (sym->st_shndx >= pi->ehdr->e_shnum &&
		    sym->st_shndx != SHN_ABS)
			return -ENOEXEC;

		loc = pi->purgatory_buf;
		loc += section->sh_offset;
		loc += relas[i].r_offset;

		val = sym->st_value;
		if (sym->st_shndx != SHN_ABS)
			val += pi->sechdrs[sym->st_shndx].sh_addr;
		val += relas[i].r_addend;

		addr = section->sh_addr + relas[i].r_offset;

		r_type = ELF64_R_TYPE(relas[i].r_info);
		/* Fail the load on relocation types that cannot be handled. */
		if (arch_kexec_do_relocs(r_type, loc, val, addr))
			return -ENOEXEC;
	}
	return 0;
}

int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
				  unsigned long buf_len)
{
	/*
	 * A kernel must be at least large enough to contain head.S.  During
	 * load, memory in head.S will be accessed, e.g. to register the next
	 * command line.  If the next kernel were smaller, the current kernel
	 * would panic at load.
	 */
	if (buf_len < HEAD_END)
		return -ENOEXEC;

	return kexec_image_probe_default(image, buf, buf_len);
}