xref: /openbmc/linux/kernel/kexec_file.c (revision 6c9111bc)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * kexec: kexec_file_load system call
 *
 * Copyright (C) 2014 Red Hat Inc.
 * Authors:
 *      Vivek Goyal <vgoyal@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/kexec.h>
#include <linux/memblock.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/ima.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/kernel.h>
#include <linux/kernel_read_file.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include "kexec_internal.h"

static int kexec_calculate_store_digests(struct kimage *image);

/*
 * Currently this is the only default function that is exported, as some
 * architectures need it to do additional handling.
 * In the future, other default functions may be exported too if required.
 */
int kexec_image_probe_default(struct kimage *image, void *buf,
			      unsigned long buf_len)
{
	const struct kexec_file_ops * const *fops;
	int ret = -ENOEXEC;

	for (fops = &kexec_file_loaders[0]; *fops && (*fops)->probe; ++fops) {
		ret = (*fops)->probe(buf, buf_len);
		if (!ret) {
			image->fops = *fops;
			return ret;
		}
	}

	return ret;
}
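
/*
 * For reference, a hedged sketch of how an architecture registers its image
 * loaders: kexec_file_loaders is a NULL-terminated, arch-provided array of
 * kexec_file_ops (the probe loop above relies on the NULL terminator). x86,
 * for example, registers its bzImage loader roughly like this (illustrative):
 *
 *	const struct kexec_file_ops * const kexec_file_loaders[] = {
 *		&kexec_bzImage64_ops,
 *		NULL
 *	};
 */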

/* Architectures can provide this probe function */
int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
					 unsigned long buf_len)
{
	return kexec_image_probe_default(image, buf, buf_len);
}

static void *kexec_image_load_default(struct kimage *image)
{
	if (!image->fops || !image->fops->load)
		return ERR_PTR(-ENOEXEC);

	return image->fops->load(image, image->kernel_buf,
				 image->kernel_buf_len, image->initrd_buf,
				 image->initrd_buf_len, image->cmdline_buf,
				 image->cmdline_buf_len);
}

void * __weak arch_kexec_kernel_image_load(struct kimage *image)
{
	return kexec_image_load_default(image);
}

int kexec_image_post_load_cleanup_default(struct kimage *image)
{
	if (!image->fops || !image->fops->cleanup)
		return 0;

	return image->fops->cleanup(image->image_loader_data);
}

int __weak arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	return kexec_image_post_load_cleanup_default(image);
}

#ifdef CONFIG_KEXEC_SIG
static int kexec_image_verify_sig_default(struct kimage *image, void *buf,
					  unsigned long buf_len)
{
	if (!image->fops || !image->fops->verify_sig) {
		pr_debug("kernel loader does not support signature verification.\n");
		return -EKEYREJECTED;
	}

	return image->fops->verify_sig(buf, buf_len);
}

int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
					unsigned long buf_len)
{
	return kexec_image_verify_sig_default(image, buf, buf_len);
}
#endif
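
/*
 * A hedged sketch of the loader side: signature verification is opted into
 * by filling ->verify_sig in the loader's kexec_file_ops. On x86, for
 * instance, the bzImage loader wires up a PE signature check roughly like
 * this (illustrative; exact ops and config symbols vary by architecture
 * and kernel version):
 *
 *	const struct kexec_file_ops kexec_bzImage64_ops = {
 *		.probe = bzImage64_probe,
 *		.load = bzImage64_load,
 *		.cleanup = bzImage64_cleanup,
 *	#ifdef CONFIG_KEXEC_BZIMAGE_VERIFY_SIG
 *		.verify_sig = bzImage64_verify_sig,
 *	#endif
 *	};
 */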

/*
 * arch_kexec_apply_relocations_add - apply relocations of type RELA
 * @pi:		Purgatory to be relocated.
 * @section:	Section to which the relocations apply.
 * @relsec:	Section containing RELAs.
 * @symtab:	Corresponding symtab.
 *
 * Return: 0 on success, negative errno on error.
 */
int __weak
arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section,
				 const Elf_Shdr *relsec, const Elf_Shdr *symtab)
{
	pr_err("RELA relocation unsupported.\n");
	return -ENOEXEC;
}

/*
 * arch_kexec_apply_relocations - apply relocations of type REL
 * @pi:		Purgatory to be relocated.
 * @section:	Section to which the relocations apply.
 * @relsec:	Section containing RELs.
 * @symtab:	Corresponding symtab.
 *
 * Return: 0 on success, negative errno on error.
 */
int __weak
arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section,
			     const Elf_Shdr *relsec, const Elf_Shdr *symtab)
{
	pr_err("REL relocation unsupported.\n");
	return -ENOEXEC;
}

/*
 * Free up memory used by the kernel, initrd, and command line. These are
 * temporary memory allocations which are not needed any more after the
 * buffers have been loaded into separate segments and have been copied
 * elsewhere.
 */
void kimage_file_post_load_cleanup(struct kimage *image)
{
	struct purgatory_info *pi = &image->purgatory_info;

	vfree(image->kernel_buf);
	image->kernel_buf = NULL;

	vfree(image->initrd_buf);
	image->initrd_buf = NULL;

	kfree(image->cmdline_buf);
	image->cmdline_buf = NULL;

	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;

	vfree(pi->sechdrs);
	pi->sechdrs = NULL;

	/* See if the architecture has anything to clean up post load */
	arch_kimage_file_post_load_cleanup(image);

	/*
	 * The above call should have called into the boot loader to free up
	 * any data stored in kimage->image_loader_data. It should be OK now
	 * to free it.
	 */
	kfree(image->image_loader_data);
	image->image_loader_data = NULL;
}

#ifdef CONFIG_KEXEC_SIG
static int
kimage_validate_signature(struct kimage *image)
{
	int ret;

	ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf,
					   image->kernel_buf_len);
	if (ret) {

		if (IS_ENABLED(CONFIG_KEXEC_SIG_FORCE)) {
			pr_notice("Enforced kernel signature verification failed (%d).\n", ret);
			return ret;
		}

		/*
		 * If IMA is guaranteed to appraise a signature on the kexec
		 * image, permit it even if the kernel is otherwise locked
		 * down.
		 */
		if (!ima_appraise_signature(READING_KEXEC_IMAGE) &&
		    security_locked_down(LOCKDOWN_KEXEC))
			return -EPERM;

		pr_debug("kernel signature verification failed (%d).\n", ret);
	}

	return 0;
}
#endif

/*
 * In file mode, the list of segments is prepared by the kernel. Copy the
 * relevant data from user space, do error checking, and prepare the
 * segment list.
 */
static int
kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd,
			     const char __user *cmdline_ptr,
			     unsigned long cmdline_len, unsigned flags)
{
	int ret;
	void *ldata;

	ret = kernel_read_file_from_fd(kernel_fd, 0, &image->kernel_buf,
				       INT_MAX, NULL, READING_KEXEC_IMAGE);
	if (ret < 0)
		return ret;
	image->kernel_buf_len = ret;

	/* Call arch image probe handlers */
	ret = arch_kexec_kernel_image_probe(image, image->kernel_buf,
					    image->kernel_buf_len);
	if (ret)
		goto out;

#ifdef CONFIG_KEXEC_SIG
	ret = kimage_validate_signature(image);

	if (ret)
		goto out;
#endif
	/* It is possible that no initramfs is being loaded */
	if (!(flags & KEXEC_FILE_NO_INITRAMFS)) {
		ret = kernel_read_file_from_fd(initrd_fd, 0, &image->initrd_buf,
					       INT_MAX, NULL,
					       READING_KEXEC_INITRAMFS);
		if (ret < 0)
			goto out;
		image->initrd_buf_len = ret;
		ret = 0;
	}

	if (cmdline_len) {
		image->cmdline_buf = memdup_user(cmdline_ptr, cmdline_len);
		if (IS_ERR(image->cmdline_buf)) {
			ret = PTR_ERR(image->cmdline_buf);
			image->cmdline_buf = NULL;
			goto out;
		}

		image->cmdline_buf_len = cmdline_len;

		/* The command line must be a NUL-terminated string */
		if (image->cmdline_buf[cmdline_len - 1] != '\0') {
			ret = -EINVAL;
			goto out;
		}

		ima_kexec_cmdline(kernel_fd, image->cmdline_buf,
				  image->cmdline_buf_len - 1);
	}

	/* IMA needs to pass the measurement list to the next kernel. */
	ima_add_kexec_buffer(image);

	/* Call arch image load handlers */
	ldata = arch_kexec_kernel_image_load(image);

	if (IS_ERR(ldata)) {
		ret = PTR_ERR(ldata);
		goto out;
	}

	image->image_loader_data = ldata;
out:
	/* In case of error, free up all allocated memory in this function */
	if (ret)
		kimage_file_post_load_cleanup(image);
	return ret;
}

static int
kimage_file_alloc_init(struct kimage **rimage, int kernel_fd,
		       int initrd_fd, const char __user *cmdline_ptr,
		       unsigned long cmdline_len, unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_FILE_ON_CRASH;

	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->file_mode = 1;

	if (kexec_on_panic) {
		/* Enable special crash kernel control page alloc policy. */
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}

	ret = kimage_file_prepare_segments(image, kernel_fd, initrd_fd,
					   cmdline_ptr, cmdline_len, flags);
	if (ret)
		goto out_free_image;

	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_post_load_bufs;

	ret = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_post_load_bufs;
	}

	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_post_load_bufs:
	kimage_file_post_load_cleanup(image);
out_free_image:
	kfree(image);
	return ret;
}

SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd,
		unsigned long, cmdline_len, const char __user *, cmdline_ptr,
		unsigned long, flags)
{
	int ret = 0, i;
	struct kimage **dest_image, *image;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return -EPERM;

	/* Make sure we have a legal set of flags */
	if (flags != (flags & KEXEC_FILE_FLAGS))
		return -EINVAL;

	image = NULL;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	dest_image = &kexec_image;
	if (flags & KEXEC_FILE_ON_CRASH) {
		dest_image = &kexec_crash_image;
		if (kexec_crash_image)
			arch_kexec_unprotect_crashkres();
	}

	if (flags & KEXEC_FILE_UNLOAD)
		goto exchange;

	/*
	 * In case of crash, the new kernel gets loaded in the reserved
	 * region. It is the same memory where an old crash kernel might be
	 * loaded. Free any current crash dump kernel before we corrupt it.
	 */
	if (flags & KEXEC_FILE_ON_CRASH)
		kimage_free(xchg(&kexec_crash_image, NULL));

	ret = kimage_file_alloc_init(&image, kernel_fd, initrd_fd, cmdline_ptr,
				     cmdline_len, flags);
	if (ret)
		goto out;

	ret = machine_kexec_prepare(image);
	if (ret)
		goto out;

	/*
	 * Some architectures (like s390) may touch the crash memory before
	 * machine_kexec_prepare(); we must copy the vmcoreinfo data after it.
	 */
	ret = kimage_crash_copy_vmcoreinfo(image);
	if (ret)
		goto out;

	ret = kexec_calculate_store_digests(image);
	if (ret)
		goto out;

	for (i = 0; i < image->nr_segments; i++) {
		struct kexec_segment *ksegment;

		ksegment = &image->segment[i];
		pr_debug("Loading segment %d: buf=0x%p bufsz=0x%zx mem=0x%lx memsz=0x%zx\n",
			 i, ksegment->buf, ksegment->bufsz, ksegment->mem,
			 ksegment->memsz);

		ret = kimage_load_segment(image, &image->segment[i]);
		if (ret)
			goto out;
	}

	kimage_terminate(image);

	ret = machine_kexec_post_load(image);
	if (ret)
		goto out;

	/*
	 * Free up any temporary buffers allocated which are not needed
	 * after the image has been loaded.
	 */
	kimage_file_post_load_cleanup(image);
exchange:
	image = xchg(dest_image, image);
out:
	if ((flags & KEXEC_FILE_ON_CRASH) && kexec_crash_image)
		arch_kexec_protect_crashkres();

	mutex_unlock(&kexec_mutex);
	kimage_free(image);
	return ret;
}
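
/*
 * Userspace usage, for illustration: there is no glibc wrapper, so the
 * syscall is invoked directly. Note that cmdline_len must count the
 * trailing NUL, which kimage_file_prepare_segments() checks for:
 *
 *	const char *cmdline = "root=/dev/sda1 console=ttyS0";
 *
 *	ret = syscall(__NR_kexec_file_load, kernel_fd, initrd_fd,
 *		      strlen(cmdline) + 1, cmdline, 0);
 */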

static int locate_mem_hole_top_down(unsigned long start, unsigned long end,
				    struct kexec_buf *kbuf)
{
	struct kimage *image = kbuf->image;
	unsigned long temp_start, temp_end;

	temp_end = min(end, kbuf->buf_max);
	temp_start = temp_end - kbuf->memsz;

	do {
		/* align down the start address */
		temp_start = temp_start & (~(kbuf->buf_align - 1));

		if (temp_start < start || temp_start < kbuf->buf_min)
			return 0;

		temp_end = temp_start + kbuf->memsz - 1;

		/*
		 * Make sure this does not conflict with any existing
		 * segments.
		 */
		if (kimage_is_destination_range(image, temp_start, temp_end)) {
			temp_start = temp_start - PAGE_SIZE;
			continue;
		}

		/* We found a suitable memory range */
		break;
	} while (1);

	/* If we are here, we found a suitable memory range */
	kbuf->mem = temp_start;

	/* Success, stop navigating through remaining System RAM ranges */
	return 1;
}

static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end,
				     struct kexec_buf *kbuf)
{
	struct kimage *image = kbuf->image;
	unsigned long temp_start, temp_end;

	temp_start = max(start, kbuf->buf_min);

	do {
		temp_start = ALIGN(temp_start, kbuf->buf_align);
		temp_end = temp_start + kbuf->memsz - 1;

		if (temp_end > end || temp_end > kbuf->buf_max)
			return 0;
		/*
		 * Make sure this does not conflict with any existing
		 * segments.
		 */
		if (kimage_is_destination_range(image, temp_start, temp_end)) {
			temp_start = temp_start + PAGE_SIZE;
			continue;
		}

		/* We found a suitable memory range */
		break;
	} while (1);

	/* If we are here, we found a suitable memory range */
	kbuf->mem = temp_start;

	/* Success, stop navigating through remaining System RAM ranges */
	return 1;
}

static int locate_mem_hole_callback(struct resource *res, void *arg)
{
	struct kexec_buf *kbuf = (struct kexec_buf *)arg;
	u64 start = res->start, end = res->end;
	unsigned long sz = end - start + 1;

	/* Returning 0 moves the walk on to the next memory range */

	/* Don't use memory that will be detected and handled by a driver. */
	if (res->flags & IORESOURCE_SYSRAM_DRIVER_MANAGED)
		return 0;

	if (sz < kbuf->memsz)
		return 0;

	if (end < kbuf->buf_min || start > kbuf->buf_max)
		return 0;

	/*
	 * Allocate memory top-down within the RAM range; otherwise, allocate
	 * bottom-up.
	 */
	if (kbuf->top_down)
		return locate_mem_hole_top_down(start, end, kbuf);
	return locate_mem_hole_bottom_up(start, end, kbuf);
}
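
/*
 * Worked example with assumed numbers: for memsz = 0x3000 and
 * buf_align = 0x1000, a top-down search of a range ending at byte
 * 0x1ffffff first tries temp_start = 0x1ffffff - 0x3000 = 0x1ffcfff,
 * aligns it down to 0x1ffc000, and checks [0x1ffc000, 0x1ffefff] against
 * existing segments; on a conflict it steps down one page at a time
 * until it finds a hole or falls below start/buf_min.
 */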

#ifdef CONFIG_ARCH_KEEP_MEMBLOCK
static int kexec_walk_memblock(struct kexec_buf *kbuf,
			       int (*func)(struct resource *, void *))
{
	int ret = 0;
	u64 i;
	phys_addr_t mstart, mend;
	struct resource res = { };

	if (kbuf->image->type == KEXEC_TYPE_CRASH)
		return func(&crashk_res, kbuf);

	if (kbuf->top_down) {
		for_each_free_mem_range_reverse(i, NUMA_NO_NODE, MEMBLOCK_NONE,
						&mstart, &mend, NULL) {
			/*
			 * In memblock, end points to the first byte after the
			 * range while in kexec, end points to the last byte
			 * in the range.
			 */
			res.start = mstart;
			res.end = mend - 1;
			ret = func(&res, kbuf);
			if (ret)
				break;
		}
	} else {
		for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
					&mstart, &mend, NULL) {
			/*
			 * In memblock, end points to the first byte after the
			 * range while in kexec, end points to the last byte
			 * in the range.
			 */
			res.start = mstart;
			res.end = mend - 1;
			ret = func(&res, kbuf);
			if (ret)
				break;
		}
	}

	return ret;
}
#else
static int kexec_walk_memblock(struct kexec_buf *kbuf,
			       int (*func)(struct resource *, void *))
{
	return 0;
}
#endif

/**
 * kexec_walk_resources - call @func on free memory regions
 * @kbuf:	Context info for the search. Also passed to @func.
 * @func:	Function to call for each memory region.
 *
 * Return: The memory walk will stop when @func returns a non-zero value
 * and that value will be returned. If all free regions are visited without
 * @func returning non-zero, then zero will be returned.
 */
static int kexec_walk_resources(struct kexec_buf *kbuf,
				int (*func)(struct resource *, void *))
{
	if (kbuf->image->type == KEXEC_TYPE_CRASH)
		return walk_iomem_res_desc(crashk_res.desc,
					   IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
					   crashk_res.start, crashk_res.end,
					   kbuf, func);
	else
		return walk_system_ram_res(0, ULONG_MAX, kbuf, func);
}

/**
 * kexec_locate_mem_hole - find free memory for the purgatory or the next kernel
 * @kbuf:	Parameters for the memory search.
 *
 * On success, kbuf->mem will have the start address of the memory region found.
 *
 * Return: 0 on success, negative errno on error.
 */
int kexec_locate_mem_hole(struct kexec_buf *kbuf)
{
	int ret;

	/* The arch already knows where to place the buffer */
	if (kbuf->mem != KEXEC_BUF_MEM_UNKNOWN)
		return 0;

	if (!IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
		ret = kexec_walk_resources(kbuf, locate_mem_hole_callback);
	else
		ret = kexec_walk_memblock(kbuf, locate_mem_hole_callback);

	return ret == 1 ? 0 : -EADDRNOTAVAIL;
}

/**
 * arch_kexec_locate_mem_hole - Find free memory to place the segments.
 * @kbuf:                       Parameters for the memory search.
 *
 * On success, kbuf->mem will have the start address of the memory region found.
 *
 * Return: 0 on success, negative errno on error.
 */
int __weak arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
{
	return kexec_locate_mem_hole(kbuf);
}

/**
 * kexec_add_buffer - place a buffer in a kexec segment
 * @kbuf:	Buffer contents and memory parameters.
 *
 * This function assumes that kexec_mutex is held.
 * On successful return, @kbuf->mem will have the physical address of
 * the buffer in memory.
 *
 * Return: 0 on success, negative errno on error.
 */
int kexec_add_buffer(struct kexec_buf *kbuf)
{
	struct kexec_segment *ksegment;
	int ret;

	/* Currently, adding a segment this way is allowed only in file mode */
	if (!kbuf->image->file_mode)
		return -EINVAL;

	if (kbuf->image->nr_segments >= KEXEC_SEGMENT_MAX)
		return -EINVAL;

	/*
	 * Make sure we are not trying to add a buffer after allocating
	 * control pages. All segments need to be placed before any control
	 * pages are allocated, as the control page allocation logic goes
	 * through the list of segments to make sure there are no destination
	 * overlaps.
	 */
	if (!list_empty(&kbuf->image->control_pages)) {
		WARN_ON(1);
		return -EINVAL;
	}

	/* Ensure minimum alignment needed for segments. */
	kbuf->memsz = ALIGN(kbuf->memsz, PAGE_SIZE);
	kbuf->buf_align = max(kbuf->buf_align, PAGE_SIZE);

	/* Walk the RAM ranges and allocate a suitable range for the buffer */
	ret = arch_kexec_locate_mem_hole(kbuf);
	if (ret)
		return ret;

	/* Found a suitable memory range */
	ksegment = &kbuf->image->segment[kbuf->image->nr_segments];
	ksegment->kbuf = kbuf->buffer;
	ksegment->bufsz = kbuf->bufsz;
	ksegment->mem = kbuf->mem;
	ksegment->memsz = kbuf->memsz;
	kbuf->image->nr_segments++;
	return 0;
}
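
/*
 * A hedged sketch of a typical caller in an architecture's loader (field
 * values are illustrative):
 *
 *	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
 *				  .buf_max = ULONG_MAX, .top_down = true };
 *
 *	kbuf.buffer = kernel;
 *	kbuf.bufsz = kernel_len;
 *	kbuf.memsz = kbuf.bufsz;
 *	kbuf.buf_align = PAGE_SIZE;
 *	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
 *	ret = kexec_add_buffer(&kbuf);
 *
 * On success, kbuf.mem holds the physical address chosen for the segment.
 */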

/* Calculate and store the digest of segments */
static int kexec_calculate_store_digests(struct kimage *image)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret = 0, i, j, zero_buf_sz, sha_region_sz;
	size_t desc_size, nullsz;
	char *digest;
	void *zero_buf;
	struct kexec_sha_region *sha_regions;
	struct purgatory_info *pi = &image->purgatory_info;

	if (!IS_ENABLED(CONFIG_ARCH_HAS_KEXEC_PURGATORY))
		return 0;

	zero_buf = __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT);
	zero_buf_sz = PAGE_SIZE;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm)) {
		ret = PTR_ERR(tfm);
		goto out;
	}

	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
	desc = kzalloc(desc_size, GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region);
	sha_regions = vzalloc(sha_region_sz);
	if (!sha_regions) {
		ret = -ENOMEM;
		goto out_free_desc;
	}

	desc->tfm   = tfm;

	ret = crypto_shash_init(desc);
	if (ret < 0)
		goto out_free_sha_regions;

	digest = kzalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
	if (!digest) {
		ret = -ENOMEM;
		goto out_free_sha_regions;
	}

	for (j = i = 0; i < image->nr_segments; i++) {
		struct kexec_segment *ksegment;

		ksegment = &image->segment[i];
		/*
		 * Skip the purgatory segment, as it will be modified once we
		 * put the digest info into it.
		 */
		if (ksegment->kbuf == pi->purgatory_buf)
			continue;

		ret = crypto_shash_update(desc, ksegment->kbuf,
					  ksegment->bufsz);
		if (ret)
			break;

		/*
		 * Assume the rest of the buffer is filled with zeroes and
		 * update the digest accordingly.
		 */
		nullsz = ksegment->memsz - ksegment->bufsz;
		while (nullsz) {
			unsigned long bytes = nullsz;

			if (bytes > zero_buf_sz)
				bytes = zero_buf_sz;
			ret = crypto_shash_update(desc, zero_buf, bytes);
			if (ret)
				break;
			nullsz -= bytes;
		}

		if (ret)
			break;

		sha_regions[j].start = ksegment->mem;
		sha_regions[j].len = ksegment->memsz;
		j++;
	}

	if (!ret) {
		ret = crypto_shash_final(desc, digest);
		if (ret)
			goto out_free_digest;
		ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha_regions",
						     sha_regions, sha_region_sz, 0);
		if (ret)
			goto out_free_digest;

		ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha256_digest",
						     digest, SHA256_DIGEST_SIZE, 0);
		if (ret)
			goto out_free_digest;
	}

out_free_digest:
	kfree(digest);
out_free_sha_regions:
	vfree(sha_regions);
out_free_desc:
	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
out:
	return ret;
}
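
/*
 * The consumer of the two symbols set above is the purgatory blob itself:
 * before jumping to the new kernel it recomputes SHA-256 over every
 * (start, len) entry and compares the result with purgatory_sha256_digest.
 * A hedged sketch of that purgatory-side check (see the per-arch
 * purgatory.c; details vary):
 *
 *	for (ptr = sha_regions; ptr < end; ptr++)
 *		sha256_update(&sctx, (uint8_t *)(ptr->start), ptr->len);
 *	sha256_final(&sctx, digest);
 *	if (memcmp(digest, purgatory_sha256_digest, sizeof(digest)))
 *		return 1;
 *
 * A non-zero return makes purgatory refuse to boot the corrupted kernel.
 */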

#ifdef CONFIG_ARCH_HAS_KEXEC_PURGATORY
/*
 * kexec_purgatory_setup_kbuf - prepare buffer to load purgatory.
 * @pi:		Purgatory to be loaded.
 * @kbuf:	Buffer to set up.
 *
 * Allocates the memory needed for the buffer. The caller is responsible
 * for freeing the memory after use.
 *
 * Return: 0 on success, negative errno on error.
 */
static int kexec_purgatory_setup_kbuf(struct purgatory_info *pi,
				      struct kexec_buf *kbuf)
{
	const Elf_Shdr *sechdrs;
	unsigned long bss_align;
	unsigned long bss_sz;
	unsigned long align;
	int i, ret;

	sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
	kbuf->buf_align = bss_align = 1;
	kbuf->bufsz = bss_sz = 0;

	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		align = sechdrs[i].sh_addralign;
		if (sechdrs[i].sh_type != SHT_NOBITS) {
			if (kbuf->buf_align < align)
				kbuf->buf_align = align;
			kbuf->bufsz = ALIGN(kbuf->bufsz, align);
			kbuf->bufsz += sechdrs[i].sh_size;
		} else {
			if (bss_align < align)
				bss_align = align;
			bss_sz = ALIGN(bss_sz, align);
			bss_sz += sechdrs[i].sh_size;
		}
	}
	kbuf->bufsz = ALIGN(kbuf->bufsz, bss_align);
	kbuf->memsz = kbuf->bufsz + bss_sz;
	if (kbuf->buf_align < bss_align)
		kbuf->buf_align = bss_align;

	kbuf->buffer = vzalloc(kbuf->bufsz);
	if (!kbuf->buffer)
		return -ENOMEM;
	pi->purgatory_buf = kbuf->buffer;

	ret = kexec_add_buffer(kbuf);
	if (ret)
		goto out;

	return 0;
out:
	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;
	return ret;
}
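
/*
 * Layout example with assumed section sizes: given .text of size 0x1000
 * (align 16), .data of size 0x200 (align 8) and .bss of size 0x400
 * (align 32), the loop above yields bufsz = 0x1200 and bss_sz = 0x400,
 * so memsz = 0x1600. Only the SHT_PROGBITS part (bufsz) is backed by
 * pi->purgatory_buf; the bss tail exists purely as extra memsz that the
 * destination hole must accommodate.
 */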

/*
 * kexec_purgatory_setup_sechdrs - prepares the pi->sechdrs buffer.
 * @pi:		Purgatory to be loaded.
 * @kbuf:	Buffer prepared to store purgatory.
 *
 * Allocates the memory needed for the buffer. The caller is responsible
 * for freeing the memory after use.
 *
 * Return: 0 on success, negative errno on error.
 */
static int kexec_purgatory_setup_sechdrs(struct purgatory_info *pi,
					 struct kexec_buf *kbuf)
{
	unsigned long bss_addr;
	unsigned long offset;
	Elf_Shdr *sechdrs;
	int i;

	/*
	 * The section headers in kexec_purgatory are read-only. To make them
	 * modifiable, make a temporary copy.
	 */
	sechdrs = vzalloc(array_size(sizeof(Elf_Shdr), pi->ehdr->e_shnum));
	if (!sechdrs)
		return -ENOMEM;
	memcpy(sechdrs, (void *)pi->ehdr + pi->ehdr->e_shoff,
	       pi->ehdr->e_shnum * sizeof(Elf_Shdr));
	pi->sechdrs = sechdrs;

	offset = 0;
	bss_addr = kbuf->mem + kbuf->bufsz;
	kbuf->image->start = pi->ehdr->e_entry;

	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		unsigned long align;
		void *src, *dst;

		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		align = sechdrs[i].sh_addralign;
		if (sechdrs[i].sh_type == SHT_NOBITS) {
			bss_addr = ALIGN(bss_addr, align);
			sechdrs[i].sh_addr = bss_addr;
			bss_addr += sechdrs[i].sh_size;
			continue;
		}

		offset = ALIGN(offset, align);
		if (sechdrs[i].sh_flags & SHF_EXECINSTR &&
		    pi->ehdr->e_entry >= sechdrs[i].sh_addr &&
		    pi->ehdr->e_entry < (sechdrs[i].sh_addr
					 + sechdrs[i].sh_size)) {
			kbuf->image->start -= sechdrs[i].sh_addr;
			kbuf->image->start += kbuf->mem + offset;
		}

		src = (void *)pi->ehdr + sechdrs[i].sh_offset;
		dst = pi->purgatory_buf + offset;
		memcpy(dst, src, sechdrs[i].sh_size);

		sechdrs[i].sh_addr = kbuf->mem + offset;
		sechdrs[i].sh_offset = offset;
		offset += sechdrs[i].sh_size;
	}

	return 0;
}

static int kexec_apply_relocations(struct kimage *image)
{
	int i, ret;
	struct purgatory_info *pi = &image->purgatory_info;
	const Elf_Shdr *sechdrs;

	sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;

	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		const Elf_Shdr *relsec;
		const Elf_Shdr *symtab;
		Elf_Shdr *section;

		relsec = sechdrs + i;

		if (relsec->sh_type != SHT_RELA &&
		    relsec->sh_type != SHT_REL)
			continue;

		/*
		 * For sections of type SHT_RELA/SHT_REL, ->sh_link contains
		 * the section header index of the associated symbol table,
		 * and ->sh_info contains the section header index of the
		 * section to which the relocations apply.
		 */
		if (relsec->sh_info >= pi->ehdr->e_shnum ||
		    relsec->sh_link >= pi->ehdr->e_shnum)
			return -ENOEXEC;

		section = pi->sechdrs + relsec->sh_info;
		symtab = sechdrs + relsec->sh_link;

		if (!(section->sh_flags & SHF_ALLOC))
			continue;

		/*
		 * symtab->sh_link contains the section header index of the
		 * associated string table.
		 */
		if (symtab->sh_link >= pi->ehdr->e_shnum)
			/* Invalid section number? */
			continue;

		/*
		 * The respective architecture needs to provide support for
		 * applying relocations of type SHT_RELA/SHT_REL.
		 */
		if (relsec->sh_type == SHT_RELA)
			ret = arch_kexec_apply_relocations_add(pi, section,
							       relsec, symtab);
		else if (relsec->sh_type == SHT_REL)
			ret = arch_kexec_apply_relocations(pi, section,
							   relsec, symtab);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * kexec_load_purgatory - Load and relocate the purgatory object.
 * @image:	Image to add the purgatory to.
 * @kbuf:	Memory parameters to use.
 *
 * Allocates the memory needed for image->purgatory_info.sechdrs and
 * image->purgatory_info.purgatory_buf/kbuf->buffer. The caller is
 * responsible for freeing the memory after use.
 *
 * Return: 0 on success, negative errno on error.
 */
int kexec_load_purgatory(struct kimage *image, struct kexec_buf *kbuf)
{
	struct purgatory_info *pi = &image->purgatory_info;
	int ret;

	if (kexec_purgatory_size <= 0)
		return -EINVAL;

	pi->ehdr = (const Elf_Ehdr *)kexec_purgatory;

	ret = kexec_purgatory_setup_kbuf(pi, kbuf);
	if (ret)
		return ret;

	ret = kexec_purgatory_setup_sechdrs(pi, kbuf);
	if (ret)
		goto out_free_kbuf;

	ret = kexec_apply_relocations(image);
	if (ret)
		goto out;

	return 0;
out:
	vfree(pi->sechdrs);
	pi->sechdrs = NULL;
out_free_kbuf:
	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;
	return ret;
}
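
/*
 * A hedged sketch of the expected call sequence from an architecture
 * loader: load purgatory first, then patch its globals via
 * kexec_purgatory_get_set_symbol(). Bounds and symbol names below are
 * illustrative (x86 patches "entry64_regs", for instance):
 *
 *	struct kexec_buf pbuf = { .image = image, .buf_min = MIN_PURGATORY_ADDR,
 *				  .buf_max = ULONG_MAX, .top_down = true,
 *				  .mem = KEXEC_BUF_MEM_UNKNOWN };
 *
 *	ret = kexec_load_purgatory(image, &pbuf);
 *	if (!ret)
 *		ret = kexec_purgatory_get_set_symbol(image, "entry64_regs",
 *						     &regs64, sizeof(regs64), 0);
 */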

/*
 * kexec_purgatory_find_symbol - find a symbol in the purgatory
 * @pi:		Purgatory to search in.
 * @name:	Name of the symbol.
 *
 * Return: pointer to symbol in read-only symtab on success, NULL on error.
 */
static const Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi,
						  const char *name)
{
	const Elf_Shdr *sechdrs;
	const Elf_Ehdr *ehdr;
	const Elf_Sym *syms;
	const char *strtab;
	int i, k;

	if (!pi->ehdr)
		return NULL;

	ehdr = pi->ehdr;
	sechdrs = (void *)ehdr + ehdr->e_shoff;

	for (i = 0; i < ehdr->e_shnum; i++) {
		if (sechdrs[i].sh_type != SHT_SYMTAB)
			continue;

		if (sechdrs[i].sh_link >= ehdr->e_shnum)
			/* Invalid strtab section number */
			continue;
		strtab = (void *)ehdr + sechdrs[sechdrs[i].sh_link].sh_offset;
		syms = (void *)ehdr + sechdrs[i].sh_offset;

		/* Go through symbols for a match */
		for (k = 0; k < sechdrs[i].sh_size/sizeof(Elf_Sym); k++) {
			if (ELF_ST_BIND(syms[k].st_info) != STB_GLOBAL)
				continue;

			if (strcmp(strtab + syms[k].st_name, name) != 0)
				continue;

			if (syms[k].st_shndx == SHN_UNDEF ||
			    syms[k].st_shndx >= ehdr->e_shnum) {
				pr_debug("Symbol: %s has bad section index %d.\n",
						name, syms[k].st_shndx);
				return NULL;
			}

			/* Found the symbol we are looking for */
			return &syms[k];
		}
	}

	return NULL;
}

void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name)
{
	struct purgatory_info *pi = &image->purgatory_info;
	const Elf_Sym *sym;
	Elf_Shdr *sechdr;

	sym = kexec_purgatory_find_symbol(pi, name);
	if (!sym)
		return ERR_PTR(-EINVAL);

	sechdr = &pi->sechdrs[sym->st_shndx];

	/*
	 * Return the address where the symbol will finally be loaded after
	 * kimage_load_segment().
	 */
	return (void *)(sechdr->sh_addr + sym->st_value);
}

/*
 * Get or set the value of a symbol. If @get_value is true, the symbol value
 * is returned in @buf; otherwise the symbol value is set based on the value
 * in @buf.
 */
int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
				   void *buf, unsigned int size, bool get_value)
{
	struct purgatory_info *pi = &image->purgatory_info;
	const Elf_Sym *sym;
	Elf_Shdr *sec;
	char *sym_buf;

	sym = kexec_purgatory_find_symbol(pi, name);
	if (!sym)
		return -EINVAL;

	if (sym->st_size != size) {
		pr_err("symbol %s size mismatch: expected %lu actual %u\n",
		       name, (unsigned long)sym->st_size, size);
		return -EINVAL;
	}

	sec = pi->sechdrs + sym->st_shndx;

	if (sec->sh_type == SHT_NOBITS) {
		pr_err("symbol %s is in a bss section. Cannot %s\n", name,
		       get_value ? "get" : "set");
		return -EINVAL;
	}

	sym_buf = (char *)pi->purgatory_buf + sec->sh_offset + sym->st_value;

	if (get_value)
		memcpy((void *)buf, sym_buf, size);
	else
		memcpy((void *)sym_buf, buf, size);

	return 0;
}
#endif /* CONFIG_ARCH_HAS_KEXEC_PURGATORY */

int crash_exclude_mem_range(struct crash_mem *mem,
			    unsigned long long mstart, unsigned long long mend)
{
	int i, j;
	unsigned long long start, end, p_start, p_end;
	struct crash_mem_range temp_range = {0, 0};

	for (i = 0; i < mem->nr_ranges; i++) {
		start = mem->ranges[i].start;
		end = mem->ranges[i].end;
		p_start = mstart;
		p_end = mend;

		if (mstart > end || mend < start)
			continue;

		/* Truncate any area outside of range */
		if (mstart < start)
			p_start = start;
		if (mend > end)
			p_end = end;

		/* Found completely overlapping range */
		if (p_start == start && p_end == end) {
			mem->ranges[i].start = 0;
			mem->ranges[i].end = 0;
			if (i < mem->nr_ranges - 1) {
				/* Shift the rest of the ranges to the left */
				for (j = i; j < mem->nr_ranges - 1; j++) {
					mem->ranges[j].start =
						mem->ranges[j+1].start;
					mem->ranges[j].end =
							mem->ranges[j+1].end;
				}

				/*
				 * Continue to check if there are any other
				 * overlapping ranges from the current
				 * position, because the mem ranges above
				 * have been shifted.
				 */
				i--;
				mem->nr_ranges--;
				continue;
			}
			mem->nr_ranges--;
			return 0;
		}

		if (p_start > start && p_end < end) {
			/* Split original range */
			mem->ranges[i].end = p_start - 1;
			temp_range.start = p_end + 1;
			temp_range.end = end;
		} else if (p_start != start)
			mem->ranges[i].end = p_start - 1;
		else
			mem->ranges[i].start = p_end + 1;
		break;
	}

	/* If no split happened, we are done */
	if (!temp_range.end)
		return 0;

	/* A split happened; make sure there is room for the new range */
	if (i == mem->max_nr_ranges - 1)
		return -ENOMEM;

	/* Location where the new range should go */
	j = i + 1;
	if (j < mem->nr_ranges) {
		/* Move over all ranges one slot towards the end */
		for (i = mem->nr_ranges - 1; i >= j; i--)
			mem->ranges[i + 1] = mem->ranges[i];
	}

	mem->ranges[j].start = temp_range.start;
	mem->ranges[j].end = temp_range.end;
	mem->nr_ranges++;
	return 0;
}
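
/*
 * Worked example: excluding [0x5000, 0x7fff] from a single range
 * [0x0000, 0xffff] splits it in place:
 *
 *	before:	ranges[0] = { .start = 0x0000, .end = 0xffff }
 *	after:	ranges[0] = { .start = 0x0000, .end = 0x4fff }
 *		ranges[1] = { .start = 0x8000, .end = 0xffff }
 *
 * nr_ranges grows by one on a split, which is why callers must size the
 * array with headroom (max_nr_ranges) and handle -ENOMEM.
 */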

int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
			  void **addr, unsigned long *sz)
{
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
	unsigned char *buf;
	unsigned int cpu, i;
	unsigned long long notes_addr;
	unsigned long mstart, mend;

	/* extra phdr for vmcoreinfo ELF note */
	nr_phdr = nr_cpus + 1;
	nr_phdr += mem->nr_ranges;

	/*
	 * kexec-tools creates an extra PT_LOAD phdr for the kernel text
	 * mapping area (for example, ffffffff80000000 - ffffffffa0000000 on
	 * x86_64), which is likely required by tools like gdb. So the same
	 * physical memory will be mapped in two ELF headers: one will contain
	 * kernel text virtual addresses and the other will have __va(physical)
	 * addresses.
	 */

	nr_phdr++;
	elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
	elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);

	buf = vzalloc(elf_sz);
	if (!buf)
		return -ENOMEM;

	ehdr = (Elf64_Ehdr *)buf;
	phdr = (Elf64_Phdr *)(ehdr + 1);
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = ELF_ARCH;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);

	/* Prepare one phdr of type PT_NOTE for each present CPU */
	for_each_present_cpu(cpu) {
		phdr->p_type = PT_NOTE;
		notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
		phdr->p_offset = phdr->p_paddr = notes_addr;
		phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
		(ehdr->e_phnum)++;
		phdr++;
	}

	/* Prepare one PT_NOTE header for vmcoreinfo */
	phdr->p_type = PT_NOTE;
	phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
	phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE;
	(ehdr->e_phnum)++;
	phdr++;

	/* Prepare PT_LOAD type program header for kernel text region */
	if (kernel_map) {
		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_vaddr = (unsigned long) _text;
		phdr->p_filesz = phdr->p_memsz = _end - _text;
		phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
		ehdr->e_phnum++;
		phdr++;
	}

	/* Go through all the ranges in mem->ranges[] and prepare phdr */
	for (i = 0; i < mem->nr_ranges; i++) {
		mstart = mem->ranges[i].start;
		mend = mem->ranges[i].end;

		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_offset  = mstart;

		phdr->p_paddr = mstart;
		phdr->p_vaddr = (unsigned long) __va(mstart);
		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
		phdr->p_align = 0;
		ehdr->e_phnum++;
		pr_debug("Crash PT_LOAD ELF header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
			phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
			ehdr->e_phnum, phdr->p_offset);
		phdr++;
	}

	*addr = buf;
	*sz = elf_sz;
	return 0;
}
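
/*
 * Sizing example with assumed numbers: on a machine with 4 possible CPUs,
 * 3 crash memory ranges and kernel_map set, the allocation above reserves
 * nr_phdr = 4 (per-CPU PT_NOTE) + 1 (vmcoreinfo PT_NOTE)
 *	   + 1 (kernel text PT_LOAD) + 3 (per-range PT_LOAD) = 9
 * program headers, i.e. elf_sz = ALIGN(sizeof(Elf64_Ehdr) +
 * 9 * sizeof(Elf64_Phdr), ELF_CORE_HEADER_ALIGN).
 */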