xref: /openbmc/linux/arch/powerpc/kexec/file_load_64.c (revision ecc23d0a422a3118fcf6e4f0a46e17a6c2047b02)
119031275SHari Bathini // SPDX-License-Identifier: GPL-2.0-only
219031275SHari Bathini /*
319031275SHari Bathini  * ppc64 code to implement the kexec_file_load syscall
419031275SHari Bathini  *
519031275SHari Bathini  * Copyright (C) 2004  Adam Litke (agl@us.ibm.com)
619031275SHari Bathini  * Copyright (C) 2004  IBM Corp.
719031275SHari Bathini  * Copyright (C) 2004,2005  Milton D Miller II, IBM Corporation
819031275SHari Bathini  * Copyright (C) 2005  R Sharada (sharada@in.ibm.com)
919031275SHari Bathini  * Copyright (C) 2006  Mohan Kumar M (mohan@in.ibm.com)
1019031275SHari Bathini  * Copyright (C) 2020  IBM Corporation
1119031275SHari Bathini  *
1219031275SHari Bathini  * Based on kexec-tools' kexec-ppc64.c, kexec-elf-rel-ppc64.c, fs2dt.c.
1319031275SHari Bathini  * Heavily modified for the kernel by
1419031275SHari Bathini  * Hari Bathini, IBM Corporation.
1519031275SHari Bathini  */
1619031275SHari Bathini 
1719031275SHari Bathini #include <linux/kexec.h>
1819031275SHari Bathini #include <linux/of_fdt.h>
1919031275SHari Bathini #include <linux/libfdt.h>
2081d7cac4SRob Herring #include <linux/of.h>
21b8e55a3eSHari Bathini #include <linux/memblock.h>
227c64e21aSHari Bathini #include <linux/slab.h>
231a1cf93cSHari Bathini #include <linux/vmalloc.h>
242377c92eSHari Bathini #include <asm/setup.h>
257c64e21aSHari Bathini #include <asm/drmem.h>
264cfa6ff2SMichael Ellerman #include <asm/firmware.h>
27b8e55a3eSHari Bathini #include <asm/kexec_ranges.h>
281a1cf93cSHari Bathini #include <asm/crashdump-ppc64.h>
2997e45d46SRandy Dunlap #include <asm/mmzone.h>
3089c9ce1cSMichal Suchanek #include <asm/iommu.h>
31340a4a9fSLaurent Dufour #include <asm/prom.h>
3291361b51SRussell Currey #include <asm/plpks.h>
3319031275SHari Bathini 
/*
 * State for building the usable-memory device tree properties:
 * 'buf' accumulates big-endian u64 values — (base, size) pairs, optionally
 * preceded by a per-LMB count slot for linux,drconf-usable-memory — while
 * 'ranges'/'nr_ranges' hold the usable memory windows to intersect against.
 */
struct umem_info {
	u64 *buf;		/* data buffer for usable-memory property */
	u32 size;		/* size allocated for the data buffer */
	u32 max_entries;	/* maximum no. of entries */
	u32 idx;		/* index of current entry */

	/* usable memory ranges to look up */
	unsigned int nr_ranges;
	const struct range *ranges;
};
447c64e21aSHari Bathini 
/* Image loaders probed by kexec_file_load(); ppc64 supports only ELF64. */
const struct kexec_file_ops * const kexec_file_loaders[] = {
	&kexec_elf64_ops,
	NULL			/* sentinel */
};
4919031275SHari Bathini 
5019031275SHari Bathini /**
51b8e55a3eSHari Bathini  * get_exclude_memory_ranges - Get exclude memory ranges. This list includes
52b8e55a3eSHari Bathini  *                             regions like opal/rtas, tce-table, initrd,
53b8e55a3eSHari Bathini  *                             kernel, htab which should be avoided while
54b8e55a3eSHari Bathini  *                             setting up kexec load segments.
55b8e55a3eSHari Bathini  * @mem_ranges:                Range list to add the memory ranges to.
56b8e55a3eSHari Bathini  *
57b8e55a3eSHari Bathini  * Returns 0 on success, negative errno on error.
58b8e55a3eSHari Bathini  */
get_exclude_memory_ranges(struct crash_mem ** mem_ranges)59b8e55a3eSHari Bathini static int get_exclude_memory_ranges(struct crash_mem **mem_ranges)
60b8e55a3eSHari Bathini {
61b8e55a3eSHari Bathini 	int ret;
62b8e55a3eSHari Bathini 
63b8e55a3eSHari Bathini 	ret = add_tce_mem_ranges(mem_ranges);
64b8e55a3eSHari Bathini 	if (ret)
65b8e55a3eSHari Bathini 		goto out;
66b8e55a3eSHari Bathini 
67b8e55a3eSHari Bathini 	ret = add_initrd_mem_range(mem_ranges);
68b8e55a3eSHari Bathini 	if (ret)
69b8e55a3eSHari Bathini 		goto out;
70b8e55a3eSHari Bathini 
71b8e55a3eSHari Bathini 	ret = add_htab_mem_range(mem_ranges);
72b8e55a3eSHari Bathini 	if (ret)
73b8e55a3eSHari Bathini 		goto out;
74b8e55a3eSHari Bathini 
75b8e55a3eSHari Bathini 	ret = add_kernel_mem_range(mem_ranges);
76b8e55a3eSHari Bathini 	if (ret)
77b8e55a3eSHari Bathini 		goto out;
78b8e55a3eSHari Bathini 
79b8e55a3eSHari Bathini 	ret = add_rtas_mem_range(mem_ranges);
80b8e55a3eSHari Bathini 	if (ret)
81b8e55a3eSHari Bathini 		goto out;
82b8e55a3eSHari Bathini 
83b8e55a3eSHari Bathini 	ret = add_opal_mem_range(mem_ranges);
84b8e55a3eSHari Bathini 	if (ret)
85b8e55a3eSHari Bathini 		goto out;
86b8e55a3eSHari Bathini 
87b8e55a3eSHari Bathini 	ret = add_reserved_mem_ranges(mem_ranges);
88b8e55a3eSHari Bathini 	if (ret)
89b8e55a3eSHari Bathini 		goto out;
90b8e55a3eSHari Bathini 
91b8e55a3eSHari Bathini 	/* exclude memory ranges should be sorted for easy lookup */
92b8e55a3eSHari Bathini 	sort_memory_ranges(*mem_ranges, true);
93b8e55a3eSHari Bathini out:
94b8e55a3eSHari Bathini 	if (ret)
95b8e55a3eSHari Bathini 		pr_err("Failed to setup exclude memory ranges\n");
96b8e55a3eSHari Bathini 	return ret;
97b8e55a3eSHari Bathini }
98b8e55a3eSHari Bathini 
99b8e55a3eSHari Bathini /**
1007c64e21aSHari Bathini  * get_usable_memory_ranges - Get usable memory ranges. This list includes
1017c64e21aSHari Bathini  *                            regions like crashkernel, opal/rtas & tce-table,
1027c64e21aSHari Bathini  *                            that kdump kernel could use.
1037c64e21aSHari Bathini  * @mem_ranges:               Range list to add the memory ranges to.
1047c64e21aSHari Bathini  *
1057c64e21aSHari Bathini  * Returns 0 on success, negative errno on error.
1067c64e21aSHari Bathini  */
get_usable_memory_ranges(struct crash_mem ** mem_ranges)1077c64e21aSHari Bathini static int get_usable_memory_ranges(struct crash_mem **mem_ranges)
1087c64e21aSHari Bathini {
1097c64e21aSHari Bathini 	int ret;
1107c64e21aSHari Bathini 
1117c64e21aSHari Bathini 	/*
1127c64e21aSHari Bathini 	 * Early boot failure observed on guests when low memory (first memory
1137c64e21aSHari Bathini 	 * block?) is not added to usable memory. So, add [0, crashk_res.end]
1147c64e21aSHari Bathini 	 * instead of [crashk_res.start, crashk_res.end] to workaround it.
1157c64e21aSHari Bathini 	 * Also, crashed kernel's memory must be added to reserve map to
1167c64e21aSHari Bathini 	 * avoid kdump kernel from using it.
1177c64e21aSHari Bathini 	 */
1187c64e21aSHari Bathini 	ret = add_mem_range(mem_ranges, 0, crashk_res.end + 1);
1197c64e21aSHari Bathini 	if (ret)
1207c64e21aSHari Bathini 		goto out;
1217c64e21aSHari Bathini 
1227c64e21aSHari Bathini 	ret = add_rtas_mem_range(mem_ranges);
1237c64e21aSHari Bathini 	if (ret)
1247c64e21aSHari Bathini 		goto out;
1257c64e21aSHari Bathini 
1267c64e21aSHari Bathini 	ret = add_opal_mem_range(mem_ranges);
1277c64e21aSHari Bathini 	if (ret)
1287c64e21aSHari Bathini 		goto out;
1297c64e21aSHari Bathini 
1307c64e21aSHari Bathini 	ret = add_tce_mem_ranges(mem_ranges);
1317c64e21aSHari Bathini out:
1327c64e21aSHari Bathini 	if (ret)
1337c64e21aSHari Bathini 		pr_err("Failed to setup usable memory ranges\n");
1347c64e21aSHari Bathini 	return ret;
1357c64e21aSHari Bathini }
1367c64e21aSHari Bathini 
1377c64e21aSHari Bathini /**
138cb350c1fSHari Bathini  * get_crash_memory_ranges - Get crash memory ranges. This list includes
139cb350c1fSHari Bathini  *                           first/crashing kernel's memory regions that
140cb350c1fSHari Bathini  *                           would be exported via an elfcore.
141cb350c1fSHari Bathini  * @mem_ranges:              Range list to add the memory ranges to.
142cb350c1fSHari Bathini  *
143cb350c1fSHari Bathini  * Returns 0 on success, negative errno on error.
144cb350c1fSHari Bathini  */
get_crash_memory_ranges(struct crash_mem ** mem_ranges)145cb350c1fSHari Bathini static int get_crash_memory_ranges(struct crash_mem **mem_ranges)
146cb350c1fSHari Bathini {
147b10d6bcaSMike Rapoport 	phys_addr_t base, end;
148cb350c1fSHari Bathini 	struct crash_mem *tmem;
149b10d6bcaSMike Rapoport 	u64 i;
150cb350c1fSHari Bathini 	int ret;
151cb350c1fSHari Bathini 
152b10d6bcaSMike Rapoport 	for_each_mem_range(i, &base, &end) {
153b10d6bcaSMike Rapoport 		u64 size = end - base;
154cb350c1fSHari Bathini 
155cb350c1fSHari Bathini 		/* Skip backup memory region, which needs a separate entry */
156cb350c1fSHari Bathini 		if (base == BACKUP_SRC_START) {
157cb350c1fSHari Bathini 			if (size > BACKUP_SRC_SIZE) {
158cb350c1fSHari Bathini 				base = BACKUP_SRC_END + 1;
159cb350c1fSHari Bathini 				size -= BACKUP_SRC_SIZE;
160cb350c1fSHari Bathini 			} else
161cb350c1fSHari Bathini 				continue;
162cb350c1fSHari Bathini 		}
163cb350c1fSHari Bathini 
164cb350c1fSHari Bathini 		ret = add_mem_range(mem_ranges, base, size);
165cb350c1fSHari Bathini 		if (ret)
166cb350c1fSHari Bathini 			goto out;
167cb350c1fSHari Bathini 
168cb350c1fSHari Bathini 		/* Try merging adjacent ranges before reallocation attempt */
169cb350c1fSHari Bathini 		if ((*mem_ranges)->nr_ranges == (*mem_ranges)->max_nr_ranges)
170cb350c1fSHari Bathini 			sort_memory_ranges(*mem_ranges, true);
171cb350c1fSHari Bathini 	}
172cb350c1fSHari Bathini 
173cb350c1fSHari Bathini 	/* Reallocate memory ranges if there is no space to split ranges */
174cb350c1fSHari Bathini 	tmem = *mem_ranges;
175cb350c1fSHari Bathini 	if (tmem && (tmem->nr_ranges == tmem->max_nr_ranges)) {
176cb350c1fSHari Bathini 		tmem = realloc_mem_ranges(mem_ranges);
177cb350c1fSHari Bathini 		if (!tmem)
178cb350c1fSHari Bathini 			goto out;
179cb350c1fSHari Bathini 	}
180cb350c1fSHari Bathini 
181cb350c1fSHari Bathini 	/* Exclude crashkernel region */
182cb350c1fSHari Bathini 	ret = crash_exclude_mem_range(tmem, crashk_res.start, crashk_res.end);
183cb350c1fSHari Bathini 	if (ret)
184cb350c1fSHari Bathini 		goto out;
185cb350c1fSHari Bathini 
186cb350c1fSHari Bathini 	/*
187cb350c1fSHari Bathini 	 * FIXME: For now, stay in parity with kexec-tools but if RTAS/OPAL
188cb350c1fSHari Bathini 	 *        regions are exported to save their context at the time of
189cb350c1fSHari Bathini 	 *        crash, they should actually be backed up just like the
190cb350c1fSHari Bathini 	 *        first 64K bytes of memory.
191cb350c1fSHari Bathini 	 */
192cb350c1fSHari Bathini 	ret = add_rtas_mem_range(mem_ranges);
193cb350c1fSHari Bathini 	if (ret)
194cb350c1fSHari Bathini 		goto out;
195cb350c1fSHari Bathini 
196cb350c1fSHari Bathini 	ret = add_opal_mem_range(mem_ranges);
197cb350c1fSHari Bathini 	if (ret)
198cb350c1fSHari Bathini 		goto out;
199cb350c1fSHari Bathini 
200cb350c1fSHari Bathini 	/* create a separate program header for the backup region */
201cb350c1fSHari Bathini 	ret = add_mem_range(mem_ranges, BACKUP_SRC_START, BACKUP_SRC_SIZE);
202cb350c1fSHari Bathini 	if (ret)
203cb350c1fSHari Bathini 		goto out;
204cb350c1fSHari Bathini 
205cb350c1fSHari Bathini 	sort_memory_ranges(*mem_ranges, false);
206cb350c1fSHari Bathini out:
207cb350c1fSHari Bathini 	if (ret)
208cb350c1fSHari Bathini 		pr_err("Failed to setup crash memory ranges\n");
209cb350c1fSHari Bathini 	return ret;
210cb350c1fSHari Bathini }
211cb350c1fSHari Bathini 
212cb350c1fSHari Bathini /**
2136ecd0163SHari Bathini  * get_reserved_memory_ranges - Get reserve memory ranges. This list includes
2146ecd0163SHari Bathini  *                              memory regions that should be added to the
2156ecd0163SHari Bathini  *                              memory reserve map to ensure the region is
2166ecd0163SHari Bathini  *                              protected from any mischief.
2176ecd0163SHari Bathini  * @mem_ranges:                 Range list to add the memory ranges to.
2186ecd0163SHari Bathini  *
2196ecd0163SHari Bathini  * Returns 0 on success, negative errno on error.
2206ecd0163SHari Bathini  */
get_reserved_memory_ranges(struct crash_mem ** mem_ranges)2216ecd0163SHari Bathini static int get_reserved_memory_ranges(struct crash_mem **mem_ranges)
2226ecd0163SHari Bathini {
2236ecd0163SHari Bathini 	int ret;
2246ecd0163SHari Bathini 
2256ecd0163SHari Bathini 	ret = add_rtas_mem_range(mem_ranges);
2266ecd0163SHari Bathini 	if (ret)
2276ecd0163SHari Bathini 		goto out;
2286ecd0163SHari Bathini 
2296ecd0163SHari Bathini 	ret = add_tce_mem_ranges(mem_ranges);
2306ecd0163SHari Bathini 	if (ret)
2316ecd0163SHari Bathini 		goto out;
2326ecd0163SHari Bathini 
2336ecd0163SHari Bathini 	ret = add_reserved_mem_ranges(mem_ranges);
2346ecd0163SHari Bathini out:
2356ecd0163SHari Bathini 	if (ret)
2366ecd0163SHari Bathini 		pr_err("Failed to setup reserved memory ranges\n");
2376ecd0163SHari Bathini 	return ret;
2386ecd0163SHari Bathini }
2396ecd0163SHari Bathini 
/**
 * __locate_mem_hole_top_down - Looks top down for a large enough memory hole
 *                              in the memory regions between buf_min & buf_max
 *                              for the buffer. If found, sets kbuf->mem.
 * @kbuf:                       Buffer contents and memory parameters.
 * @buf_min:                    Minimum address for the buffer.
 * @buf_max:                    Maximum address for the buffer.
 *
 * Returns 0 on success, negative errno on error (-EADDRNOTAVAIL if no
 * suitable hole exists in [buf_min, buf_max]).
 */
static int __locate_mem_hole_top_down(struct kexec_buf *kbuf,
				      u64 buf_min, u64 buf_max)
{
	int ret = -EADDRNOTAVAIL;
	phys_addr_t start, end;
	u64 i;

	/* Walk memblock regions from the highest address downwards. */
	for_each_mem_range_rev(i, &start, &end) {
		/*
		 * memblock uses [start, end) convention while it is
		 * [start, end] here. Fix the off-by-one to have the
		 * same convention.
		 */
		end -= 1;

		/* Region entirely above the window; try the next lower one. */
		if (start > buf_max)
			continue;

		/* Memory hole not found */
		if (end < buf_min)
			break;

		/* Adjust memory region based on the given range */
		if (start < buf_min)
			start = buf_min;
		if (end > buf_max)
			end = buf_max;

		start = ALIGN(start, kbuf->buf_align);
		if (start < end && (end - start + 1) >= kbuf->memsz) {
			/* Suitable memory range found. Set kbuf->mem */
			/* Place as high as possible while keeping alignment. */
			kbuf->mem = ALIGN_DOWN(end - kbuf->memsz + 1,
					       kbuf->buf_align);
			ret = 0;
			break;
		}
	}

	return ret;
}
290b8e55a3eSHari Bathini 
/**
 * locate_mem_hole_top_down_ppc64 - Skip special memory regions to find a
 *                                  suitable buffer with top down approach.
 * @kbuf:                           Buffer contents and memory parameters.
 * @buf_min:                        Minimum address for the buffer.
 * @buf_max:                        Maximum address for the buffer.
 * @emem:                           Exclude memory ranges (sorted by address).
 *
 * Walks the exclude list from the highest range downwards, probing each gap
 * between consecutive exclude ranges (within [buf_min, buf_max]) for a hole.
 *
 * Returns 0 on success, negative errno on error.
 */
static int locate_mem_hole_top_down_ppc64(struct kexec_buf *kbuf,
					  u64 buf_min, u64 buf_max,
					  const struct crash_mem *emem)
{
	int i, ret = 0, err = -EADDRNOTAVAIL;
	u64 start, end, tmin, tmax;

	tmax = buf_max;
	for (i = (emem->nr_ranges - 1); i >= 0; i--) {
		start = emem->ranges[i].start;
		end = emem->ranges[i].end;

		/* Exclude range lies entirely above the current window. */
		if (start > tmax)
			continue;

		if (end < tmax) {
			/* Probe the gap (end, tmax]; clamp bottom to buf_min. */
			tmin = (end < buf_min ? buf_min : end + 1);
			ret = __locate_mem_hole_top_down(kbuf, tmin, tmax);
			if (!ret)
				return 0;
		}

		/* Shrink the window to below this exclude range. */
		tmax = start - 1;

		if (tmax < buf_min) {
			ret = err;
			break;
		}
		ret = 0;
	}

	/* Probe the remaining window below the lowest exclude range. */
	if (!ret) {
		tmin = buf_min;
		ret = __locate_mem_hole_top_down(kbuf, tmin, tmax);
	}
	return ret;
}
338b8e55a3eSHari Bathini 
/**
 * __locate_mem_hole_bottom_up - Looks bottom up for a large enough memory hole
 *                               in the memory regions between buf_min & buf_max
 *                               for the buffer. If found, sets kbuf->mem.
 * @kbuf:                        Buffer contents and memory parameters.
 * @buf_min:                     Minimum address for the buffer.
 * @buf_max:                     Maximum address for the buffer.
 *
 * Returns 0 on success, negative errno on error (-EADDRNOTAVAIL if no
 * suitable hole exists in [buf_min, buf_max]).
 */
static int __locate_mem_hole_bottom_up(struct kexec_buf *kbuf,
				       u64 buf_min, u64 buf_max)
{
	int ret = -EADDRNOTAVAIL;
	phys_addr_t start, end;
	u64 i;

	/* Walk memblock regions from the lowest address upwards. */
	for_each_mem_range(i, &start, &end) {
		/*
		 * memblock uses [start, end) convention while it is
		 * [start, end] here. Fix the off-by-one to have the
		 * same convention.
		 */
		end -= 1;

		/* Region entirely below the window; try the next higher one. */
		if (end < buf_min)
			continue;

		/* Memory hole not found */
		if (start > buf_max)
			break;

		/* Adjust memory region based on the given range */
		if (start < buf_min)
			start = buf_min;
		if (end > buf_max)
			end = buf_max;

		start = ALIGN(start, kbuf->buf_align);
		if (start < end && (end - start + 1) >= kbuf->memsz) {
			/* Suitable memory range found. Set kbuf->mem */
			kbuf->mem = start;
			ret = 0;
			break;
		}
	}

	return ret;
}
388b8e55a3eSHari Bathini 
/**
 * locate_mem_hole_bottom_up_ppc64 - Skip special memory regions to find a
 *                                   suitable buffer with bottom up approach.
 * @kbuf:                            Buffer contents and memory parameters.
 * @buf_min:                         Minimum address for the buffer.
 * @buf_max:                         Maximum address for the buffer.
 * @emem:                            Exclude memory ranges (sorted by address).
 *
 * Walks the exclude list from the lowest range upwards, probing each gap
 * between consecutive exclude ranges (within [buf_min, buf_max]) for a hole.
 *
 * Returns 0 on success, negative errno on error.
 */
static int locate_mem_hole_bottom_up_ppc64(struct kexec_buf *kbuf,
					   u64 buf_min, u64 buf_max,
					   const struct crash_mem *emem)
{
	int i, ret = 0, err = -EADDRNOTAVAIL;
	u64 start, end, tmin, tmax;

	tmin = buf_min;
	for (i = 0; i < emem->nr_ranges; i++) {
		start = emem->ranges[i].start;
		end = emem->ranges[i].end;

		/* Exclude range lies entirely below the current window. */
		if (end < tmin)
			continue;

		if (start > tmin) {
			/* Probe the gap [tmin, start); clamp top to buf_max. */
			tmax = (start > buf_max ? buf_max : start - 1);
			ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax);
			if (!ret)
				return 0;
		}

		/* Advance the window past this exclude range. */
		tmin = end + 1;

		if (tmin > buf_max) {
			ret = err;
			break;
		}
		ret = 0;
	}

	/* Probe the remaining window above the highest exclude range. */
	if (!ret) {
		tmax = buf_max;
		ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax);
	}
	return ret;
}
436b8e55a3eSHari Bathini 
437b8e55a3eSHari Bathini /**
4387c64e21aSHari Bathini  * check_realloc_usable_mem - Reallocate buffer if it can't accommodate entries
4397c64e21aSHari Bathini  * @um_info:                  Usable memory buffer and ranges info.
4407c64e21aSHari Bathini  * @cnt:                      No. of entries to accommodate.
4417c64e21aSHari Bathini  *
4427c64e21aSHari Bathini  * Frees up the old buffer if memory reallocation fails.
4437c64e21aSHari Bathini  *
4447c64e21aSHari Bathini  * Returns buffer on success, NULL on error.
4457c64e21aSHari Bathini  */
check_realloc_usable_mem(struct umem_info * um_info,int cnt)4467c64e21aSHari Bathini static u64 *check_realloc_usable_mem(struct umem_info *um_info, int cnt)
4477c64e21aSHari Bathini {
4487c64e21aSHari Bathini 	u32 new_size;
4497c64e21aSHari Bathini 	u64 *tbuf;
4507c64e21aSHari Bathini 
4517c64e21aSHari Bathini 	if ((um_info->idx + cnt) <= um_info->max_entries)
4527c64e21aSHari Bathini 		return um_info->buf;
4537c64e21aSHari Bathini 
4547c64e21aSHari Bathini 	new_size = um_info->size + MEM_RANGE_CHUNK_SZ;
4557c64e21aSHari Bathini 	tbuf = krealloc(um_info->buf, new_size, GFP_KERNEL);
4567c64e21aSHari Bathini 	if (tbuf) {
4577c64e21aSHari Bathini 		um_info->buf = tbuf;
4587c64e21aSHari Bathini 		um_info->size = new_size;
4597c64e21aSHari Bathini 		um_info->max_entries = (um_info->size / sizeof(u64));
4607c64e21aSHari Bathini 	}
4617c64e21aSHari Bathini 
4627c64e21aSHari Bathini 	return tbuf;
4637c64e21aSHari Bathini }
4647c64e21aSHari Bathini 
4657c64e21aSHari Bathini /**
4667c64e21aSHari Bathini  * add_usable_mem - Add the usable memory ranges within the given memory range
4677c64e21aSHari Bathini  *                  to the buffer
4687c64e21aSHari Bathini  * @um_info:        Usable memory buffer and ranges info.
4697c64e21aSHari Bathini  * @base:           Base address of memory range to look for.
4707c64e21aSHari Bathini  * @end:            End address of memory range to look for.
4717c64e21aSHari Bathini  *
4727c64e21aSHari Bathini  * Returns 0 on success, negative errno on error.
4737c64e21aSHari Bathini  */
add_usable_mem(struct umem_info * um_info,u64 base,u64 end)4747c64e21aSHari Bathini static int add_usable_mem(struct umem_info *um_info, u64 base, u64 end)
4757c64e21aSHari Bathini {
4767c64e21aSHari Bathini 	u64 loc_base, loc_end;
4777c64e21aSHari Bathini 	bool add;
4787c64e21aSHari Bathini 	int i;
4797c64e21aSHari Bathini 
4807c64e21aSHari Bathini 	for (i = 0; i < um_info->nr_ranges; i++) {
4817c64e21aSHari Bathini 		add = false;
4827c64e21aSHari Bathini 		loc_base = um_info->ranges[i].start;
4837c64e21aSHari Bathini 		loc_end = um_info->ranges[i].end;
4847c64e21aSHari Bathini 		if (loc_base >= base && loc_end <= end)
4857c64e21aSHari Bathini 			add = true;
4867c64e21aSHari Bathini 		else if (base < loc_end && end > loc_base) {
4877c64e21aSHari Bathini 			if (loc_base < base)
4887c64e21aSHari Bathini 				loc_base = base;
4897c64e21aSHari Bathini 			if (loc_end > end)
4907c64e21aSHari Bathini 				loc_end = end;
4917c64e21aSHari Bathini 			add = true;
4927c64e21aSHari Bathini 		}
4937c64e21aSHari Bathini 
4947c64e21aSHari Bathini 		if (add) {
4957c64e21aSHari Bathini 			if (!check_realloc_usable_mem(um_info, 2))
4967c64e21aSHari Bathini 				return -ENOMEM;
4977c64e21aSHari Bathini 
4987c64e21aSHari Bathini 			um_info->buf[um_info->idx++] = cpu_to_be64(loc_base);
4997c64e21aSHari Bathini 			um_info->buf[um_info->idx++] =
5007c64e21aSHari Bathini 					cpu_to_be64(loc_end - loc_base + 1);
5017c64e21aSHari Bathini 		}
5027c64e21aSHari Bathini 	}
5037c64e21aSHari Bathini 
5047c64e21aSHari Bathini 	return 0;
5057c64e21aSHari Bathini }
5067c64e21aSHari Bathini 
/**
 * kdump_setup_usable_lmb - This is a callback function that gets called by
 *                          walk_drmem_lmbs for every LMB to set its
 *                          usable memory ranges.
 * @lmb:                    LMB info.
 * @usm:                    linux,drconf-usable-memory property value.
 * @data:                   Pointer to usable memory buffer and ranges info.
 *
 * Returns 0 on success, negative errno on error.
 */
static int kdump_setup_usable_lmb(struct drmem_lmb *lmb, const __be32 **usm,
				  void *data)
{
	struct umem_info *um_info;
	int tmp_idx, ret;
	u64 base, end;

	/*
	 * kdump load isn't supported on kernels already booted with
	 * linux,drconf-usable-memory property.
	 */
	if (*usm) {
		pr_err("linux,drconf-usable-memory property already exists!");
		return -EINVAL;
	}

	um_info = data;
	/* Remember where the count slot goes; it is filled in after the fact. */
	tmp_idx = um_info->idx;
	if (!check_realloc_usable_mem(um_info, 1))
		return -ENOMEM;

	/* Reserve one entry for the range count of this LMB. */
	um_info->idx++;
	base = lmb->base_addr;
	end = base + drmem_lmb_size() - 1;
	ret = add_usable_mem(um_info, base, end);
	if (!ret) {
		/*
		 * Update the no. of ranges added. Two entries (base & size)
		 * for every range added.
		 */
		um_info->buf[tmp_idx] =
				cpu_to_be64((um_info->idx - tmp_idx - 1) / 2);
	}

	return ret;
}
5537c64e21aSHari Bathini 
5547c64e21aSHari Bathini #define NODE_PATH_LEN		256
5557c64e21aSHari Bathini /**
5567c64e21aSHari Bathini  * add_usable_mem_property - Add usable memory property for the given
5577c64e21aSHari Bathini  *                           memory node.
5587c64e21aSHari Bathini  * @fdt:                     Flattened device tree for the kdump kernel.
5597c64e21aSHari Bathini  * @dn:                      Memory node.
5607c64e21aSHari Bathini  * @um_info:                 Usable memory buffer and ranges info.
5617c64e21aSHari Bathini  *
5627c64e21aSHari Bathini  * Returns 0 on success, negative errno on error.
5637c64e21aSHari Bathini  */
add_usable_mem_property(void * fdt,struct device_node * dn,struct umem_info * um_info)5647c64e21aSHari Bathini static int add_usable_mem_property(void *fdt, struct device_node *dn,
5657c64e21aSHari Bathini 				   struct umem_info *um_info)
5667c64e21aSHari Bathini {
5677c64e21aSHari Bathini 	int n_mem_addr_cells, n_mem_size_cells, node;
5687c64e21aSHari Bathini 	char path[NODE_PATH_LEN];
5697c64e21aSHari Bathini 	int i, len, ranges, ret;
5707c64e21aSHari Bathini 	const __be32 *prop;
5717c64e21aSHari Bathini 	u64 base, end;
5727c64e21aSHari Bathini 
5737c64e21aSHari Bathini 	of_node_get(dn);
5747c64e21aSHari Bathini 
5757c64e21aSHari Bathini 	if (snprintf(path, NODE_PATH_LEN, "%pOF", dn) > (NODE_PATH_LEN - 1)) {
5767c64e21aSHari Bathini 		pr_err("Buffer (%d) too small for memory node: %pOF\n",
5777c64e21aSHari Bathini 		       NODE_PATH_LEN, dn);
5787c64e21aSHari Bathini 		return -EOVERFLOW;
5797c64e21aSHari Bathini 	}
5807c64e21aSHari Bathini 	pr_debug("Memory node path: %s\n", path);
5817c64e21aSHari Bathini 
5827c64e21aSHari Bathini 	/* Now that we know the path, find its offset in kdump kernel's fdt */
5837c64e21aSHari Bathini 	node = fdt_path_offset(fdt, path);
5847c64e21aSHari Bathini 	if (node < 0) {
5857c64e21aSHari Bathini 		pr_err("Malformed device tree: error reading %s\n", path);
5867c64e21aSHari Bathini 		ret = -EINVAL;
5877c64e21aSHari Bathini 		goto out;
5887c64e21aSHari Bathini 	}
5897c64e21aSHari Bathini 
5907c64e21aSHari Bathini 	/* Get the address & size cells */
5917c64e21aSHari Bathini 	n_mem_addr_cells = of_n_addr_cells(dn);
5927c64e21aSHari Bathini 	n_mem_size_cells = of_n_size_cells(dn);
5937c64e21aSHari Bathini 	pr_debug("address cells: %d, size cells: %d\n", n_mem_addr_cells,
5947c64e21aSHari Bathini 		 n_mem_size_cells);
5957c64e21aSHari Bathini 
5967c64e21aSHari Bathini 	um_info->idx  = 0;
5977c64e21aSHari Bathini 	if (!check_realloc_usable_mem(um_info, 2)) {
5987c64e21aSHari Bathini 		ret = -ENOMEM;
5997c64e21aSHari Bathini 		goto out;
6007c64e21aSHari Bathini 	}
6017c64e21aSHari Bathini 
6027c64e21aSHari Bathini 	prop = of_get_property(dn, "reg", &len);
6037c64e21aSHari Bathini 	if (!prop || len <= 0) {
6047c64e21aSHari Bathini 		ret = 0;
6057c64e21aSHari Bathini 		goto out;
6067c64e21aSHari Bathini 	}
6077c64e21aSHari Bathini 
6087c64e21aSHari Bathini 	/*
6097c64e21aSHari Bathini 	 * "reg" property represents sequence of (addr,size) tuples
6107c64e21aSHari Bathini 	 * each representing a memory range.
6117c64e21aSHari Bathini 	 */
6127c64e21aSHari Bathini 	ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
6137c64e21aSHari Bathini 
6147c64e21aSHari Bathini 	for (i = 0; i < ranges; i++) {
6157c64e21aSHari Bathini 		base = of_read_number(prop, n_mem_addr_cells);
6167c64e21aSHari Bathini 		prop += n_mem_addr_cells;
6177c64e21aSHari Bathini 		end = base + of_read_number(prop, n_mem_size_cells) - 1;
6187c64e21aSHari Bathini 		prop += n_mem_size_cells;
6197c64e21aSHari Bathini 
6207c64e21aSHari Bathini 		ret = add_usable_mem(um_info, base, end);
6217c64e21aSHari Bathini 		if (ret)
6227c64e21aSHari Bathini 			goto out;
6237c64e21aSHari Bathini 	}
6247c64e21aSHari Bathini 
6257c64e21aSHari Bathini 	/*
6267c64e21aSHari Bathini 	 * No kdump kernel usable memory found in this memory node.
6277c64e21aSHari Bathini 	 * Write (0,0) tuple in linux,usable-memory property for
6287c64e21aSHari Bathini 	 * this region to be ignored.
6297c64e21aSHari Bathini 	 */
6307c64e21aSHari Bathini 	if (um_info->idx == 0) {
6317c64e21aSHari Bathini 		um_info->buf[0] = 0;
6327c64e21aSHari Bathini 		um_info->buf[1] = 0;
6337c64e21aSHari Bathini 		um_info->idx = 2;
6347c64e21aSHari Bathini 	}
6357c64e21aSHari Bathini 
6367c64e21aSHari Bathini 	ret = fdt_setprop(fdt, node, "linux,usable-memory", um_info->buf,
6377c64e21aSHari Bathini 			  (um_info->idx * sizeof(u64)));
6387c64e21aSHari Bathini 
6397c64e21aSHari Bathini out:
6407c64e21aSHari Bathini 	of_node_put(dn);
6417c64e21aSHari Bathini 	return ret;
6427c64e21aSHari Bathini }
6437c64e21aSHari Bathini 
6447c64e21aSHari Bathini 
/**
 * update_usable_mem_fdt - Updates kdump kernel's fdt with linux,usable-memory
 *                         and linux,drconf-usable-memory DT properties as
 *                         appropriate to restrict its memory usage.
 * @fdt:                   Flattened device tree for the kdump kernel.
 * @usable_mem:            Usable memory ranges for kdump kernel.
 *
 * Returns 0 on success, negative errno on error.
 */
static int update_usable_mem_fdt(void *fdt, struct crash_mem *usable_mem)
{
	struct umem_info um_info;
	struct device_node *dn;
	int node, ret = 0;

	if (!usable_mem) {
		pr_err("Usable memory ranges for kdump kernel not found\n");
		return -ENOENT;
	}

	/*
	 * Locate the drconf memory node in the kdump kernel's fdt up front.
	 * 'node' is consumed only inside the if (dn) block below, i.e. only
	 * when the live device tree also has the node.
	 */
	node = fdt_path_offset(fdt, "/ibm,dynamic-reconfiguration-memory");
	if (node == -FDT_ERR_NOTFOUND)
		pr_debug("No dynamic reconfiguration memory found\n");
	else if (node < 0) {
		pr_err("Malformed device tree: error reading /ibm,dynamic-reconfiguration-memory.\n");
		return -EINVAL;
	}

	/* um_info.buf is grown on demand by the walk callbacks; freed at out */
	um_info.buf  = NULL;
	um_info.size = 0;
	um_info.max_entries = 0;
	um_info.idx  = 0;
	/* Memory ranges to look up */
	um_info.ranges = &(usable_mem->ranges[0]);
	um_info.nr_ranges = usable_mem->nr_ranges;

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (dn) {
		/* Accumulate usable LMB entries into um_info.buf */
		ret = walk_drmem_lmbs(dn, &um_info, kdump_setup_usable_lmb);
		of_node_put(dn);

		if (ret) {
			pr_err("Could not setup linux,drconf-usable-memory property for kdump\n");
			goto out;
		}

		ret = fdt_setprop(fdt, node, "linux,drconf-usable-memory",
				  um_info.buf, (um_info.idx * sizeof(u64)));
		if (ret) {
			pr_err("Failed to update fdt with linux,drconf-usable-memory property: %s",
			       fdt_strerror(ret));
			goto out;
		}
	}

	/*
	 * Walk through each memory node and set linux,usable-memory property
	 * for the corresponding node in kdump kernel's fdt.
	 */
	for_each_node_by_type(dn, "memory") {
		ret = add_usable_mem_property(fdt, dn, &um_info);
		if (ret) {
			pr_err("Failed to set linux,usable-memory property for %s node",
			       dn->full_name);
			/* drop the iterator's reference before bailing out */
			of_node_put(dn);
			goto out;
		}
	}

out:
	kfree(um_info.buf);
	return ret;
}
7187c64e21aSHari Bathini 
7197c64e21aSHari Bathini /**
7201a1cf93cSHari Bathini  * load_backup_segment - Locate a memory hole to place the backup region.
7211a1cf93cSHari Bathini  * @image:               Kexec image.
7221a1cf93cSHari Bathini  * @kbuf:                Buffer contents and memory parameters.
7231a1cf93cSHari Bathini  *
7241a1cf93cSHari Bathini  * Returns 0 on success, negative errno on error.
7251a1cf93cSHari Bathini  */
load_backup_segment(struct kimage * image,struct kexec_buf * kbuf)7261a1cf93cSHari Bathini static int load_backup_segment(struct kimage *image, struct kexec_buf *kbuf)
7271a1cf93cSHari Bathini {
7281a1cf93cSHari Bathini 	void *buf;
7291a1cf93cSHari Bathini 	int ret;
7301a1cf93cSHari Bathini 
7311a1cf93cSHari Bathini 	/*
7321a1cf93cSHari Bathini 	 * Setup a source buffer for backup segment.
7331a1cf93cSHari Bathini 	 *
7341a1cf93cSHari Bathini 	 * A source buffer has no meaning for backup region as data will
7351a1cf93cSHari Bathini 	 * be copied from backup source, after crash, in the purgatory.
7361a1cf93cSHari Bathini 	 * But as load segment code doesn't recognize such segments,
7371a1cf93cSHari Bathini 	 * setup a dummy source buffer to keep it happy for now.
7381a1cf93cSHari Bathini 	 */
7391a1cf93cSHari Bathini 	buf = vzalloc(BACKUP_SRC_SIZE);
7401a1cf93cSHari Bathini 	if (!buf)
7411a1cf93cSHari Bathini 		return -ENOMEM;
7421a1cf93cSHari Bathini 
7431a1cf93cSHari Bathini 	kbuf->buffer = buf;
7441a1cf93cSHari Bathini 	kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
7451a1cf93cSHari Bathini 	kbuf->bufsz = kbuf->memsz = BACKUP_SRC_SIZE;
7461a1cf93cSHari Bathini 	kbuf->top_down = false;
7471a1cf93cSHari Bathini 
7481a1cf93cSHari Bathini 	ret = kexec_add_buffer(kbuf);
7491a1cf93cSHari Bathini 	if (ret) {
7501a1cf93cSHari Bathini 		vfree(buf);
7511a1cf93cSHari Bathini 		return ret;
7521a1cf93cSHari Bathini 	}
7531a1cf93cSHari Bathini 
7541a1cf93cSHari Bathini 	image->arch.backup_buf = buf;
7551a1cf93cSHari Bathini 	image->arch.backup_start = kbuf->mem;
7561a1cf93cSHari Bathini 	return 0;
7571a1cf93cSHari Bathini }
7581a1cf93cSHari Bathini 
7591a1cf93cSHari Bathini /**
760cb350c1fSHari Bathini  * update_backup_region_phdr - Update backup region's offset for the core to
761cb350c1fSHari Bathini  *                             export the region appropriately.
762cb350c1fSHari Bathini  * @image:                     Kexec image.
763cb350c1fSHari Bathini  * @ehdr:                      ELF core header.
764cb350c1fSHari Bathini  *
765cb350c1fSHari Bathini  * Assumes an exclusive program header is setup for the backup region
766cb350c1fSHari Bathini  * in the ELF headers
767cb350c1fSHari Bathini  *
768cb350c1fSHari Bathini  * Returns nothing.
769cb350c1fSHari Bathini  */
update_backup_region_phdr(struct kimage * image,Elf64_Ehdr * ehdr)770cb350c1fSHari Bathini static void update_backup_region_phdr(struct kimage *image, Elf64_Ehdr *ehdr)
771cb350c1fSHari Bathini {
772cb350c1fSHari Bathini 	Elf64_Phdr *phdr;
773cb350c1fSHari Bathini 	unsigned int i;
774cb350c1fSHari Bathini 
775cb350c1fSHari Bathini 	phdr = (Elf64_Phdr *)(ehdr + 1);
776cb350c1fSHari Bathini 	for (i = 0; i < ehdr->e_phnum; i++) {
777cb350c1fSHari Bathini 		if (phdr->p_paddr == BACKUP_SRC_START) {
778cb350c1fSHari Bathini 			phdr->p_offset = image->arch.backup_start;
779cb350c1fSHari Bathini 			pr_debug("Backup region offset updated to 0x%lx\n",
780cb350c1fSHari Bathini 				 image->arch.backup_start);
781cb350c1fSHari Bathini 			return;
782cb350c1fSHari Bathini 		}
783cb350c1fSHari Bathini 	}
784cb350c1fSHari Bathini }
785cb350c1fSHari Bathini 
786cb350c1fSHari Bathini /**
787cb350c1fSHari Bathini  * load_elfcorehdr_segment - Setup crash memory ranges and initialize elfcorehdr
788cb350c1fSHari Bathini  *                           segment needed to load kdump kernel.
789cb350c1fSHari Bathini  * @image:                   Kexec image.
790cb350c1fSHari Bathini  * @kbuf:                    Buffer contents and memory parameters.
791cb350c1fSHari Bathini  *
792cb350c1fSHari Bathini  * Returns 0 on success, negative errno on error.
793cb350c1fSHari Bathini  */
load_elfcorehdr_segment(struct kimage * image,struct kexec_buf * kbuf)794cb350c1fSHari Bathini static int load_elfcorehdr_segment(struct kimage *image, struct kexec_buf *kbuf)
795cb350c1fSHari Bathini {
796cb350c1fSHari Bathini 	struct crash_mem *cmem = NULL;
797cb350c1fSHari Bathini 	unsigned long headers_sz;
798cb350c1fSHari Bathini 	void *headers = NULL;
799cb350c1fSHari Bathini 	int ret;
800cb350c1fSHari Bathini 
801cb350c1fSHari Bathini 	ret = get_crash_memory_ranges(&cmem);
802cb350c1fSHari Bathini 	if (ret)
803cb350c1fSHari Bathini 		goto out;
804cb350c1fSHari Bathini 
805cb350c1fSHari Bathini 	/* Setup elfcorehdr segment */
806cb350c1fSHari Bathini 	ret = crash_prepare_elf64_headers(cmem, false, &headers, &headers_sz);
807cb350c1fSHari Bathini 	if (ret) {
808cb350c1fSHari Bathini 		pr_err("Failed to prepare elf headers for the core\n");
809cb350c1fSHari Bathini 		goto out;
810cb350c1fSHari Bathini 	}
811cb350c1fSHari Bathini 
812cb350c1fSHari Bathini 	/* Fix the offset for backup region in the ELF header */
813cb350c1fSHari Bathini 	update_backup_region_phdr(image, headers);
814cb350c1fSHari Bathini 
815cb350c1fSHari Bathini 	kbuf->buffer = headers;
816cb350c1fSHari Bathini 	kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
817cb350c1fSHari Bathini 	kbuf->bufsz = kbuf->memsz = headers_sz;
818cb350c1fSHari Bathini 	kbuf->top_down = false;
819cb350c1fSHari Bathini 
820cb350c1fSHari Bathini 	ret = kexec_add_buffer(kbuf);
821cb350c1fSHari Bathini 	if (ret) {
822cb350c1fSHari Bathini 		vfree(headers);
823cb350c1fSHari Bathini 		goto out;
824cb350c1fSHari Bathini 	}
825cb350c1fSHari Bathini 
826e6635babSLakshmi Ramasubramanian 	image->elf_load_addr = kbuf->mem;
827e6635babSLakshmi Ramasubramanian 	image->elf_headers_sz = headers_sz;
828e6635babSLakshmi Ramasubramanian 	image->elf_headers = headers;
829cb350c1fSHari Bathini out:
830cb350c1fSHari Bathini 	kfree(cmem);
831cb350c1fSHari Bathini 	return ret;
832cb350c1fSHari Bathini }
833cb350c1fSHari Bathini 
/**
 * load_crashdump_segments_ppc64 - Initialize the additional segements needed
 *                                 to load kdump kernel.
 * @image:                         Kexec image.
 * @kbuf:                          Buffer contents and memory parameters.
 *
 * Loads the backup segment first, then the elfcorehdr segment; the
 * elfcorehdr's backup-region phdr depends on image->arch.backup_start
 * being set by the former, so the order matters.
 *
 * Returns 0 on success, negative errno on error.
 */
int load_crashdump_segments_ppc64(struct kimage *image,
				  struct kexec_buf *kbuf)
{
	int ret;

	/* Load backup segment - first 64K bytes of the crashing kernel */
	ret = load_backup_segment(image, kbuf);
	if (ret) {
		pr_err("Failed to load backup segment\n");
		return ret;
	}
	pr_debug("Loaded the backup region at 0x%lx\n", kbuf->mem);

	/* Load elfcorehdr segment - to export crashing kernel's vmcore */
	ret = load_elfcorehdr_segment(image, kbuf);
	if (ret) {
		pr_err("Failed to load elfcorehdr segment\n");
		return ret;
	}
	pr_debug("Loaded elf core header at 0x%lx, bufsz=0x%lx memsz=0x%lx\n",
		 image->elf_load_addr, kbuf->bufsz, kbuf->memsz);

	return 0;
}
8661a1cf93cSHari Bathini 
8671a1cf93cSHari Bathini /**
86819031275SHari Bathini  * setup_purgatory_ppc64 - initialize PPC64 specific purgatory's global
86919031275SHari Bathini  *                         variables and call setup_purgatory() to initialize
87019031275SHari Bathini  *                         common global variable.
87119031275SHari Bathini  * @image:                 kexec image.
87219031275SHari Bathini  * @slave_code:            Slave code for the purgatory.
87319031275SHari Bathini  * @fdt:                   Flattened device tree for the next kernel.
87419031275SHari Bathini  * @kernel_load_addr:      Address where the kernel is loaded.
87519031275SHari Bathini  * @fdt_load_addr:         Address where the flattened device tree is loaded.
87619031275SHari Bathini  *
87719031275SHari Bathini  * Returns 0 on success, negative errno on error.
87819031275SHari Bathini  */
setup_purgatory_ppc64(struct kimage * image,const void * slave_code,const void * fdt,unsigned long kernel_load_addr,unsigned long fdt_load_addr)87919031275SHari Bathini int setup_purgatory_ppc64(struct kimage *image, const void *slave_code,
88019031275SHari Bathini 			  const void *fdt, unsigned long kernel_load_addr,
88119031275SHari Bathini 			  unsigned long fdt_load_addr)
88219031275SHari Bathini {
8832e6bd221SHari Bathini 	struct device_node *dn = NULL;
88419031275SHari Bathini 	int ret;
88519031275SHari Bathini 
88619031275SHari Bathini 	ret = setup_purgatory(image, slave_code, fdt, kernel_load_addr,
88719031275SHari Bathini 			      fdt_load_addr);
88819031275SHari Bathini 	if (ret)
8897c64e21aSHari Bathini 		goto out;
8907c64e21aSHari Bathini 
8917c64e21aSHari Bathini 	if (image->type == KEXEC_TYPE_CRASH) {
8927c64e21aSHari Bathini 		u32 my_run_at_load = 1;
8937c64e21aSHari Bathini 
8947c64e21aSHari Bathini 		/*
8957c64e21aSHari Bathini 		 * Tell relocatable kernel to run at load address
8967c64e21aSHari Bathini 		 * via the word meant for that at 0x5c.
8977c64e21aSHari Bathini 		 */
8987c64e21aSHari Bathini 		ret = kexec_purgatory_get_set_symbol(image, "run_at_load",
8997c64e21aSHari Bathini 						     &my_run_at_load,
9007c64e21aSHari Bathini 						     sizeof(my_run_at_load),
9017c64e21aSHari Bathini 						     false);
9027c64e21aSHari Bathini 		if (ret)
9037c64e21aSHari Bathini 			goto out;
9047c64e21aSHari Bathini 	}
9057c64e21aSHari Bathini 
9061a1cf93cSHari Bathini 	/* Tell purgatory where to look for backup region */
9071a1cf93cSHari Bathini 	ret = kexec_purgatory_get_set_symbol(image, "backup_start",
9081a1cf93cSHari Bathini 					     &image->arch.backup_start,
9091a1cf93cSHari Bathini 					     sizeof(image->arch.backup_start),
9101a1cf93cSHari Bathini 					     false);
9112e6bd221SHari Bathini 	if (ret)
9122e6bd221SHari Bathini 		goto out;
9132e6bd221SHari Bathini 
9142e6bd221SHari Bathini 	/* Setup OPAL base & entry values */
9152e6bd221SHari Bathini 	dn = of_find_node_by_path("/ibm,opal");
9162e6bd221SHari Bathini 	if (dn) {
9172e6bd221SHari Bathini 		u64 val;
9182e6bd221SHari Bathini 
919*1dd2d563SZhang Zekun 		ret = of_property_read_u64(dn, "opal-base-address", &val);
920*1dd2d563SZhang Zekun 		if (ret)
921*1dd2d563SZhang Zekun 			goto out;
922*1dd2d563SZhang Zekun 
9232e6bd221SHari Bathini 		ret = kexec_purgatory_get_set_symbol(image, "opal_base", &val,
9242e6bd221SHari Bathini 						     sizeof(val), false);
9252e6bd221SHari Bathini 		if (ret)
9262e6bd221SHari Bathini 			goto out;
9272e6bd221SHari Bathini 
928*1dd2d563SZhang Zekun 		ret = of_property_read_u64(dn, "opal-entry-address", &val);
929*1dd2d563SZhang Zekun 		if (ret)
930*1dd2d563SZhang Zekun 			goto out;
9312e6bd221SHari Bathini 		ret = kexec_purgatory_get_set_symbol(image, "opal_entry", &val,
9322e6bd221SHari Bathini 						     sizeof(val), false);
9332e6bd221SHari Bathini 	}
9347c64e21aSHari Bathini out:
9357c64e21aSHari Bathini 	if (ret)
93619031275SHari Bathini 		pr_err("Failed to setup purgatory symbols");
9372e6bd221SHari Bathini 	of_node_put(dn);
93819031275SHari Bathini 	return ret;
93919031275SHari Bathini }
94019031275SHari Bathini 
94119031275SHari Bathini /**
9427f965394SLaurent Dufour  * cpu_node_size - Compute the size of a CPU node in the FDT.
943340a4a9fSLaurent Dufour  *                 This should be done only once and the value is stored in
944340a4a9fSLaurent Dufour  *                 a static variable.
945340a4a9fSLaurent Dufour  * Returns the max size of a CPU node in the FDT.
946340a4a9fSLaurent Dufour  */
cpu_node_size(void)947340a4a9fSLaurent Dufour static unsigned int cpu_node_size(void)
948340a4a9fSLaurent Dufour {
949340a4a9fSLaurent Dufour 	static unsigned int size;
950340a4a9fSLaurent Dufour 	struct device_node *dn;
951340a4a9fSLaurent Dufour 	struct property *pp;
952340a4a9fSLaurent Dufour 
953340a4a9fSLaurent Dufour 	/*
954340a4a9fSLaurent Dufour 	 * Don't compute it twice, we are assuming that the per CPU node size
955340a4a9fSLaurent Dufour 	 * doesn't change during the system's life.
956340a4a9fSLaurent Dufour 	 */
957340a4a9fSLaurent Dufour 	if (size)
958340a4a9fSLaurent Dufour 		return size;
959340a4a9fSLaurent Dufour 
960340a4a9fSLaurent Dufour 	dn = of_find_node_by_type(NULL, "cpu");
961340a4a9fSLaurent Dufour 	if (WARN_ON_ONCE(!dn)) {
962340a4a9fSLaurent Dufour 		// Unlikely to happen
963340a4a9fSLaurent Dufour 		return 0;
964340a4a9fSLaurent Dufour 	}
965340a4a9fSLaurent Dufour 
966340a4a9fSLaurent Dufour 	/*
967340a4a9fSLaurent Dufour 	 * We compute the sub node size for a CPU node, assuming it
968340a4a9fSLaurent Dufour 	 * will be the same for all.
969340a4a9fSLaurent Dufour 	 */
970340a4a9fSLaurent Dufour 	size += strlen(dn->name) + 5;
971340a4a9fSLaurent Dufour 	for_each_property_of_node(dn, pp) {
972340a4a9fSLaurent Dufour 		size += strlen(pp->name);
973340a4a9fSLaurent Dufour 		size += pp->length;
974340a4a9fSLaurent Dufour 	}
975340a4a9fSLaurent Dufour 
976340a4a9fSLaurent Dufour 	of_node_put(dn);
977340a4a9fSLaurent Dufour 	return size;
978340a4a9fSLaurent Dufour }
979340a4a9fSLaurent Dufour 
980340a4a9fSLaurent Dufour /**
981886db323SThiago Jung Bauermann  * kexec_extra_fdt_size_ppc64 - Return the estimated additional size needed to
982886db323SThiago Jung Bauermann  *                              setup FDT for kexec/kdump kernel.
9832377c92eSHari Bathini  * @image:                      kexec image being loaded.
9842377c92eSHari Bathini  *
985886db323SThiago Jung Bauermann  * Returns the estimated extra size needed for kexec/kdump kernel FDT.
9862377c92eSHari Bathini  */
kexec_extra_fdt_size_ppc64(struct kimage * image)987886db323SThiago Jung Bauermann unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image)
9882377c92eSHari Bathini {
98991361b51SRussell Currey 	unsigned int cpu_nodes, extra_size = 0;
990340a4a9fSLaurent Dufour 	struct device_node *dn;
9912377c92eSHari Bathini 	u64 usm_entries;
9922377c92eSHari Bathini 
99391361b51SRussell Currey 	// Budget some space for the password blob. There's already extra space
99491361b51SRussell Currey 	// for the key name
99591361b51SRussell Currey 	if (plpks_is_available())
99691361b51SRussell Currey 		extra_size += (unsigned int)plpks_get_passwordlen();
99791361b51SRussell Currey 
9982377c92eSHari Bathini 	if (image->type != KEXEC_TYPE_CRASH)
99991361b51SRussell Currey 		return extra_size;
10002377c92eSHari Bathini 
10012377c92eSHari Bathini 	/*
1002886db323SThiago Jung Bauermann 	 * For kdump kernel, account for linux,usable-memory and
10032377c92eSHari Bathini 	 * linux,drconf-usable-memory properties. Get an approximate on the
10042377c92eSHari Bathini 	 * number of usable memory entries and use for FDT size estimation.
10052377c92eSHari Bathini 	 */
10067294194bSMichael Ellerman 	if (drmem_lmb_size()) {
1007fc546faaSSourabh Jain 		usm_entries = ((memory_hotplug_max() / drmem_lmb_size()) +
10082377c92eSHari Bathini 			       (2 * (resource_size(&crashk_res) / drmem_lmb_size())));
100991361b51SRussell Currey 		extra_size += (unsigned int)(usm_entries * sizeof(u64));
10107294194bSMichael Ellerman 	}
1011340a4a9fSLaurent Dufour 
1012340a4a9fSLaurent Dufour 	/*
1013340a4a9fSLaurent Dufour 	 * Get the number of CPU nodes in the current DT. This allows to
1014340a4a9fSLaurent Dufour 	 * reserve places for CPU nodes added since the boot time.
1015340a4a9fSLaurent Dufour 	 */
1016340a4a9fSLaurent Dufour 	cpu_nodes = 0;
1017340a4a9fSLaurent Dufour 	for_each_node_by_type(dn, "cpu") {
1018340a4a9fSLaurent Dufour 		cpu_nodes++;
1019340a4a9fSLaurent Dufour 	}
1020340a4a9fSLaurent Dufour 
1021340a4a9fSLaurent Dufour 	if (cpu_nodes > boot_cpu_node_count)
1022340a4a9fSLaurent Dufour 		extra_size += (cpu_nodes - boot_cpu_node_count) * cpu_node_size();
1023340a4a9fSLaurent Dufour 
1024340a4a9fSLaurent Dufour 	return extra_size;
10252377c92eSHari Bathini }
10262377c92eSHari Bathini 
10272377c92eSHari Bathini /**
102840c75399SSourabh Jain  * add_node_props - Reads node properties from device node structure and add
102940c75399SSourabh Jain  *                  them to fdt.
103040c75399SSourabh Jain  * @fdt:            Flattened device tree of the kernel
103140c75399SSourabh Jain  * @node_offset:    offset of the node to add a property at
103240c75399SSourabh Jain  * @dn:             device node pointer
103340c75399SSourabh Jain  *
103440c75399SSourabh Jain  * Returns 0 on success, negative errno on error.
103540c75399SSourabh Jain  */
add_node_props(void * fdt,int node_offset,const struct device_node * dn)103640c75399SSourabh Jain static int add_node_props(void *fdt, int node_offset, const struct device_node *dn)
103740c75399SSourabh Jain {
103840c75399SSourabh Jain 	int ret = 0;
103940c75399SSourabh Jain 	struct property *pp;
104040c75399SSourabh Jain 
104140c75399SSourabh Jain 	if (!dn)
104240c75399SSourabh Jain 		return -EINVAL;
104340c75399SSourabh Jain 
104440c75399SSourabh Jain 	for_each_property_of_node(dn, pp) {
104540c75399SSourabh Jain 		ret = fdt_setprop(fdt, node_offset, pp->name, pp->value, pp->length);
104640c75399SSourabh Jain 		if (ret < 0) {
104740c75399SSourabh Jain 			pr_err("Unable to add %s property: %s\n", pp->name, fdt_strerror(ret));
104840c75399SSourabh Jain 			return ret;
104940c75399SSourabh Jain 		}
105040c75399SSourabh Jain 	}
105140c75399SSourabh Jain 	return ret;
105240c75399SSourabh Jain }
105340c75399SSourabh Jain 
/**
 * update_cpus_node - Update cpus node of flattened device tree using of_root
 *                    device node.
 * @fdt:              Flattened device tree of the kernel.
 *
 * Deletes any existing /cpus node in @fdt and rebuilds it from the live
 * device tree, so the next kernel sees CPUs hot-added since boot.
 *
 * Returns 0 on success, negative errno on error.
 */
static int update_cpus_node(void *fdt)
{
	struct device_node *cpus_node, *dn;
	int cpus_offset, cpus_subnode_offset, ret = 0;

	cpus_offset = fdt_path_offset(fdt, "/cpus");
	/* A missing node is fine (we create it below); other errors are not */
	if (cpus_offset < 0 && cpus_offset != -FDT_ERR_NOTFOUND) {
		pr_err("Malformed device tree: error reading /cpus node: %s\n",
		       fdt_strerror(cpus_offset));
		return cpus_offset;
	}

	/* Drop the stale /cpus node before recreating it from the live DT */
	if (cpus_offset > 0) {
		ret = fdt_del_node(fdt, cpus_offset);
		if (ret < 0) {
			pr_err("Error deleting /cpus node: %s\n", fdt_strerror(ret));
			return -EINVAL;
		}
	}

	/* Add cpus node to fdt */
	cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"), "cpus");
	if (cpus_offset < 0) {
		pr_err("Error creating /cpus node: %s\n", fdt_strerror(cpus_offset));
		return -EINVAL;
	}

	/* Add cpus node properties */
	cpus_node = of_find_node_by_path("/cpus");
	ret = add_node_props(fdt, cpus_offset, cpus_node);
	of_node_put(cpus_node);
	if (ret < 0)
		return ret;

	/* Loop through all subnodes of cpus and add them to fdt */
	for_each_node_by_type(dn, "cpu") {
		cpus_subnode_offset = fdt_add_subnode(fdt, cpus_offset, dn->full_name);
		if (cpus_subnode_offset < 0) {
			pr_err("Unable to add %s subnode: %s\n", dn->full_name,
			       fdt_strerror(cpus_subnode_offset));
			ret = cpus_subnode_offset;
			goto out;
		}

		ret = add_node_props(fdt, cpus_subnode_offset, dn);
		if (ret < 0)
			goto out;
	}
out:
	/* dn is NULL on normal loop exit; of_node_put(NULL) is a no-op */
	of_node_put(dn);
	return ret;
}
111340c75399SSourabh Jain 
copy_property(void * fdt,int node_offset,const struct device_node * dn,const char * propname)1114b1fc44eaSAlexey Kardashevskiy static int copy_property(void *fdt, int node_offset, const struct device_node *dn,
1115b1fc44eaSAlexey Kardashevskiy 			 const char *propname)
1116b1fc44eaSAlexey Kardashevskiy {
1117b1fc44eaSAlexey Kardashevskiy 	const void *prop, *fdtprop;
111883ee9f23SRussell Currey 	int len = 0, fdtlen = 0;
1119b1fc44eaSAlexey Kardashevskiy 
1120b1fc44eaSAlexey Kardashevskiy 	prop = of_get_property(dn, propname, &len);
1121b1fc44eaSAlexey Kardashevskiy 	fdtprop = fdt_getprop(fdt, node_offset, propname, &fdtlen);
1122b1fc44eaSAlexey Kardashevskiy 
1123b1fc44eaSAlexey Kardashevskiy 	if (fdtprop && !prop)
112483ee9f23SRussell Currey 		return fdt_delprop(fdt, node_offset, propname);
1125b1fc44eaSAlexey Kardashevskiy 	else if (prop)
112683ee9f23SRussell Currey 		return fdt_setprop(fdt, node_offset, propname, prop, len);
112783ee9f23SRussell Currey 	else
112883ee9f23SRussell Currey 		return -FDT_ERR_NOTFOUND;
1129b1fc44eaSAlexey Kardashevskiy }
1130b1fc44eaSAlexey Kardashevskiy 
update_pci_dma_nodes(void * fdt,const char * dmapropname)1131b1fc44eaSAlexey Kardashevskiy static int update_pci_dma_nodes(void *fdt, const char *dmapropname)
1132b1fc44eaSAlexey Kardashevskiy {
1133b1fc44eaSAlexey Kardashevskiy 	struct device_node *dn;
1134b1fc44eaSAlexey Kardashevskiy 	int pci_offset, root_offset, ret = 0;
1135b1fc44eaSAlexey Kardashevskiy 
1136b1fc44eaSAlexey Kardashevskiy 	if (!firmware_has_feature(FW_FEATURE_LPAR))
1137b1fc44eaSAlexey Kardashevskiy 		return 0;
1138b1fc44eaSAlexey Kardashevskiy 
1139b1fc44eaSAlexey Kardashevskiy 	root_offset = fdt_path_offset(fdt, "/");
1140b1fc44eaSAlexey Kardashevskiy 	for_each_node_with_property(dn, dmapropname) {
1141b1fc44eaSAlexey Kardashevskiy 		pci_offset = fdt_subnode_offset(fdt, root_offset, of_node_full_name(dn));
1142b1fc44eaSAlexey Kardashevskiy 		if (pci_offset < 0)
1143b1fc44eaSAlexey Kardashevskiy 			continue;
1144b1fc44eaSAlexey Kardashevskiy 
1145b1fc44eaSAlexey Kardashevskiy 		ret = copy_property(fdt, pci_offset, dn, "ibm,dma-window");
1146b1fc44eaSAlexey Kardashevskiy 		if (ret < 0)
1147b1fc44eaSAlexey Kardashevskiy 			break;
1148b1fc44eaSAlexey Kardashevskiy 		ret = copy_property(fdt, pci_offset, dn, dmapropname);
1149b1fc44eaSAlexey Kardashevskiy 		if (ret < 0)
1150b1fc44eaSAlexey Kardashevskiy 			break;
1151b1fc44eaSAlexey Kardashevskiy 	}
1152b1fc44eaSAlexey Kardashevskiy 
1153b1fc44eaSAlexey Kardashevskiy 	return ret;
1154b1fc44eaSAlexey Kardashevskiy }
1155b1fc44eaSAlexey Kardashevskiy 
115640c75399SSourabh Jain /**
115719031275SHari Bathini  * setup_new_fdt_ppc64 - Update the flattend device-tree of the kernel
115819031275SHari Bathini  *                       being loaded.
115919031275SHari Bathini  * @image:               kexec image being loaded.
116019031275SHari Bathini  * @fdt:                 Flattened device tree for the next kernel.
116119031275SHari Bathini  * @initrd_load_addr:    Address where the next initrd will be loaded.
116219031275SHari Bathini  * @initrd_len:          Size of the next initrd, or 0 if there will be none.
116319031275SHari Bathini  * @cmdline:             Command line for the next kernel, or NULL if there will
116419031275SHari Bathini  *                       be none.
116519031275SHari Bathini  *
116619031275SHari Bathini  * Returns 0 on success, negative errno on error.
116719031275SHari Bathini  */
setup_new_fdt_ppc64(const struct kimage * image,void * fdt,unsigned long initrd_load_addr,unsigned long initrd_len,const char * cmdline)116819031275SHari Bathini int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
116919031275SHari Bathini 			unsigned long initrd_load_addr,
117019031275SHari Bathini 			unsigned long initrd_len, const char *cmdline)
117119031275SHari Bathini {
11726ecd0163SHari Bathini 	struct crash_mem *umem = NULL, *rmem = NULL;
11736ecd0163SHari Bathini 	int i, nr_ranges, ret;
11747c64e21aSHari Bathini 
11757c64e21aSHari Bathini 	/*
11767c64e21aSHari Bathini 	 * Restrict memory usage for kdump kernel by setting up
11771a1cf93cSHari Bathini 	 * usable memory ranges and memory reserve map.
11787c64e21aSHari Bathini 	 */
11797c64e21aSHari Bathini 	if (image->type == KEXEC_TYPE_CRASH) {
11807c64e21aSHari Bathini 		ret = get_usable_memory_ranges(&umem);
11817c64e21aSHari Bathini 		if (ret)
11827c64e21aSHari Bathini 			goto out;
11837c64e21aSHari Bathini 
11847c64e21aSHari Bathini 		ret = update_usable_mem_fdt(fdt, umem);
11857c64e21aSHari Bathini 		if (ret) {
11867c64e21aSHari Bathini 			pr_err("Error setting up usable-memory property for kdump kernel\n");
11877c64e21aSHari Bathini 			goto out;
11887c64e21aSHari Bathini 		}
11897c64e21aSHari Bathini 
11901a1cf93cSHari Bathini 		/*
11911a1cf93cSHari Bathini 		 * Ensure we don't touch crashed kernel's memory except the
11921a1cf93cSHari Bathini 		 * first 64K of RAM, which will be backed up.
11931a1cf93cSHari Bathini 		 */
11941a1cf93cSHari Bathini 		ret = fdt_add_mem_rsv(fdt, BACKUP_SRC_END + 1,
11951a1cf93cSHari Bathini 				      crashk_res.start - BACKUP_SRC_SIZE);
11967c64e21aSHari Bathini 		if (ret) {
11977c64e21aSHari Bathini 			pr_err("Error reserving crash memory: %s\n",
11987c64e21aSHari Bathini 			       fdt_strerror(ret));
11997c64e21aSHari Bathini 			goto out;
12007c64e21aSHari Bathini 		}
12011a1cf93cSHari Bathini 
12021a1cf93cSHari Bathini 		/* Ensure backup region is not used by kdump/capture kernel */
12031a1cf93cSHari Bathini 		ret = fdt_add_mem_rsv(fdt, image->arch.backup_start,
12041a1cf93cSHari Bathini 				      BACKUP_SRC_SIZE);
12051a1cf93cSHari Bathini 		if (ret) {
12061a1cf93cSHari Bathini 			pr_err("Error reserving memory for backup: %s\n",
12071a1cf93cSHari Bathini 			       fdt_strerror(ret));
12081a1cf93cSHari Bathini 			goto out;
12091a1cf93cSHari Bathini 		}
12107c64e21aSHari Bathini 	}
12117c64e21aSHari Bathini 
121240c75399SSourabh Jain 	/* Update cpus nodes information to account hotplug CPUs. */
121340c75399SSourabh Jain 	ret =  update_cpus_node(fdt);
121440c75399SSourabh Jain 	if (ret < 0)
121540c75399SSourabh Jain 		goto out;
121640c75399SSourabh Jain 
1217b1fc44eaSAlexey Kardashevskiy 	ret = update_pci_dma_nodes(fdt, DIRECT64_PROPNAME);
1218b1fc44eaSAlexey Kardashevskiy 	if (ret < 0)
1219b1fc44eaSAlexey Kardashevskiy 		goto out;
1220b1fc44eaSAlexey Kardashevskiy 
1221b1fc44eaSAlexey Kardashevskiy 	ret = update_pci_dma_nodes(fdt, DMA64_PROPNAME);
1222b1fc44eaSAlexey Kardashevskiy 	if (ret < 0)
1223b1fc44eaSAlexey Kardashevskiy 		goto out;
1224b1fc44eaSAlexey Kardashevskiy 
12256ecd0163SHari Bathini 	/* Update memory reserve map */
12266ecd0163SHari Bathini 	ret = get_reserved_memory_ranges(&rmem);
12276ecd0163SHari Bathini 	if (ret)
12286ecd0163SHari Bathini 		goto out;
12296ecd0163SHari Bathini 
12306ecd0163SHari Bathini 	nr_ranges = rmem ? rmem->nr_ranges : 0;
12316ecd0163SHari Bathini 	for (i = 0; i < nr_ranges; i++) {
12326ecd0163SHari Bathini 		u64 base, size;
12336ecd0163SHari Bathini 
12346ecd0163SHari Bathini 		base = rmem->ranges[i].start;
12356ecd0163SHari Bathini 		size = rmem->ranges[i].end - base + 1;
12366ecd0163SHari Bathini 		ret = fdt_add_mem_rsv(fdt, base, size);
12376ecd0163SHari Bathini 		if (ret) {
12386ecd0163SHari Bathini 			pr_err("Error updating memory reserve map: %s\n",
12396ecd0163SHari Bathini 			       fdt_strerror(ret));
12406ecd0163SHari Bathini 			goto out;
12416ecd0163SHari Bathini 		}
12426ecd0163SHari Bathini 	}
12436ecd0163SHari Bathini 
124491361b51SRussell Currey 	// If we have PLPKS active, we need to provide the password to the new kernel
124591361b51SRussell Currey 	if (plpks_is_available())
124691361b51SRussell Currey 		ret = plpks_populate_fdt(fdt);
124791361b51SRussell Currey 
12487c64e21aSHari Bathini out:
12496ecd0163SHari Bathini 	kfree(rmem);
12507c64e21aSHari Bathini 	kfree(umem);
12517c64e21aSHari Bathini 	return ret;
125219031275SHari Bathini }
125319031275SHari Bathini 
/**
 * arch_kexec_locate_mem_hole - Skip special memory regions like rtas, opal,
 *                              tce-table, reserved-ranges & such (exclude
 *                              memory ranges) as they can't be used for kexec
 *                              segment buffer. Sets kbuf->mem when a suitable
 *                              memory hole is found.
 * @kbuf:                       Buffer contents and memory parameters.
 *
 * Assumes minimum of PAGE_SIZE alignment for kbuf->memsz & kbuf->buf_align.
 *
 * Returns 0 on success, negative errno on error.
 */
arch_kexec_locate_mem_hole(struct kexec_buf * kbuf)1266b8e55a3eSHari Bathini int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
1267b8e55a3eSHari Bathini {
1268b8e55a3eSHari Bathini 	struct crash_mem **emem;
1269b8e55a3eSHari Bathini 	u64 buf_min, buf_max;
1270b8e55a3eSHari Bathini 	int ret;
1271b8e55a3eSHari Bathini 
1272b8e55a3eSHari Bathini 	/* Look up the exclude ranges list while locating the memory hole */
1273b8e55a3eSHari Bathini 	emem = &(kbuf->image->arch.exclude_ranges);
1274b8e55a3eSHari Bathini 	if (!(*emem) || ((*emem)->nr_ranges == 0)) {
1275b8e55a3eSHari Bathini 		pr_warn("No exclude range list. Using the default locate mem hole method\n");
1276b8e55a3eSHari Bathini 		return kexec_locate_mem_hole(kbuf);
1277b8e55a3eSHari Bathini 	}
1278b8e55a3eSHari Bathini 
1279b5667d13SHari Bathini 	buf_min = kbuf->buf_min;
1280b5667d13SHari Bathini 	buf_max = kbuf->buf_max;
1281b8e55a3eSHari Bathini 	/* Segments for kdump kernel should be within crashkernel region */
1282b5667d13SHari Bathini 	if (kbuf->image->type == KEXEC_TYPE_CRASH) {
1283b5667d13SHari Bathini 		buf_min = (buf_min < crashk_res.start ?
1284b5667d13SHari Bathini 			   crashk_res.start : buf_min);
1285b5667d13SHari Bathini 		buf_max = (buf_max > crashk_res.end ?
1286b5667d13SHari Bathini 			   crashk_res.end : buf_max);
1287b5667d13SHari Bathini 	}
1288b8e55a3eSHari Bathini 
1289b8e55a3eSHari Bathini 	if (buf_min > buf_max) {
1290b8e55a3eSHari Bathini 		pr_err("Invalid buffer min and/or max values\n");
1291b8e55a3eSHari Bathini 		return -EINVAL;
1292b8e55a3eSHari Bathini 	}
1293b8e55a3eSHari Bathini 
1294b8e55a3eSHari Bathini 	if (kbuf->top_down)
1295b8e55a3eSHari Bathini 		ret = locate_mem_hole_top_down_ppc64(kbuf, buf_min, buf_max,
1296b8e55a3eSHari Bathini 						     *emem);
1297b8e55a3eSHari Bathini 	else
1298b8e55a3eSHari Bathini 		ret = locate_mem_hole_bottom_up_ppc64(kbuf, buf_min, buf_max,
1299b8e55a3eSHari Bathini 						      *emem);
1300b8e55a3eSHari Bathini 
1301b8e55a3eSHari Bathini 	/* Add the buffer allocated to the exclude list for the next lookup */
1302b8e55a3eSHari Bathini 	if (!ret) {
1303b8e55a3eSHari Bathini 		add_mem_range(emem, kbuf->mem, kbuf->memsz);
1304b8e55a3eSHari Bathini 		sort_memory_ranges(*emem, true);
1305b8e55a3eSHari Bathini 	} else {
1306b8e55a3eSHari Bathini 		pr_err("Failed to locate memory buffer of size %lu\n",
1307b8e55a3eSHari Bathini 		       kbuf->memsz);
1308b8e55a3eSHari Bathini 	}
1309b8e55a3eSHari Bathini 	return ret;
1310b8e55a3eSHari Bathini }
1311b8e55a3eSHari Bathini 
/**
 * arch_kexec_kernel_image_probe - Does additional handling needed to setup
 *                                 kexec segments.
 * @image:                         kexec image being loaded.
 * @buf:                           Buffer pointing to elf data.
 * @buf_len:                       Length of the buffer.
 *
 * Returns 0 on success, negative errno on error.
 */
arch_kexec_kernel_image_probe(struct kimage * image,void * buf,unsigned long buf_len)132119031275SHari Bathini int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
132219031275SHari Bathini 				  unsigned long buf_len)
132319031275SHari Bathini {
1324b8e55a3eSHari Bathini 	int ret;
1325b8e55a3eSHari Bathini 
1326b5667d13SHari Bathini 	/* Get exclude memory ranges needed for setting up kexec segments */
1327b8e55a3eSHari Bathini 	ret = get_exclude_memory_ranges(&(image->arch.exclude_ranges));
13286ecd0163SHari Bathini 	if (ret) {
1329b8e55a3eSHari Bathini 		pr_err("Failed to setup exclude memory ranges for buffer lookup\n");
13306ecd0163SHari Bathini 		return ret;
13316ecd0163SHari Bathini 	}
133219031275SHari Bathini 
133319031275SHari Bathini 	return kexec_image_probe_default(image, buf, buf_len);
133419031275SHari Bathini }
1335b8e55a3eSHari Bathini 
/**
 * arch_kimage_file_post_load_cleanup - Frees up all the allocations done
 *                                      while loading the image.
 * @image:                              kexec image being loaded.
 *
 * Returns 0 on success, negative errno on error.
 */
arch_kimage_file_post_load_cleanup(struct kimage * image)1343b8e55a3eSHari Bathini int arch_kimage_file_post_load_cleanup(struct kimage *image)
1344b8e55a3eSHari Bathini {
1345b8e55a3eSHari Bathini 	kfree(image->arch.exclude_ranges);
1346b8e55a3eSHari Bathini 	image->arch.exclude_ranges = NULL;
1347b8e55a3eSHari Bathini 
13481a1cf93cSHari Bathini 	vfree(image->arch.backup_buf);
13491a1cf93cSHari Bathini 	image->arch.backup_buf = NULL;
13501a1cf93cSHari Bathini 
1351e6635babSLakshmi Ramasubramanian 	vfree(image->elf_headers);
1352e6635babSLakshmi Ramasubramanian 	image->elf_headers = NULL;
1353e6635babSLakshmi Ramasubramanian 	image->elf_headers_sz = 0;
1354cb350c1fSHari Bathini 
13553c985d31SRob Herring 	kvfree(image->arch.fdt);
13563c985d31SRob Herring 	image->arch.fdt = NULL;
13573c985d31SRob Herring 
1358b8e55a3eSHari Bathini 	return kexec_image_post_load_cleanup_default(image);
1359b8e55a3eSHari Bathini }
1360