xref: /openbmc/linux/drivers/of/fdt.c (revision 1103d3b5)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Functions for working with the Flattened Device Tree data format
4  *
5  * Copyright 2009 Benjamin Herrenschmidt, IBM Corp
6  * benh@kernel.crashing.org
7  */
8 
9 #define pr_fmt(fmt)	"OF: fdt: " fmt
10 
11 #include <linux/crash_dump.h>
12 #include <linux/crc32.h>
13 #include <linux/kernel.h>
14 #include <linux/initrd.h>
15 #include <linux/memblock.h>
16 #include <linux/mutex.h>
17 #include <linux/of.h>
18 #include <linux/of_fdt.h>
19 #include <linux/of_reserved_mem.h>
20 #include <linux/sizes.h>
21 #include <linux/string.h>
22 #include <linux/errno.h>
23 #include <linux/slab.h>
24 #include <linux/libfdt.h>
25 #include <linux/debugfs.h>
26 #include <linux/serial_core.h>
27 #include <linux/sysfs.h>
28 #include <linux/random.h>
29 
30 #include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
31 #include <asm/page.h>
32 
33 #include "of_private.h"
34 
35 /*
36  * of_fdt_limit_memory - limit the number of regions in the /memory node
37  * @limit: maximum entries
38  *
39  * Adjust the flattened device tree to have at most 'limit' number of
40  * memory entries in the /memory node. This function may be called
41  * any time after initial_boot_param is set.
42  */
43 void __init of_fdt_limit_memory(int limit)
44 {
45 	int memory;
46 	int len;
47 	const void *val;
48 	int nr_address_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
49 	int nr_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
50 	const __be32 *addr_prop;
51 	const __be32 *size_prop;
52 	int root_offset;
53 	int cell_size;
54 
55 	root_offset = fdt_path_offset(initial_boot_params, "/");
56 	if (root_offset < 0)
57 		return;
58 
59 	addr_prop = fdt_getprop(initial_boot_params, root_offset,
60 				"#address-cells", NULL);
61 	if (addr_prop)
62 		nr_address_cells = fdt32_to_cpu(*addr_prop);
63 
64 	size_prop = fdt_getprop(initial_boot_params, root_offset,
65 				"#size-cells", NULL);
66 	if (size_prop)
67 		nr_size_cells = fdt32_to_cpu(*size_prop);
68 
69 	cell_size = sizeof(uint32_t)*(nr_address_cells + nr_size_cells);
70 
71 	memory = fdt_path_offset(initial_boot_params, "/memory");
72 	if (memory > 0) {
73 		val = fdt_getprop(initial_boot_params, memory, "reg", &len);
74 		if (len > limit*cell_size) {
75 			len = limit*cell_size;
76 			pr_debug("Limiting number of entries to %d\n", limit);
77 			fdt_setprop(initial_boot_params, memory, "reg", val,
78 					len);
79 		}
80 	}
81 }
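/*
 * Worked example (editorial note, not part of the original source): with the
 * common 64-bit layout of #address-cells = <2> and #size-cells = <2>, each
 * /memory "reg" entry occupies (2 + 2) * sizeof(uint32_t) = 16 bytes, so
 * of_fdt_limit_memory(8) clamps the "reg" property to at most 8 * 16 = 128
 * bytes, i.e. eight base/size pairs.
 */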
82 
83 static bool of_fdt_device_is_available(const void *blob, unsigned long node)
84 {
85 	const char *status = fdt_getprop(blob, node, "status", NULL);
86 
87 	if (!status)
88 		return true;
89 
90 	if (!strcmp(status, "ok") || !strcmp(status, "okay"))
91 		return true;
92 
93 	return false;
94 }
95 
96 static void *unflatten_dt_alloc(void **mem, unsigned long size,
97 				       unsigned long align)
98 {
99 	void *res;
100 
101 	*mem = PTR_ALIGN(*mem, align);
102 	res = *mem;
103 	*mem += size;
104 
105 	return res;
106 }
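/*
 * Illustrative sketch (editorial addition): unflatten_dt_alloc() is a plain
 * bump allocator over the memory chunk sized by the dry run.  A caller carves
 * out aligned objects one after another; 'mem' is a cursor into that chunk
 * and 'name_len' is a hypothetical length:
 *
 *	void *mem = chunk;
 *	struct device_node *np;
 *	struct property *pp;
 *
 *	np = unflatten_dt_alloc(&mem, sizeof(*np) + name_len,
 *				__alignof__(struct device_node));
 *	pp = unflatten_dt_alloc(&mem, sizeof(*pp),
 *				__alignof__(struct property));
 *
 * After the calls, 'mem' points just past the last allocation.
 */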
107 
108 static void populate_properties(const void *blob,
109 				int offset,
110 				void **mem,
111 				struct device_node *np,
112 				const char *nodename,
113 				bool dryrun)
114 {
115 	struct property *pp, **pprev = NULL;
116 	int cur;
117 	bool has_name = false;
118 
119 	pprev = &np->properties;
120 	for (cur = fdt_first_property_offset(blob, offset);
121 	     cur >= 0;
122 	     cur = fdt_next_property_offset(blob, cur)) {
123 		const __be32 *val;
124 		const char *pname;
125 		u32 sz;
126 
127 		val = fdt_getprop_by_offset(blob, cur, &pname, &sz);
128 		if (!val) {
129 			pr_warn("Cannot locate property at 0x%x\n", cur);
130 			continue;
131 		}
132 
133 		if (!pname) {
134 			pr_warn("Cannot find property name at 0x%x\n", cur);
135 			continue;
136 		}
137 
138 		if (!strcmp(pname, "name"))
139 			has_name = true;
140 
141 		pp = unflatten_dt_alloc(mem, sizeof(struct property),
142 					__alignof__(struct property));
143 		if (dryrun)
144 			continue;
145 
146 		/* We accept flattened tree phandles either in
147 		 * ePAPR-style "phandle" properties, or the
148 		 * legacy "linux,phandle" properties.  If both
149 		 * appear and have different values, things
150 		 * will get weird. Don't do that.
151 		 */
152 		if (!strcmp(pname, "phandle") ||
153 		    !strcmp(pname, "linux,phandle")) {
154 			if (!np->phandle)
155 				np->phandle = be32_to_cpup(val);
156 		}
157 
158 		/* And we process the "ibm,phandle" property
159 		 * used in pSeries dynamic device tree
160 		 * stuff
161 		 */
162 		if (!strcmp(pname, "ibm,phandle"))
163 			np->phandle = be32_to_cpup(val);
164 
165 		pp->name   = (char *)pname;
166 		pp->length = sz;
167 		pp->value  = (__be32 *)val;
168 		*pprev     = pp;
169 		pprev      = &pp->next;
170 	}
171 
172 	/* With version 0x10 we may not have the name property,
173 	 * recreate it here from the unit name if absent
174 	 */
175 	if (!has_name) {
176 		const char *p = nodename, *ps = p, *pa = NULL;
177 		int len;
178 
179 		while (*p) {
180 			if ((*p) == '@')
181 				pa = p;
182 			else if ((*p) == '/')
183 				ps = p + 1;
184 			p++;
185 		}
186 
187 		if (pa < ps)
188 			pa = p;
189 		len = (pa - ps) + 1;
190 		pp = unflatten_dt_alloc(mem, sizeof(struct property) + len,
191 					__alignof__(struct property));
192 		if (!dryrun) {
193 			pp->name   = "name";
194 			pp->length = len;
195 			pp->value  = pp + 1;
196 			*pprev     = pp;
197 			memcpy(pp->value, ps, len - 1);
198 			((char *)pp->value)[len - 1] = 0;
199 			pr_debug("fixed up name for %s -> %s\n",
200 				 nodename, (char *)pp->value);
201 		}
202 	}
203 }
204 
205 static int populate_node(const void *blob,
206 			  int offset,
207 			  void **mem,
208 			  struct device_node *dad,
209 			  struct device_node **pnp,
210 			  bool dryrun)
211 {
212 	struct device_node *np;
213 	const char *pathp;
214 	int len;
215 
216 	pathp = fdt_get_name(blob, offset, &len);
217 	if (!pathp) {
218 		*pnp = NULL;
219 		return len;
220 	}
221 
222 	len++;
223 
224 	np = unflatten_dt_alloc(mem, sizeof(struct device_node) + len,
225 				__alignof__(struct device_node));
226 	if (!dryrun) {
227 		char *fn;
228 		of_node_init(np);
229 		np->full_name = fn = ((char *)np) + sizeof(*np);
230 
231 		memcpy(fn, pathp, len);
232 
233 		if (dad != NULL) {
234 			np->parent = dad;
235 			np->sibling = dad->child;
236 			dad->child = np;
237 		}
238 	}
239 
240 	populate_properties(blob, offset, mem, np, pathp, dryrun);
241 	if (!dryrun) {
242 		np->name = of_get_property(np, "name", NULL);
243 		if (!np->name)
244 			np->name = "<NULL>";
245 	}
246 
247 	*pnp = np;
248 	return 0;
249 }
250 
251 static void reverse_nodes(struct device_node *parent)
252 {
253 	struct device_node *child, *next;
254 
255 	/* Depth-first: recurse into the children first */
256 	child = parent->child;
257 	while (child) {
258 		reverse_nodes(child);
259 
260 		child = child->sibling;
261 	}
262 
263 	/* Reverse the nodes in the child list */
264 	child = parent->child;
265 	parent->child = NULL;
266 	while (child) {
267 		next = child->sibling;
268 
269 		child->sibling = parent->child;
270 		parent->child = child;
271 		child = next;
272 	}
273 }
274 
275 /**
276  * unflatten_dt_nodes - Alloc and populate a device_node from the flat tree
277  * @blob: The parent device tree blob
278  * @mem: Memory chunk to use for allocating device nodes and properties
279  * @dad: Parent struct device_node
280  * @nodepp: The device_node tree created by the call
281  *
282  * Return: The size of unflattened device tree or error code
283  */
284 static int unflatten_dt_nodes(const void *blob,
285 			      void *mem,
286 			      struct device_node *dad,
287 			      struct device_node **nodepp)
288 {
289 	struct device_node *root;
290 	int offset = 0, depth = 0, initial_depth = 0;
291 #define FDT_MAX_DEPTH	64
292 	struct device_node *nps[FDT_MAX_DEPTH];
293 	void *base = mem;
294 	bool dryrun = !base;
295 	int ret;
296 
297 	if (nodepp)
298 		*nodepp = NULL;
299 
300 	/*
301 	 * We're unflattening a device sub-tree if @dad is valid. There are
302 	 * possibly multiple nodes in the first level of depth. We need to
303 	 * set @depth to 1 to make fdt_next_node() happy, as it bails
304 	 * immediately when negative @depth is found. Otherwise, the device
305 	 * nodes except the first one won't be unflattened successfully.
306 	 */
307 	if (dad)
308 		depth = initial_depth = 1;
309 
310 	root = dad;
311 	nps[depth] = dad;
312 
313 	for (offset = 0;
314 	     offset >= 0 && depth >= initial_depth;
315 	     offset = fdt_next_node(blob, offset, &depth)) {
316 		if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH - 1))
317 			continue;
318 
319 		if (!IS_ENABLED(CONFIG_OF_KOBJ) &&
320 		    !of_fdt_device_is_available(blob, offset))
321 			continue;
322 
323 		ret = populate_node(blob, offset, &mem, nps[depth],
324 				   &nps[depth+1], dryrun);
325 		if (ret < 0)
326 			return ret;
327 
328 		if (!dryrun && nodepp && !*nodepp)
329 			*nodepp = nps[depth+1];
330 		if (!dryrun && !root)
331 			root = nps[depth+1];
332 	}
333 
334 	if (offset < 0 && offset != -FDT_ERR_NOTFOUND) {
335 		pr_err("Error %d processing FDT\n", offset);
336 		return -EINVAL;
337 	}
338 
339 	/*
340 	 * Reverse the child list. Some drivers assume that node order matches
341 	 * the .dts node order.
342 	 */
343 	if (!dryrun)
344 		reverse_nodes(root);
345 
346 	return mem - base;
347 }
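/*
 * Usage sketch (editorial addition): unflatten_dt_nodes() is meant to be
 * called twice, first as a dry run to size the allocation and then for real,
 * which is exactly what __unflatten_device_tree() below does:
 *
 *	size = unflatten_dt_nodes(blob, NULL, dad, NULL);	(dry run, mem == NULL)
 *	mem = dt_alloc(size + 4, __alignof__(struct device_node));
 *	unflatten_dt_nodes(blob, mem, dad, &mynodes);		(real pass)
 */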
348 
349 /**
350  * __unflatten_device_tree - create tree of device_nodes from flat blob
351  * @blob: The blob to expand
352  * @dad: Parent device node
353  * @mynodes: The device_node tree created by the call
354  * @dt_alloc: An allocator that provides a virtual address to memory
355  * for the resulting tree
356  * @detached: if true set OF_DETACHED on @mynodes
357  *
358  * unflattens a device-tree, creating the tree of struct device_node. It also
359  * fills the "name" and "type" pointers of the nodes so the normal device-tree
360  * walking functions can be used.
361  *
362  * Return: NULL on failure or the memory chunk containing the unflattened
363  * device tree on success.
364  */
365 void *__unflatten_device_tree(const void *blob,
366 			      struct device_node *dad,
367 			      struct device_node **mynodes,
368 			      void *(*dt_alloc)(u64 size, u64 align),
369 			      bool detached)
370 {
371 	int size;
372 	void *mem;
373 	int ret;
374 
375 	if (mynodes)
376 		*mynodes = NULL;
377 
378 	pr_debug(" -> unflatten_device_tree()\n");
379 
380 	if (!blob) {
381 		pr_debug("No device tree pointer\n");
382 		return NULL;
383 	}
384 
385 	pr_debug("Unflattening device tree:\n");
386 	pr_debug("magic: %08x\n", fdt_magic(blob));
387 	pr_debug("size: %08x\n", fdt_totalsize(blob));
388 	pr_debug("version: %08x\n", fdt_version(blob));
389 
390 	if (fdt_check_header(blob)) {
391 		pr_err("Invalid device tree blob header\n");
392 		return NULL;
393 	}
394 
395 	/* First pass, scan for size */
396 	size = unflatten_dt_nodes(blob, NULL, dad, NULL);
397 	if (size <= 0)
398 		return NULL;
399 
400 	size = ALIGN(size, 4);
401 	pr_debug("  size is %d, allocating...\n", size);
402 
403 	/* Allocate memory for the expanded device tree */
404 	mem = dt_alloc(size + 4, __alignof__(struct device_node));
405 	if (!mem)
406 		return NULL;
407 
408 	memset(mem, 0, size);
409 
410 	*(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef);
411 
412 	pr_debug("  unflattening %p...\n", mem);
413 
414 	/* Second pass, do actual unflattening */
415 	ret = unflatten_dt_nodes(blob, mem, dad, mynodes);
416 
417 	if (be32_to_cpup(mem + size) != 0xdeadbeef)
418 		pr_warn("End of tree marker overwritten: %08x\n",
419 			be32_to_cpup(mem + size));
420 
421 	if (ret <= 0)
422 		return NULL;
423 
424 	if (detached && mynodes && *mynodes) {
425 		of_node_set_flag(*mynodes, OF_DETACHED);
426 		pr_debug("unflattened tree is detached\n");
427 	}
428 
429 	pr_debug(" <- unflatten_device_tree()\n");
430 	return mem;
431 }
432 
433 static void *kernel_tree_alloc(u64 size, u64 align)
434 {
435 	return kzalloc(size, GFP_KERNEL);
436 }
437 
438 static DEFINE_MUTEX(of_fdt_unflatten_mutex);
439 
440 /**
441  * of_fdt_unflatten_tree - create tree of device_nodes from flat blob
442  * @blob: Flat device tree blob
443  * @dad: Parent device node
444  * @mynodes: The device tree created by the call
445  *
446  * unflattens the device-tree passed by the firmware, creating the
447  * tree of struct device_node. It also fills the "name" and "type"
448  * pointers of the nodes so the normal device-tree walking functions
449  * can be used.
450  *
451  * Return: NULL on failure or the memory chunk containing the unflattened
452  * device tree on success.
453  */
454 void *of_fdt_unflatten_tree(const unsigned long *blob,
455 			    struct device_node *dad,
456 			    struct device_node **mynodes)
457 {
458 	void *mem;
459 
460 	mutex_lock(&of_fdt_unflatten_mutex);
461 	mem = __unflatten_device_tree(blob, dad, mynodes, &kernel_tree_alloc,
462 				      true);
463 	mutex_unlock(&of_fdt_unflatten_mutex);
464 
465 	return mem;
466 }
467 EXPORT_SYMBOL_GPL(of_fdt_unflatten_tree);
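/*
 * Usage sketch (editorial addition; 'blob' and 'overlay_root' are
 * hypothetical names): a caller holding a separate FDT blob, e.g. one loaded
 * from firmware, could unflatten it into a detached tree like this:
 *
 *	struct device_node *overlay_root;
 *	void *mem;
 *
 *	mem = of_fdt_unflatten_tree(blob, NULL, &overlay_root);
 *	if (!mem)
 *		return -EINVAL;
 *
 * On success, overlay_root has OF_DETACHED set and can be walked with the
 * usual of_* accessors.
 */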
468 
469 /* Everything below here references initial_boot_params directly. */
470 int __initdata dt_root_addr_cells;
471 int __initdata dt_root_size_cells;
472 
473 void *initial_boot_params __ro_after_init;
474 phys_addr_t initial_boot_params_pa __ro_after_init;
475 
476 #ifdef CONFIG_OF_EARLY_FLATTREE
477 
478 static u32 of_fdt_crc32;
479 
480 static int __init early_init_dt_reserve_memory(phys_addr_t base,
481 					       phys_addr_t size, bool nomap)
482 {
483 	if (nomap) {
484 		/*
485 		 * If the memory is already reserved (by another region), we
486 		 * should not allow it to be marked nomap, but don't worry
487 		 * if the region isn't memory as it won't be mapped.
488 		 */
489 		if (memblock_overlaps_region(&memblock.memory, base, size) &&
490 		    memblock_is_region_reserved(base, size))
491 			return -EBUSY;
492 
493 		return memblock_mark_nomap(base, size);
494 	}
495 	return memblock_reserve(base, size);
496 }
497 
498 /*
499  * __reserved_mem_reserve_reg() - reserve all memory described in 'reg' property
500  */
501 static int __init __reserved_mem_reserve_reg(unsigned long node,
502 					     const char *uname)
503 {
504 	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
505 	phys_addr_t base, size;
506 	int len;
507 	const __be32 *prop;
508 	int first = 1;
509 	bool nomap;
510 
511 	prop = of_get_flat_dt_prop(node, "reg", &len);
512 	if (!prop)
513 		return -ENOENT;
514 
515 	if (len && len % t_len != 0) {
516 		pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
517 		       uname);
518 		return -EINVAL;
519 	}
520 
521 	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
522 
523 	while (len >= t_len) {
524 		base = dt_mem_next_cell(dt_root_addr_cells, &prop);
525 		size = dt_mem_next_cell(dt_root_size_cells, &prop);
526 
527 		if (size &&
528 		    early_init_dt_reserve_memory(base, size, nomap) == 0)
529 			pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
530 				uname, &base, (unsigned long)(size / SZ_1M));
531 		else
532 			pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
533 			       uname, &base, (unsigned long)(size / SZ_1M));
534 
535 		len -= t_len;
536 		if (first) {
537 			fdt_reserved_mem_save_node(node, uname, base, size);
538 			first = 0;
539 		}
540 	}
541 	return 0;
542 }
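/*
 * Example (editorial addition; the node name and addresses are illustrative):
 * with dt_root_addr_cells = 2 and dt_root_size_cells = 2, a child of
 * /reserved-memory such as
 *
 *	framebuffer@78000000 {
 *		reg = <0x0 0x78000000 0x0 0x00800000>;
 *		no-map;
 *	};
 *
 * yields t_len = 16, so the 16-byte "reg" above parses as one base/size pair,
 * and because of "no-map" the region is handed to memblock_mark_nomap()
 * rather than memblock_reserve().
 */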
543 
544 /*
545  * __reserved_mem_check_root() - check if #size-cells, #address-cells provided
546  * in /reserved-memory match the values supported by the current implementation,
547  * and that the 'ranges' property has been provided.
548  */
549 static int __init __reserved_mem_check_root(unsigned long node)
550 {
551 	const __be32 *prop;
552 
553 	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
554 	if (!prop || be32_to_cpup(prop) != dt_root_size_cells)
555 		return -EINVAL;
556 
557 	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
558 	if (!prop || be32_to_cpup(prop) != dt_root_addr_cells)
559 		return -EINVAL;
560 
561 	prop = of_get_flat_dt_prop(node, "ranges", NULL);
562 	if (!prop)
563 		return -EINVAL;
564 	return 0;
565 }
566 
567 /*
568  * fdt_scan_reserved_mem() - scan a single FDT node for reserved memory
569  */
570 static int __init fdt_scan_reserved_mem(void)
571 {
572 	int node, child;
573 	const void *fdt = initial_boot_params;
574 
575 	node = fdt_path_offset(fdt, "/reserved-memory");
576 	if (node < 0)
577 		return -ENODEV;
578 
579 	if (__reserved_mem_check_root(node) != 0) {
580 		pr_err("Reserved memory: unsupported node format, ignoring\n");
581 		return -EINVAL;
582 	}
583 
584 	fdt_for_each_subnode(child, fdt, node) {
585 		const char *uname;
586 		int err;
587 
588 		if (!of_fdt_device_is_available(fdt, child))
589 			continue;
590 
591 		uname = fdt_get_name(fdt, child, NULL);
592 
593 		err = __reserved_mem_reserve_reg(child, uname);
594 		if (err == -ENOENT && of_get_flat_dt_prop(child, "size", NULL))
595 			fdt_reserved_mem_save_node(child, uname, 0, 0);
596 	}
597 	return 0;
598 }
599 
600 /*
601  * fdt_reserve_elfcorehdr() - reserves memory for elf core header
602  *
603  * This function reserves the memory occupied by an elf core header
604  * described in the device tree. This region contains all the
605  * information about the primary kernel's core image and is used by a
606  * dump-capture kernel to access the system memory of the primary kernel.
607  */
608 static void __init fdt_reserve_elfcorehdr(void)
609 {
610 	if (!IS_ENABLED(CONFIG_CRASH_DUMP) || !elfcorehdr_size)
611 		return;
612 
613 	if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
614 		pr_warn("elfcorehdr is overlapped\n");
615 		return;
616 	}
617 
618 	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
619 
620 	pr_info("Reserving %llu KiB of memory at 0x%llx for elfcorehdr\n",
621 		elfcorehdr_size >> 10, elfcorehdr_addr);
622 }
623 
624 /**
625  * early_init_fdt_scan_reserved_mem() - create reserved memory regions
626  *
627  * This function reserves memory from the early allocator for exclusive device
628  * use, as defined in the device tree. It should be called by arch-specific code
629  * once the early allocator (i.e. memblock) has been fully activated.
630  */
631 void __init early_init_fdt_scan_reserved_mem(void)
632 {
633 	int n;
634 	u64 base, size;
635 
636 	if (!initial_boot_params)
637 		return;
638 
639 	fdt_scan_reserved_mem();
640 	fdt_reserve_elfcorehdr();
641 
642 	/* Process header /memreserve/ fields */
643 	for (n = 0; ; n++) {
644 		fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
645 		if (!size)
646 			break;
647 		memblock_reserve(base, size);
648 	}
649 
650 	fdt_init_reserved_mem();
651 }
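/*
 * Typical ordering (editorial sketch; the exact placement is arch-specific):
 *
 *	early_init_dt_scan(dt_virt, dt_phys);	(adds /memory to memblock)
 *	...
 *	early_init_fdt_scan_reserved_mem();	(reserves /reserved-memory and
 *						 /memreserve/ regions)
 *
 * i.e. the call belongs after memblock knows about the usable memory but
 * before any significant early allocations are made.
 */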
652 
653 /**
654  * early_init_fdt_reserve_self() - reserve the memory used by the FDT blob
655  */
656 void __init early_init_fdt_reserve_self(void)
657 {
658 	if (!initial_boot_params)
659 		return;
660 
661 	/* Reserve the dtb region */
662 	memblock_reserve(__pa(initial_boot_params),
663 			 fdt_totalsize(initial_boot_params));
664 }
665 
666 /**
667  * of_scan_flat_dt - scan the flattened tree blob and call a callback on each node.
668  * @it: callback function
669  * @data: context data pointer
670  *
671  * This function is used to scan the flattened device tree; it is
672  * typically used to extract memory information at boot, before the
673  * tree can be unflattened.
674  */
675 int __init of_scan_flat_dt(int (*it)(unsigned long node,
676 				     const char *uname, int depth,
677 				     void *data),
678 			   void *data)
679 {
680 	const void *blob = initial_boot_params;
681 	const char *pathp;
682 	int offset, rc = 0, depth = -1;
683 
684 	if (!blob)
685 		return 0;
686 
687 	for (offset = fdt_next_node(blob, -1, &depth);
688 	     offset >= 0 && depth >= 0 && !rc;
689 	     offset = fdt_next_node(blob, offset, &depth)) {
690 
691 		pathp = fdt_get_name(blob, offset, NULL);
692 		rc = it(offset, pathp, depth, data);
693 	}
694 	return rc;
695 }
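/*
 * Example callback (editorial sketch; the callback and the data pointer are
 * hypothetical):
 *
 *	static int __init scan_chosen_bootargs(unsigned long node,
 *					       const char *uname, int depth,
 *					       void *data)
 *	{
 *		if (depth != 1 || strcmp(uname, "chosen") != 0)
 *			return 0;	(keep scanning)
 *		*(const void **)data = of_get_flat_dt_prop(node, "bootargs", NULL);
 *		return 1;		(non-zero return stops the scan)
 *	}
 *
 *	of_scan_flat_dt(scan_chosen_bootargs, &bootargs);
 */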
696 
697 /**
698  * of_scan_flat_dt_subnodes - scan the sub-nodes of a node and call a callback on each.
699  * @parent: parent node
700  * @it: callback function
701  * @data: context data pointer
702  *
703  * This function is used to scan sub-nodes of a node.
704  */
705 int __init of_scan_flat_dt_subnodes(unsigned long parent,
706 				    int (*it)(unsigned long node,
707 					      const char *uname,
708 					      void *data),
709 				    void *data)
710 {
711 	const void *blob = initial_boot_params;
712 	int node;
713 
714 	fdt_for_each_subnode(node, blob, parent) {
715 		const char *pathp;
716 		int rc;
717 
718 		pathp = fdt_get_name(blob, node, NULL);
719 		rc = it(node, pathp, data);
720 		if (rc)
721 			return rc;
722 	}
723 	return 0;
724 }
725 
726 /**
727  * of_get_flat_dt_subnode_by_name - get the subnode by given name
728  *
729  * @node: the parent node
730  * @uname: the name of the subnode
731  * Return: offset of the subnode, or -FDT_ERR_NOTFOUND if there is none
732  */
733 
734 int __init of_get_flat_dt_subnode_by_name(unsigned long node, const char *uname)
735 {
736 	return fdt_subnode_offset(initial_boot_params, node, uname);
737 }
738 
739 /*
740  * of_get_flat_dt_root - find the root node in the flat blob
741  */
742 unsigned long __init of_get_flat_dt_root(void)
743 {
744 	return 0;
745 }
746 
747 /*
748  * of_get_flat_dt_prop - Given a node in the flat blob, return the property ptr
749  *
750  * This function can be used within an of_scan_flat_dt() callback to get
751  * access to properties.
752  */
753 const void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
754 				       int *size)
755 {
756 	return fdt_getprop(initial_boot_params, node, name, size);
757 }
758 
759 /**
760  * of_fdt_is_compatible - Return true if given node from the given blob has
761  * compat in its compatible list
762  * @blob: A device tree blob
763  * @node: node to test
764  * @compat: compatible string to compare with compatible list.
765  *
766  * Return: a non-zero value on match with smaller values returned for more
767  * specific compatible values.
768  */
769 static int of_fdt_is_compatible(const void *blob,
770 		      unsigned long node, const char *compat)
771 {
772 	const char *cp;
773 	int cplen;
774 	unsigned long l, score = 0;
775 
776 	cp = fdt_getprop(blob, node, "compatible", &cplen);
777 	if (cp == NULL)
778 		return 0;
779 	while (cplen > 0) {
780 		score++;
781 		if (of_compat_cmp(cp, compat, strlen(compat)) == 0)
782 			return score;
783 		l = strlen(cp) + 1;
784 		cp += l;
785 		cplen -= l;
786 	}
787 
788 	return 0;
789 }
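/*
 * Worked example (editorial note; the strings are illustrative): for a node
 * with
 *
 *	compatible = "vendor,board-v2", "vendor,board", "simple-bus";
 *
 * of_fdt_is_compatible() returns 1 for "vendor,board-v2", 2 for
 * "vendor,board", 3 for "simple-bus" and 0 for anything else, so a lower
 * non-zero score means a more specific match.
 */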
790 
791 /**
792  * of_flat_dt_is_compatible - Return true if given node has compat in compatible list
793  * @node: node to test
794  * @compat: compatible string to compare with compatible list.
795  */
796 int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
797 {
798 	return of_fdt_is_compatible(initial_boot_params, node, compat);
799 }
800 
801 /*
802  * of_flat_dt_match - Return true if node matches a list of compatible values
803  */
804 static int __init of_flat_dt_match(unsigned long node, const char *const *compat)
805 {
806 	unsigned int tmp, score = 0;
807 
808 	if (!compat)
809 		return 0;
810 
811 	while (*compat) {
812 		tmp = of_fdt_is_compatible(initial_boot_params, node, *compat);
813 		if (tmp && (score == 0 || (tmp < score)))
814 			score = tmp;
815 		compat++;
816 	}
817 
818 	return score;
819 }
820 
821 /*
822  * of_get_flat_dt_phandle - Given a node in the flat blob, return the phandle
823  */
824 uint32_t __init of_get_flat_dt_phandle(unsigned long node)
825 {
826 	return fdt_get_phandle(initial_boot_params, node);
827 }
828 
829 const char * __init of_flat_dt_get_machine_name(void)
830 {
831 	const char *name;
832 	unsigned long dt_root = of_get_flat_dt_root();
833 
834 	name = of_get_flat_dt_prop(dt_root, "model", NULL);
835 	if (!name)
836 		name = of_get_flat_dt_prop(dt_root, "compatible", NULL);
837 	return name;
838 }
839 
840 /**
841  * of_flat_dt_match_machine - Iterate match tables to find matching machine.
842  *
843  * @default_match: A machine specific ptr to return in case of no match.
844  * @get_next_compat: callback function to return next compatible match table.
845  *
846  * Iterate through machine match tables to find the best match for the machine
847  * compatible string in the FDT.
848  */
849 const void * __init of_flat_dt_match_machine(const void *default_match,
850 		const void * (*get_next_compat)(const char * const**))
851 {
852 	const void *data = NULL;
853 	const void *best_data = default_match;
854 	const char *const *compat;
855 	unsigned long dt_root;
856 	unsigned int best_score = ~1, score = 0;
857 
858 	dt_root = of_get_flat_dt_root();
859 	while ((data = get_next_compat(&compat))) {
860 		score = of_flat_dt_match(dt_root, compat);
861 		if (score > 0 && score < best_score) {
862 			best_data = data;
863 			best_score = score;
864 		}
865 	}
866 	if (!best_data) {
867 		const char *prop;
868 		int size;
869 
870 		pr_err("\n unrecognized device tree list:\n[ ");
871 
872 		prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
873 		if (prop) {
874 			while (size > 0) {
875 				printk("'%s' ", prop);
876 				size -= strlen(prop) + 1;
877 				prop += strlen(prop) + 1;
878 			}
879 		}
880 		printk("]\n\n");
881 		return NULL;
882 	}
883 
884 	pr_info("Machine model: %s\n", of_flat_dt_get_machine_name());
885 
886 	return best_data;
887 }
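/*
 * Illustrative sketch (editorial addition): arch code provides the
 * get_next_compat() iterator, typically by walking its machine-descriptor
 * table.  An ARM-style provider, with the table symbols assumed to exist,
 * could look like:
 *
 *	static const void * __init arch_get_next_mach(const char * const **match)
 *	{
 *		static const struct machine_desc *mdesc = __arch_info_begin;
 *		const struct machine_desc *m = mdesc;
 *
 *		if (m >= __arch_info_end)
 *			return NULL;
 *		mdesc++;
 *		*match = m->dt_compat;
 *		return m;
 *	}
 *
 *	mdesc = of_flat_dt_match_machine(NULL, arch_get_next_mach);
 */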
888 
889 static void __early_init_dt_declare_initrd(unsigned long start,
890 					   unsigned long end)
891 {
892 	/*
893 	 * __va() is not yet available this early on some platforms. In that
894 	 * case, the platform uses phys_initrd_start/phys_initrd_size instead
895 	 * and does the VA conversion itself.
896 	 */
897 	if (!IS_ENABLED(CONFIG_ARM64) &&
898 	    !(IS_ENABLED(CONFIG_RISCV) && IS_ENABLED(CONFIG_64BIT))) {
899 		initrd_start = (unsigned long)__va(start);
900 		initrd_end = (unsigned long)__va(end);
901 		initrd_below_start_ok = 1;
902 	}
903 }
904 
905 /**
906  * early_init_dt_check_for_initrd - Decode initrd location from flat tree
907  * @node: reference to node containing initrd location ('chosen')
908  */
909 static void __init early_init_dt_check_for_initrd(unsigned long node)
910 {
911 	u64 start, end;
912 	int len;
913 	const __be32 *prop;
914 
915 	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
916 		return;
917 
918 	pr_debug("Looking for initrd properties... ");
919 
920 	prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len);
921 	if (!prop)
922 		return;
923 	start = of_read_number(prop, len/4);
924 
925 	prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len);
926 	if (!prop)
927 		return;
928 	end = of_read_number(prop, len/4);
929 	if (start > end)
930 		return;
931 
932 	__early_init_dt_declare_initrd(start, end);
933 	phys_initrd_start = start;
934 	phys_initrd_size = end - start;
935 
936 	pr_debug("initrd_start=0x%llx  initrd_end=0x%llx\n", start, end);
937 }
938 
939 /**
940  * early_init_dt_check_for_elfcorehdr - Decode elfcorehdr location from flat
941  * tree
942  * @node: reference to node containing elfcorehdr location ('chosen')
943  */
944 static void __init early_init_dt_check_for_elfcorehdr(unsigned long node)
945 {
946 	const __be32 *prop;
947 	int len;
948 
949 	if (!IS_ENABLED(CONFIG_CRASH_DUMP))
950 		return;
951 
952 	pr_debug("Looking for elfcorehdr property... ");
953 
954 	prop = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
955 	if (!prop || (len < (dt_root_addr_cells + dt_root_size_cells)))
956 		return;
957 
958 	elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &prop);
959 	elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &prop);
960 
961 	pr_debug("elfcorehdr_start=0x%llx elfcorehdr_size=0x%llx\n",
962 		 elfcorehdr_addr, elfcorehdr_size);
963 }
964 
965 static unsigned long chosen_node_offset = -FDT_ERR_NOTFOUND;
966 
967 /*
968  * The main user of linux,usable-memory-range is the crash dump kernel.
969  * Originally there was a single usable-memory region; now there may be
970  * two regions, a low region and a high region.
971  * For compatibility with existing user space and older kdump, the low
972  * region is always the last range of linux,usable-memory-range, if it exists.
973  */
974 #define MAX_USABLE_RANGES		2
975 
976 /**
977  * early_init_dt_check_for_usable_mem_range - Decode usable memory range
978  * location from flat tree
979  */
980 void __init early_init_dt_check_for_usable_mem_range(void)
981 {
982 	struct memblock_region rgn[MAX_USABLE_RANGES] = {0};
983 	const __be32 *prop, *endp;
984 	int len, i;
985 	unsigned long node = chosen_node_offset;
986 
987 	if ((long)node < 0)
988 		return;
989 
990 	pr_debug("Looking for usable-memory-range property... ");
991 
992 	prop = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
993 	if (!prop || (len % (dt_root_addr_cells + dt_root_size_cells)))
994 		return;
995 
996 	endp = prop + (len / sizeof(__be32));
997 	for (i = 0; i < MAX_USABLE_RANGES && prop < endp; i++) {
998 		rgn[i].base = dt_mem_next_cell(dt_root_addr_cells, &prop);
999 		rgn[i].size = dt_mem_next_cell(dt_root_size_cells, &prop);
1000 
1001 		pr_debug("cap_mem_regions[%d]: base=%pa, size=%pa\n",
1002 			 i, &rgn[i].base, &rgn[i].size);
1003 	}
1004 
1005 	memblock_cap_memory_range(rgn[0].base, rgn[0].size);
1006 	for (i = 1; i < MAX_USABLE_RANGES && rgn[i].size; i++)
1007 		memblock_add(rgn[i].base, rgn[i].size);
1008 }
1009 
1010 #ifdef CONFIG_SERIAL_EARLYCON
1011 
1012 int __init early_init_dt_scan_chosen_stdout(void)
1013 {
1014 	int offset;
1015 	const char *p, *q, *options = NULL;
1016 	int l;
1017 	const struct earlycon_id *match;
1018 	const void *fdt = initial_boot_params;
1019 	int ret;
1020 
1021 	offset = fdt_path_offset(fdt, "/chosen");
1022 	if (offset < 0)
1023 		offset = fdt_path_offset(fdt, "/chosen@0");
1024 	if (offset < 0)
1025 		return -ENOENT;
1026 
1027 	p = fdt_getprop(fdt, offset, "stdout-path", &l);
1028 	if (!p)
1029 		p = fdt_getprop(fdt, offset, "linux,stdout-path", &l);
1030 	if (!p || !l)
1031 		return -ENOENT;
1032 
1033 	q = strchrnul(p, ':');
1034 	if (*q != '\0')
1035 		options = q + 1;
1036 	l = q - p;
1037 
1038 	/* Get the node specified by stdout-path */
1039 	offset = fdt_path_offset_namelen(fdt, p, l);
1040 	if (offset < 0) {
1041 		pr_warn("earlycon: stdout-path %.*s not found\n", l, p);
1042 		return 0;
1043 	}
1044 
1045 	for (match = __earlycon_table; match < __earlycon_table_end; match++) {
1046 		if (!match->compatible[0])
1047 			continue;
1048 
1049 		if (fdt_node_check_compatible(fdt, offset, match->compatible))
1050 			continue;
1051 
1052 		ret = of_setup_earlycon(match, offset, options);
1053 		if (!ret || ret == -EALREADY)
1054 			return 0;
1055 	}
1056 	return -ENODEV;
1057 }
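/*
 * Worked example (editorial note; the path is illustrative): with
 *
 *	chosen {
 *		stdout-path = "/soc/serial@10000000:115200n8";
 *	};
 *
 * the code above splits the string at ':', resolves the node
 * "/soc/serial@10000000" via fdt_path_offset_namelen(), and passes
 * "115200n8" as the options string to of_setup_earlycon().
 */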
1058 #endif
1059 
1060 /*
1061  * early_init_dt_scan_root - fetch the top level address and size cells
1062  */
1063 int __init early_init_dt_scan_root(void)
1064 {
1065 	const __be32 *prop;
1066 	const void *fdt = initial_boot_params;
1067 	int node = fdt_path_offset(fdt, "/");
1068 
1069 	if (node < 0)
1070 		return -ENODEV;
1071 
1072 	dt_root_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
1073 	dt_root_addr_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
1074 
1075 	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
1076 	if (prop)
1077 		dt_root_size_cells = be32_to_cpup(prop);
1078 	pr_debug("dt_root_size_cells = %x\n", dt_root_size_cells);
1079 
1080 	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
1081 	if (prop)
1082 		dt_root_addr_cells = be32_to_cpup(prop);
1083 	pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells);
1084 
1085 	return 0;
1086 }
1087 
1088 u64 __init dt_mem_next_cell(int s, const __be32 **cellp)
1089 {
1090 	const __be32 *p = *cellp;
1091 
1092 	*cellp = p + s;
1093 	return of_read_number(p, s);
1094 }
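/*
 * Worked example (editorial note): with dt_root_addr_cells = 2 and
 * dt_root_size_cells = 2, a reg value of <0x0 0x80000000 0x0 0x40000000>
 * is consumed as
 *
 *	base = dt_mem_next_cell(dt_root_addr_cells, &reg);	(0x80000000)
 *	size = dt_mem_next_cell(dt_root_size_cells, &reg);	(0x40000000, 1 GiB)
 *
 * leaving 'reg' advanced by four cells in total.
 */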
1095 
1096 /*
1097  * early_init_dt_scan_memory - Look for and parse memory nodes
1098  */
1099 int __init early_init_dt_scan_memory(void)
1100 {
1101 	int node, found_memory = 0;
1102 	const void *fdt = initial_boot_params;
1103 
1104 	fdt_for_each_subnode(node, fdt, 0) {
1105 		const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
1106 		const __be32 *reg, *endp;
1107 		int l;
1108 		bool hotpluggable;
1109 
1110 		/* We are scanning "memory" nodes only */
1111 		if (type == NULL || strcmp(type, "memory") != 0)
1112 			continue;
1113 
1114 		if (!of_fdt_device_is_available(fdt, node))
1115 			continue;
1116 
1117 		reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
1118 		if (reg == NULL)
1119 			reg = of_get_flat_dt_prop(node, "reg", &l);
1120 		if (reg == NULL)
1121 			continue;
1122 
1123 		endp = reg + (l / sizeof(__be32));
1124 		hotpluggable = of_get_flat_dt_prop(node, "hotpluggable", NULL);
1125 
1126 		pr_debug("memory scan node %s, reg size %d,\n",
1127 			 fdt_get_name(fdt, node, NULL), l);
1128 
1129 		while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
1130 			u64 base, size;
1131 
1132 			base = dt_mem_next_cell(dt_root_addr_cells, &reg);
1133 			size = dt_mem_next_cell(dt_root_size_cells, &reg);
1134 
1135 			if (size == 0)
1136 				continue;
1137 			pr_debug(" - %llx, %llx\n", base, size);
1138 
1139 			early_init_dt_add_memory_arch(base, size);
1140 
1141 			found_memory = 1;
1142 
1143 			if (!hotpluggable)
1144 				continue;
1145 
1146 			if (memblock_mark_hotplug(base, size))
1147 				pr_warn("failed to mark hotplug range 0x%llx - 0x%llx\n",
1148 					base, base + size);
1149 		}
1150 	}
1151 	return found_memory;
1152 }
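/*
 * Example node (editorial addition; the address and size are illustrative):
 *
 *	memory@80000000 {
 *		device_type = "memory";
 *		reg = <0x0 0x80000000 0x0 0x40000000>;
 *	};
 *
 * results in a single early_init_dt_add_memory_arch(0x80000000, 0x40000000)
 * call from the loop above.
 */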
1153 
1154 int __init early_init_dt_scan_chosen(char *cmdline)
1155 {
1156 	int l, node;
1157 	const char *p;
1158 	const void *rng_seed;
1159 	const void *fdt = initial_boot_params;
1160 
1161 	node = fdt_path_offset(fdt, "/chosen");
1162 	if (node < 0)
1163 		node = fdt_path_offset(fdt, "/chosen@0");
1164 	if (node < 0)
1165 		/* Handle the cmdline config options even if no /chosen node */
1166 		goto handle_cmdline;
1167 
1168 	chosen_node_offset = node;
1169 
1170 	early_init_dt_check_for_initrd(node);
1171 	early_init_dt_check_for_elfcorehdr(node);
1172 
1173 	rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l);
1174 	if (rng_seed && l > 0) {
1175 		add_bootloader_randomness(rng_seed, l);
1176 
1177 		/* try to clear seed so it won't be found. */
1178 		fdt_nop_property(initial_boot_params, node, "rng-seed");
1179 
1180 		/* update CRC check value */
1181 		of_fdt_crc32 = crc32_be(~0, initial_boot_params,
1182 				fdt_totalsize(initial_boot_params));
1183 	}
1184 
1185 	/* Retrieve command line */
1186 	p = of_get_flat_dt_prop(node, "bootargs", &l);
1187 	if (p != NULL && l > 0)
1188 		strscpy(cmdline, p, min(l, COMMAND_LINE_SIZE));
1189 
1190 handle_cmdline:
1191 	/*
1192 	 * CONFIG_CMDLINE is meant to be a default in case nothing else
1193 	 * managed to set the command line, unless CONFIG_CMDLINE_FORCE
1194 	 * is set in which case we override whatever was found earlier.
1195 	 */
1196 #ifdef CONFIG_CMDLINE
1197 #if defined(CONFIG_CMDLINE_EXTEND)
1198 	strlcat(cmdline, " ", COMMAND_LINE_SIZE);
1199 	strlcat(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1200 #elif defined(CONFIG_CMDLINE_FORCE)
1201 	strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1202 #else
1203 	/* No arguments from boot loader, use kernel's cmdline */
1204 	if (!((char *)cmdline)[0])
1205 		strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1206 #endif
1207 #endif /* CONFIG_CMDLINE */
1208 
1209 	pr_debug("Command line is: %s\n", (char *)cmdline);
1210 
1211 	return 0;
1212 }
1213 
1214 #ifndef MIN_MEMBLOCK_ADDR
1215 #define MIN_MEMBLOCK_ADDR	__pa(PAGE_OFFSET)
1216 #endif
1217 #ifndef MAX_MEMBLOCK_ADDR
1218 #define MAX_MEMBLOCK_ADDR	((phys_addr_t)~0)
1219 #endif
1220 
1221 void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
1222 {
1223 	const u64 phys_offset = MIN_MEMBLOCK_ADDR;
1224 
1225 	if (size < PAGE_SIZE - (base & ~PAGE_MASK)) {
1226 		pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
1227 			base, base + size);
1228 		return;
1229 	}
1230 
1231 	if (!PAGE_ALIGNED(base)) {
1232 		size -= PAGE_SIZE - (base & ~PAGE_MASK);
1233 		base = PAGE_ALIGN(base);
1234 	}
1235 	size &= PAGE_MASK;
1236 
1237 	if (base > MAX_MEMBLOCK_ADDR) {
1238 		pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
1239 			base, base + size);
1240 		return;
1241 	}
1242 
1243 	if (base + size - 1 > MAX_MEMBLOCK_ADDR) {
1244 		pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
1245 			((u64)MAX_MEMBLOCK_ADDR) + 1, base + size);
1246 		size = MAX_MEMBLOCK_ADDR - base + 1;
1247 	}
1248 
1249 	if (base + size < phys_offset) {
1250 		pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
1251 			base, base + size);
1252 		return;
1253 	}
1254 	if (base < phys_offset) {
1255 		pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
1256 			base, phys_offset);
1257 		size -= phys_offset - base;
1258 		base = phys_offset;
1259 	}
1260 	memblock_add(base, size);
1261 }
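/*
 * Worked example (editorial note; the numbers are illustrative): with 4 KiB
 * pages, a block of base = 0x80000800, size = 0x3000 is first advanced to the
 * aligned base 0x80001000 (size drops to 0x2800) and then rounded down to
 * whole pages, so memblock_add() receives (0x80001000, 0x2000).
 */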
1262 
1263 static void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
1264 {
1265 	void *ptr = memblock_alloc(size, align);
1266 
1267 	if (!ptr)
1268 		panic("%s: Failed to allocate %llu bytes align=0x%llx\n",
1269 		      __func__, size, align);
1270 
1271 	return ptr;
1272 }
1273 
1274 bool __init early_init_dt_verify(void *dt_virt, phys_addr_t dt_phys)
1275 {
1276 	if (!dt_virt)
1277 		return false;
1278 
1279 	/* check device tree validity */
1280 	if (fdt_check_header(dt_virt))
1281 		return false;
1282 
1283 	/* Setup flat device-tree pointer */
1284 	initial_boot_params = dt_virt;
1285 	initial_boot_params_pa = dt_phys;
1286 	of_fdt_crc32 = crc32_be(~0, initial_boot_params,
1287 				fdt_totalsize(initial_boot_params));
1288 	return true;
1289 }
1290 
1291 
1292 void __init early_init_dt_scan_nodes(void)
1293 {
1294 	int rc;
1295 
1296 	/* Initialize {size,address}-cells info */
1297 	early_init_dt_scan_root();
1298 
1299 	/* Retrieve various information from the /chosen node */
1300 	rc = early_init_dt_scan_chosen(boot_command_line);
1301 	if (rc)
1302 		pr_warn("No chosen node found, continuing without\n");
1303 
1304 	/* Setup memory, calling early_init_dt_add_memory_arch */
1305 	early_init_dt_scan_memory();
1306 
1307 	/* Handle linux,usable-memory-range property */
1308 	early_init_dt_check_for_usable_mem_range();
1309 }
1310 
1311 bool __init early_init_dt_scan(void *dt_virt, phys_addr_t dt_phys)
1312 {
1313 	bool status;
1314 
1315 	status = early_init_dt_verify(dt_virt, dt_phys);
1316 	if (!status)
1317 		return false;
1318 
1319 	early_init_dt_scan_nodes();
1320 	return true;
1321 }
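/*
 * Typical boot flow (editorial sketch; the surrounding arch function and the
 * fdt_virt/fdt_phys names are hypothetical):
 *
 *	void __init setup_arch(char **cmdline_p)
 *	{
 *		...
 *		if (!early_init_dt_scan(fdt_virt, fdt_phys))
 *			panic("No valid device tree\n");
 *		...	(memblock/paging setup)
 *		unflatten_device_tree();
 *		...
 *	}
 */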
1322 
1323 /**
1324  * unflatten_device_tree - create tree of device_nodes from flat blob
1325  *
1326  * unflattens the device-tree passed by the firmware, creating the
1327  * tree of struct device_node. It also fills the "name" and "type"
1328  * pointers of the nodes so the normal device-tree walking functions
1329  * can be used.
1330  */
1331 void __init unflatten_device_tree(void)
1332 {
1333 	__unflatten_device_tree(initial_boot_params, NULL, &of_root,
1334 				early_init_dt_alloc_memory_arch, false);
1335 
1336 	/* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */
1337 	of_alias_scan(early_init_dt_alloc_memory_arch);
1338 
1339 	unittest_unflatten_overlay_base();
1340 }
1341 
1342 /**
1343  * unflatten_and_copy_device_tree - copy and create tree of device_nodes from flat blob
1344  *
1345  * Copies and unflattens the device-tree passed by the firmware, creating the
1346  * tree of struct device_node. It also fills the "name" and "type"
1347  * pointers of the nodes so the normal device-tree walking functions
1348  * can be used. This should only be used when the FDT memory has not been
1349  * reserved, as is the case when the FDT is built into the kernel init
1350  * section. If the FDT memory is already reserved, then unflatten_device_tree()
1351  * should be used instead.
1352  */
1353 void __init unflatten_and_copy_device_tree(void)
1354 {
1355 	int size;
1356 	void *dt;
1357 
1358 	if (!initial_boot_params) {
1359 		pr_warn("No valid device tree found, continuing without\n");
1360 		return;
1361 	}
1362 
1363 	size = fdt_totalsize(initial_boot_params);
1364 	dt = early_init_dt_alloc_memory_arch(size,
1365 					     roundup_pow_of_two(FDT_V17_SIZE));
1366 
1367 	if (dt) {
1368 		memcpy(dt, initial_boot_params, size);
1369 		initial_boot_params = dt;
1370 	}
1371 	unflatten_device_tree();
1372 }
1373 
1374 #ifdef CONFIG_SYSFS
1375 static ssize_t of_fdt_raw_read(struct file *filp, struct kobject *kobj,
1376 			       struct bin_attribute *bin_attr,
1377 			       char *buf, loff_t off, size_t count)
1378 {
1379 	memcpy(buf, initial_boot_params + off, count);
1380 	return count;
1381 }
1382 
1383 static int __init of_fdt_raw_init(void)
1384 {
1385 	static struct bin_attribute of_fdt_raw_attr =
1386 		__BIN_ATTR(fdt, S_IRUSR, of_fdt_raw_read, NULL, 0);
1387 
1388 	if (!initial_boot_params)
1389 		return 0;
1390 
1391 	if (of_fdt_crc32 != crc32_be(~0, initial_boot_params,
1392 				     fdt_totalsize(initial_boot_params))) {
1393 		pr_warn("not creating '/sys/firmware/fdt': CRC check failed\n");
1394 		return 0;
1395 	}
1396 	of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
1397 	return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
1398 }
1399 late_initcall(of_fdt_raw_init);
1400 #endif
1401 
1402 #endif /* CONFIG_OF_EARLY_FLATTREE */
1403