// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for creating, accessing and interpreting the device tree.
 *
 * Paul Mackerras	August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *   {engebret|bergner}@us.ibm.com
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/kexec.h>
#include <linux/irq.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/cpu.h>
#include <linux/pgtable.h>
#include <linux/seq_buf.h>

#include <asm/rtas.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/smp.h>
#include <asm/mmu.h>
#include <asm/paca.h>
#include <asm/powernv.h>
#include <asm/iommu.h>
#include <asm/btext.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/pci-bridge.h>
#include <asm/kexec.h>
#include <asm/opal.h>
#include <asm/fadump.h>
#include <asm/epapr_hcalls.h>
#include <asm/firmware.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/drmem.h>
#include <asm/ultravisor.h>
#include <asm/prom.h>
#include <asm/plpks.h>

#include <mm/mmu_decl.h>

#ifdef DEBUG
#define DBG(fmt...) printk(KERN_ERR fmt)
#else
#define DBG(fmt...)
#endif

int *chip_id_lookup_table;

#ifdef CONFIG_PPC64
int __initdata iommu_is_off;
int __initdata iommu_force_on;
unsigned long tce_alloc_start, tce_alloc_end;
u64 ppc64_rma_size;
unsigned int boot_cpu_node_count __ro_after_init;
#endif
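/* Size of the very first memory block from the device tree; the RMA on ppc64 server */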
static phys_addr_t first_memblock_size;
static int __initdata boot_cpu_count;

static int __init early_parse_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = PAGE_ALIGN(memparse(p, &p));
	DBG("memory limit = 0x%llx\n", memory_limit);

	return 0;
}
early_param("mem", early_parse_mem);

/*
 * overlaps_initrd - check for overlap with page aligned extension of
 * initrd.
 */
static inline int overlaps_initrd(unsigned long start, unsigned long size)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (!initrd_start)
		return 0;

	return (start + size) > ALIGN_DOWN(initrd_start, PAGE_SIZE) &&
			start <= ALIGN(initrd_end, PAGE_SIZE);
#else
	return 0;
#endif
}

/**
 * move_device_tree - move tree to an unused area, if needed.
 *
 * The device tree may be allocated beyond our memory limit, or inside the
 * crash kernel region for kdump, or within the page aligned range of initrd.
 * If so, move it out of the way.
 */
static void __init move_device_tree(void)
{
	unsigned long start, size;
	void *p;

	DBG("-> move_device_tree\n");

	start = __pa(initial_boot_params);
	size = fdt_totalsize(initial_boot_params);

	if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) ||
	    !memblock_is_memory(start + size - 1) ||
	    overlaps_crashkernel(start, size) || overlaps_initrd(start, size)) {
		p = memblock_alloc_raw(size, PAGE_SIZE);
		if (!p)
			panic("Failed to allocate %lu bytes to move device tree\n",
			      size);
		memcpy(p, initial_boot_params, size);
		initial_boot_params = p;
		DBG("Moved device tree to 0x%px\n", p);
	}

	DBG("<- move_device_tree\n");
}

/*
 * ibm,pa/pi-features is a per-cpu property that contains a string of
 * attribute descriptors, each of which has a 2 byte header plus up
 * to 254 bytes worth of processor attribute bits.  First header
 * byte specifies the number of bytes following the header.
 * Second header byte is an "attribute-specifier" type, of which
 * zero is the only currently-defined value.
 * Implementation:  Pass in the byte and bit offset for the feature
 * that we are interested in.  The function will return -1 if the
 * pa-features property is missing, or a 1/0 to indicate if the feature
 * is supported/not supported.  Note that the bit numbers are
 * big-endian to match the definition in PAPR.
 */
struct ibm_feature {
	unsigned long	cpu_features;	/* CPU_FTR_xxx bit */
	unsigned long	mmu_features;	/* MMU_FTR_xxx bit */
	unsigned int	cpu_user_ftrs;	/* PPC_FEATURE_xxx bit */
	unsigned int	cpu_user_ftrs2;	/* PPC_FEATURE2_xxx bit */
	unsigned char	pabyte;		/* byte number in ibm,pa/pi-features */
	unsigned char	pabit;		/* bit number (big-endian) */
	unsigned char	invert;		/* if 1, pa bit set => clear feature */
};

static struct ibm_feature ibm_pa_features[] __initdata = {
	{ .pabyte = 0,  .pabit = 0, .cpu_user_ftrs = PPC_FEATURE_HAS_MMU },
	{ .pabyte = 0,  .pabit = 1, .cpu_user_ftrs = PPC_FEATURE_HAS_FPU },
	{ .pabyte = 0,  .pabit = 3, .cpu_features  = CPU_FTR_CTRL },
	{ .pabyte = 0,  .pabit = 6, .cpu_features  = CPU_FTR_NOEXECUTE },
	{ .pabyte = 1,  .pabit = 2, .mmu_features  = MMU_FTR_CI_LARGE_PAGE },
#ifdef CONFIG_PPC_RADIX_MMU
	{ .pabyte = 40, .pabit = 0, .mmu_features  = MMU_FTR_TYPE_RADIX | MMU_FTR_GTSE },
#endif
	{ .pabyte = 5,  .pabit = 0, .cpu_features  = CPU_FTR_REAL_LE,
				    .cpu_user_ftrs = PPC_FEATURE_TRUE_LE },
	/*
	 * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
	 * we don't want to turn on TM here, so we use the *_COMP versions
	 * which are 0 if the kernel doesn't support TM.
	 */
	{ .pabyte = 22, .pabit = 0, .cpu_features = CPU_FTR_TM_COMP,
	  .cpu_user_ftrs2 = PPC_FEATURE2_HTM_COMP | PPC_FEATURE2_HTM_NOSC_COMP },

	{ .pabyte = 64, .pabit = 0, .cpu_features = CPU_FTR_DAWR1 },
	{ .pabyte = 68, .pabit = 5, .cpu_features = CPU_FTR_DEXCR_NPHIE },
};

/*
 * The ibm,pi-features property describes processor-specific options that are
 * not covered by ibm,pa-features. Right now we use byte 0, bit 3, which
 * indicates that a DSI interrupt is raised when a paste operation targets a
 * suspended NX window.
 */
static struct ibm_feature ibm_pi_features[] __initdata = {
	{ .pabyte = 0, .pabit = 3, .mmu_features = MMU_FTR_NX_DSI },
};

static void __init scan_features(unsigned long node, const unsigned char *ftrs,
				 unsigned long tablelen,
				 struct ibm_feature *fp,
				 unsigned long ft_size)
{
	unsigned long i, len, bit;

	/* find descriptor with type == 0 */
	for (;;) {
		if (tablelen < 3)
			return;
		len = 2 + ftrs[0];
		if (tablelen < len)
			return;		/* descriptor 0 not found */
		if (ftrs[1] == 0)
			break;
		tablelen -= len;
		ftrs += len;
	}

	/* loop over bits we know about */
	for (i = 0; i < ft_size; ++i, ++fp) {
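		/* skip features whose byte lies beyond this descriptor's length */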
		if (fp->pabyte >= ftrs[0])
			continue;
		bit = (ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
		if (bit ^ fp->invert) {
			cur_cpu_spec->cpu_features |= fp->cpu_features;
			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
			cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
			cur_cpu_spec->mmu_features |= fp->mmu_features;
		} else {
			cur_cpu_spec->cpu_features &= ~fp->cpu_features;
			cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
			cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
			cur_cpu_spec->mmu_features &= ~fp->mmu_features;
		}
	}
}

static void __init check_cpu_features(unsigned long node, char *name,
				      struct ibm_feature *fp,
				      unsigned long size)
{
	const unsigned char *pa_ftrs;
	int tablelen;

	pa_ftrs = of_get_flat_dt_prop(node, name, &tablelen);
	if (pa_ftrs == NULL)
		return;

	scan_features(node, pa_ftrs, tablelen, fp, size);
}

#ifdef CONFIG_PPC_64S_HASH_MMU
static void __init init_mmu_slb_size(unsigned long node)
{
	const __be32 *slb_size_ptr;

	slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL) ? :
			of_get_flat_dt_prop(node, "ibm,slb-size", NULL);

	if (slb_size_ptr)
		mmu_slb_size = be32_to_cpup(slb_size_ptr);
}
#else
#define init_mmu_slb_size(node) do { } while(0)
#endif

static struct feature_property {
	const char *name;
	u32 min_value;
	unsigned long cpu_feature;
	unsigned long cpu_user_ftr;
} feature_properties[] __initdata = {
#ifdef CONFIG_ALTIVEC
	{"altivec", 0, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
	{"ibm,vmx", 1, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	/* Yes, this _really_ is ibm,vmx == 2 to enable VSX */
	{"ibm,vmx", 2, CPU_FTR_VSX, PPC_FEATURE_HAS_VSX},
#endif /* CONFIG_VSX */
#ifdef CONFIG_PPC64
	{"ibm,dfp", 1, 0, PPC_FEATURE_HAS_DFP},
	{"ibm,purr", 1, CPU_FTR_PURR, 0},
	{"ibm,spurr", 1, CPU_FTR_SPURR, 0},
#endif /* CONFIG_PPC64 */
};

#if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
static __init void identical_pvr_fixup(unsigned long node)
{
	unsigned int pvr;
	const char *model = of_get_flat_dt_prop(node, "model", NULL);

	/*
	 * Since 440GR(x)/440EP(x) processors have the same pvr,
	 * we check the node path and set bit 28 in the cur_cpu_spec
	 * pvr for EP(x) processor version. This bit is always 0 in
	 * the "real" pvr. Then we call identify_cpu again with
	 * the new logical pvr to enable FPU support.
	 */
	if (model && strstr(model, "440EP")) {
		pvr = cur_cpu_spec->pvr_value | 0x8;
		identify_cpu(0, pvr);
		DBG("Using logical pvr %x for %s\n", pvr, model);
	}
}
#else
#define identical_pvr_fixup(node) do { } while(0)
#endif

static void __init check_cpu_feature_properties(unsigned long node)
{
	int i;
	struct feature_property *fp = feature_properties;
	const __be32 *prop;

	for (i = 0; i < (int)ARRAY_SIZE(feature_properties); ++i, ++fp) {
		prop = of_get_flat_dt_prop(node, fp->name, NULL);
		if (prop && be32_to_cpup(prop) >= fp->min_value) {
			cur_cpu_spec->cpu_features |= fp->cpu_feature;
			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftr;
		}
	}
}

static int __init early_init_dt_scan_cpus(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const __be32 *prop;
	const __be32 *intserv;
	int i, nthreads;
	int len;
	int found = -1;
	int found_thread = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	if (IS_ENABLED(CONFIG_PPC64))
		boot_cpu_node_count++;

	/* Get physical cpuid */
	intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		intserv = of_get_flat_dt_prop(node, "reg", &len);

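	/* each cell of the property corresponds to one hardware thread of this core */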
	nthreads = len / sizeof(int);

	/*
	 * Now see if any of these threads match our boot cpu.
	 * NOTE: This must match the parsing done in smp_setup_cpu_maps.
	 */
	for (i = 0; i < nthreads; i++) {
		if (be32_to_cpu(intserv[i]) ==
			fdt_boot_cpuid_phys(initial_boot_params)) {
			found = boot_cpu_count;
			found_thread = i;
		}
#ifdef CONFIG_SMP
		/* logical cpu id is always 0 on UP kernels */
		boot_cpu_count++;
#endif
	}

	/* Not the boot CPU */
	if (found < 0)
		return 0;

	DBG("boot cpu: logical %d physical %d\n", found,
	    be32_to_cpu(intserv[found_thread]));
	boot_cpuid = found;

	if (IS_ENABLED(CONFIG_PPC64))
		boot_cpu_hwid = be32_to_cpu(intserv[found_thread]);

	if (nr_cpu_ids % nthreads != 0) {
		set_nr_cpu_ids(ALIGN(nr_cpu_ids, nthreads));
		pr_warn("nr_cpu_ids was not a multiple of threads_per_core, adjusted to %d\n",
			nr_cpu_ids);
	}

	if (boot_cpuid >= nr_cpu_ids) {
		set_nr_cpu_ids(min(CONFIG_NR_CPUS, ALIGN(boot_cpuid + 1, nthreads)));
		pr_warn("Boot CPU %d >= nr_cpu_ids, adjusted nr_cpu_ids to %d\n",
			boot_cpuid, nr_cpu_ids);
	}

	/*
	 * PAPR defines "logical" PVR values for cpus that
	 * meet various levels of the architecture:
	 * 0x0f000001	Architecture version 2.04
	 * 0x0f000002	Architecture version 2.05
	 * If the cpu-version property in the cpu node contains
	 * such a value, we call identify_cpu again with the
	 * logical PVR value in order to use the cpu feature
	 * bits appropriate for the architecture level.
	 *
	 * A POWER6 partition in "POWER6 architected" mode
	 * uses the 0x0f000002 PVR value; in POWER5+ mode
	 * it uses 0x0f000001.
	 *
	 * If we're using device tree CPU feature discovery then we don't
	 * support the cpu-version property, and it's the responsibility of the
	 * firmware/hypervisor to provide the correct feature set for the
	 * architecture level via the ibm,powerpc-cpu-features binding.
	 */
	if (!dt_cpu_ftrs_in_use()) {
		prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
		if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000) {
			identify_cpu(0, be32_to_cpup(prop));
			seq_buf_printf(&ppc_hw_desc, "0x%04x ", be32_to_cpup(prop));
		}

		check_cpu_feature_properties(node);
		check_cpu_features(node, "ibm,pa-features", ibm_pa_features,
				   ARRAY_SIZE(ibm_pa_features));
		check_cpu_features(node, "ibm,pi-features", ibm_pi_features,
				   ARRAY_SIZE(ibm_pi_features));
	}

	identical_pvr_fixup(node);
	init_mmu_slb_size(node);

#ifdef CONFIG_PPC64
	if (nthreads == 1)
		cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
	else if (!dt_cpu_ftrs_in_use())
		cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
#endif

	return 0;
}

static int __init early_init_dt_scan_chosen_ppc(unsigned long node,
						const char *uname,
						int depth, void *data)
{
	const unsigned long *lprop; /* All these set by kernel, so no need to convert endian */

	/* Use common scan routine to determine if this is the chosen node */
	if (early_init_dt_scan_chosen(data) < 0)
		return 0;

#ifdef CONFIG_PPC64
	/* check if iommu is forced on or off */
	if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
		iommu_is_off = 1;
	if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
		iommu_force_on = 1;
#endif

	/* mem=x on the command line is the preferred mechanism */
	lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
	if (lprop)
		memory_limit = *lprop;

#ifdef CONFIG_PPC64
	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
	if (lprop)
		tce_alloc_start = *lprop;
	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
	if (lprop)
		tce_alloc_end = *lprop;
#endif

#ifdef CONFIG_KEXEC_CORE
	lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
	if (lprop)
		crashk_res.start = *lprop;

	lprop = of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
	if (lprop)
		crashk_res.end = crashk_res.start + *lprop - 1;
#endif

	/* break now */
	return 1;
}

/*
 * Compare the range against the max mem limit and update
 * the size if it crosses the limit.
 */

#ifdef CONFIG_SPARSEMEM
static bool __init validate_mem_limit(u64 base, u64 *size)
{
	u64 max_mem = 1UL << (MAX_PHYSMEM_BITS);

	if (base >= max_mem)
		return false;
	if ((base + *size) > max_mem)
		*size = max_mem - base;
	return true;
}
#else
static bool __init validate_mem_limit(u64 base, u64 *size)
{
	return true;
}
#endif

#ifdef CONFIG_PPC_PSERIES
/*
 * Interpret the ibm dynamic reconfiguration memory LMBs.
 * This contains a list of memory blocks along with NUMA affinity
 * information.
 */
static int __init early_init_drmem_lmb(struct drmem_lmb *lmb,
					const __be32 **usm,
					void *data)
{
	u64 base, size;
	int is_kexec_kdump = 0, rngs;

	base = lmb->base_addr;
	size = drmem_lmb_size();
	rngs = 1;

	/*
	 * Skip this block if the reserved bit is set in flags
	 * or if the block is not assigned to this partition.
	 */
	if ((lmb->flags & DRCONF_MEM_RESERVED) ||
	    !(lmb->flags & DRCONF_MEM_ASSIGNED))
		return 0;

	if (*usm)
		is_kexec_kdump = 1;

	if (is_kexec_kdump) {
		/*
		 * For each memblock in ibm,dynamic-memory, the
		 * corresponding entry in the linux,drconf-usable-memory
		 * property contains a counter 'p' followed by 'p'
		 * (base, size) pairs. Read the counter from
		 * linux,drconf-usable-memory first.
		 */
		rngs = dt_mem_next_cell(dt_root_size_cells, usm);
		if (!rngs) /* there are no (base, size) pairs */
			return 0;
	}

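	/*
	 * Walk each usable range. In the non-kexec/kdump case there is a
	 * single range covering the whole LMB.
	 */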
	do {
		if (is_kexec_kdump) {
			base = dt_mem_next_cell(dt_root_addr_cells, usm);
			size = dt_mem_next_cell(dt_root_size_cells, usm);
		}

		if (iommu_is_off) {
			if (base >= 0x80000000ul)
				continue;
			if ((base + size) > 0x80000000ul)
				size = 0x80000000ul - base;
		}

		if (!validate_mem_limit(base, &size))
			continue;

		DBG("Adding: %llx -> %llx\n", base, size);
		memblock_add(base, size);

		if (lmb->flags & DRCONF_MEM_HOTREMOVABLE)
			memblock_mark_hotplug(base, size);
	} while (--rngs);

	return 0;
}
#endif /* CONFIG_PPC_PSERIES */

static int __init early_init_dt_scan_memory_ppc(void)
{
#ifdef CONFIG_PPC_PSERIES
	const void *fdt = initial_boot_params;
	int node = fdt_path_offset(fdt, "/ibm,dynamic-reconfiguration-memory");

	if (node > 0)
		walk_drmem_lmbs_early(node, NULL, early_init_drmem_lmb);

#endif

	return early_init_dt_scan_memory();
}

/*
 * For a relocatable kernel, we need to get the memstart_addr first,
 * then use it to calculate the virtual kernel start address. This has
 * to happen at a very early stage (before machine_init). In this case,
 * we just want to get memstart_addr and do not want to modify the
 * memblock at this stage. So introduce a variable to skip the
 * memblock_add() for this reason.
 */
#ifdef CONFIG_RELOCATABLE
static int add_mem_to_memblock = 1;
#else
#define add_mem_to_memblock 1
#endif

void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
#ifdef CONFIG_PPC64
	if (iommu_is_off) {
		if (base >= 0x80000000ul)
			return;
		if ((base + size) > 0x80000000ul)
			size = 0x80000000ul - base;
	}
#endif
	/* Keep track of the beginning of memory -and- the size of
	 * the very first block in the device-tree as it represents
	 * the RMA on ppc64 server
	 */
	if (base < memstart_addr) {
		memstart_addr = base;
		first_memblock_size = size;
	}

	/* Add the chunk to the MEMBLOCK list */
	if (add_mem_to_memblock) {
		if (validate_mem_limit(base, &size))
			memblock_add(base, size);
	}
}

static void __init early_reserve_mem_dt(void)
{
	unsigned long i, dt_root;
	int len;
	const __be32 *prop;

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	dt_root = of_get_flat_dt_root();

	prop = of_get_flat_dt_prop(dt_root, "reserved-ranges", &len);

	if (!prop)
		return;

	DBG("Found new-style reserved-ranges\n");

	/* Each reserved range is an (address,size) pair, 2 cells each,
	 * totalling 4 cells per range. */
	for (i = 0; i < len / (sizeof(*prop) * 4); i++) {
		u64 base, size;

		base = of_read_number(prop + (i * 4) + 0, 2);
		size = of_read_number(prop + (i * 4) + 2, 2);

		if (size) {
			DBG("reserving: %llx -> %llx\n", base, size);
			memblock_reserve(base, size);
		}
	}
}

static void __init early_reserve_mem(void)
{
	__be64 *reserve_map;

	reserve_map = (__be64 *)(((unsigned long)initial_boot_params) +
			fdt_off_mem_rsvmap(initial_boot_params));

	/* Look for the new "reserved-regions" property in the DT */
	early_reserve_mem_dt();

#ifdef CONFIG_BLK_DEV_INITRD
	/* Then reserve the initrd, if any */
	if (initrd_start && (initrd_end > initrd_start)) {
		memblock_reserve(ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE),
			ALIGN(initrd_end, PAGE_SIZE) -
			ALIGN_DOWN(initrd_start, PAGE_SIZE));
	}
#endif /* CONFIG_BLK_DEV_INITRD */

	if (!IS_ENABLED(CONFIG_PPC32))
		return;

	/*
	 * Handle the case where we might be booting from an old kexec
	 * image that set up the mem_rsvmap as pairs of 32-bit values
	 */
	if (be64_to_cpup(reserve_map) > 0xffffffffull) {
		u32 base_32, size_32;
		__be32 *reserve_map_32 = (__be32 *)reserve_map;

		DBG("Found old 32-bit reserve map\n");

		while (1) {
			base_32 = be32_to_cpup(reserve_map_32++);
			size_32 = be32_to_cpup(reserve_map_32++);
			if (size_32 == 0)
				break;
			DBG("reserving: %x -> %x\n", base_32, size_32);
			memblock_reserve(base_32, size_32);
		}
		return;
	}
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static bool tm_disabled __initdata;

static int __init parse_ppc_tm(char *str)
{
	bool res;

	if (kstrtobool(str, &res))
		return -EINVAL;

	tm_disabled = !res;

	return 0;
}
early_param("ppc_tm", parse_ppc_tm);

static void __init tm_init(void)
{
	if (tm_disabled) {
		pr_info("Disabling hardware transactional memory (HTM)\n");
		cur_cpu_spec->cpu_user_features2 &=
			~(PPC_FEATURE2_HTM_NOSC | PPC_FEATURE2_HTM);
		cur_cpu_spec->cpu_features &= ~CPU_FTR_TM;
		return;
	}

	pnv_tm_init();
}
#else
static void tm_init(void) { }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

static int __init
early_init_dt_scan_model(unsigned long node, const char *uname,
			 int depth, void *data)
{
	const char *prop;

	if (depth != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "model", NULL);
	if (prop)
		seq_buf_printf(&ppc_hw_desc, "%s ", prop);

	/* break now */
	return 1;
}

#ifdef CONFIG_PPC64
static void __init save_fscr_to_task(void)
{
	/*
	 * Ensure the init_task (pid 0, aka swapper) uses the value of FSCR we
	 * have configured via the device tree features or via __init_FSCR().
	 * That value will then be propagated to pid 1 (init) and all future
	 * processes.
	 */
	if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
		init_task.thread.fscr = mfspr(SPRN_FSCR);
}
#else
static inline void save_fscr_to_task(void) {}
#endif


void __init early_init_devtree(void *params)
{
	phys_addr_t limit;

	DBG(" -> early_init_devtree(%px)\n", params);

	/* Too early to BUG_ON(), do it by hand */
	if (!early_init_dt_verify(params))
		panic("BUG: Failed verifying flat device tree, bad version?");

	of_scan_flat_dt(early_init_dt_scan_model, NULL);

#ifdef CONFIG_PPC_RTAS
	/* Some machines might need RTAS info for debugging, grab it now. */
	of_scan_flat_dt(early_init_dt_scan_rtas, NULL);
#endif

#ifdef CONFIG_PPC_POWERNV
	/* Some machines might need OPAL info for debugging, grab it now. */
	of_scan_flat_dt(early_init_dt_scan_opal, NULL);

	/* Scan tree for ultravisor feature */
	of_scan_flat_dt(early_init_dt_scan_ultravisor, NULL);
#endif

#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP)
	/* scan tree to see if dump is active during last boot */
	of_scan_flat_dt(early_init_dt_scan_fw_dump, NULL);
#endif

	/* Retrieve various information from the /chosen node of the
	 * device-tree, including the platform type, initrd location and
	 * size, TCE reserve, and more ...
	 */
	of_scan_flat_dt(early_init_dt_scan_chosen_ppc, boot_command_line);

	/* Scan memory nodes and rebuild MEMBLOCKs */
	early_init_dt_scan_root();
	early_init_dt_scan_memory_ppc();

	/*
	 * As generic code authors expect to be able to use static keys
	 * in early_param() handlers, we initialize the static keys just
	 * before parsing early params (it's fine to call jump_label_init()
	 * more than once).
	 */
	jump_label_init();
	parse_early_param();

	/* make sure we've parsed cmdline for mem= before this */
	if (memory_limit)
		first_memblock_size = min_t(u64, first_memblock_size, memory_limit);
	setup_initial_memory_limit(memstart_addr, first_memblock_size);
	/* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */
	memblock_reserve(PHYSICAL_START, __pa(_end) - PHYSICAL_START);
	/* If relocatable, reserve first 32k for interrupt vectors etc. */
	if (PHYSICAL_START > MEMORY_START)
		memblock_reserve(MEMORY_START, 0x8000);
	reserve_kdump_trampoline();
#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP)
	/*
	 * If we fail to reserve memory for firmware-assisted dump then
	 * fallback to kexec based kdump.
	 */
	if (fadump_reserve_mem() == 0)
#endif
		reserve_crashkernel();
	early_reserve_mem();

	/* Ensure that total memory size is page-aligned. */
	limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE);
	memblock_enforce_memory_limit(limit);

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_4K_PAGES)
	if (!early_radix_enabled())
		memblock_cap_memory_range(0, 1UL << (H_MAX_PHYSMEM_BITS));
#endif

	memblock_allow_resize();
	memblock_dump_all();

	DBG("Phys. mem: %llx\n", (unsigned long long)memblock_phys_mem_size());

	/* We may need to relocate the flat tree, do it now.
	 * FIXME .. and the initrd too? */
	move_device_tree();

	DBG("Scanning CPUs ...\n");

	dt_cpu_ftrs_scan();

	// We can now add the CPU name & PVR to the hardware description
	seq_buf_printf(&ppc_hw_desc, "%s 0x%04lx ", cur_cpu_spec->cpu_name, mfspr(SPRN_PVR));

	/* Retrieve CPU related information from the flat tree
	 * (altivec support, boot CPU ID, ...)
	 */
	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
	if (boot_cpuid < 0) {
		printk("Failed to identify boot CPU !\n");
		BUG();
	}

	save_fscr_to_task();

#if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
	/* We'll later wait for secondaries to check in; there are
	 * NCPUS-1 non-boot CPUs  :-)
	 */
	spinning_secondaries = boot_cpu_count - 1;
#endif

	mmu_early_init_devtree();

#ifdef CONFIG_PPC_POWERNV
	/* Scan and build the list of machine check recoverable ranges */
	of_scan_flat_dt(early_init_dt_scan_recoverable_ranges, NULL);
#endif
	epapr_paravirt_early_init();

	/* Now try to figure out if we are running on LPAR and so on */
	pseries_probe_fw_features();

	/*
	 * Initialize pkey features and default AMR/IAMR values
	 */
	pkey_early_init_devtree();

#ifdef CONFIG_PPC_PS3
	/* Identify PS3 firmware */
	if (of_flat_dt_is_compatible(of_get_flat_dt_root(), "sony,ps3"))
		powerpc_firmware_features |= FW_FEATURE_PS3_POSSIBLE;
#endif

	/* If kexec left a PLPKS password in the DT, get it and clear it */
	plpks_early_init_devtree();

	tm_init();

	DBG(" <- early_init_devtree()\n");
}

#ifdef CONFIG_RELOCATABLE
/*
 * This function runs before early_init_devtree, so we have to initialize
 * initial_boot_params.
 */
void __init early_get_first_memblock_info(void *params, phys_addr_t *size)
{
	/* Setup flat device-tree pointer */
	initial_boot_params = params;

	/*
	 * Scan the memory nodes and set add_mem_to_memblock to 0 to avoid
	 * modifying the memblock.
	 */
	add_mem_to_memblock = 0;
	early_init_dt_scan_root();
	early_init_dt_scan_memory_ppc();
	add_mem_to_memblock = 1;

	if (size)
		*size = first_memblock_size;
}
#endif

/*******
 *
 * New implementation of the OF "find" APIs, return a refcounted
 * object, call of_node_put() when done.  The device tree and list
 * are protected by a rw_lock.
 *
 * Note that property management will need some locking as well,
 * this isn't dealt with yet.
 *
 *******/

/**
 * of_get_ibm_chip_id - Returns the IBM "chip-id" of a device
 * @np: device node of the device
 *
 * This looks for a property "ibm,chip-id" in the node or any
 * of its parents and returns its content, or -1 if it cannot
 * be found.
 */
int of_get_ibm_chip_id(struct device_node *np)
{
	of_node_get(np);
	while (np) {
		u32 chip_id;

		/*
		 * Skiboot may produce memory nodes that contain more than one
		 * cell in chip-id, we only read the first one here.
		 */
		if (!of_property_read_u32(np, "ibm,chip-id", &chip_id)) {
			of_node_put(np);
			return chip_id;
		}

		np = of_get_next_parent(np);
	}
	return -1;
}
EXPORT_SYMBOL(of_get_ibm_chip_id);

/**
 * cpu_to_chip_id - Return the cpu's chip-id
 * @cpu: The logical cpu number.
 *
 * Return the value of the ibm,chip-id property corresponding to the given
 * logical cpu number. If the chip-id cannot be found, returns -1.
 */
int cpu_to_chip_id(int cpu)
{
	struct device_node *np;
	int ret = -1, idx;

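	/* chip_id_lookup_table caches one chip-id entry per core */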
	idx = cpu / threads_per_core;
	if (chip_id_lookup_table && chip_id_lookup_table[idx] != -1)
		return chip_id_lookup_table[idx];

	np = of_get_cpu_node(cpu, NULL);
	if (np) {
		ret = of_get_ibm_chip_id(np);
		of_node_put(np);

		if (chip_id_lookup_table)
			chip_id_lookup_table[idx] = ret;
	}

	return ret;
}
EXPORT_SYMBOL(cpu_to_chip_id);

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
#ifdef CONFIG_SMP
	/*
	 * Early firmware scanning must use this rather than
	 * get_hard_smp_processor_id because we don't have pacas allocated
	 * until memory topology is discovered.
	 */
	if (cpu_to_phys_id != NULL)
		return (int)phys_id == cpu_to_phys_id[cpu];
#endif

	return (int)phys_id == get_hard_smp_processor_id(cpu);
}