xref: /openbmc/u-boot/arch/arm/mach-imx/imx8/cpu.c (revision f77d4410)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2018 NXP
 */

#include <common.h>
#include <clk.h>
#include <cpu.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <dm/lists.h>
#include <dm/uclass.h>
#include <errno.h>
#include <asm/arch/sci/sci.h>
#include <asm/arch/sys_proto.h>
#include <asm/arch-imx/cpu.h>
#include <asm/armv8/cpu.h>
#include <asm/armv8/mmu.h>
#include <asm/mach-imx/boot_mode.h>

DECLARE_GLOBAL_DATA_PTR;

#define BT_PASSOVER_TAG	0x504F
struct pass_over_info_t *get_pass_over_info(void)
{
	struct pass_over_info_t *p =
		(struct pass_over_info_t *)PASS_OVER_INFO_ADDR;

	if (p->barker != BT_PASSOVER_TAG ||
	    p->len != sizeof(struct pass_over_info_t))
		return NULL;

	return p;
}

int arch_cpu_init(void)
{
	struct pass_over_info_t *pass_over = get_pass_over_info();

	if (pass_over && pass_over->g_ap_mu == 0) {
		/*
		 * When ap_mu is 0, it means U-Boot was booted
		 * from the first container
		 */
		sc_misc_boot_status(-1, SC_MISC_BOOT_STATUS_SUCCESS);
	}

	return 0;
}

int arch_cpu_init_dm(void)
{
	struct udevice *devp;
	int node, ret;

	node = fdt_node_offset_by_compatible(gd->fdt_blob, -1, "fsl,imx8-mu");
	ret = device_bind_driver_to_node(gd->dm_root, "imx8_scu", "imx8_scu",
					 offset_to_ofnode(node), &devp);

	if (ret) {
		printf("could not find scu %d\n", ret);
		return ret;
	}

	ret = device_probe(devp);
	if (ret) {
		printf("scu probe failed %d\n", ret);
		return ret;
	}

	return 0;
}

int print_bootinfo(void)
{
	enum boot_device bt_dev = get_boot_device();

	puts("Boot:  ");
	switch (bt_dev) {
	case SD1_BOOT:
		puts("SD0\n");
		break;
	case SD2_BOOT:
		puts("SD1\n");
		break;
	case SD3_BOOT:
		puts("SD2\n");
		break;
	case MMC1_BOOT:
		puts("MMC0\n");
		break;
	case MMC2_BOOT:
		puts("MMC1\n");
		break;
	case MMC3_BOOT:
		puts("MMC2\n");
		break;
	case FLEXSPI_BOOT:
		puts("FLEXSPI\n");
		break;
	case SATA_BOOT:
		puts("SATA\n");
		break;
	case NAND_BOOT:
		puts("NAND\n");
		break;
	case USB_BOOT:
		puts("USB\n");
		break;
	default:
		printf("Unknown device %u\n", bt_dev);
		break;
	}

	return 0;
}

enum boot_device get_boot_device(void)
{
	enum boot_device boot_dev = SD1_BOOT;

	sc_rsrc_t dev_rsrc;

	sc_misc_get_boot_dev(-1, &dev_rsrc);

	switch (dev_rsrc) {
	case SC_R_SDHC_0:
		boot_dev = MMC1_BOOT;
		break;
	case SC_R_SDHC_1:
		boot_dev = SD2_BOOT;
		break;
	case SC_R_SDHC_2:
		boot_dev = SD3_BOOT;
		break;
	case SC_R_NAND:
		boot_dev = NAND_BOOT;
		break;
	case SC_R_FSPI_0:
		boot_dev = FLEXSPI_BOOT;
		break;
	case SC_R_SATA_0:
		boot_dev = SATA_BOOT;
		break;
	case SC_R_USB_0:
	case SC_R_USB_1:
	case SC_R_USB_2:
		boot_dev = USB_BOOT;
		break;
	default:
		break;
	}

	return boot_dev;
}

#ifdef CONFIG_ENV_IS_IN_MMC
__weak int board_mmc_get_env_dev(int devno)
{
	return CONFIG_SYS_MMC_ENV_DEV;
}

int mmc_get_env_dev(void)
{
	sc_rsrc_t dev_rsrc;
	int devno;

	sc_misc_get_boot_dev(-1, &dev_rsrc);

	switch (dev_rsrc) {
	case SC_R_SDHC_0:
		devno = 0;
		break;
	case SC_R_SDHC_1:
		devno = 1;
		break;
	case SC_R_SDHC_2:
		devno = 2;
		break;
	default:
		/* Not booting from SD/MMC, use the default value */
		return CONFIG_SYS_MMC_ENV_DEV;
	}

	return board_mmc_get_env_dev(devno);
}
#endif
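
/*
 * Usage sketch (hypothetical board code, not part of this file): a board
 * can override the weak board_mmc_get_env_dev() above to translate the
 * SDHC controller index derived from the boot device into its own U-Boot
 * mmc device number, e.g. when only the second and third controllers are
 * wired up and enumerate as mmc0/mmc1:
 *
 *	int board_mmc_get_env_dev(int devno)
 *	{
 *		return devno - 1;
 *	}
 */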

#define MEMSTART_ALIGNMENT  SZ_2M /* Align the memory start to 2MB */

/* Get the start/end of memory region @mr if it is owned by this partition */
static int get_owned_memreg(sc_rm_mr_t mr, sc_faddr_t *addr_start,
			    sc_faddr_t *addr_end)
{
	sc_faddr_t start, end;
	int ret;
	bool owned;

	owned = sc_rm_is_memreg_owned(-1, mr);
	if (owned) {
		ret = sc_rm_get_memreg_info(-1, mr, &start, &end);
		if (ret) {
			printf("Memreg get info failed, %d\n", ret);
			return -EINVAL;
		}
		debug("0x%llx -- 0x%llx\n", start, end);
		*addr_start = start;
		*addr_end = end;

		return 0;
	}

	return -EINVAL;
}

phys_size_t get_effective_memsize(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end, end1;
	int err;

	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;

	for (mr = 0; mr < 64; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			start = roundup(start, MEMSTART_ALIGNMENT);
			/* Memory region too small, do not use it */
			if (start > end)
				continue;

			/* Find the memory region that U-Boot runs in */
			if (start >= PHYS_SDRAM_1 && start <= end1 &&
			    (start <= CONFIG_SYS_TEXT_BASE &&
			    end >= CONFIG_SYS_TEXT_BASE)) {
				if ((end + 1) <= ((sc_faddr_t)PHYS_SDRAM_1 +
				    PHYS_SDRAM_1_SIZE))
					return (end - PHYS_SDRAM_1 + 1);
				else
					return PHYS_SDRAM_1_SIZE;
			}
		}
	}

	return PHYS_SDRAM_1_SIZE;
}

int dram_init(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end, end1, end2;
	int err;

	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
	end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;
	for (mr = 0; mr < 64; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			start = roundup(start, MEMSTART_ALIGNMENT);
			/* Memory region too small, do not use it */
			if (start > end)
				continue;

			if (start >= PHYS_SDRAM_1 && start <= end1) {
				if ((end + 1) <= end1)
					gd->ram_size += end - start + 1;
				else
					gd->ram_size += end1 - start;
			} else if (start >= PHYS_SDRAM_2 && start <= end2) {
				if ((end + 1) <= end2)
					gd->ram_size += end - start + 1;
				else
					gd->ram_size += end2 - start;
			}
		}
	}

	/* On error, fall back to the default values */
	if (!gd->ram_size) {
		gd->ram_size = PHYS_SDRAM_1_SIZE;
		gd->ram_size += PHYS_SDRAM_2_SIZE;
	}
	return 0;
}

/* Keep gd->bd->bi_dram[] sorted by start address as new banks are added */
static void dram_bank_sort(int current_bank)
{
	phys_addr_t start;
	phys_size_t size;

	while (current_bank > 0) {
		if (gd->bd->bi_dram[current_bank - 1].start >
		    gd->bd->bi_dram[current_bank].start) {
			start = gd->bd->bi_dram[current_bank - 1].start;
			size = gd->bd->bi_dram[current_bank - 1].size;

			gd->bd->bi_dram[current_bank - 1].start =
				gd->bd->bi_dram[current_bank].start;
			gd->bd->bi_dram[current_bank - 1].size =
				gd->bd->bi_dram[current_bank].size;

			gd->bd->bi_dram[current_bank].start = start;
			gd->bd->bi_dram[current_bank].size = size;
		}
		current_bank--;
	}
}

int dram_init_banksize(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end, end1, end2;
	int i = 0;
	int err;

	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
	end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;

	for (mr = 0; mr < 64 && i < CONFIG_NR_DRAM_BANKS; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			start = roundup(start, MEMSTART_ALIGNMENT);
			if (start > end) /* Memory region too small, skip it */
				continue;

			if (start >= PHYS_SDRAM_1 && start <= end1) {
				gd->bd->bi_dram[i].start = start;

				if ((end + 1) <= end1)
					gd->bd->bi_dram[i].size =
						end - start + 1;
				else
					gd->bd->bi_dram[i].size = end1 - start;

				dram_bank_sort(i);
				i++;
			} else if (start >= PHYS_SDRAM_2 && start <= end2) {
				gd->bd->bi_dram[i].start = start;

				if ((end + 1) <= end2)
					gd->bd->bi_dram[i].size =
						end - start + 1;
				else
					gd->bd->bi_dram[i].size = end2 - start;

				dram_bank_sort(i);
				i++;
			}
		}
	}

	/* On error, fall back to the default values */
	if (!i) {
		gd->bd->bi_dram[0].start = PHYS_SDRAM_1;
		gd->bd->bi_dram[0].size = PHYS_SDRAM_1_SIZE;
		gd->bd->bi_dram[1].start = PHYS_SDRAM_2;
		gd->bd->bi_dram[1].size = PHYS_SDRAM_2_SIZE;
	}

	return 0;
}

/* Map DRAM ranges as normal memory and everything else as device memory */
static u64 get_block_attrs(sc_faddr_t addr_start)
{
	u64 attr = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE |
		PTE_BLOCK_PXN | PTE_BLOCK_UXN;

	if ((addr_start >= PHYS_SDRAM_1 &&
	     addr_start <= ((sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE)) ||
	    (addr_start >= PHYS_SDRAM_2 &&
	     addr_start <= ((sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE)))
		return (PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_OUTER_SHARE);

	return attr;
}

/* Clamp the block so it does not extend past the DRAM bank it starts in */
static u64 get_block_size(sc_faddr_t addr_start, sc_faddr_t addr_end)
{
	sc_faddr_t end1, end2;

	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
	end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;

	if (addr_start >= PHYS_SDRAM_1 && addr_start <= end1) {
		if ((addr_end + 1) > end1)
			return end1 - addr_start;
	} else if (addr_start >= PHYS_SDRAM_2 && addr_start <= end2) {
		if ((addr_end + 1) > end2)
			return end2 - addr_start;
	}

	return (addr_end - addr_start + 1);
}

#define MAX_PTE_ENTRIES 512
#define MAX_MEM_MAP_REGIONS 16

static struct mm_region imx8_mem_map[MAX_MEM_MAP_REGIONS];
struct mm_region *mem_map = imx8_mem_map;

void enable_caches(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end;
	int err, i;

	/* Create a map for register access from 0x1c000000 to 0x80000000 */
	imx8_mem_map[0].virt = 0x1c000000UL;
	imx8_mem_map[0].phys = 0x1c000000UL;
	imx8_mem_map[0].size = 0x64000000UL;
	imx8_mem_map[0].attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN;

	i = 1;
	for (mr = 0; mr < 64 && i < MAX_MEM_MAP_REGIONS; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			imx8_mem_map[i].virt = start;
			imx8_mem_map[i].phys = start;
			imx8_mem_map[i].size = get_block_size(start, end);
			imx8_mem_map[i].attrs = get_block_attrs(start);
			i++;
		}
	}

	if (i < MAX_MEM_MAP_REGIONS) {
		imx8_mem_map[i].size = 0;
		imx8_mem_map[i].attrs = 0;
	} else {
		puts("Error, need more MEM MAP REGIONS reserved\n");
		icache_enable();
		return;
	}

	for (i = 0; i < MAX_MEM_MAP_REGIONS; i++) {
		debug("[%d] vir = 0x%llx phys = 0x%llx size = 0x%llx attrs = 0x%llx\n",
		      i, imx8_mem_map[i].virt, imx8_mem_map[i].phys,
		      imx8_mem_map[i].size, imx8_mem_map[i].attrs);
	}

	icache_enable();
	dcache_enable();
}

#ifndef CONFIG_SYS_DCACHE_OFF
u64 get_page_table_size(void)
{
	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
	u64 size = 0;

	/*
	 * For each memory region, the max table size:
	 * 2 level 3 tables + 2 level 2 tables + 1 level 1 table
	 */
	size = (2 + 2 + 1) * one_pt * MAX_MEM_MAP_REGIONS + one_pt;

	/*
	 * We need to duplicate our page table once to have an emergency pt to
	 * resort to when splitting page tables later on
	 */
	size *= 2;

	/*
	 * We may need to split page tables later on if dcache settings change,
	 * so reserve up to 4 (random pick) page tables for that.
	 */
	size += one_pt * 4;

	return size;
}
#endif
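
/*
 * Worked example for get_page_table_size() above (illustrative only, using
 * the constants defined in this file):
 *	one_pt = 512 entries * 8 bytes            =   4 KiB
 *	size   = 5 * 4 KiB * 16 regions + 4 KiB   = 324 KiB
 *	doubled for the emergency copy            = 648 KiB
 *	plus 4 spare tables for later splits      = 664 KiB reserved
 */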

#define FUSE_MAC0_WORD0 708
#define FUSE_MAC0_WORD1 709
#define FUSE_MAC1_WORD0 710
#define FUSE_MAC1_WORD1 711

void imx_get_mac_from_fuse(int dev_id, unsigned char *mac)
{
	u32 word[2], val[2] = {};
	int i, ret;

	if (dev_id == 0) {
		word[0] = FUSE_MAC0_WORD0;
		word[1] = FUSE_MAC0_WORD1;
	} else {
		word[0] = FUSE_MAC1_WORD0;
		word[1] = FUSE_MAC1_WORD1;
	}

	for (i = 0; i < 2; i++) {
		ret = sc_misc_otp_fuse_read(-1, word[i], &val[i]);
		if (ret < 0)
			goto err;
	}

	mac[0] = val[0];
	mac[1] = val[0] >> 8;
	mac[2] = val[0] >> 16;
	mac[3] = val[0] >> 24;
	mac[4] = val[1];
	mac[5] = val[1] >> 8;

	debug("%s: MAC%d: %02x.%02x.%02x.%02x.%02x.%02x\n",
	      __func__, dev_id, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return;
err:
	printf("%s: fuse %d, err: %d\n", __func__, word[i], ret);
}
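
/*
 * Usage sketch (hypothetical caller, not part of this file): board or
 * driver code that wants the fused MAC address for the first Ethernet
 * port could do something along these lines, assuming the usual U-Boot
 * environment helpers are available:
 *
 *	unsigned char mac[6];
 *
 *	imx_get_mac_from_fuse(0, mac);
 *	if (is_valid_ethaddr(mac))
 *		eth_env_set_enetaddr("ethaddr", mac);
 */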

#if CONFIG_IS_ENABLED(CPU)
struct cpu_imx_platdata {
	const char *name;
	const char *rev;
	const char *type;
	u32 cpurev;
	u32 freq_mhz;
};

u32 get_cpu_rev(void)
{
	u32 id = 0, rev = 0;
	int ret;

	ret = sc_misc_get_control(-1, SC_R_SYSTEM, SC_C_ID, &id);
	if (ret)
		return 0;

	rev = (id >> 5) & 0xf;
	id = (id & 0x1f) + MXC_SOC_IMX8;  /* Dummy ID for chip */

	return (id << 12) | rev;
}

const char *get_imx8_type(u32 imxtype)
{
	switch (imxtype) {
	case MXC_CPU_IMX8QXP:
	case MXC_CPU_IMX8QXP_A0:
		return "QXP";
	default:
		return "??";
	}
}

const char *get_imx8_rev(u32 rev)
{
	switch (rev) {
	case CHIP_REV_A:
		return "A";
	case CHIP_REV_B:
		return "B";
	default:
		return "?";
	}
}

const char *get_core_name(void)
{
	if (is_cortex_a35())
		return "A35";
	else if (is_cortex_a53())
		return "A53";
	else if (is_cortex_a72())
		return "A72";
	else
		return "?";
}

int cpu_imx_get_desc(struct udevice *dev, char *buf, int size)
{
	struct cpu_imx_platdata *plat = dev_get_platdata(dev);

	if (size < 100)
		return -ENOSPC;

	snprintf(buf, size, "CPU:   Freescale i.MX8%s Rev%s %s at %u MHz\n",
		 plat->type, plat->rev, plat->name, plat->freq_mhz);

	return 0;
}

static int cpu_imx_get_info(struct udevice *dev, struct cpu_info *info)
{
	struct cpu_imx_platdata *plat = dev_get_platdata(dev);

	info->cpu_freq = plat->freq_mhz * 1000;
	info->features = BIT(CPU_FEAT_L1_CACHE) | BIT(CPU_FEAT_MMU);
	return 0;
}

static int cpu_imx_get_count(struct udevice *dev)
{
	return 4;
}

static int cpu_imx_get_vendor(struct udevice *dev, char *buf, int size)
{
	snprintf(buf, size, "NXP");
	return 0;
}

static const struct cpu_ops cpu_imx8_ops = {
	.get_desc	= cpu_imx_get_desc,
	.get_info	= cpu_imx_get_info,
	.get_count	= cpu_imx_get_count,
	.get_vendor	= cpu_imx_get_vendor,
};

static const struct udevice_id cpu_imx8_ids[] = {
	{ .compatible = "arm,cortex-a35" },
	{ }
};

static int imx8_cpu_probe(struct udevice *dev)
{
	struct cpu_imx_platdata *plat = dev_get_platdata(dev);
	struct clk cpu_clk;
	u32 cpurev;
	int ret;

	cpurev = get_cpu_rev();
	plat->cpurev = cpurev;
	plat->name = get_core_name();
	plat->rev = get_imx8_rev(cpurev & 0xFFF);
	plat->type = get_imx8_type((cpurev & 0xFF000) >> 12);

	ret = clk_get_by_index(dev, 0, &cpu_clk);
	if (ret) {
		debug("%s: Failed to get CPU clk: %d\n", __func__, ret);
		return 0;
	}

	plat->freq_mhz = clk_get_rate(&cpu_clk) / 1000000;
	return 0;
}

U_BOOT_DRIVER(cpu_imx8_drv) = {
	.name		= "imx8x_cpu",
	.id		= UCLASS_CPU,
	.of_match	= cpu_imx8_ids,
	.ops		= &cpu_imx8_ops,
	.probe		= imx8_cpu_probe,
	.platdata_auto_alloc_size = sizeof(struct cpu_imx_platdata),
	.flags		= DM_FLAG_PRE_RELOC,
};
#endif
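
/*
 * Usage sketch (hypothetical caller, not part of this file): generic code
 * can reach this driver through the CPU uclass once a node matching
 * "arm,cortex-a35" has been bound and probed, roughly:
 *
 *	struct udevice *dev;
 *	char desc[100];
 *
 *	uclass_first_device_err(UCLASS_CPU, &dev);
 *	if (!cpu_get_desc(dev, desc, sizeof(desc)))
 *		puts(desc);
 */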