xref: /openbmc/u-boot/arch/arm/mach-imx/imx8/cpu.c (revision 3d7891d3)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2018 NXP
 */

#include <common.h>
#include <clk.h>
#include <cpu.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <dm/lists.h>
#include <dm/uclass.h>
#include <errno.h>
#include <asm/arch/sci/sci.h>
#include <asm/arch/sys_proto.h>
#include <asm/arch-imx/cpu.h>
#include <asm/armv8/cpu.h>
#include <asm/armv8/mmu.h>
#include <asm/mach-imx/boot_mode.h>

DECLARE_GLOBAL_DATA_PTR;

#define BT_PASSOVER_TAG	0x504F
struct pass_over_info_t *get_pass_over_info(void)
{
	struct pass_over_info_t *p =
		(struct pass_over_info_t *)PASS_OVER_INFO_ADDR;

	if (p->barker != BT_PASSOVER_TAG ||
	    p->len != sizeof(struct pass_over_info_t))
		return NULL;

	return p;
}

int arch_cpu_init(void)
{
#ifdef CONFIG_SPL_BUILD
	struct pass_over_info_t *pass_over;

	if (is_soc_rev(CHIP_REV_A)) {
		pass_over = get_pass_over_info();
		if (pass_over && pass_over->g_ap_mu == 0) {
			/*
			 * When ap_mu is 0, it means U-Boot booted
			 * from the first container
			 */
			sc_misc_boot_status(-1, SC_MISC_BOOT_STATUS_SUCCESS);
		}
	}
#endif

	return 0;
}

int arch_cpu_init_dm(void)
{
	struct udevice *devp;
	int node, ret;

	node = fdt_node_offset_by_compatible(gd->fdt_blob, -1, "fsl,imx8-mu");
	ret = device_bind_driver_to_node(gd->dm_root, "imx8_scu", "imx8_scu",
					 offset_to_ofnode(node), &devp);

	if (ret) {
		printf("could not find scu %d\n", ret);
		return ret;
	}

	ret = device_probe(devp);
	if (ret) {
		printf("scu probe failed %d\n", ret);
		return ret;
	}

	return 0;
}

int print_bootinfo(void)
{
	enum boot_device bt_dev = get_boot_device();

	puts("Boot:  ");
	switch (bt_dev) {
	case SD1_BOOT:
		puts("SD0\n");
		break;
	case SD2_BOOT:
		puts("SD1\n");
		break;
	case SD3_BOOT:
		puts("SD2\n");
		break;
	case MMC1_BOOT:
		puts("MMC0\n");
		break;
	case MMC2_BOOT:
		puts("MMC1\n");
		break;
	case MMC3_BOOT:
		puts("MMC2\n");
		break;
	case FLEXSPI_BOOT:
		puts("FLEXSPI\n");
		break;
	case SATA_BOOT:
		puts("SATA\n");
		break;
	case NAND_BOOT:
		puts("NAND\n");
		break;
	case USB_BOOT:
		puts("USB\n");
		break;
	default:
		printf("Unknown device %u\n", bt_dev);
		break;
	}

	return 0;
}

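/*
 * Map the boot resource reported by the SCU to U-Boot's boot_device
 * enum: SDHC0 is reported as MMC1_BOOT, SDHC1/SDHC2 as SD2/SD3_BOOT,
 * and any unrecognised resource falls back to the SD1_BOOT default.
 */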
enum boot_device get_boot_device(void)
{
	enum boot_device boot_dev = SD1_BOOT;

	sc_rsrc_t dev_rsrc;

	sc_misc_get_boot_dev(-1, &dev_rsrc);

	switch (dev_rsrc) {
	case SC_R_SDHC_0:
		boot_dev = MMC1_BOOT;
		break;
	case SC_R_SDHC_1:
		boot_dev = SD2_BOOT;
		break;
	case SC_R_SDHC_2:
		boot_dev = SD3_BOOT;
		break;
	case SC_R_NAND:
		boot_dev = NAND_BOOT;
		break;
	case SC_R_FSPI_0:
		boot_dev = FLEXSPI_BOOT;
		break;
	case SC_R_SATA_0:
		boot_dev = SATA_BOOT;
		break;
	case SC_R_USB_0:
	case SC_R_USB_1:
	case SC_R_USB_2:
		boot_dev = USB_BOOT;
		break;
	default:
		break;
	}

	return boot_dev;
}

#ifdef CONFIG_ENV_IS_IN_MMC
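/*
 * board_mmc_get_env_dev() is deliberately weak: a board file may
 * override it to remap the SDHC controller number detected below to
 * the mmc device index that actually holds the environment.
 */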
__weak int board_mmc_get_env_dev(int devno)
{
	return CONFIG_SYS_MMC_ENV_DEV;
}

int mmc_get_env_dev(void)
{
	sc_rsrc_t dev_rsrc;
	int devno;

	sc_misc_get_boot_dev(-1, &dev_rsrc);

	switch (dev_rsrc) {
	case SC_R_SDHC_0:
		devno = 0;
		break;
	case SC_R_SDHC_1:
		devno = 1;
		break;
	case SC_R_SDHC_2:
		devno = 2;
		break;
	default:
		/* If not booting from SD/MMC, use the default value */
		return CONFIG_SYS_MMC_ENV_DEV;
	}

	return board_mmc_get_env_dev(devno);
}
#endif

#define MEMSTART_ALIGNMENT  SZ_2M /* Align the memory start to 2MB */

static int get_owned_memreg(sc_rm_mr_t mr, sc_faddr_t *addr_start,
			    sc_faddr_t *addr_end)
{
	sc_faddr_t start, end;
	int ret;
	bool owned;

	owned = sc_rm_is_memreg_owned(-1, mr);
	if (owned) {
		ret = sc_rm_get_memreg_info(-1, mr, &start, &end);
		if (ret) {
			printf("Memreg get info failed, %d\n", ret);
			return -EINVAL;
		}
		debug("0x%llx -- 0x%llx\n", start, end);
		*addr_start = start;
		*addr_end = end;

		return 0;
	}

	return -EINVAL;
}

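/*
 * Return the usable size of the first DRAM bank: the span from
 * PHYS_SDRAM_1 up to the end of the SCU-owned region that contains the
 * running U-Boot image (CONFIG_SYS_TEXT_BASE), capped at
 * PHYS_SDRAM_1_SIZE.
 */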
phys_size_t get_effective_memsize(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end, end1;
	int err;

	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;

	for (mr = 0; mr < 64; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			start = roundup(start, MEMSTART_ALIGNMENT);
			/* Memory region too small after alignment, skip it */
			if (start > end)
				continue;

			/* Find the memory region that U-Boot runs in */
			if (start >= PHYS_SDRAM_1 && start <= end1 &&
			    (start <= CONFIG_SYS_TEXT_BASE &&
			    end >= CONFIG_SYS_TEXT_BASE)) {
				if ((end + 1) <= ((sc_faddr_t)PHYS_SDRAM_1 +
				    PHYS_SDRAM_1_SIZE))
					return (end - PHYS_SDRAM_1 + 1);
				else
					return PHYS_SDRAM_1_SIZE;
			}
		}
	}

	return PHYS_SDRAM_1_SIZE;
}

int dram_init(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end, end1, end2;
	int err;

	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
	end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;
	for (mr = 0; mr < 64; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			start = roundup(start, MEMSTART_ALIGNMENT);
			/* Memory region too small after alignment, skip it */
			if (start > end)
				continue;

			if (start >= PHYS_SDRAM_1 && start <= end1) {
				if ((end + 1) <= end1)
					gd->ram_size += end - start + 1;
				else
					gd->ram_size += end1 - start;
			} else if (start >= PHYS_SDRAM_2 && start <= end2) {
				if ((end + 1) <= end2)
					gd->ram_size += end - start + 1;
				else
					gd->ram_size += end2 - start;
			}
		}
	}

	/* If no owned region was found, fall back to the default size */
	if (!gd->ram_size) {
		gd->ram_size = PHYS_SDRAM_1_SIZE;
		gd->ram_size += PHYS_SDRAM_2_SIZE;
	}
	return 0;
}

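/*
 * Keep gd->bd->bi_dram[] ordered by start address: bubble the entry
 * just written at 'current_bank' towards the front with adjacent swaps.
 */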
static void dram_bank_sort(int current_bank)
{
	phys_addr_t start;
	phys_size_t size;

	while (current_bank > 0) {
		if (gd->bd->bi_dram[current_bank - 1].start >
		    gd->bd->bi_dram[current_bank].start) {
			start = gd->bd->bi_dram[current_bank - 1].start;
			size = gd->bd->bi_dram[current_bank - 1].size;

			gd->bd->bi_dram[current_bank - 1].start =
				gd->bd->bi_dram[current_bank].start;
			gd->bd->bi_dram[current_bank - 1].size =
				gd->bd->bi_dram[current_bank].size;

			gd->bd->bi_dram[current_bank].start = start;
			gd->bd->bi_dram[current_bank].size = size;
		}
		current_bank--;
	}
}

int dram_init_banksize(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end, end1, end2;
	int i = 0;
	int err;

	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
	end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;

	for (mr = 0; mr < 64 && i < CONFIG_NR_DRAM_BANKS; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			start = roundup(start, MEMSTART_ALIGNMENT);
			if (start > end) /* Region too small, skip it */
				continue;

			if (start >= PHYS_SDRAM_1 && start <= end1) {
				gd->bd->bi_dram[i].start = start;

				if ((end + 1) <= end1)
					gd->bd->bi_dram[i].size =
						end - start + 1;
				else
					gd->bd->bi_dram[i].size = end1 - start;

				dram_bank_sort(i);
				i++;
			} else if (start >= PHYS_SDRAM_2 && start <= end2) {
				gd->bd->bi_dram[i].start = start;

				if ((end + 1) <= end2)
					gd->bd->bi_dram[i].size =
						end - start + 1;
				else
					gd->bd->bi_dram[i].size = end2 - start;

				dram_bank_sort(i);
				i++;
			}
		}
	}

	/* If no owned region was found, fall back to the default values */
	if (!i) {
		gd->bd->bi_dram[0].start = PHYS_SDRAM_1;
		gd->bd->bi_dram[0].size = PHYS_SDRAM_1_SIZE;
		gd->bd->bi_dram[1].start = PHYS_SDRAM_2;
		gd->bd->bi_dram[1].size = PHYS_SDRAM_2_SIZE;
	}

	return 0;
}

static u64 get_block_attrs(sc_faddr_t addr_start)
{
	u64 attr = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE |
		PTE_BLOCK_PXN | PTE_BLOCK_UXN;

	if ((addr_start >= PHYS_SDRAM_1 &&
	     addr_start <= ((sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE)) ||
	    (addr_start >= PHYS_SDRAM_2 &&
	     addr_start <= ((sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE)))
		return (PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_OUTER_SHARE);

	return attr;
}

static u64 get_block_size(sc_faddr_t addr_start, sc_faddr_t addr_end)
{
	sc_faddr_t end1, end2;

	end1 = (sc_faddr_t)PHYS_SDRAM_1 + PHYS_SDRAM_1_SIZE;
	end2 = (sc_faddr_t)PHYS_SDRAM_2 + PHYS_SDRAM_2_SIZE;

	if (addr_start >= PHYS_SDRAM_1 && addr_start <= end1) {
		if ((addr_end + 1) > end1)
			return end1 - addr_start;
	} else if (addr_start >= PHYS_SDRAM_2 && addr_start <= end2) {
		if ((addr_end + 1) > end2)
			return end2 - addr_start;
	}

	return (addr_end - addr_start + 1);
}

#define MAX_PTE_ENTRIES 512
#define MAX_MEM_MAP_REGIONS 16

static struct mm_region imx8_mem_map[MAX_MEM_MAP_REGIONS];
struct mm_region *mem_map = imx8_mem_map;

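/*
 * Build the MMU map at runtime: entry 0 covers the peripheral window
 * (0x1c000000 - 0x80000000), the following entries describe the memory
 * regions owned by this partition, and the list is terminated by an
 * entry with size 0.
 */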
void enable_caches(void)
{
	sc_rm_mr_t mr;
	sc_faddr_t start, end;
	int err, i;

	/* Create map for register access from 0x1c000000 to 0x80000000 */
	imx8_mem_map[0].virt = 0x1c000000UL;
	imx8_mem_map[0].phys = 0x1c000000UL;
	imx8_mem_map[0].size = 0x64000000UL;
	imx8_mem_map[0].attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN;

	i = 1;
	for (mr = 0; mr < 64 && i < MAX_MEM_MAP_REGIONS; mr++) {
		err = get_owned_memreg(mr, &start, &end);
		if (!err) {
			imx8_mem_map[i].virt = start;
			imx8_mem_map[i].phys = start;
			imx8_mem_map[i].size = get_block_size(start, end);
			imx8_mem_map[i].attrs = get_block_attrs(start);
			i++;
		}
	}

	if (i < MAX_MEM_MAP_REGIONS) {
		imx8_mem_map[i].size = 0;
		imx8_mem_map[i].attrs = 0;
	} else {
		puts("Error, need more MEM MAP REGIONS reserved\n");
		icache_enable();
		return;
	}

	for (i = 0; i < MAX_MEM_MAP_REGIONS; i++) {
		debug("[%d] vir = 0x%llx phys = 0x%llx size = 0x%llx attrs = 0x%llx\n",
		      i, imx8_mem_map[i].virt, imx8_mem_map[i].phys,
		      imx8_mem_map[i].size, imx8_mem_map[i].attrs);
	}

	icache_enable();
	dcache_enable();
}

#ifndef CONFIG_SYS_DCACHE_OFF
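/*
 * For reference: with MAX_PTE_ENTRIES = 512 and 8-byte descriptors,
 * one_pt below is 4 KiB, so the reservation works out to
 * 5 * 4 KiB * 16 + 4 KiB = 324 KiB, doubled to 648 KiB for the
 * emergency copy, plus 4 spare tables (16 KiB) = 664 KiB in total.
 */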
u64 get_page_table_size(void)
{
	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
	u64 size = 0;

	/*
	 * For each memory region, the max table size:
	 * 2 level 3 tables + 2 level 2 tables + 1 level 1 table
	 */
	size = (2 + 2 + 1) * one_pt * MAX_MEM_MAP_REGIONS + one_pt;

	/*
	 * We need to duplicate our page table once to have an emergency pt to
	 * resort to when splitting page tables later on
	 */
	size *= 2;

	/*
	 * We may need to split page tables later on if dcache settings change,
	 * so reserve up to 4 (random pick) page tables for that.
	 */
	size += one_pt * 4;

	return size;
}
#endif

#define FUSE_MAC0_WORD0 708
#define FUSE_MAC0_WORD1 709
#define FUSE_MAC1_WORD0 710
#define FUSE_MAC1_WORD1 711

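/*
 * Each MAC address is stored across two fuse words in little-endian
 * byte order: WORD0 holds mac[0..3] (mac[0] in the low byte) and the
 * low 16 bits of WORD1 hold mac[4..5].
 */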
void imx_get_mac_from_fuse(int dev_id, unsigned char *mac)
{
	u32 word[2], val[2] = {};
	int i, ret;

	if (dev_id == 0) {
		word[0] = FUSE_MAC0_WORD0;
		word[1] = FUSE_MAC0_WORD1;
	} else {
		word[0] = FUSE_MAC1_WORD0;
		word[1] = FUSE_MAC1_WORD1;
	}

	for (i = 0; i < 2; i++) {
		ret = sc_misc_otp_fuse_read(-1, word[i], &val[i]);
		if (ret < 0)
			goto err;
	}

	mac[0] = val[0];
	mac[1] = val[0] >> 8;
	mac[2] = val[0] >> 16;
	mac[3] = val[0] >> 24;
	mac[4] = val[1];
	mac[5] = val[1] >> 8;

	debug("%s: MAC%d: %02x.%02x.%02x.%02x.%02x.%02x\n",
	      __func__, dev_id, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return;
err:
	printf("%s: fuse %d, err: %d\n", __func__, word[i], ret);
}

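/*
 * SC_C_ID packs the SoC identifier in bits [4:0] and the silicon
 * revision in bits [8:5]; the value returned here is
 * ((MXC_SOC_IMX8 + id) << 12) | rev, which imx8_cpu_probe() below
 * decodes with the 0xFF000 and 0xFFF masks.
 */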
u32 get_cpu_rev(void)
{
	u32 id = 0, rev = 0;
	int ret;

	ret = sc_misc_get_control(-1, SC_R_SYSTEM, SC_C_ID, &id);
	if (ret)
		return 0;

	rev = (id >> 5) & 0xf;
	id = (id & 0x1f) + MXC_SOC_IMX8;  /* Dummy ID for chip */

	return (id << 12) | rev;
}

#if CONFIG_IS_ENABLED(CPU)
struct cpu_imx_platdata {
	const char *name;
	const char *rev;
	const char *type;
	u32 cpurev;
	u32 freq_mhz;
};

const char *get_imx8_type(u32 imxtype)
{
	switch (imxtype) {
	case MXC_CPU_IMX8QXP:
	case MXC_CPU_IMX8QXP_A0:
		return "QXP";
	default:
		return "??";
	}
}

const char *get_imx8_rev(u32 rev)
{
	switch (rev) {
	case CHIP_REV_A:
		return "A";
	case CHIP_REV_B:
		return "B";
	default:
		return "?";
	}
}

const char *get_core_name(void)
{
	if (is_cortex_a35())
		return "A35";
	else if (is_cortex_a53())
		return "A53";
	else if (is_cortex_a72())
		return "A72";
	else
		return "?";
}

int cpu_imx_get_desc(struct udevice *dev, char *buf, int size)
{
	struct cpu_imx_platdata *plat = dev_get_platdata(dev);

	if (size < 100)
		return -ENOSPC;

	snprintf(buf, size, "NXP i.MX8%s Rev%s %s at %u MHz\n",
		 plat->type, plat->rev, plat->name, plat->freq_mhz);

	return 0;
}

static int cpu_imx_get_info(struct udevice *dev, struct cpu_info *info)
{
	struct cpu_imx_platdata *plat = dev_get_platdata(dev);

	info->cpu_freq = plat->freq_mhz * 1000;
	info->features = BIT(CPU_FEAT_L1_CACHE) | BIT(CPU_FEAT_MMU);
	return 0;
}

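/*
 * The core count is hard-coded to four, presumably matching the quad
 * Cortex-A35 cluster on i.MX8QXP rather than being queried from the SCU.
 */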
static int cpu_imx_get_count(struct udevice *dev)
{
	return 4;
}

static int cpu_imx_get_vendor(struct udevice *dev, char *buf, int size)
{
	snprintf(buf, size, "NXP");
	return 0;
}

static const struct cpu_ops cpu_imx8_ops = {
	.get_desc	= cpu_imx_get_desc,
	.get_info	= cpu_imx_get_info,
	.get_count	= cpu_imx_get_count,
	.get_vendor	= cpu_imx_get_vendor,
};

static const struct udevice_id cpu_imx8_ids[] = {
	{ .compatible = "arm,cortex-a35" },
	{ }
};

static ulong imx8_get_cpu_rate(void)
{
	ulong rate;
	int ret;

	ret = sc_pm_get_clock_rate(-1, SC_R_A35, SC_PM_CLK_CPU,
				   (sc_pm_clock_rate_t *)&rate);
	if (ret) {
		printf("Could not read CPU frequency: %d\n", ret);
		return 0;
	}

	return rate;
}

static int imx8_cpu_probe(struct udevice *dev)
{
	struct cpu_imx_platdata *plat = dev_get_platdata(dev);
	u32 cpurev;

	cpurev = get_cpu_rev();
	plat->cpurev = cpurev;
	plat->name = get_core_name();
	plat->rev = get_imx8_rev(cpurev & 0xFFF);
	plat->type = get_imx8_type((cpurev & 0xFF000) >> 12);
	plat->freq_mhz = imx8_get_cpu_rate() / 1000000;
	return 0;
}

U_BOOT_DRIVER(cpu_imx8_drv) = {
	.name		= "imx8x_cpu",
	.id		= UCLASS_CPU,
	.of_match	= cpu_imx8_ids,
	.ops		= &cpu_imx8_ops,
	.probe		= imx8_cpu_probe,
	.platdata_auto_alloc_size = sizeof(struct cpu_imx_platdata),
	.flags		= DM_FLAG_PRE_RELOC,
};
#endif