// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2017 NXP
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 */

#include <common.h>
#include <fsl_ddr_sdram.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <fm_eth.h>
#include <asm/armv8/mmu.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#include <fsl_immap.h>
#include <asm/arch/mp.h>
#include <efi_loader.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#include <asm/armv8/sec_firmware.h>
#ifdef CONFIG_SYS_FSL_DDR
#include <fsl_ddr.h>
#endif
#include <asm/arch/clock.h>
#include <hwconfig.h>
#include <fsl_qbman.h>

DECLARE_GLOBAL_DATA_PTR;

struct mm_region *mem_map = early_map;

void cpu_name(char *name)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	unsigned int i, svr, ver;

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

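	/*
	 * Match the SVR (with the E-bit masked off) against the static
	 * cpu_type_list; e.g. an E-fused LS2088A rev 1.0 part would be
	 * reported as "LS2088AE Rev1.0".
	 */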
	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
			strcpy(name, cpu_type_list[i].name);

			if (IS_E_PROCESSOR(svr))
				strcat(name, "E");

			sprintf(name + strlen(name), " Rev%d.%d",
				SVR_MAJ(svr), SVR_MIN(svr));
			break;
		}

	if (i == ARRAY_SIZE(cpu_type_list))
		strcpy(name, "unknown");
}

#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * To start the MMU before DDR is available, we create the MMU tables
 * in SRAM. The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We
 * use three levels of translation tables here to cover a 40-bit
 * address space, with a 4KB granule size and T0SZ=24.
 * Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for other
 * purposes.
 * Note: the debug print in cache_v8.c is not usable for debugging
 * these early MMU tables because the UART is not yet available.
 */
static inline void early_mmu_setup(void)
{
	unsigned int el = current_el();

	/* global data is already setup, no allocation yet */
	gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
	gd->arch.tlb_fillptr = gd->arch.tlb_addr;
	gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

	/* Create early page tables */
	setup_pgtables();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
			  get_tcr(el, NULL, NULL) &
			  ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
			  MEMORY_ATTRIBUTES);

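	/* Enable the MMU; the data cache is enabled separately by the caller */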
	set_sctlr(get_sctlr() | CR_M);
}

static void fix_pcie_mmu_map(void)
{
#ifdef CONFIG_ARCH_LS2080A
	unsigned int i;
	u32 svr, ver;
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	/* Fix PCIE base and size for LS2088A */
	if ((ver == SVR_LS2088A) || (ver == SVR_LS2084A) ||
	    (ver == SVR_LS2048A) || (ver == SVR_LS2044A) ||
	    (ver == SVR_LS2081A) || (ver == SVR_LS2041A)) {
		for (i = 0; i < ARRAY_SIZE(final_map); i++) {
			switch (final_map[i].phys) {
			case CONFIG_SYS_PCIE1_PHYS_ADDR:
				final_map[i].phys = 0x2000000000ULL;
				final_map[i].virt = 0x2000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE2_PHYS_ADDR:
				final_map[i].phys = 0x2800000000ULL;
				final_map[i].virt = 0x2800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE3_PHYS_ADDR:
				final_map[i].phys = 0x3000000000ULL;
				final_map[i].virt = 0x3000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE4_PHYS_ADDR:
				final_map[i].phys = 0x3800000000ULL;
				final_map[i].virt = 0x3800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			default:
				break;
			}
		}
	}
#endif
}

/*
 * The final tables look similar to the early tables, but differ in
 * detail. These tables are in DRAM. Sub-tables are added to enable
 * cache for QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM is not used for this purpose, so gd->arch.secure_ram can't be 0.
 */
static inline void final_mmu_setup(void)
{
	u64 tlb_addr_save = gd->arch.tlb_addr;
	unsigned int el = current_el();
	int index;

	/* fix the final_map before filling in the block entries */
	fix_pcie_mmu_map();

	mem_map = final_map;

	/* Update mapping for DDR to actual size */
	for (index = 0; index < ARRAY_SIZE(final_map) - 2; index++) {
		/*
		 * Find the entry for DDR mapping and update the address and
		 * size. Zero-sized mapping will be skipped when creating MMU
		 * table.
		 */
		switch (final_map[index].virt) {
		case CONFIG_SYS_FSL_DRAM_BASE1:
			final_map[index].virt = gd->bd->bi_dram[0].start;
			final_map[index].phys = gd->bd->bi_dram[0].start;
			final_map[index].size = gd->bd->bi_dram[0].size;
			break;
#ifdef CONFIG_SYS_FSL_DRAM_BASE2
		case CONFIG_SYS_FSL_DRAM_BASE2:
#if (CONFIG_NR_DRAM_BANKS >= 2)
			final_map[index].virt = gd->bd->bi_dram[1].start;
			final_map[index].phys = gd->bd->bi_dram[1].start;
			final_map[index].size = gd->bd->bi_dram[1].size;
#else
			final_map[index].size = 0;
#endif
		break;
#endif
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
		case CONFIG_SYS_FSL_DRAM_BASE3:
#if (CONFIG_NR_DRAM_BANKS >= 3)
			final_map[index].virt = gd->bd->bi_dram[2].start;
			final_map[index].phys = gd->bd->bi_dram[2].start;
			final_map[index].size = gd->bd->bi_dram[2].size;
#else
			final_map[index].size = 0;
#endif
		break;
#endif
		default:
			break;
		}
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		if (el == 3) {
			/*
			 * Only use gd->arch.secure_ram if the address is
			 * recalculated. Align to 4KB for MMU table.
			 */
			/* put page tables in secure ram */
			index = ARRAY_SIZE(final_map) - 2;
			gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
			final_map[index].virt = gd->arch.secure_ram & ~0x3;
			final_map[index].phys = final_map[index].virt;
			final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
			final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
			tlb_addr_save = gd->arch.tlb_addr;
		} else {
			/* Use allocated (board_f.c) memory for TLB */
			tlb_addr_save = gd->arch.tlb_allocated;
			gd->arch.tlb_addr = tlb_addr_save;
		}
	}
#endif

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr_save;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	setup_pgtables();
	gd->arch.tlb_addr = tlb_addr_save;

	/* Disable cache and MMU */
	dcache_disable();	/* TLBs are invalidated */
	invalidate_icache_all();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}

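/*
 * Override the generic page table size estimate; 64KB is enough to hold
 * both the final tables and the emergency tables created in
 * final_mmu_setup().
 */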
u64 get_page_table_size(void)
{
	return 0x10000;
}

int arch_cpu_init(void)
{
	/*
	 * This function is called before U-Boot relocates itself to speed
	 * up booting. Running it is not strictly necessary if performance
	 * is not critical. Skip it if the MMU is already enabled by SPL or
	 * other means.
	 */
	if (get_sctlr() & CR_M)
		return 0;

	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);
	return 0;
}

void mmu_setup(void)
{
	final_mmu_setup();
}

/*
 * This function is called from common/board_r.c.
 * It recreates the MMU tables in main memory.
 */
void enable_caches(void)
{
	mmu_setup();
	__asm_invalidate_tlb_all();
	icache_enable();
	dcache_enable();
}
#endif

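/*
 * Look up one initiator slot of a cluster in the RCW topology registers;
 * return the type word if the initiator is marked available (TP_ITYP_AV),
 * otherwise 0.
 */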
u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = 0;

	type = gur_in32(&gur->tp_ityp[idx]);
	if (type & TP_ITYP_AV)
		return type;

	return 0;
}

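/*
 * Return a mask of available ARM cores indexed by physical position:
 * bit (cluster * TP_INIT_PER_CLUSTER + initiator) is set for each core.
 */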
u32 cpu_pos_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
				mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

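/*
 * Return a mask of available ARM cores numbered sequentially across all
 * available initiators; this logical numbering matches the core argument
 * of fsl_qoriq_core_to_cluster() and fsl_qoriq_core_to_type() below.
 */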
u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

/*
 * Return the number of cores on this SoC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}

int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;      /* cannot identify the cluster */
}

u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;      /* cannot identify the cluster */
}

#ifndef CONFIG_FSL_LSCH3
uint get_svr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	return gur_in32(&gur->svr);
}
#endif

#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw, svr = gur_in32(&gur->svr);

	puts("SoC: ");

	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, svr);
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n       ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz  ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" :
		       (type == TY_ITYP_VER_A72 ? "A72" : "   "))),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	/* Display platform clock as Bus frequency. */
	printf("\n       Bus:      %-4s MHz  ",
	       strmhz(buf, sysinfo.freq_systembus / CONFIG_SYS_FSL_PCLK_DIV));
	printf("DDR:      %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
	printf("  FMAN:     %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
	if (soc_has_dp_ddr()) {
		printf("     DP-DDR:   %-4s MT/s",
		       strmhz(buf, sysinfo.freq_ddrbus2));
	}
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);
		if ((i % 4) == 0)
			printf("\n       %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif

#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(bd_t *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(bd_t *bis)
{
	int error = 0;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
	fm_standard_init(bis);
#endif
	return error;
}

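/* Return 1 if the secure firmware does not implement PSCI, 0 otherwise */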
static inline int check_psci(void)
{
	unsigned int psci_ver;

	psci_ver = sec_firmware_support_psci_version();
	if (psci_ver == PSCI_INVALID_VER)
		return 1;

	return 0;
}

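/*
 * Disable hardware prefetch on selected cores through an NXP SiP SMC
 * call, driven by the "core_prefetch" hwconfig option. For example
 * (assuming the usual hwconfig syntax):
 *
 *   => setenv hwconfig "core_prefetch:disable=0x3e"
 *
 * would request prefetch disable on cores 1-5. Core 0 can never be
 * disabled.
 */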
static void config_core_prefetch(void)
{
	char *buf = NULL;
	char buffer[HWCONFIG_BUFFER_SIZE];
	const char *prefetch_arg = NULL;
	size_t arglen;
	unsigned int mask;
	struct pt_regs regs;

	if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
		buf = buffer;

	prefetch_arg = hwconfig_subarg_f("core_prefetch", "disable",
					 &arglen, buf);

	if (prefetch_arg) {
		mask = simple_strtoul(prefetch_arg, NULL, 0) & 0xff;
		if (mask & 0x1) {
			printf("Core0 prefetch can't be disabled\n");
			return;
		}

#define SIP_PREFETCH_DISABLE_64 0xC200FF13
		regs.regs[0] = SIP_PREFETCH_DISABLE_64;
		regs.regs[1] = mask;
		smc_call(&regs);

		if (regs.regs[0])
			printf("Prefetch disable config failed for mask ");
		else
			printf("Prefetch disable config passed for mask ");
		printf("0x%x\n", mask);
	}
}

int arch_early_init_r(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	u32 svr_dev_id;
	/*
	 * erratum A009635 is valid only for LS2080A SoC and
	 * its personalities
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		erratum_a009635();
#endif
#if defined(CONFIG_SYS_FSL_ERRATUM_A009942) && defined(CONFIG_SYS_FSL_DDR)
	erratum_a009942_check_cpo();
#endif
	if (check_psci()) {
		debug("PSCI: PSCI does not exist.\n");

		/* if PSCI does not exist, boot secondary cores here */
		if (fsl_layerscape_wake_seconday_cores())
			printf("Did not wake secondary cores\n");
	}

#ifdef CONFIG_SYS_FSL_HAS_RGMII
	fsl_rgmii_init();
#endif

	config_core_prefetch();

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_FMAN_ENET
	fman_enet_init();
#endif
#ifdef CONFIG_SYS_DPAA_QBMAN
	setup_qbman_portals();
#endif
	return 0;
}

int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A)
	u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
	u32 svr_dev_id;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with accurate clock frequency */
	if (current_el() == 3)
		asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/* Enable timebase for all clusters.
	 * It is safe to do so even if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A)
	/*
	 * In certain Layerscape SoCs, the clock for each core
	 * has an enable bit in the PMU Physical Core Time Base Enable
	 * Register (PCTBENR), which allows the watchdog to operate.
	 */
	setbits_le32(pctbenr, 0xff);
	/*
	 * For LS2080A SoC and its personalities, the timer controller
	 * offset is different
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR;

#endif

	/* Enable clock for timer.
	 * This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}

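/*
 * Keep the reset control register pointer in __efi_runtime_data so it
 * remains valid for the EFI runtime reset service registered below.
 */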
__efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;

void __efi_runtime reset_cpu(ulong addr)
{
	u32 val;

	/* Raise RESET_REQ_B */
	val = scfg_in32(rstcr);
	val |= 0x02;
	scfg_out32(rstcr, val);
}

#ifdef CONFIG_EFI_LOADER

void __efi_runtime EFIAPI efi_reset_system(
		       enum efi_reset_type reset_type,
		       efi_status_t reset_status,
		       unsigned long data_size, void *reset_data)
{
	switch (reset_type) {
	case EFI_RESET_COLD:
	case EFI_RESET_WARM:
	case EFI_RESET_PLATFORM_SPECIFIC:
		reset_cpu(0);
		break;
	case EFI_RESET_SHUTDOWN:
		/* Nothing we can do */
		break;
	}

	while (1) { }
}

efi_status_t efi_reset_system_init(void)
{
	return efi_add_runtime_mmio(&rstcr, sizeof(*rstcr));
}

#endif

/*
 * Calculate reserved memory within the given memory bank.
 * Return the aligned reserved size on success.
 * Return (ram_size + needed size) on failure so the caller can detect
 * the shortfall.
 */
phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
	phys_size_t ram_top = ram_size;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	ram_top = mc_get_dram_block_size();
	if (ram_top > ram_size)
		return ram_size + ram_top;

	ram_top = ram_size - ram_top;
	/* The start address of MC reserved memory needs to be aligned. */
	ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

	return ram_size - ram_top;
}

phys_size_t get_effective_memsize(void)
{
	phys_size_t ea_size, rem = 0;

	/*
	 * For ARMv8 SoCs, DDR memory is split into two or three regions. The
	 * first region is the 2GB space at 0x8000_0000. Secure memory needs
	 * to be allocated from the first region. If the memory extends to
	 * the second region (or the third region if applicable), Management
	 * Complex (MC) memory should be put into the highest region, i.e.
	 * the end of DDR memory. CONFIG_MAX_MEM_MAPPED is set to the size of
	 * the first region so U-Boot doesn't relocate itself into a higher
	 * address. Should DDR be configured to skip the first region, this
	 * function needs to be adjusted.
	 */
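	/*
	 * Example: with 4GB of DDR and CONFIG_MAX_MEM_MAPPED set to 2GB,
	 * ea_size starts as 2GB and rem as 2GB; the secure and MC
	 * reservations below are then carved from the appropriate region.
	 */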
	if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) {
		ea_size = CONFIG_MAX_MEM_MAPPED;
		rem = gd->ram_size - ea_size;
	} else {
		ea_size = gd->ram_size;
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	/* Check if we have enough space for secure memory */
	if (ea_size > CONFIG_SYS_MEM_RESERVE_SECURE)
		ea_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	else
		printf("Error: Not enough space for secure memory.\n");
#endif
	/* Check if we have enough memory for MC */
	if (rem < board_reserve_ram_top(rem)) {
		/* Not enough memory in high region to reserve */
		if (ea_size > board_reserve_ram_top(ea_size))
			ea_size -= board_reserve_ram_top(ea_size);
		else
			printf("Error: Not enough space for reserved memory.\n");
	}

	return ea_size;
}

int dram_init_banksize(void)
{
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
	phys_size_t dp_ddr_size;
#endif

	/*
	 * gd->ram_size has the total size of DDR memory, less reserved secure
	 * memory. The DDR extends from the low region to the high region(s),
	 * presuming no hole is created by the DDR configuration.
	 * gd->arch.secure_ram tracks the location of secure memory.
	 * gd->arch.resv_ram tracks the location of memory reserved for the
	 * Management Complex (MC). Because this function reduces
	 * gd->ram_size if secure memory is reserved, check
	 * gd->arch.secure_ram to avoid running it repeatedly.
	 */

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		debug("No need to run again, skip %s\n", __func__);

		return 0;
	}
#endif

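	/*
	 * Split gd->ram_size across the fixed DDR block windows: bank 0 is
	 * the first block (typically the low 2GB on these SoCs); banks 1
	 * and 2 (if configured) take the remainder in the high region(s).
	 */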
	gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE;
	if (gd->ram_size > CONFIG_SYS_DDR_BLOCK1_SIZE) {
		gd->bd->bi_dram[0].size = CONFIG_SYS_DDR_BLOCK1_SIZE;
		gd->bd->bi_dram[1].start = CONFIG_SYS_DDR_BLOCK2_BASE;
		gd->bd->bi_dram[1].size = gd->ram_size -
					  CONFIG_SYS_DDR_BLOCK1_SIZE;
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
		if (gd->bd->bi_dram[1].size > CONFIG_SYS_DDR_BLOCK2_SIZE) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DDR_BLOCK3_BASE;
			gd->bd->bi_dram[2].size = gd->bd->bi_dram[1].size -
						  CONFIG_SYS_DDR_BLOCK2_SIZE;
			gd->bd->bi_dram[1].size = CONFIG_SYS_DDR_BLOCK2_SIZE;
		}
#endif
	} else {
		gd->bd->bi_dram[0].size = gd->ram_size;
	}
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->bd->bi_dram[0].size >
				CONFIG_SYS_MEM_RESERVE_SECURE) {
		gd->bd->bi_dram[0].size -=
				CONFIG_SYS_MEM_RESERVE_SECURE;
		gd->arch.secure_ram = gd->bd->bi_dram[0].start +
				      gd->bd->bi_dram[0].size;
		gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
		gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	}
#endif	/* CONFIG_SYS_MEM_RESERVE_SECURE */

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
			    gd->bd->bi_dram[2].size -
			    board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
				gd->bd->bi_dram[1].size -
				board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
				gd->bd->bi_dram[0].size -
				board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_FSL_MC_ENET */

#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
	if (soc_has_dp_ddr()) {
		/* initialize DP-DDR here */
		puts("DP-DDR:  ");
		/*
		 * The DDR controller uses 0 as the base address for binding.
		 * It is mapped to CONFIG_SYS_DP_DDR_BASE for the core to
		 * access.
		 */
		dp_ddr_size = fsl_other_ddr_sdram(CONFIG_SYS_DP_DDR_BASE_PHY,
					  CONFIG_DP_DDR_CTRL,
					  CONFIG_DP_DDR_NUM_CTRLS,
					  CONFIG_DP_DDR_DIMM_SLOTS_PER_CTLR,
					  NULL, NULL, NULL);
		if (dp_ddr_size) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DP_DDR_BASE;
			gd->bd->bi_dram[2].size = dp_ddr_size;
		} else {
			puts("Not detected");
		}
	}
#endif

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	debug("%s is called. gd->ram_size is reduced to %lu\n",
	      __func__, (ulong)gd->ram_size);
#endif

	return 0;
}

#if CONFIG_IS_ENABLED(EFI_LOADER)
void efi_add_known_memory(void)
{
	int i;
	phys_addr_t ram_start, start;
	phys_size_t ram_size;
	u64 pages;

	/* Add RAM */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
		if (i == 2)
			continue;	/* skip DP-DDR */
#endif
		ram_start = gd->bd->bi_dram[i].start;
		ram_size = gd->bd->bi_dram[i].size;
#ifdef CONFIG_RESV_RAM
		if (gd->arch.resv_ram >= ram_start &&
		    gd->arch.resv_ram < ram_start + ram_size)
			ram_size = gd->arch.resv_ram - ram_start;
#endif
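		/* Page-align the bank before adding it to the EFI memory map */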
		start = (ram_start + EFI_PAGE_MASK) & ~EFI_PAGE_MASK;
		pages = (ram_size + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT;

		efi_add_memory_map(start, pages, EFI_CONVENTIONAL_MEMORY,
				   false);
	}
}
#endif

/*
 * Before the DDR size is known, the early MMU tables have DDR mapped as
 * device memory to avoid speculative access. To relocate U-Boot to DDR,
 * "normal memory" needs to be set for these mappings.
 * If a special case configures DDR with holes in the mapping, the holes
 * need to be marked as invalid. This is not implemented in this function.
 */
void update_early_mmu_table(void)
{
	if (!gd->arch.tlb_addr)
		return;

	if (gd->ram_size <= CONFIG_SYS_FSL_DRAM_SIZE1) {
		mmu_change_region_attr(
					CONFIG_SYS_SDRAM_BASE,
					gd->ram_size,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
	} else {
		mmu_change_region_attr(
					CONFIG_SYS_SDRAM_BASE,
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#ifndef CONFIG_SYS_DDR_BLOCK2_SIZE
#error "Missing CONFIG_SYS_DDR_BLOCK2_SIZE"
#endif
		if (gd->ram_size - CONFIG_SYS_DDR_BLOCK1_SIZE >
		    CONFIG_SYS_DDR_BLOCK2_SIZE) {
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK3_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE -
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
		} else
#endif
		{
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
		}
	}
}

__weak int dram_init(void)
{
	fsl_initdram();
#if !defined(CONFIG_SPL) || defined(CONFIG_SPL_BUILD)
	/* This will break-before-make the MMU mapping for DDR */
	update_early_mmu_table();
#endif

	return 0;
}