/*
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <fsl_ddr_sdram.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#ifdef CONFIG_MP
#include <asm/arch/mp.h>
#endif
#include <efi_loader.h>
#include <fm_eth.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#ifdef CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT
#include <asm/armv8/sec_firmware.h>
#endif
#ifdef CONFIG_SYS_FSL_DDR
#include <fsl_ddr.h>
#endif

DECLARE_GLOBAL_DATA_PTR;

struct mm_region *mem_map = early_map;

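/*
 * Look up the SoC name from the SVR in the GUR (DCFG) block and write it
 * to @name, appending "E" for parts that report IS_E_PROCESSOR() and the
 * silicon revision. Writes "unknown" if the SVR is not in cpu_type_list.
 */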
void cpu_name(char *name)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	unsigned int i, svr, ver;

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
			strcpy(name, cpu_type_list[i].name);

			if (IS_E_PROCESSOR(svr))
				strcat(name, "E");

			sprintf(name + strlen(name), " Rev%d.%d",
				SVR_MAJ(svr), SVR_MIN(svr));
			break;
		}

	if (i == ARRAY_SIZE(cpu_type_list))
		strcpy(name, "unknown");
}

#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * To start the MMU before DDR is available, we create MMU tables in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover the 40-bit address space.
 * We use a 4KB granule size, with a 40-bit physical address and T0SZ=24.
 * Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for other purposes.
 * Note: the debug print in cache_v8.c is not usable for debugging
 * these early MMU tables because the UART is not yet available.
 */
static inline void early_mmu_setup(void)
{
	unsigned int el = current_el();

	/* global data is already setup, no allocation yet */
	gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
	gd->arch.tlb_fillptr = gd->arch.tlb_addr;
	gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

	/* Create early page tables */
	setup_pgtables();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
			  get_tcr(el, NULL, NULL) &
			  ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}

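/*
 * LS2088A-family personalities use a different PCIe physical address map
 * than the one compiled in via CONFIG_SYS_PCIEx_PHYS_ADDR, so patch the
 * affected final_map entries at run time based on the SVR.
 */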
static void fix_pcie_mmu_map(void)
{
#ifdef CONFIG_ARCH_LS2080A
	unsigned int i;
	u32 svr, ver;
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	/* Fix PCIE base and size for LS2088A */
	if ((ver == SVR_LS2088A) || (ver == SVR_LS2084A) ||
	    (ver == SVR_LS2048A) || (ver == SVR_LS2044A)) {
		for (i = 0; i < ARRAY_SIZE(final_map); i++) {
			switch (final_map[i].phys) {
			case CONFIG_SYS_PCIE1_PHYS_ADDR:
				final_map[i].phys = 0x2000000000ULL;
				final_map[i].virt = 0x2000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE2_PHYS_ADDR:
				final_map[i].phys = 0x2800000000ULL;
				final_map[i].virt = 0x2800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE3_PHYS_ADDR:
				final_map[i].phys = 0x3000000000ULL;
				final_map[i].virt = 0x3000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE4_PHYS_ADDR:
				final_map[i].phys = 0x3800000000ULL;
				final_map[i].virt = 0x3800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			default:
				break;
			}
		}
	}
#endif
}

/*
 * The final tables look similar to the early tables, but differ in detail.
 * These tables are in DRAM. Sub-tables are added to enable cache for
 * QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM will not be used for this purpose, so gd->arch.secure_ram can't be 0.
 */
static inline void final_mmu_setup(void)
{
	u64 tlb_addr_save = gd->arch.tlb_addr;
	unsigned int el = current_el();
	int index;

	/* fix the final_map before filling in the block entries */
	fix_pcie_mmu_map();

	mem_map = final_map;

	/* Update mapping for DDR to actual size */
	for (index = 0; index < ARRAY_SIZE(final_map) - 2; index++) {
		/*
		 * Find the entry for DDR mapping and update the address and
		 * size. Zero-sized mapping will be skipped when creating MMU
		 * table.
		 */
		switch (final_map[index].virt) {
		case CONFIG_SYS_FSL_DRAM_BASE1:
			final_map[index].virt = gd->bd->bi_dram[0].start;
			final_map[index].phys = gd->bd->bi_dram[0].start;
			final_map[index].size = gd->bd->bi_dram[0].size;
			break;
#ifdef CONFIG_SYS_FSL_DRAM_BASE2
		case CONFIG_SYS_FSL_DRAM_BASE2:
#if (CONFIG_NR_DRAM_BANKS >= 2)
			final_map[index].virt = gd->bd->bi_dram[1].start;
			final_map[index].phys = gd->bd->bi_dram[1].start;
			final_map[index].size = gd->bd->bi_dram[1].size;
#else
			final_map[index].size = 0;
#endif
		break;
#endif
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
		case CONFIG_SYS_FSL_DRAM_BASE3:
#if (CONFIG_NR_DRAM_BANKS >= 3)
			final_map[index].virt = gd->bd->bi_dram[2].start;
			final_map[index].phys = gd->bd->bi_dram[2].start;
			final_map[index].size = gd->bd->bi_dram[2].size;
#else
			final_map[index].size = 0;
#endif
		break;
#endif
		default:
			break;
		}
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		if (el == 3) {
			/*
			 * Only use gd->arch.secure_ram if the address is
			 * recalculated. Align to 4KB for MMU table.
			 */
			/* put page tables in secure ram */
			index = ARRAY_SIZE(final_map) - 2;
			gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
			final_map[index].virt = gd->arch.secure_ram & ~0x3;
			final_map[index].phys = final_map[index].virt;
			final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
			final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
			tlb_addr_save = gd->arch.tlb_addr;
		} else {
			/* Use allocated (board_f.c) memory for TLB */
			tlb_addr_save = gd->arch.tlb_allocated;
			gd->arch.tlb_addr = tlb_addr_save;
		}
	}
#endif

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr_save;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	setup_pgtables();
	gd->arch.tlb_addr = tlb_addr_save;

	/* Disable cache and MMU */
	dcache_disable();	/* TLBs are invalidated */
	invalidate_icache_all();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}

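/*
 * Tell the generic MMU code to reserve a fixed 64KB for the page tables
 * created by final_mmu_setup().
 */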
u64 get_page_table_size(void)
{
	return 0x10000;
}

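/*
 * Early CPU setup: enable the I-cache, invalidate stale cache/TLB state,
 * build the early MMU tables in OCRAM and enable the D-cache so the rest
 * of the pre-relocation boot runs cached before DDR is initialized.
 */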
int arch_cpu_init(void)
{
	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);
	return 0;
}

void mmu_setup(void)
{
	final_mmu_setup();
}

/*
 * This function is called from common/board_r.c.
 * It recreates the MMU table in main memory.
 */
void enable_caches(void)
{
	mmu_setup();
	__asm_invalidate_tlb_all();
	icache_enable();
	dcache_enable();
}
#endif

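/*
 * Return the type register value (TP_ITYP) for initiator @init_id in the
 * cluster described by @cluster, or 0 if that initiator is not available.
 */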
u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = 0;

	type = gur_in32(&gur->tp_ityp[idx]);
	if (type & TP_ITYP_AV)
		return type;

	return 0;
}

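/*
 * Return a bitmask of the ARM cores present, indexed by physical position
 * (cluster index * TP_INIT_PER_CLUSTER + initiator index).
 */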
u32 cpu_pos_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
				mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

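/*
 * Return a bitmask of the ARM cores present, numbered sequentially across
 * all available initiators (non-ARM initiators still consume a position).
 */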
u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

/*
 * Return the number of cores on this SoC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}

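/*
 * Map a logical core number to the index of the cluster containing it,
 * or return -1 if the core cannot be found.
 */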
int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;      /* cannot identify the cluster */
}

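/*
 * Return the initiator type register value (TP_ITYP) for a logical core
 * number, or -1 if the core cannot be found.
 */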
u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;      /* cannot identify the core type */
}

#ifndef CONFIG_FSL_LSCH3
uint get_svr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	return gur_in32(&gur->svr);
}
#endif

#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw, svr = gur_in32(&gur->svr);

	puts("SoC: ");

	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, svr);
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n       ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz  ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" :
		       (type == TY_ITYP_VER_A72 ? "A72" : "   "))),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	/* Display platform clock as Bus frequency. */
	printf("\n       Bus:      %-4s MHz  ",
	       strmhz(buf, sysinfo.freq_systembus / CONFIG_SYS_FSL_PCLK_DIV));
	printf("DDR:      %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
	printf("  FMAN:     %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
	if (soc_has_dp_ddr()) {
		printf("     DP-DDR:   %-4s MT/s",
		       strmhz(buf, sysinfo.freq_ddrbus2));
	}
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);
		if ((i % 4) == 0)
			printf("\n       %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif

#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(bd_t *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(bd_t *bis)
{
	int error = 0;

#ifdef CONFIG_FSL_MC_ENET
	error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
	fm_standard_init(bis);
#endif
	return error;
}

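/*
 * Post-relocation architecture init: apply the errata workarounds that
 * need DDR/SVR information, wake the secondary cores when no PSCI
 * firmware is available to do so, and initialize SerDes and FMan.
 */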
int arch_early_init_r(void)
{
#ifdef CONFIG_MP
	int rv = 1;
	u32 psci_ver = 0xffffffff;
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	u32 svr_dev_id;
	/*
	 * erratum A009635 is valid only for LS2080A SoC and
	 * its personalities
	 */
	svr_dev_id = get_svr() >> 16;
	if (svr_dev_id == SVR_DEV_LS2080A)
		erratum_a009635();
#endif
#if defined(CONFIG_SYS_FSL_ERRATUM_A009942) && defined(CONFIG_SYS_FSL_DDR)
	erratum_a009942_check_cpo();
#endif
#ifdef CONFIG_MP
#if defined(CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT) && \
	defined(CONFIG_SEC_FIRMWARE_ARMV8_PSCI)
	/* Check the PSCI version to determine whether PSCI is supported */
	psci_ver = sec_firmware_support_psci_version();
#endif
	if (psci_ver == 0xffffffff) {
		rv = fsl_layerscape_wake_seconday_cores();
		if (rv)
			printf("Did not wake secondary cores\n");
	}
#endif

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_FMAN_ENET
	fman_enet_init();
#endif
	return 0;
}

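/*
 * Set up the ARM generic timer: program the counter frequency where a
 * more accurate value is known, enable the cluster/core time base clocks
 * required by the SoC, and finally enable the global counter.
 */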
int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#ifdef CONFIG_ARCH_LS2080A
	u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
	u32 svr_dev_id;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with accurate clock frequency */
	asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/* Enable timebase for all clusters.
	 * It is safe to do so even if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

#ifdef CONFIG_ARCH_LS2080A
	/*
	 * In certain Layerscape SoCs, the clock for each core's
	 * watchdog has an enable bit in the PMU Physical Core Time Base
	 * Enable Register (PCTBENR), which allows the watchdog to operate.
	 */
	setbits_le32(pctbenr, 0xff);
	/*
	 * For the LS2080A SoC and its personalities, the timer controller
	 * offset is different.
	 */
	svr_dev_id = get_svr() >> 16;
	if (svr_dev_id == SVR_DEV_LS2080A)
		cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR;

#endif

	/* Enable clock for the timer.
	 * This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}

__efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;

void __efi_runtime reset_cpu(ulong addr)
{
	u32 val;

	/* Raise RESET_REQ_B */
	val = scfg_in32(rstcr);
	val |= 0x02;
	scfg_out32(rstcr, val);
}

#ifdef CONFIG_EFI_LOADER

void __efi_runtime EFIAPI efi_reset_system(
		       enum efi_reset_type reset_type,
		       efi_status_t reset_status,
		       unsigned long data_size, void *reset_data)
{
	switch (reset_type) {
	case EFI_RESET_COLD:
	case EFI_RESET_WARM:
		reset_cpu(0);
		break;
	case EFI_RESET_SHUTDOWN:
		/* Nothing we can do */
		break;
	}

	while (1) { }
}

void efi_reset_system_init(void)
{
	efi_add_runtime_mmio(&rstcr, sizeof(*rstcr));
}

#endif

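/*
 * Return how much memory to carve out from the top of RAM. When
 * CONFIG_FSL_MC_ENET is enabled this reserves an aligned block for the
 * Management Complex (MC); otherwise nothing is reserved.
 */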
phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
	phys_size_t ram_top = ram_size;

#ifdef CONFIG_FSL_MC_ENET
	/* The start address of MC reserved memory needs to be aligned. */
	ram_top -= mc_get_dram_block_size();
	ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

	return ram_size - ram_top;
}

phys_size_t get_effective_memsize(void)
{
	phys_size_t ea_size, rem = 0;

	/*
	 * For ARMv8 SoCs, DDR memory is split into two or three regions. The
	 * first region is 2GB space at 0x8000_0000. If the memory extends to
	 * the second region (or the third region if applicable), the secure
	 * memory and Management Complex (MC) memory should be put into the
	 * highest region, i.e. the end of DDR memory. CONFIG_MAX_MEM_MAPPED
	 * is set to the size of the first region so U-Boot doesn't relocate
	 * itself into a higher address. Should DDR be configured to skip the
	 * first region, this function needs to be adjusted.
	 */
	if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) {
		ea_size = CONFIG_MAX_MEM_MAPPED;
		rem = gd->ram_size - ea_size;
	} else {
		ea_size = gd->ram_size;
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	/* Check if we have enough space for secure memory */
	if (rem > CONFIG_SYS_MEM_RESERVE_SECURE) {
		rem -= CONFIG_SYS_MEM_RESERVE_SECURE;
	} else {
		if (ea_size > CONFIG_SYS_MEM_RESERVE_SECURE) {
			ea_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
			rem = 0;	/* Presume MC requires more memory */
		} else {
			printf("Error: Not enough space for secure memory.\n");
		}
	}
#endif
	/* Check if we have enough memory for MC */
	if (rem < board_reserve_ram_top(rem)) {
		/* Not enough memory in high region to reserve */
		if (ea_size > board_reserve_ram_top(rem))
			ea_size -= board_reserve_ram_top(rem);
		else
			printf("Error: Not enough space for reserved memory.\n");
	}

	return ea_size;
}

int dram_init_banksize(void)
{
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
	phys_size_t dp_ddr_size;
#endif

	/*
	 * gd->ram_size has the total size of DDR memory, less reserved secure
	 * memory. The DDR extends from the low region to the high region(s),
	 * presuming no hole is created by the DDR configuration.
	 * gd->arch.secure_ram tracks the location of secure memory.
	 * gd->arch.resv_ram tracks the location of reserved memory for the
	 * Management Complex (MC).
	 */
	gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE;
	if (gd->ram_size > CONFIG_SYS_DDR_BLOCK1_SIZE) {
		gd->bd->bi_dram[0].size = CONFIG_SYS_DDR_BLOCK1_SIZE;
		gd->bd->bi_dram[1].start = CONFIG_SYS_DDR_BLOCK2_BASE;
		gd->bd->bi_dram[1].size = gd->ram_size -
					  CONFIG_SYS_DDR_BLOCK1_SIZE;
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
		if (gd->bd->bi_dram[1].size > CONFIG_SYS_DDR_BLOCK2_SIZE) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DDR_BLOCK3_BASE;
			gd->bd->bi_dram[2].size = gd->bd->bi_dram[1].size -
						  CONFIG_SYS_DDR_BLOCK2_SIZE;
			gd->bd->bi_dram[1].size = CONFIG_SYS_DDR_BLOCK2_SIZE;
		}
#endif
	} else {
		gd->bd->bi_dram[0].size = gd->ram_size;
	}
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >= CONFIG_SYS_MEM_RESERVE_SECURE) {
		gd->bd->bi_dram[2].size -= CONFIG_SYS_MEM_RESERVE_SECURE;
		gd->arch.secure_ram = gd->bd->bi_dram[2].start +
				      gd->bd->bi_dram[2].size;
		gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
		gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >= CONFIG_SYS_MEM_RESERVE_SECURE) {
			gd->bd->bi_dram[1].size -=
					CONFIG_SYS_MEM_RESERVE_SECURE;
			gd->arch.secure_ram = gd->bd->bi_dram[1].start +
					      gd->bd->bi_dram[1].size;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
			gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
		} else if (gd->bd->bi_dram[0].size >
					CONFIG_SYS_MEM_RESERVE_SECURE) {
			gd->bd->bi_dram[0].size -=
					CONFIG_SYS_MEM_RESERVE_SECURE;
			gd->arch.secure_ram = gd->bd->bi_dram[0].start +
					      gd->bd->bi_dram[0].size;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
			gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
		}
	}
#endif	/* CONFIG_SYS_MEM_RESERVE_SECURE */

#ifdef CONFIG_FSL_MC_ENET
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
			    gd->bd->bi_dram[2].size -
			    board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
				gd->bd->bi_dram[1].size -
				board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
				gd->bd->bi_dram[0].size -
				board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_FSL_MC_ENET */

#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
	if (soc_has_dp_ddr()) {
		/* initialize DP-DDR here */
		puts("DP-DDR:  ");
		/*
		 * The DDR controller uses 0 as the base address for binding.
		 * It is mapped to CONFIG_SYS_DP_DDR_BASE for the core to
		 * access.
		 */
		dp_ddr_size = fsl_other_ddr_sdram(CONFIG_SYS_DP_DDR_BASE_PHY,
					  CONFIG_DP_DDR_CTRL,
					  CONFIG_DP_DDR_NUM_CTRLS,
					  CONFIG_DP_DDR_DIMM_SLOTS_PER_CTLR,
					  NULL, NULL, NULL);
		if (dp_ddr_size) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DP_DDR_BASE;
			gd->bd->bi_dram[2].size = dp_ddr_size;
		} else {
			puts("Not detected");
		}
	}
#endif

	return 0;
}

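/*
 * Add the DRAM banks to the EFI memory map, skipping DP-DDR and trimming
 * any bank that contains the MC reserved region (gd->arch.resv_ram).
 */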
#if defined(CONFIG_EFI_LOADER) && !defined(CONFIG_SPL_BUILD)
void efi_add_known_memory(void)
{
	int i;
	phys_addr_t ram_start, start;
	phys_size_t ram_size;
	u64 pages;

	/* Add RAM */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
		if (i == 2)
			continue;	/* skip DP-DDR */
#endif
		ram_start = gd->bd->bi_dram[i].start;
		ram_size = gd->bd->bi_dram[i].size;
#ifdef CONFIG_RESV_RAM
		if (gd->arch.resv_ram >= ram_start &&
		    gd->arch.resv_ram < ram_start + ram_size)
			ram_size = gd->arch.resv_ram - ram_start;
#endif
		start = (ram_start + EFI_PAGE_MASK) & ~EFI_PAGE_MASK;
		pages = (ram_size + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT;

		efi_add_memory_map(start, pages, EFI_CONVENTIONAL_MEMORY,
				   false);
	}
}
#endif

/*
 * Before the DDR size is known, the early MMU table has DDR mapped as
 * device memory to avoid speculative access. To relocate U-Boot to DDR,
 * "normal memory" needs to be set for these mappings.
 * If a special case configures DDR with holes in the mapping, the holes
 * need to be marked as invalid. This is not implemented in this function.
 */
void update_early_mmu_table(void)
{
	if (!gd->arch.tlb_addr)
		return;

	if (gd->ram_size <= CONFIG_SYS_FSL_DRAM_SIZE1) {
		mmu_change_region_attr(
					CONFIG_SYS_SDRAM_BASE,
					gd->ram_size,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
	} else {
		mmu_change_region_attr(
					CONFIG_SYS_SDRAM_BASE,
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#ifndef CONFIG_SYS_DDR_BLOCK2_SIZE
#error "Missing CONFIG_SYS_DDR_BLOCK2_SIZE"
#endif
		if (gd->ram_size - CONFIG_SYS_DDR_BLOCK1_SIZE >
		    CONFIG_SYS_DDR_BLOCK2_SIZE) {
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK3_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE -
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
		} else
#endif
		{
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
		}
	}
}

__weak int dram_init(void)
{
	fsl_initdram();
#if !defined(CONFIG_SPL) || defined(CONFIG_SPL_BUILD)
	/* This will break-before-make MMU for DDR */
	update_early_mmu_table();
#endif

	return 0;
}