// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2017 NXP
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 */

#include <common.h>
#include <fsl_ddr_sdram.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <fm_eth.h>
#include <asm/armv8/mmu.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#include <fsl_immap.h>
#include <asm/arch/mp.h>
#include <efi_loader.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#include <asm/armv8/sec_firmware.h>
#ifdef CONFIG_SYS_FSL_DDR
#include <fsl_ddr.h>
#endif
#include <asm/arch/clock.h>
#include <hwconfig.h>
#include <fsl_qbman.h>

#ifdef CONFIG_TFABOOT
#include <environment.h>
#ifdef CONFIG_CHAIN_OF_TRUST
#include <fsl_validate.h>
#endif
#endif

DECLARE_GLOBAL_DATA_PTR;

static struct cpu_type cpu_type_list[] = {
	CPU_TYPE_ENTRY(LS2080A, LS2080A, 8),
	CPU_TYPE_ENTRY(LS2085A, LS2085A, 8),
	CPU_TYPE_ENTRY(LS2045A, LS2045A, 4),
	CPU_TYPE_ENTRY(LS2088A, LS2088A, 8),
	CPU_TYPE_ENTRY(LS2084A, LS2084A, 8),
	CPU_TYPE_ENTRY(LS2048A, LS2048A, 4),
	CPU_TYPE_ENTRY(LS2044A, LS2044A, 4),
	CPU_TYPE_ENTRY(LS2081A, LS2081A, 8),
	CPU_TYPE_ENTRY(LS2041A, LS2041A, 4),
	CPU_TYPE_ENTRY(LS1043A, LS1043A, 4),
	CPU_TYPE_ENTRY(LS1043A, LS1043A_P23, 4),
	CPU_TYPE_ENTRY(LS1023A, LS1023A, 2),
	CPU_TYPE_ENTRY(LS1023A, LS1023A_P23, 2),
	CPU_TYPE_ENTRY(LS1046A, LS1046A, 4),
	CPU_TYPE_ENTRY(LS1026A, LS1026A, 2),
	CPU_TYPE_ENTRY(LS2040A, LS2040A, 4),
	CPU_TYPE_ENTRY(LS1012A, LS1012A, 1),
	CPU_TYPE_ENTRY(LS1088A, LS1088A, 8),
	CPU_TYPE_ENTRY(LS1084A, LS1084A, 8),
	CPU_TYPE_ENTRY(LS1048A, LS1048A, 4),
	CPU_TYPE_ENTRY(LS1044A, LS1044A, 4),
	CPU_TYPE_ENTRY(LX2160A, LX2160A, 16),
	CPU_TYPE_ENTRY(LX2120A, LX2120A, 12),
	CPU_TYPE_ENTRY(LX2080A, LX2080A, 8),
};

#define EARLY_PGTABLE_SIZE 0x5000
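
/*
 * MMU map used before DDR is initialized. It lives in OCRAM (see
 * early_mmu_setup() below); DRAM starts out mapped as device memory
 * with PXN/UXN set to prevent speculative access.
 */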
static struct mm_region early_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
	  CONFIG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE},
#ifdef CONFIG_FSL_IFC
	/* For IFC Region #1, only the first 4MB is cache-enabled */
	{ CONFIG_SYS_FSL_IFC_BASE1, CONFIG_SYS_FSL_IFC_BASE1,
	  CONFIG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
	  CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
	  CONFIG_SYS_FSL_IFC_SIZE1 - CONFIG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FLASH_BASE, CONFIG_SYS_FSL_IFC_BASE1,
	  CONFIG_SYS_FSL_IFC_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
	(defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_FSL_IFC
	/* Map IFC region #2 up to CONFIG_SYS_FLASH_BASE for NAND boot */
	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
	  CONFIG_SYS_FLASH_BASE - CONFIG_SYS_FSL_IFC_BASE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#elif defined(CONFIG_FSL_LSCH2)
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
	  CONFIG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
	  CONFIG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
	(defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
	{},	/* list terminator */
};
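
/*
 * MMU map used after relocation. final_mmu_setup() rebuilds these tables
 * in DRAM and patches the DDR entries to the actual bank sizes.
 */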
static struct mm_region final_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
	  CONFIG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE2, CONFIG_SYS_FSL_QSPI_BASE2,
	  CONFIG_SYS_FSL_QSPI_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
	  CONFIG_SYS_FSL_IFC_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_MC_BASE, CONFIG_SYS_FSL_MC_BASE,
	  CONFIG_SYS_FSL_MC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_NI_BASE, CONFIG_SYS_FSL_NI_BASE,
	  CONFIG_SYS_FSL_NI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	/* For QBMAN portal, only the first 64MB is cache-enabled */
	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
	  CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  CONFIG_SYS_FSL_QBMAN_SIZE - CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
	  CONFIG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
	  CONFIG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
	  CONFIG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LX2160A)
	{ CONFIG_SYS_PCIE4_PHYS_ADDR, CONFIG_SYS_PCIE4_PHYS_ADDR,
	  CONFIG_SYS_PCIE4_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_WRIOP1_BASE, CONFIG_SYS_FSL_WRIOP1_BASE,
	  CONFIG_SYS_FSL_WRIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_AIOP1_BASE, CONFIG_SYS_FSL_AIOP1_BASE,
	  CONFIG_SYS_FSL_AIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_PEBUF_BASE, CONFIG_SYS_FSL_PEBUF_BASE,
	  CONFIG_SYS_FSL_PEBUF_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#elif defined(CONFIG_FSL_LSCH2)
	{ CONFIG_SYS_FSL_BOOTROM_BASE, CONFIG_SYS_FSL_BOOTROM_BASE,
	  CONFIG_SYS_FSL_BOOTROM_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
	  CONFIG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
	  CONFIG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
	  CONFIG_SYS_FSL_QBMAN_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
	  CONFIG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
	  CONFIG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
	  CONFIG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	{},	/* space holder for secure mem */
#endif
	{},
};
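
/* Start with the early map; final_mmu_setup() switches this to final_map. */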
struct mm_region *mem_map = early_map;

void cpu_name(char *name)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	unsigned int i, svr, ver;

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
			strcpy(name, cpu_type_list[i].name);
#ifdef CONFIG_ARCH_LX2160A
			if (IS_C_PROCESSOR(svr))
				strcat(name, "C");
#endif
			if (IS_E_PROCESSOR(svr))
				strcat(name, "E");

			sprintf(name + strlen(name), " Rev%d.%d",
				SVR_MAJ(svr), SVR_MIN(svr));
			break;
		}

	if (i == ARRAY_SIZE(cpu_type_list))
		strcpy(name, "unknown");
}

#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * To start the MMU before DDR is available, we create the MMU table in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover the 40-bit address space.
 * We use a 4KB granule size, with 40 bits physical address and T0SZ=24.
 * Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for other purposes.
 * Note, the debug print in cache_v8.c is not usable for debugging
 * these early MMU tables because UART is not yet available.
 */
static inline void early_mmu_setup(void)
{
	unsigned int el = current_el();

	/* global data is already setup, no allocation yet */
	if (el == 3)
		gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
	else
		gd->arch.tlb_addr = CONFIG_SYS_DDR_SDRAM_BASE;
	gd->arch.tlb_fillptr = gd->arch.tlb_addr;
	gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

	/* Create early page tables */
	setup_pgtables();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
			  get_tcr(el, NULL, NULL) &
			  ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}
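
/*
 * LS2088A-series personalities share the LS2080A build but use different
 * PCIe windows; patch final_map at runtime based on the SVR.
 */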
static void fix_pcie_mmu_map(void)
{
#ifdef CONFIG_ARCH_LS2080A
	unsigned int i;
	u32 svr, ver;
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	/* Fix PCIe base and size for LS2088A */
	if ((ver == SVR_LS2088A) || (ver == SVR_LS2084A) ||
	    (ver == SVR_LS2048A) || (ver == SVR_LS2044A) ||
	    (ver == SVR_LS2081A) || (ver == SVR_LS2041A)) {
		for (i = 0; i < ARRAY_SIZE(final_map); i++) {
			switch (final_map[i].phys) {
			case CONFIG_SYS_PCIE1_PHYS_ADDR:
				final_map[i].phys = 0x2000000000ULL;
				final_map[i].virt = 0x2000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE2_PHYS_ADDR:
				final_map[i].phys = 0x2800000000ULL;
				final_map[i].virt = 0x2800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE3_PHYS_ADDR:
				final_map[i].phys = 0x3000000000ULL;
				final_map[i].virt = 0x3000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE4_PHYS_ADDR:
				final_map[i].phys = 0x3800000000ULL;
				final_map[i].virt = 0x3800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			default:
				break;
			}
		}
	}
#endif
}

/*
 * The final tables look similar to early tables, but different in detail.
 * These tables are in DRAM. Sub tables are added to enable cache for
 * QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM will not be used for this purpose so gd->arch.secure_ram can't be 0.
 */
static inline void final_mmu_setup(void)
{
	u64 tlb_addr_save = gd->arch.tlb_addr;
	unsigned int el = current_el();
	int index;

	/* fix the final_map before filling in the block entries */
	fix_pcie_mmu_map();

	mem_map = final_map;

	/* Update mapping for DDR to actual size */
	for (index = 0; index < ARRAY_SIZE(final_map) - 2; index++) {
		/*
		 * Find the entry for DDR mapping and update the address and
		 * size. Zero-sized mapping will be skipped when creating MMU
		 * table.
		 */
		switch (final_map[index].virt) {
		case CONFIG_SYS_FSL_DRAM_BASE1:
			final_map[index].virt = gd->bd->bi_dram[0].start;
			final_map[index].phys = gd->bd->bi_dram[0].start;
			final_map[index].size = gd->bd->bi_dram[0].size;
			break;
#ifdef CONFIG_SYS_FSL_DRAM_BASE2
		case CONFIG_SYS_FSL_DRAM_BASE2:
#if (CONFIG_NR_DRAM_BANKS >= 2)
			final_map[index].virt = gd->bd->bi_dram[1].start;
			final_map[index].phys = gd->bd->bi_dram[1].start;
			final_map[index].size = gd->bd->bi_dram[1].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
		case CONFIG_SYS_FSL_DRAM_BASE3:
#if (CONFIG_NR_DRAM_BANKS >= 3)
			final_map[index].virt = gd->bd->bi_dram[2].start;
			final_map[index].phys = gd->bd->bi_dram[2].start;
			final_map[index].size = gd->bd->bi_dram[2].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
		default:
			break;
		}
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		if (el == 3) {
			/*
			 * Only use gd->arch.secure_ram if the address is
			 * recalculated. Align to 4KB for MMU table.
			 */
			/* put page tables in secure ram */
			index = ARRAY_SIZE(final_map) - 2;
			gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
			final_map[index].virt = gd->arch.secure_ram & ~0x3;
			final_map[index].phys = final_map[index].virt;
			final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
			final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
			tlb_addr_save = gd->arch.tlb_addr;
		} else {
			/* Use allocated (board_f.c) memory for TLB */
			tlb_addr_save = gd->arch.tlb_allocated;
			gd->arch.tlb_addr = tlb_addr_save;
		}
	}
#endif

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr_save;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	setup_pgtables();
	gd->arch.tlb_addr = tlb_addr_save;

	/* Disable cache and MMU */
	dcache_disable();	/* TLBs are invalidated */
	invalidate_icache_all();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}
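
/*
 * Fixed reservation (64KB) for the final MMU tables; the generic
 * relocation code uses this when carving out memory for gd->arch.tlb_addr.
 */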
u64 get_page_table_size(void)
{
	return 0x10000;
}

int arch_cpu_init(void)
{
	/*
	 * This function is called before U-Boot relocates itself to speed up
	 * booting. It is not necessary to run if performance is not
	 * critical. Skip if MMU is already enabled by SPL or other means.
	 */
	if (get_sctlr() & CR_M)
		return 0;

	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);
	return 0;
}

void mmu_setup(void)
{
	final_mmu_setup();
}

/*
 * This function is called from common/board_r.c.
 * It recreates the MMU table in main memory.
 */
void enable_caches(void)
{
	mmu_setup();
	__asm_invalidate_tlb_all();
	icache_enable();
	dcache_enable();
}
#endif	/* CONFIG_SYS_DCACHE_OFF */

#ifdef CONFIG_TFABOOT
enum boot_src __get_boot_src(u32 porsr1)
{
	enum boot_src src = BOOT_SOURCE_RESERVED;
	u32 rcw_src = (porsr1 & RCW_SRC_MASK) >> RCW_SRC_BIT;
#if !defined(CONFIG_NXP_LSCH3_2)
	u32 val;
#endif
	debug("%s: rcw_src 0x%x\n", __func__, rcw_src);

#if defined(CONFIG_FSL_LSCH3)
#if defined(CONFIG_NXP_LSCH3_2)
	switch (rcw_src) {
	case RCW_SRC_SDHC1_VAL:
		src = BOOT_SOURCE_SD_MMC;
		break;
	case RCW_SRC_SDHC2_VAL:
		src = BOOT_SOURCE_SD_MMC2;
		break;
	case RCW_SRC_I2C1_VAL:
		src = BOOT_SOURCE_I2C1_EXTENDED;
		break;
	case RCW_SRC_FLEXSPI_NAND2K_VAL:
		src = BOOT_SOURCE_XSPI_NAND;
		break;
	case RCW_SRC_FLEXSPI_NAND4K_VAL:
		src = BOOT_SOURCE_XSPI_NAND;
		break;
	case RCW_SRC_RESERVED_1_VAL:
		src = BOOT_SOURCE_RESERVED;
		break;
	case RCW_SRC_FLEXSPI_NOR_24B:
		src = BOOT_SOURCE_XSPI_NOR;
		break;
	default:
		src = BOOT_SOURCE_RESERVED;
	}
#else
	val = rcw_src & RCW_SRC_TYPE_MASK;
	if (val == RCW_SRC_NOR_VAL) {
		val = rcw_src & NOR_TYPE_MASK;

		switch (val) {
		case NOR_16B_VAL:
		case NOR_32B_VAL:
			src = BOOT_SOURCE_IFC_NOR;
			break;
		default:
			src = BOOT_SOURCE_RESERVED;
		}
	} else {
		/* RCW SRC Serial Flash */
		val = rcw_src & RCW_SRC_SERIAL_MASK;
		switch (val) {
		case RCW_SRC_QSPI_VAL:
			/* RCW SRC Serial NOR (QSPI) */
			src = BOOT_SOURCE_QSPI_NOR;
			break;
		case RCW_SRC_SD_CARD_VAL:
			/* RCW SRC SD Card */
			src = BOOT_SOURCE_SD_MMC;
			break;
		case RCW_SRC_EMMC_VAL:
			/* RCW SRC EMMC */
			src = BOOT_SOURCE_SD_MMC;
			break;
		case RCW_SRC_I2C1_VAL:
			/* RCW SRC I2C1 Extended */
			src = BOOT_SOURCE_I2C1_EXTENDED;
			break;
		default:
			src = BOOT_SOURCE_RESERVED;
		}
	}
#endif
#elif defined(CONFIG_FSL_LSCH2)
	/* RCW SRC NAND */
	val = rcw_src & RCW_SRC_NAND_MASK;
	if (val == RCW_SRC_NAND_VAL) {
		val = rcw_src & NAND_RESERVED_MASK;
		if (val != NAND_RESERVED_1 && val != NAND_RESERVED_2)
			src = BOOT_SOURCE_IFC_NAND;
	} else {
		/* RCW SRC NOR */
		val = rcw_src & RCW_SRC_NOR_MASK;
		if (val == NOR_8B_VAL || val == NOR_16B_VAL) {
			src = BOOT_SOURCE_IFC_NOR;
		} else {
			switch (rcw_src) {
			case QSPI_VAL1:
			case QSPI_VAL2:
				src = BOOT_SOURCE_QSPI_NOR;
				break;
			case SD_VAL:
				src = BOOT_SOURCE_SD_MMC;
				break;
			default:
				src = BOOT_SOURCE_RESERVED;
			}
		}
	}
#endif

	if (CONFIG_IS_ENABLED(SYS_FSL_ERRATUM_A010539) && !rcw_src)
		src = BOOT_SOURCE_QSPI_NOR;

	debug("%s: src 0x%x\n", __func__, src);
	return src;
}
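
/*
 * Read PORSR1 (via an SMC to secure firmware when running at EL2, or
 * directly from CCSR at EL3) and decode the boot source from it.
 */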
enum boot_src get_boot_src(void)
{
	struct pt_regs regs;
	u32 porsr1 = 0;

#if defined(CONFIG_FSL_LSCH3)
	u32 __iomem *dcfg_ccsr = (u32 __iomem *)DCFG_BASE;
#elif defined(CONFIG_FSL_LSCH2)
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
#endif

	if (current_el() == 2) {
		regs.regs[0] = SIP_SVC_RCW;

		smc_call(&regs);
		if (!regs.regs[0])
			porsr1 = regs.regs[1];
	}

	if (current_el() == 3 || !porsr1) {
#ifdef CONFIG_FSL_LSCH3
		porsr1 = in_le32(dcfg_ccsr + DCFG_PORSR1 / 4);
#elif defined(CONFIG_FSL_LSCH2)
		porsr1 = in_be32(&gur->porsr1);
#endif
	}

	debug("%s: porsr1 0x%x\n", __func__, porsr1);

	return __get_boot_src(porsr1);
}

#ifdef CONFIG_ENV_IS_IN_MMC
int mmc_get_env_dev(void)
{
	enum boot_src src = get_boot_src();
	int dev = CONFIG_SYS_MMC_ENV_DEV;

	switch (src) {
	case BOOT_SOURCE_SD_MMC:
		dev = 0;
		break;
	case BOOT_SOURCE_SD_MMC2:
		dev = 1;
		break;
	default:
		break;
	}

	return dev;
}
#endif

enum env_location env_get_location(enum env_operation op, int prio)
{
	enum boot_src src = get_boot_src();
	enum env_location env_loc = ENVL_NOWHERE;

	if (prio)
		return ENVL_UNKNOWN;

#ifdef CONFIG_CHAIN_OF_TRUST
	/* Check Boot Mode
	 * If Boot Mode is Secure, return ENVL_NOWHERE
	 */
	if (fsl_check_boot_mode_secure() == 1)
		goto done;
#endif

	switch (src) {
	case BOOT_SOURCE_IFC_NOR:
		env_loc = ENVL_FLASH;
		break;
	case BOOT_SOURCE_QSPI_NOR:
		/* FALLTHROUGH */
	case BOOT_SOURCE_XSPI_NOR:
		env_loc = ENVL_SPI_FLASH;
		break;
	case BOOT_SOURCE_IFC_NAND:
		/* FALLTHROUGH */
	case BOOT_SOURCE_QSPI_NAND:
		/* FALLTHROUGH */
	case BOOT_SOURCE_XSPI_NAND:
		env_loc = ENVL_NAND;
		break;
	case BOOT_SOURCE_SD_MMC:
		/* FALLTHROUGH */
	case BOOT_SOURCE_SD_MMC2:
		env_loc = ENVL_MMC;
		break;
	case BOOT_SOURCE_I2C1_EXTENDED:
		/* FALLTHROUGH */
	default:
		break;
	}

#ifdef CONFIG_CHAIN_OF_TRUST
done:
#endif
	return env_loc;
}
#endif	/* CONFIG_TFABOOT */
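
/*
 * Return the initiator type register value for initiator init_id of the
 * given cluster, or 0 if that position is not available (TP_ITYP_AV not
 * set).
 */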
u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = 0;

	type = gur_in32(&gur->tp_ityp[idx]);
	if (type & TP_ITYP_AV)
		return type;

	return 0;
}

u32 cpu_pos_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
				mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

/*
 * Return the number of cores on this SoC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}

int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}
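
/*
 * Return the raw initiator type for logical core number 'core', counting
 * enabled initiators across clusters; (u32)-1 if the core is not found.
 */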
u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the core type */
}

#ifndef CONFIG_FSL_LSCH3
uint get_svr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	return gur_in32(&gur->svr);
}
#endif

#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw, svr = gur_in32(&gur->svr);

	puts("SoC: ");

	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, svr);
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n       ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz  ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" :
		       (type == TY_ITYP_VER_A72 ? "A72" : "   "))),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	/* Display platform clock as Bus frequency. */
	printf("\n       Bus:      %-4s MHz  ",
	       strmhz(buf, sysinfo.freq_systembus / CONFIG_SYS_FSL_PCLK_DIV));
	printf("DDR:      %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
	printf("  FMAN:     %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
	if (soc_has_dp_ddr()) {
		printf("     DP-DDR:   %-4s MT/s",
		       strmhz(buf, sysinfo.freq_ddrbus2));
	}
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);
		if ((i % 4) == 0)
			printf("\n       %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif

#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(bd_t *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(bd_t *bis)
{
	int error = 0;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
	fm_standard_init(bis);
#endif
	return error;
}

static inline int check_psci(void)
{
	unsigned int psci_ver;

	psci_ver = sec_firmware_support_psci_version();
	if (psci_ver == PSCI_INVALID_VER)
		return 1;

	return 0;
}

static void config_core_prefetch(void)
{
	char *buf = NULL;
	char buffer[HWCONFIG_BUFFER_SIZE];
	const char *prefetch_arg = NULL;
	size_t arglen;
	unsigned int mask;
	struct pt_regs regs;

	if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
		buf = buffer;

	prefetch_arg = hwconfig_subarg_f("core_prefetch", "disable",
					 &arglen, buf);

	if (prefetch_arg) {
		mask = simple_strtoul(prefetch_arg, NULL, 0) & 0xff;
		if (mask & 0x1) {
			printf("Core0 prefetch can't be disabled\n");
			return;
		}

#define SIP_PREFETCH_DISABLE_64 0xC200FF13
		regs.regs[0] = SIP_PREFETCH_DISABLE_64;
		regs.regs[1] = mask;
		smc_call(&regs);

		if (regs.regs[0])
			printf("Prefetch disable config failed for mask ");
		else
			printf("Prefetch disable config passed for mask ");
		printf("0x%x\n", mask);
	}
}
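
/*
 * Example (hwconfig syntax): "setenv hwconfig core_prefetch:disable=0x3e"
 * asks the secure firmware to disable prefetch on cores 1-5. Bit 0
 * (core 0) must remain enabled, and the mask covers at most 8 cores (0xff).
 */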
int arch_early_init_r(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	u32 svr_dev_id;
	/*
	 * Erratum A009635 is valid only for the LS2080A SoC and
	 * its personalities.
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		erratum_a009635();
#endif
#if defined(CONFIG_SYS_FSL_ERRATUM_A009942) && defined(CONFIG_SYS_FSL_DDR)
	erratum_a009942_check_cpo();
#endif
	if (check_psci()) {
		debug("PSCI: PSCI does not exist.\n");

		/* if PSCI does not exist, boot secondary cores here */
		if (fsl_layerscape_wake_seconday_cores())
			printf("Did not wake secondary cores\n");
	}

	config_core_prefetch();

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_SYS_FSL_HAS_RGMII
	/*
	 * Some dpmacs in armv8a based Freescale Layerscape SoCs can be
	 * configured both via serdes (sgmii, xfi, xlaui etc) bits and via
	 * EC*_PMUX (rgmii) bits in the RCW.
	 * E.g. dpmacs 17 and 18 in LX2160A can be configured as SGMII from
	 * serdes bits and as RGMII via EC1_PMUX/EC2_PMUX bits.
	 * If a dpmac is enabled by serdes bits then that takes precedence
	 * over the EC*_PMUX bits, i.e. in LX2160A if we select a serdes
	 * protocol that configures dpmac17 as SGMII and also set EC1_PMUX
	 * to RGMII, the dpmac is SGMII and not RGMII.
	 *
	 * Therefore, run fsl_rgmii_init() after fsl_serdes_init(). In the
	 * SoC's fsl_rgmii_init() we check whether the dpmac is already
	 * enabled (fsl_serdes_init() has enabled it); if so, we don't
	 * enable it again.
	 */
	fsl_rgmii_init();
#endif
#ifdef CONFIG_FMAN_ENET
	fman_enet_init();
#endif
#ifdef CONFIG_SYS_DPAA_QBMAN
	setup_qbman_portals();
#endif
	return 0;
}

int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A)
	u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
	u32 svr_dev_id;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with accurate clock frequency */
	if (current_el() == 3)
		asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/*
	 * Enable timebase for all clusters.
	 * It is safe to do so even if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A)
	/*
	 * In certain Layerscape SoCs, the clock for each core has an
	 * enable bit in the PMU Physical Core Time Base Enable
	 * Register (PCTBENR), which allows the watchdog to operate.
	 */
	setbits_le32(pctbenr, 0xff);
	/*
	 * For the LS2080A SoC and its personalities, the timer controller
	 * offset is different.
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR;
#endif

	/*
	 * Enable clock for timer. This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}
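
/*
 * rstcr is kept in __efi_runtime_data so that efi_reset_system() can still
 * reach it after ExitBootServices(); efi_reset_system_init() below
 * registers the register as runtime MMIO.
 */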
__efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;

void __efi_runtime reset_cpu(ulong addr)
{
	u32 val;

#ifdef CONFIG_ARCH_LX2160A
	val = in_le32(rstcr);
	val |= 0x01;
	out_le32(rstcr, val);
#else
	/* Raise RESET_REQ_B */
	val = scfg_in32(rstcr);
	val |= 0x02;
	scfg_out32(rstcr, val);
#endif
}

#ifdef CONFIG_EFI_LOADER

void __efi_runtime EFIAPI efi_reset_system(
		       enum efi_reset_type reset_type,
		       efi_status_t reset_status,
		       unsigned long data_size, void *reset_data)
{
	switch (reset_type) {
	case EFI_RESET_COLD:
	case EFI_RESET_WARM:
	case EFI_RESET_PLATFORM_SPECIFIC:
		reset_cpu(0);
		break;
	case EFI_RESET_SHUTDOWN:
		/* Nothing we can do */
		break;
	}

	while (1) { }
}

efi_status_t efi_reset_system_init(void)
{
	return efi_add_runtime_mmio(&rstcr, sizeof(*rstcr));
}

#endif

/*
 * Calculate reserved memory with the given memory bank.
 * Return aligned memory size on success.
 * Return (ram_size + needed size) on failure.
 */
phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
	phys_size_t ram_top = ram_size;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	ram_top = mc_get_dram_block_size();
	if (ram_top > ram_size)
		return ram_size + ram_top;

	ram_top = ram_size - ram_top;
	/* The start address of MC reserved memory needs to be aligned. */
	ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

	return ram_size - ram_top;
}

phys_size_t get_effective_memsize(void)
{
	phys_size_t ea_size, rem = 0;

	/*
	 * For ARMv8 SoCs, DDR memory is split into two or three regions. The
	 * first region is 2GB space at 0x8000_0000. Secure memory needs to be
	 * allocated from the first region. If the memory extends to the second
	 * region (or the third region if applicable), Management Complex (MC)
	 * memory should be put into the highest region, i.e. the end of DDR
	 * memory. CONFIG_MAX_MEM_MAPPED is set to the size of the first region
	 * so U-Boot doesn't relocate itself into a higher address. Should DDR
	 * be configured to skip the first region, this function needs to be
	 * adjusted.
	 */
	if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) {
		ea_size = CONFIG_MAX_MEM_MAPPED;
		rem = gd->ram_size - ea_size;
	} else {
		ea_size = gd->ram_size;
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	/* Check if we have enough space for secure memory */
	if (ea_size > CONFIG_SYS_MEM_RESERVE_SECURE)
		ea_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	else
		printf("Error: Not enough space for secure memory.\n");
#endif
	/* Check if we have enough memory for MC */
	if (rem < board_reserve_ram_top(rem)) {
		/* Not enough memory in high region to reserve */
		if (ea_size > board_reserve_ram_top(ea_size))
			ea_size -= board_reserve_ram_top(ea_size);
		else
			printf("Error: Not enough space for reserved memory.\n");
	}

	return ea_size;
}

#ifdef CONFIG_TFABOOT
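/*
 * Query TF-A for DRAM information via the SMC_DRAM_BANK_INFO SIP call:
 * x1 = -1 requests the total size, while x1 = bank index returns that
 * bank's start and size (see tfa_dram_init_banksize() below).
 */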
phys_size_t tfa_get_dram_size(void)
{
	struct pt_regs regs;
	phys_size_t dram_size = 0;

	regs.regs[0] = SMC_DRAM_BANK_INFO;
	regs.regs[1] = -1;

	smc_call(&regs);
	if (regs.regs[0])
		return 0;

	dram_size = regs.regs[1];
	return dram_size;
}

static int tfa_dram_init_banksize(void)
{
	int i = 0, ret = 0;
	struct pt_regs regs;
	phys_size_t dram_size = tfa_get_dram_size();

	debug("dram_size %llx\n", dram_size);

	if (!dram_size)
		return -EINVAL;

	do {
		regs.regs[0] = SMC_DRAM_BANK_INFO;
		regs.regs[1] = i;

		smc_call(&regs);
		if (regs.regs[0]) {
			ret = -EINVAL;
			break;
		}

		debug("bank[%d]: start %lx, size %lx\n", i, regs.regs[1],
		      regs.regs[2]);
		gd->bd->bi_dram[i].start = regs.regs[1];
		gd->bd->bi_dram[i].size = regs.regs[2];

		dram_size -= gd->bd->bi_dram[i].size;

		i++;
	} while (dram_size);

	if (i > 0)
		ret = 0;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
				    gd->bd->bi_dram[2].size -
				    board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
					    gd->bd->bi_dram[1].size -
					    board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
					    gd->bd->bi_dram[0].size -
					    board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_FSL_MC_ENET */

	return ret;
}
#endif

int dram_init_banksize(void)
{
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
	phys_size_t dp_ddr_size;
#endif

#ifdef CONFIG_TFABOOT
	if (!tfa_dram_init_banksize())
		return 0;
#endif
	/*
	 * gd->ram_size has the total size of DDR memory, less reserved secure
	 * memory. The DDR extends from the low region to the high region(s),
	 * presuming no hole is created with the DDR configuration.
	 * gd->arch.secure_ram tracks the location of secure memory.
	 * gd->arch.resv_ram tracks the location of reserved memory for
	 * Management Complex (MC). Because gd->ram_size is reduced by this
	 * function if secure memory is reserved, checking gd->arch.secure_ram
	 * should be done to avoid running it repeatedly.
	 */

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		debug("No need to run again, skip %s\n", __func__);

		return 0;
	}
#endif

	gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE;
	if (gd->ram_size > CONFIG_SYS_DDR_BLOCK1_SIZE) {
		gd->bd->bi_dram[0].size = CONFIG_SYS_DDR_BLOCK1_SIZE;
		gd->bd->bi_dram[1].start = CONFIG_SYS_DDR_BLOCK2_BASE;
		gd->bd->bi_dram[1].size = gd->ram_size -
					  CONFIG_SYS_DDR_BLOCK1_SIZE;
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
		if (gd->bd->bi_dram[1].size > CONFIG_SYS_DDR_BLOCK2_SIZE) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DDR_BLOCK3_BASE;
			gd->bd->bi_dram[2].size = gd->bd->bi_dram[1].size -
						  CONFIG_SYS_DDR_BLOCK2_SIZE;
			gd->bd->bi_dram[1].size = CONFIG_SYS_DDR_BLOCK2_SIZE;
		}
#endif
	} else {
		gd->bd->bi_dram[0].size = gd->ram_size;
	}
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->bd->bi_dram[0].size >
	    CONFIG_SYS_MEM_RESERVE_SECURE) {
		gd->bd->bi_dram[0].size -=
			CONFIG_SYS_MEM_RESERVE_SECURE;
		gd->arch.secure_ram = gd->bd->bi_dram[0].start +
				      gd->bd->bi_dram[0].size;
		gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
		gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	}
#endif	/* CONFIG_SYS_MEM_RESERVE_SECURE */

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
				    gd->bd->bi_dram[2].size -
				    board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
					    gd->bd->bi_dram[1].size -
					    board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
					    gd->bd->bi_dram[0].size -
					    board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_FSL_MC_ENET */

#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
	if (soc_has_dp_ddr()) {
		/* initialize DP-DDR here */
		puts("DP-DDR:  ");
		/*
		 * The DDR controller uses 0 as the base address for binding.
		 * It is mapped to CONFIG_SYS_DP_DDR_BASE for the core to
		 * access.
		 */
		dp_ddr_size = fsl_other_ddr_sdram(CONFIG_SYS_DP_DDR_BASE_PHY,
						  CONFIG_DP_DDR_CTRL,
						  CONFIG_DP_DDR_NUM_CTRLS,
						  CONFIG_DP_DDR_DIMM_SLOTS_PER_CTLR,
						  NULL, NULL, NULL);
		if (dp_ddr_size) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DP_DDR_BASE;
			gd->bd->bi_dram[2].size = dp_ddr_size;
		} else {
			puts("Not detected");
		}
	}
#endif

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	debug("%s is called. gd->ram_size is reduced to %lu\n",
	      __func__, (ulong)gd->ram_size);
#endif

	return 0;
}

#if CONFIG_IS_ENABLED(EFI_LOADER)
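/*
 * Register the DRAM banks with the EFI memory map. The MC reserved region
 * at the top of a bank is carved out, and DP-DDR (bank 2 on SoCs that
 * have it) is skipped.
 */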
void efi_add_known_memory(void)
{
	int i;
	phys_addr_t ram_start, start;
	phys_size_t ram_size;
	u64 pages;

	/* Add RAM */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
		if (i == 2)
			continue;	/* skip DP-DDR */
#endif
		ram_start = gd->bd->bi_dram[i].start;
		ram_size = gd->bd->bi_dram[i].size;
#ifdef CONFIG_RESV_RAM
		if (gd->arch.resv_ram >= ram_start &&
		    gd->arch.resv_ram < ram_start + ram_size)
			ram_size = gd->arch.resv_ram - ram_start;
#endif
		start = (ram_start + EFI_PAGE_MASK) & ~EFI_PAGE_MASK;
		pages = (ram_size + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT;

		efi_add_memory_map(start, pages, EFI_CONVENTIONAL_MEMORY,
				   false);
	}
}
#endif

/*
 * Before the DDR size is known, the early MMU table has DDR mapped as
 * device memory to avoid speculative access. To relocate U-Boot to DDR,
 * "normal memory" needs to be set for these mappings.
 * If a special case configures DDR with holes in the mapping, the holes
 * need to be marked as invalid. This is not implemented in this function.
 */
void update_early_mmu_table(void)
{
	if (!gd->arch.tlb_addr)
		return;

	if (gd->ram_size <= CONFIG_SYS_FSL_DRAM_SIZE1) {
		mmu_change_region_attr(
					CONFIG_SYS_SDRAM_BASE,
					gd->ram_size,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
	} else {
		mmu_change_region_attr(
					CONFIG_SYS_SDRAM_BASE,
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#ifndef CONFIG_SYS_DDR_BLOCK2_SIZE
#error "Missing CONFIG_SYS_DDR_BLOCK2_SIZE"
#endif
		if (gd->ram_size - CONFIG_SYS_DDR_BLOCK1_SIZE >
		    CONFIG_SYS_DDR_BLOCK2_SIZE) {
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK3_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE -
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
		} else
#endif
		{
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
		}
	}
}

__weak int dram_init(void)
{
	fsl_initdram();
#if (!defined(CONFIG_SPL) && !defined(CONFIG_TFABOOT)) || \
	defined(CONFIG_SPL_BUILD)
	/* This will break-before-make MMU for DDR */
	update_early_mmu_table();
#endif

	return 0;
}