// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2017 NXP
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 */

#include <common.h>
#include <fsl_ddr_sdram.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <fm_eth.h>
#include <asm/armv8/mmu.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#include <fsl_immap.h>
#include <asm/arch/mp.h>
#include <efi_loader.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#include <asm/armv8/sec_firmware.h>
#ifdef CONFIG_SYS_FSL_DDR
#include <fsl_ddr.h>
#endif
#include <asm/arch/clock.h>
#include <hwconfig.h>
#include <fsl_qbman.h>

#ifdef CONFIG_TFABOOT
#include <environment.h>
#ifdef CONFIG_CHAIN_OF_TRUST
#include <fsl_validate.h>
#endif
#endif

DECLARE_GLOBAL_DATA_PTR;

static struct cpu_type cpu_type_list[] = {
	CPU_TYPE_ENTRY(LS2080A, LS2080A, 8),
	CPU_TYPE_ENTRY(LS2085A, LS2085A, 8),
	CPU_TYPE_ENTRY(LS2045A, LS2045A, 4),
	CPU_TYPE_ENTRY(LS2088A, LS2088A, 8),
	CPU_TYPE_ENTRY(LS2084A, LS2084A, 8),
	CPU_TYPE_ENTRY(LS2048A, LS2048A, 4),
	CPU_TYPE_ENTRY(LS2044A, LS2044A, 4),
	CPU_TYPE_ENTRY(LS2081A, LS2081A, 8),
	CPU_TYPE_ENTRY(LS2041A, LS2041A, 4),
	CPU_TYPE_ENTRY(LS1043A, LS1043A, 4),
	CPU_TYPE_ENTRY(LS1023A, LS1023A, 2),
	CPU_TYPE_ENTRY(LS1046A, LS1046A, 4),
	CPU_TYPE_ENTRY(LS1026A, LS1026A, 2),
	CPU_TYPE_ENTRY(LS2040A, LS2040A, 4),
	CPU_TYPE_ENTRY(LS1012A, LS1012A, 1),
	CPU_TYPE_ENTRY(LS1088A, LS1088A, 8),
	CPU_TYPE_ENTRY(LS1084A, LS1084A, 8),
	CPU_TYPE_ENTRY(LS1048A, LS1048A, 4),
	CPU_TYPE_ENTRY(LS1044A, LS1044A, 4),
	CPU_TYPE_ENTRY(LX2160A, LX2160A, 16),
	CPU_TYPE_ENTRY(LX2120A, LX2120A, 12),
	CPU_TYPE_ENTRY(LX2080A, LX2080A, 8),
};
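
/*
 * For reference (a sketch, not the authoritative definition): each
 * CPU_TYPE_ENTRY(LS2080A, LS2080A, 8) row above expands to roughly
 * { .name = "LS2080A", .soc_ver = SVR_LS2080A, .num_cores = 8 }, so
 * cpu_name() below can match SVR_SOC_VER(svr) against .soc_ver.
 */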

#define EARLY_PGTABLE_SIZE 0x5000
static struct mm_region early_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
	  CONFIG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
#ifdef CONFIG_FSL_IFC
	/* For IFC Region #1, only the first 4MB is cache-enabled */
	{ CONFIG_SYS_FSL_IFC_BASE1, CONFIG_SYS_FSL_IFC_BASE1,
	  CONFIG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
	  CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
	  CONFIG_SYS_FSL_IFC_SIZE1 - CONFIG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FLASH_BASE, CONFIG_SYS_FSL_IFC_BASE1,
	  CONFIG_SYS_FSL_IFC_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
	(defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_FSL_IFC
	/* Map IFC region #2 up to CONFIG_SYS_FLASH_BASE for NAND boot */
	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
	  CONFIG_SYS_FLASH_BASE - CONFIG_SYS_FSL_IFC_BASE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#elif defined(CONFIG_FSL_LSCH2)
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
	  CONFIG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
	  CONFIG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
	(defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
	{},	/* list terminator */
};
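
/*
 * Note: every early_map entry identity-maps (virt == phys) a window of
 * the 40-bit address space. MT_DEVICE_NGNRNE gives non-cacheable,
 * strongly-ordered device semantics, MT_NORMAL is cacheable memory, and
 * PXN/UXN additionally forbid privileged/unprivileged instruction fetch
 * from those windows.
 */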

static struct mm_region final_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
	  CONFIG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE2, CONFIG_SYS_FSL_QSPI_BASE2,
	  CONFIG_SYS_FSL_QSPI_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
	  CONFIG_SYS_FSL_IFC_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_MC_BASE, CONFIG_SYS_FSL_MC_BASE,
	  CONFIG_SYS_FSL_MC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_NI_BASE, CONFIG_SYS_FSL_NI_BASE,
	  CONFIG_SYS_FSL_NI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	/* For QBMAN portal, only the first 64MB is cache-enabled */
	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
	  CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  CONFIG_SYS_FSL_QBMAN_SIZE - CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
	  CONFIG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
	  CONFIG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
	  CONFIG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LX2160A)
	{ CONFIG_SYS_PCIE4_PHYS_ADDR, CONFIG_SYS_PCIE4_PHYS_ADDR,
	  CONFIG_SYS_PCIE4_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_WRIOP1_BASE, CONFIG_SYS_FSL_WRIOP1_BASE,
	  CONFIG_SYS_FSL_WRIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_AIOP1_BASE, CONFIG_SYS_FSL_AIOP1_BASE,
	  CONFIG_SYS_FSL_AIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_PEBUF_BASE, CONFIG_SYS_FSL_PEBUF_BASE,
	  CONFIG_SYS_FSL_PEBUF_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#elif defined(CONFIG_FSL_LSCH2)
	{ CONFIG_SYS_FSL_BOOTROM_BASE, CONFIG_SYS_FSL_BOOTROM_BASE,
	  CONFIG_SYS_FSL_BOOTROM_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
	  CONFIG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
	  CONFIG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
	  CONFIG_SYS_FSL_QBMAN_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
	  CONFIG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
	  CONFIG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
	  CONFIG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	{},	/* space holder for secure mem */
#endif
	{},
};

struct mm_region *mem_map = early_map;
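
/*
 * mem_map is the region list consumed by the generic armv8
 * setup_pgtables(); it points at early_map until final_mmu_setup()
 * below switches it to final_map once DRAM has been sized.
 */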

void cpu_name(char *name)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	unsigned int i, svr, ver;

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
			strcpy(name, cpu_type_list[i].name);
#ifdef CONFIG_ARCH_LX2160A
			if (IS_C_PROCESSOR(svr))
				strcat(name, "C");
#endif

			if (IS_E_PROCESSOR(svr))
				strcat(name, "E");

			sprintf(name + strlen(name), " Rev%d.%d",
				SVR_MAJ(svr), SVR_MIN(svr));
			break;
		}

	if (i == ARRAY_SIZE(cpu_type_list))
		strcpy(name, "unknown");
}
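
/*
 * Worked example of the SVR decoding above (field layout assumed from
 * the SVR_* macros): bits [31:8] identify the SoC, with the "E" (crypto)
 * personality bit masked off by SVR_WO_E; bits [7:4] hold the major
 * revision and bits [3:0] the minor revision. A rev 1.0 part whose
 * masked version matches SVR_LS2088A would print as "LS2088A Rev1.0",
 * or "LS2088AE Rev1.0" when IS_E_PROCESSOR() is true.
 */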

#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * To start the MMU before DDR is available, we create an MMU table in
 * SRAM. The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use
 * three levels of translation tables here to cover the 40-bit address
 * space, with a 4KB granule and 40-bit physical addresses (T0SZ = 24).
 * Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for other
 * purposes. Note that the debug print in cache_v8.c is not usable for
 * debugging these early MMU tables because the UART is not yet
 * available.
 */
static inline void early_mmu_setup(void)
{
	unsigned int el = current_el();

	/* global data is already setup, no allocation yet */
	if (el == 3)
		gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
	else
		gd->arch.tlb_addr = CONFIG_SYS_DDR_SDRAM_BASE;
	gd->arch.tlb_fillptr = gd->arch.tlb_addr;
	gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

	/* Create early page tables */
	setup_pgtables();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
			  get_tcr(el, NULL, NULL) &
			  ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}
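
/*
 * Sizing sketch for the early tables: T0SZ = 24 gives a
 * 2^(64 - 24) = 2^40 byte (1TB) input address range, walked through the
 * three table levels noted above. EARLY_PGTABLE_SIZE (0x5000) is room
 * for five 4KB tables, which is assumed sufficient here because the
 * large identity-mapped regions in early_map can be covered by block
 * entries rather than individual page entries.
 */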

static void fix_pcie_mmu_map(void)
{
#ifdef CONFIG_ARCH_LS2080A
	unsigned int i;
	u32 svr, ver;
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	/* Fix PCIE base and size for LS2088A */
	if ((ver == SVR_LS2088A) || (ver == SVR_LS2084A) ||
	    (ver == SVR_LS2048A) || (ver == SVR_LS2044A) ||
	    (ver == SVR_LS2081A) || (ver == SVR_LS2041A)) {
		for (i = 0; i < ARRAY_SIZE(final_map); i++) {
			switch (final_map[i].phys) {
			case CONFIG_SYS_PCIE1_PHYS_ADDR:
				final_map[i].phys = 0x2000000000ULL;
				final_map[i].virt = 0x2000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE2_PHYS_ADDR:
				final_map[i].phys = 0x2800000000ULL;
				final_map[i].virt = 0x2800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE3_PHYS_ADDR:
				final_map[i].phys = 0x3000000000ULL;
				final_map[i].virt = 0x3000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE4_PHYS_ADDR:
				final_map[i].phys = 0x3800000000ULL;
				final_map[i].virt = 0x3800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			default:
				break;
			}
		}
	}
#endif
}

/*
 * The final tables look similar to the early tables, but differ in
 * detail. These tables are in DRAM. Sub-tables are added to enable
 * cache for QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM will not be used for this purpose, so gd->arch.secure_ram can't
 * be 0.
 */
static inline void final_mmu_setup(void)
{
	u64 tlb_addr_save = gd->arch.tlb_addr;
	unsigned int el = current_el();
	int index;

	/* fix the final_map before filling in the block entries */
	fix_pcie_mmu_map();

	mem_map = final_map;

	/* Update mapping for DDR to actual size */
	for (index = 0; index < ARRAY_SIZE(final_map) - 2; index++) {
		/*
		 * Find the entry for DDR mapping and update the address and
		 * size. Zero-sized mappings will be skipped when creating the
		 * MMU table.
		 */
		switch (final_map[index].virt) {
		case CONFIG_SYS_FSL_DRAM_BASE1:
			final_map[index].virt = gd->bd->bi_dram[0].start;
			final_map[index].phys = gd->bd->bi_dram[0].start;
			final_map[index].size = gd->bd->bi_dram[0].size;
			break;
#ifdef CONFIG_SYS_FSL_DRAM_BASE2
		case CONFIG_SYS_FSL_DRAM_BASE2:
#if (CONFIG_NR_DRAM_BANKS >= 2)
			final_map[index].virt = gd->bd->bi_dram[1].start;
			final_map[index].phys = gd->bd->bi_dram[1].start;
			final_map[index].size = gd->bd->bi_dram[1].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
		case CONFIG_SYS_FSL_DRAM_BASE3:
#if (CONFIG_NR_DRAM_BANKS >= 3)
			final_map[index].virt = gd->bd->bi_dram[2].start;
			final_map[index].phys = gd->bd->bi_dram[2].start;
			final_map[index].size = gd->bd->bi_dram[2].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
		default:
			break;
		}
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		if (el == 3) {
			/*
			 * Only use gd->arch.secure_ram if the address is
			 * recalculated. Align to 4KB for MMU table.
			 */
			/* put page tables in secure ram */
			index = ARRAY_SIZE(final_map) - 2;
			gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
			final_map[index].virt = gd->arch.secure_ram & ~0x3;
			final_map[index].phys = final_map[index].virt;
			final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
			final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
			tlb_addr_save = gd->arch.tlb_addr;
		} else {
			/* Use allocated (board_f.c) memory for TLB */
			tlb_addr_save = gd->arch.tlb_allocated;
			gd->arch.tlb_addr = tlb_addr_save;
		}
	}
#endif

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr_save;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	setup_pgtables();
	gd->arch.tlb_addr = tlb_addr_save;

	/* Disable cache and MMU */
	dcache_disable();	/* TLBs are invalidated */
	invalidate_icache_all();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}
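
/*
 * Tell the generic MMU code how much memory to set aside for the page
 * tables. A fixed 64KB is used here; it must be large enough for both
 * the normal and the emergency tables built in final_mmu_setup() above.
 */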
u64 get_page_table_size(void)
{
	return 0x10000;
}

int arch_cpu_init(void)
{
	/*
	 * This function is called before U-Boot relocates itself to speed
	 * up booting. It is not necessary to run if performance is not
	 * critical. Skip if the MMU is already enabled by SPL or other
	 * means.
	 */
	if (get_sctlr() & CR_M)
		return 0;

	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);
	return 0;
}

void mmu_setup(void)
{
	final_mmu_setup();
}

/*
 * This function is called from common/board_r.c.
 * It recreates the MMU table in main memory.
 */
void enable_caches(void)
{
	mmu_setup();
	__asm_invalidate_tlb_all();
	icache_enable();
	dcache_enable();
}
#endif	/* CONFIG_SYS_DCACHE_OFF */

#ifdef CONFIG_TFABOOT
enum boot_src __get_boot_src(u32 porsr1)
{
	enum boot_src src = BOOT_SOURCE_RESERVED;
	u32 rcw_src = (porsr1 & RCW_SRC_MASK) >> RCW_SRC_BIT;
#if !defined(CONFIG_NXP_LSCH3_2)
	u32 val;
#endif
	debug("%s: rcw_src 0x%x\n", __func__, rcw_src);

#if defined(CONFIG_FSL_LSCH3)
#if defined(CONFIG_NXP_LSCH3_2)
	switch (rcw_src) {
	case RCW_SRC_SDHC1_VAL:
		src = BOOT_SOURCE_SD_MMC;
		break;
	case RCW_SRC_SDHC2_VAL:
		src = BOOT_SOURCE_SD_MMC2;
		break;
	case RCW_SRC_I2C1_VAL:
		src = BOOT_SOURCE_I2C1_EXTENDED;
		break;
	case RCW_SRC_FLEXSPI_NAND2K_VAL:
		src = BOOT_SOURCE_XSPI_NAND;
		break;
	case RCW_SRC_FLEXSPI_NAND4K_VAL:
		src = BOOT_SOURCE_XSPI_NAND;
		break;
	case RCW_SRC_RESERVED_1_VAL:
		src = BOOT_SOURCE_RESERVED;
		break;
	case RCW_SRC_FLEXSPI_NOR_24B:
		src = BOOT_SOURCE_XSPI_NOR;
		break;
	default:
		src = BOOT_SOURCE_RESERVED;
	}
#else
	val = rcw_src & RCW_SRC_TYPE_MASK;
	if (val == RCW_SRC_NOR_VAL) {
		val = rcw_src & NOR_TYPE_MASK;

		switch (val) {
		case NOR_16B_VAL:
		case NOR_32B_VAL:
			src = BOOT_SOURCE_IFC_NOR;
			break;
		default:
			src = BOOT_SOURCE_RESERVED;
		}
	} else {
		/* RCW SRC Serial Flash */
		val = rcw_src & RCW_SRC_SERIAL_MASK;
		switch (val) {
		case RCW_SRC_QSPI_VAL:
			/* RCW SRC Serial NOR (QSPI) */
			src = BOOT_SOURCE_QSPI_NOR;
			break;
		case RCW_SRC_SD_CARD_VAL:
			/* RCW SRC SD Card */
			src = BOOT_SOURCE_SD_MMC;
			break;
		case RCW_SRC_EMMC_VAL:
			/* RCW SRC EMMC */
			src = BOOT_SOURCE_SD_MMC2;
			break;
		case RCW_SRC_I2C1_VAL:
			/* RCW SRC I2C1 Extended */
			src = BOOT_SOURCE_I2C1_EXTENDED;
			break;
		default:
			src = BOOT_SOURCE_RESERVED;
		}
	}
#endif
#elif defined(CONFIG_FSL_LSCH2)
	/* RCW SRC NAND */
	val = rcw_src & RCW_SRC_NAND_MASK;
	if (val == RCW_SRC_NAND_VAL) {
		val = rcw_src & NAND_RESERVED_MASK;
		if (val != NAND_RESERVED_1 && val != NAND_RESERVED_2)
			src = BOOT_SOURCE_IFC_NAND;
	} else {
		/* RCW SRC NOR */
		val = rcw_src & RCW_SRC_NOR_MASK;
		if (val == NOR_8B_VAL || val == NOR_16B_VAL) {
			src = BOOT_SOURCE_IFC_NOR;
		} else {
			switch (rcw_src) {
			case QSPI_VAL1:
			case QSPI_VAL2:
				src = BOOT_SOURCE_QSPI_NOR;
				break;
			case SD_VAL:
				src = BOOT_SOURCE_SD_MMC;
				break;
			default:
				src = BOOT_SOURCE_RESERVED;
			}
		}
	}
#endif

	if (CONFIG_IS_ENABLED(SYS_FSL_ERRATUM_A010539) && !rcw_src)
		src = BOOT_SOURCE_QSPI_NOR;

	debug("%s: src 0x%x\n", __func__, src);
	return src;
}
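
/*
 * Example of the decode above: on an LSCH2 part strapped to boot from
 * SD, the RCW source field extracted from porsr1 matches SD_VAL and
 * BOOT_SOURCE_SD_MMC is returned. A zero rcw_src is treated as QSPI NOR
 * when the erratum A010539 workaround is enabled (see above).
 */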

enum boot_src get_boot_src(void)
{
	struct pt_regs regs;
	u32 porsr1 = 0;

#if defined(CONFIG_FSL_LSCH3)
	u32 __iomem *dcfg_ccsr = (u32 __iomem *)DCFG_BASE;
#elif defined(CONFIG_FSL_LSCH2)
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
#endif

	if (current_el() == 2) {
		regs.regs[0] = SIP_SVC_RCW;

		smc_call(&regs);
		if (!regs.regs[0])
			porsr1 = regs.regs[1];
	}

	if (current_el() == 3 || !porsr1) {
#ifdef CONFIG_FSL_LSCH3
		porsr1 = in_le32(dcfg_ccsr + DCFG_PORSR1 / 4);
#elif defined(CONFIG_FSL_LSCH2)
		porsr1 = in_be32(&gur->porsr1);
#endif
	}

	debug("%s: porsr1 0x%x\n", __func__, porsr1);

	return __get_boot_src(porsr1);
}

#ifdef CONFIG_ENV_IS_IN_MMC
int mmc_get_env_dev(void)
{
	enum boot_src src = get_boot_src();
	int dev = CONFIG_SYS_MMC_ENV_DEV;

	switch (src) {
	case BOOT_SOURCE_SD_MMC:
		dev = 0;
		break;
	case BOOT_SOURCE_SD_MMC2:
		dev = 1;
		break;
	default:
		break;
	}

	return dev;
}
#endif
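
/*
 * U-Boot probes environment locations in priority order. Only a single
 * location is supported here, so any priority other than 0 is unknown;
 * the location itself follows from the boot source determined above.
 */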
enum env_location env_get_location(enum env_operation op, int prio)
{
	enum boot_src src = get_boot_src();
	enum env_location env_loc = ENVL_NOWHERE;

	if (prio)
		return ENVL_UNKNOWN;

#ifdef CONFIG_CHAIN_OF_TRUST
	/* Check Boot Mode
	 * If Boot Mode is Secure, return ENVL_NOWHERE
	 */
	if (fsl_check_boot_mode_secure() == 1)
		goto done;
#endif

	switch (src) {
	case BOOT_SOURCE_IFC_NOR:
		env_loc = ENVL_FLASH;
		break;
	case BOOT_SOURCE_QSPI_NOR:
		/* FALLTHROUGH */
	case BOOT_SOURCE_XSPI_NOR:
		env_loc = ENVL_SPI_FLASH;
		break;
	case BOOT_SOURCE_IFC_NAND:
		/* FALLTHROUGH */
	case BOOT_SOURCE_QSPI_NAND:
		/* FALLTHROUGH */
	case BOOT_SOURCE_XSPI_NAND:
		env_loc = ENVL_NAND;
		break;
	case BOOT_SOURCE_SD_MMC:
		/* FALLTHROUGH */
	case BOOT_SOURCE_SD_MMC2:
		env_loc = ENVL_MMC;
		break;
	case BOOT_SOURCE_I2C1_EXTENDED:
		/* FALLTHROUGH */
	default:
		break;
	}

#ifdef CONFIG_CHAIN_OF_TRUST
done:
#endif
	return env_loc;
}
#endif	/* CONFIG_TFABOOT */
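
/*
 * Core topology is read from the DCFG/GUR topology registers: each
 * tp_cluster[] word lists up to TP_INIT_PER_CLUSTER initiator indices
 * (one byte each) into the tp_ityp[] type table, and TP_CLUSTER_EOC
 * flags the last cluster. initiator_type() below returns the type word
 * for one initiator, or 0 if its TP_ITYP_AV (available) bit is clear.
 */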
u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = 0;

	type = gur_in32(&gur->tp_ityp[idx]);
	if (type & TP_ITYP_AV)
		return type;

	return 0;
}

u32 cpu_pos_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
				mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

/*
 * Return the number of cores on this SOC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}

int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}

u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the core type */
}
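
/*
 * Worked example (hypothetical topology, assuming TP_INIT_PER_CLUSTER
 * is 4): two clusters, each with ARM cores in initiator slots 0-1 and
 * slots 2-3 unused. cpu_mask() numbers cores densely and returns 0xf,
 * while cpu_pos_mask() keeps the slot positions and returns 0x33 (bits
 * 0, 1, 4 and 5); cpu_numcores() reports 4 either way, and
 * fsl_qoriq_core_to_cluster(2) returns 1.
 */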

#ifndef CONFIG_FSL_LSCH3
uint get_svr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	return gur_in32(&gur->svr);
}
#endif

#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw, svr = gur_in32(&gur->svr);

	puts("SoC: ");

	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, svr);
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n       ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz  ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" :
		       (type == TY_ITYP_VER_A72 ? "A72" : "   "))),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	/* Display platform clock as Bus frequency. */
	printf("\n       Bus:      %-4s MHz  ",
	       strmhz(buf, sysinfo.freq_systembus / CONFIG_SYS_FSL_PCLK_DIV));
	printf("DDR:      %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
	printf("  FMAN:     %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
	if (soc_has_dp_ddr()) {
		printf("     DP-DDR:   %-4s MT/s",
		       strmhz(buf, sysinfo.freq_ddrbus2));
	}
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);
		if ((i % 4) == 0)
			printf("\n       %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif

#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(bd_t *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(bd_t *bis)
{
	int error = 0;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
	fm_standard_init(bis);
#endif
	return error;
}

static inline int check_psci(void)
{
	unsigned int psci_ver;

	psci_ver = sec_firmware_support_psci_version();
	if (psci_ver == PSCI_INVALID_VER)
		return 1;

	return 0;
}

static void config_core_prefetch(void)
{
	char *buf = NULL;
	char buffer[HWCONFIG_BUFFER_SIZE];
	const char *prefetch_arg = NULL;
	size_t arglen;
	unsigned int mask;
	struct pt_regs regs;

	if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
		buf = buffer;

	prefetch_arg = hwconfig_subarg_f("core_prefetch", "disable",
					 &arglen, buf);

	if (prefetch_arg) {
		mask = simple_strtoul(prefetch_arg, NULL, 0) & 0xff;
		if (mask & 0x1) {
			printf("Core0 prefetch can't be disabled\n");
			return;
		}

#define SIP_PREFETCH_DISABLE_64 0xC200FF13
		regs.regs[0] = SIP_PREFETCH_DISABLE_64;
		regs.regs[1] = mask;
		smc_call(&regs);

		if (regs.regs[0])
			printf("Prefetch disable config failed for mask ");
		else
			printf("Prefetch disable config passed for mask ");
		printf("0x%x\n", mask);
	}
}
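
/*
 * Usage sketch (values are illustrative): setting
 *   => setenv hwconfig 'core_prefetch:disable=0x6'
 * asks the SiP firmware, via the SMC above, to disable hardware
 * prefetch on cores 1 and 2. Bit 0 is core 0, whose prefetch must stay
 * enabled, so any mask with bit 0 set is rejected.
 */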

int arch_early_init_r(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	u32 svr_dev_id;
	/*
	 * Erratum A009635 is valid only for the LS2080A SoC and its
	 * personalities.
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		erratum_a009635();
#endif
#if defined(CONFIG_SYS_FSL_ERRATUM_A009942) && defined(CONFIG_SYS_FSL_DDR)
	erratum_a009942_check_cpo();
#endif
	if (check_psci()) {
		debug("PSCI: PSCI does not exist.\n");

		/* if PSCI does not exist, boot secondary cores here */
		if (fsl_layerscape_wake_seconday_cores())
			printf("Did not wake secondary cores\n");
	}

#ifdef CONFIG_SYS_FSL_HAS_RGMII
	fsl_rgmii_init();
#endif

	config_core_prefetch();

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_FMAN_ENET
	fman_enet_init();
#endif
#ifdef CONFIG_SYS_DPAA_QBMAN
	setup_qbman_portals();
#endif
	return 0;
}

int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A)
	u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
	u32 svr_dev_id;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with accurate clock frequency */
	if (current_el() == 3)
		asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/*
	 * Enable the timebase for all clusters. It is safe to do so even
	 * if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A)
	/*
	 * In certain Layerscape SoCs, the clock for each core's timebase
	 * has an enable bit in the PMU Physical Core Time Base Enable
	 * Register (PCTBENR), which allows the watchdog to operate.
	 */
	setbits_le32(pctbenr, 0xff);
	/*
	 * For the LS2080A SoC and its personalities, the timer controller
	 * offset is different.
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR;
#endif

	/*
	 * Enable the clock for the timer. This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}

__efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;

void __efi_runtime reset_cpu(ulong addr)
{
	u32 val;

#ifdef CONFIG_ARCH_LX2160A
	val = in_le32(rstcr);
	val |= 0x01;
	out_le32(rstcr, val);
#else
	/* Raise RESET_REQ_B */
	val = scfg_in32(rstcr);
	val |= 0x02;
	scfg_out32(rstcr, val);
#endif
}

#ifdef CONFIG_EFI_LOADER

void __efi_runtime EFIAPI efi_reset_system(
		       enum efi_reset_type reset_type,
		       efi_status_t reset_status,
		       unsigned long data_size, void *reset_data)
{
	switch (reset_type) {
	case EFI_RESET_COLD:
	case EFI_RESET_WARM:
	case EFI_RESET_PLATFORM_SPECIFIC:
		reset_cpu(0);
		break;
	case EFI_RESET_SHUTDOWN:
		/* Nothing we can do */
		break;
	}

	while (1) { }
}

efi_status_t efi_reset_system_init(void)
{
	return efi_add_runtime_mmio(&rstcr, sizeof(*rstcr));
}

#endif

/*
 * Calculate the amount of memory to reserve at the top of the given
 * memory bank. Returns the aligned reserved size on success, or
 * (ram_size + needed size) on failure.
 */
phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
	phys_size_t ram_top = ram_size;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	ram_top = mc_get_dram_block_size();
	if (ram_top > ram_size)
		return ram_size + ram_top;

	ram_top = ram_size - ram_top;
	/* The start address of MC reserved memory needs to be aligned. */
	ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

	return ram_size - ram_top;
}
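
/*
 * Worked example (hypothetical sizes): with ram_size = 0x80000000 (2GB),
 * an MC block size of 0x20000000 (512MB) and 512MB alignment, ram_top
 * becomes 0x60000000 and 0x20000000 is returned, i.e. 512MB reserved at
 * the top of the bank. A block size larger than the bank makes the
 * function return ram_size + block size to signal failure.
 */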

phys_size_t get_effective_memsize(void)
{
	phys_size_t ea_size, rem = 0;

	/*
	 * For ARMv8 SoCs, DDR memory is split into two or three regions.
	 * The first region is a 2GB space at 0x8000_0000. Secure memory
	 * needs to be allocated from the first region. If the memory
	 * extends to the second region (or the third region if
	 * applicable), the Management Complex (MC) memory should be put
	 * into the highest region, i.e. the end of DDR memory.
	 * CONFIG_MAX_MEM_MAPPED is set to the size of the first region so
	 * U-Boot doesn't relocate itself into a higher address. Should
	 * DDR be configured to skip the first region, this function needs
	 * to be adjusted.
	 */
	if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) {
		ea_size = CONFIG_MAX_MEM_MAPPED;
		rem = gd->ram_size - ea_size;
	} else {
		ea_size = gd->ram_size;
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	/* Check if we have enough space for secure memory */
	if (ea_size > CONFIG_SYS_MEM_RESERVE_SECURE)
		ea_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	else
		printf("Error: Not enough space for secure memory.\n");
#endif
	/* Check if we have enough memory for MC */
	if (rem < board_reserve_ram_top(rem)) {
		/* Not enough memory in high region to reserve */
		if (ea_size > board_reserve_ram_top(ea_size))
			ea_size -= board_reserve_ram_top(ea_size);
		else
			printf("Error: Not enough space for reserved memory.\n");
	}

	return ea_size;
}
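
/*
 * Worked example (hypothetical config): gd->ram_size = 4GB with
 * CONFIG_MAX_MEM_MAPPED = 2GB gives ea_size = 2GB and rem = 2GB. A 64MB
 * CONFIG_SYS_MEM_RESERVE_SECURE then drops ea_size to 2GB - 64MB, and
 * because the 2GB high region is large enough for the MC reservation,
 * ea_size is not reduced any further.
 */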

#ifdef CONFIG_TFABOOT
phys_size_t tfa_get_dram_size(void)
{
	struct pt_regs regs;
	phys_size_t dram_size = 0;

	regs.regs[0] = SMC_DRAM_BANK_INFO;
	regs.regs[1] = -1;

	smc_call(&regs);
	if (regs.regs[0])
		return 0;

	dram_size = regs.regs[1];
	return dram_size;
}

static int tfa_dram_init_banksize(void)
{
	int i = 0, ret = 0;
	struct pt_regs regs;
	phys_size_t dram_size = tfa_get_dram_size();

	debug("dram_size %llx\n", dram_size);

	if (!dram_size)
		return -EINVAL;

	do {
		regs.regs[0] = SMC_DRAM_BANK_INFO;
		regs.regs[1] = i;

		smc_call(&regs);
		if (regs.regs[0]) {
			ret = -EINVAL;
			break;
		}

		debug("bank[%d]: start %lx, size %lx\n", i, regs.regs[1],
		      regs.regs[2]);
		gd->bd->bi_dram[i].start = regs.regs[1];
		gd->bd->bi_dram[i].size = regs.regs[2];

		dram_size -= gd->bd->bi_dram[i].size;

		i++;
	} while (dram_size);

	if (i > 0)
		ret = 0;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
				    gd->bd->bi_dram[2].size -
				    board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
					    gd->bd->bi_dram[1].size -
					    board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
					    gd->bd->bi_dram[0].size -
					    board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_FSL_MC_ENET */

	return ret;
}
#endif

int dram_init_banksize(void)
{
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
	phys_size_t dp_ddr_size;
#endif

#ifdef CONFIG_TFABOOT
	if (!tfa_dram_init_banksize())
		return 0;
#endif
	/*
	 * gd->ram_size has the total size of DDR memory, less reserved
	 * secure memory. The DDR extends from the low region to the high
	 * region(s), presuming no hole is created with the DDR
	 * configuration. gd->arch.secure_ram tracks the location of
	 * secure memory. gd->arch.resv_ram tracks the location of
	 * reserved memory for the Management Complex (MC). Because
	 * gd->ram_size is reduced by this function if secure memory is
	 * reserved, gd->arch.secure_ram should be checked to avoid
	 * running it repeatedly.
	 */

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		debug("No need to run again, skip %s\n", __func__);

		return 0;
	}
#endif

	gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE;
	if (gd->ram_size > CONFIG_SYS_DDR_BLOCK1_SIZE) {
		gd->bd->bi_dram[0].size = CONFIG_SYS_DDR_BLOCK1_SIZE;
		gd->bd->bi_dram[1].start = CONFIG_SYS_DDR_BLOCK2_BASE;
		gd->bd->bi_dram[1].size = gd->ram_size -
					  CONFIG_SYS_DDR_BLOCK1_SIZE;
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
		if (gd->bd->bi_dram[1].size > CONFIG_SYS_DDR_BLOCK2_SIZE) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DDR_BLOCK3_BASE;
			gd->bd->bi_dram[2].size = gd->bd->bi_dram[1].size -
						  CONFIG_SYS_DDR_BLOCK2_SIZE;
			gd->bd->bi_dram[1].size = CONFIG_SYS_DDR_BLOCK2_SIZE;
		}
#endif
	} else {
		gd->bd->bi_dram[0].size = gd->ram_size;
	}
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->bd->bi_dram[0].size >
	    CONFIG_SYS_MEM_RESERVE_SECURE) {
		gd->bd->bi_dram[0].size -=
			CONFIG_SYS_MEM_RESERVE_SECURE;
		gd->arch.secure_ram = gd->bd->bi_dram[0].start +
				      gd->bd->bi_dram[0].size;
		gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
		gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	}
#endif	/* CONFIG_SYS_MEM_RESERVE_SECURE */

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
				    gd->bd->bi_dram[2].size -
				    board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
					    gd->bd->bi_dram[1].size -
					    board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
					    gd->bd->bi_dram[0].size -
					    board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_FSL_MC_ENET */

#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
	if (soc_has_dp_ddr()) {
		/* initialize DP-DDR here */
		puts("DP-DDR: ");
		/*
		 * The DDR controller uses 0 as the base address for
		 * binding. It is mapped to CONFIG_SYS_DP_DDR_BASE for the
		 * cores to access.
		 */
		dp_ddr_size = fsl_other_ddr_sdram(CONFIG_SYS_DP_DDR_BASE_PHY,
						  CONFIG_DP_DDR_CTRL,
						  CONFIG_DP_DDR_NUM_CTRLS,
						  CONFIG_DP_DDR_DIMM_SLOTS_PER_CTLR,
						  NULL, NULL, NULL);
		if (dp_ddr_size) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DP_DDR_BASE;
			gd->bd->bi_dram[2].size = dp_ddr_size;
		} else {
			puts("Not detected");
		}
	}
#endif

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	debug("%s is called. gd->ram_size is reduced to %lu\n",
	      __func__, (ulong)gd->ram_size);
#endif

	return 0;
}

#if CONFIG_IS_ENABLED(EFI_LOADER)
void efi_add_known_memory(void)
{
	int i;
	phys_addr_t ram_start, start;
	phys_size_t ram_size;
	u64 pages;

	/* Add RAM */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
		if (i == 2)
			continue;	/* skip DP-DDR */
#endif
		ram_start = gd->bd->bi_dram[i].start;
		ram_size = gd->bd->bi_dram[i].size;
#ifdef CONFIG_RESV_RAM
		if (gd->arch.resv_ram >= ram_start &&
		    gd->arch.resv_ram < ram_start + ram_size)
			ram_size = gd->arch.resv_ram - ram_start;
#endif
		start = (ram_start + EFI_PAGE_MASK) & ~EFI_PAGE_MASK;
		pages = (ram_size + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT;

		efi_add_memory_map(start, pages, EFI_CONVENTIONAL_MEMORY,
				   false);
	}
}
#endif

/*
 * Before the DDR size is known, the early MMU tables have DDR mapped as
 * device memory to avoid speculative access. To relocate U-Boot to DDR,
 * "normal memory" needs to be set for these mappings.
 * If a special case configures DDR with holes in the mapping, the holes
 * need to be marked as invalid. This is not implemented in this
 * function.
 */
void update_early_mmu_table(void)
{
	if (!gd->arch.tlb_addr)
		return;

	if (gd->ram_size <= CONFIG_SYS_FSL_DRAM_SIZE1) {
		mmu_change_region_attr(CONFIG_SYS_SDRAM_BASE,
				       gd->ram_size,
				       PTE_BLOCK_MEMTYPE(MT_NORMAL) |
				       PTE_BLOCK_OUTER_SHARE |
				       PTE_BLOCK_NS |
				       PTE_TYPE_VALID);
	} else {
		mmu_change_region_attr(CONFIG_SYS_SDRAM_BASE,
				       CONFIG_SYS_DDR_BLOCK1_SIZE,
				       PTE_BLOCK_MEMTYPE(MT_NORMAL) |
				       PTE_BLOCK_OUTER_SHARE |
				       PTE_BLOCK_NS |
				       PTE_TYPE_VALID);
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#ifndef CONFIG_SYS_DDR_BLOCK2_SIZE
#error "Missing CONFIG_SYS_DDR_BLOCK2_SIZE"
#endif
		if (gd->ram_size - CONFIG_SYS_DDR_BLOCK1_SIZE >
		    CONFIG_SYS_DDR_BLOCK2_SIZE) {
			mmu_change_region_attr(CONFIG_SYS_DDR_BLOCK2_BASE,
					       CONFIG_SYS_DDR_BLOCK2_SIZE,
					       PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					       PTE_BLOCK_OUTER_SHARE |
					       PTE_BLOCK_NS |
					       PTE_TYPE_VALID);
			mmu_change_region_attr(CONFIG_SYS_DDR_BLOCK3_BASE,
					       gd->ram_size -
					       CONFIG_SYS_DDR_BLOCK1_SIZE -
					       CONFIG_SYS_DDR_BLOCK2_SIZE,
					       PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					       PTE_BLOCK_OUTER_SHARE |
					       PTE_BLOCK_NS |
					       PTE_TYPE_VALID);
		} else
#endif
		{
			mmu_change_region_attr(CONFIG_SYS_DDR_BLOCK2_BASE,
					       gd->ram_size -
					       CONFIG_SYS_DDR_BLOCK1_SIZE,
					       PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					       PTE_BLOCK_OUTER_SHARE |
					       PTE_BLOCK_NS |
					       PTE_TYPE_VALID);
		}
	}
}

__weak int dram_init(void)
{
	fsl_initdram();
#if (!defined(CONFIG_SPL) && !defined(CONFIG_TFABOOT)) || \
	defined(CONFIG_SPL_BUILD)
	/* This will break-before-make MMU for DDR */
	update_early_mmu_table();
#endif

	return 0;
}