// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2017 NXP
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 */

#include <common.h>
#include <fsl_ddr_sdram.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <fm_eth.h>
#include <asm/armv8/mmu.h>
#include <asm/io.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#include <fsl_immap.h>
#include <asm/arch/mp.h>
#include <efi_loader.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#include <asm/armv8/sec_firmware.h>
#ifdef CONFIG_SYS_FSL_DDR
#include <fsl_ddr.h>
#endif
#include <asm/arch/clock.h>
#include <hwconfig.h>
#include <fsl_qbman.h>

DECLARE_GLOBAL_DATA_PTR;

static struct cpu_type cpu_type_list[] = {
	CPU_TYPE_ENTRY(LS2080A, LS2080A, 8),
	CPU_TYPE_ENTRY(LS2085A, LS2085A, 8),
	CPU_TYPE_ENTRY(LS2045A, LS2045A, 4),
	CPU_TYPE_ENTRY(LS2088A, LS2088A, 8),
	CPU_TYPE_ENTRY(LS2084A, LS2084A, 8),
	CPU_TYPE_ENTRY(LS2048A, LS2048A, 4),
	CPU_TYPE_ENTRY(LS2044A, LS2044A, 4),
	CPU_TYPE_ENTRY(LS2081A, LS2081A, 8),
	CPU_TYPE_ENTRY(LS2041A, LS2041A, 4),
	CPU_TYPE_ENTRY(LS1043A, LS1043A, 4),
	CPU_TYPE_ENTRY(LS1023A, LS1023A, 2),
	CPU_TYPE_ENTRY(LS1046A, LS1046A, 4),
	CPU_TYPE_ENTRY(LS1026A, LS1026A, 2),
	CPU_TYPE_ENTRY(LS2040A, LS2040A, 4),
	CPU_TYPE_ENTRY(LS1012A, LS1012A, 1),
	CPU_TYPE_ENTRY(LS1088A, LS1088A, 8),
	CPU_TYPE_ENTRY(LS1084A, LS1084A, 8),
	CPU_TYPE_ENTRY(LS1048A, LS1048A, 4),
	CPU_TYPE_ENTRY(LS1044A, LS1044A, 4),
};

#define EARLY_PGTABLE_SIZE 0x5000
static struct mm_region early_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
	  CONFIG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE},
#ifdef CONFIG_FSL_IFC
	/* For IFC Region #1, only the first 4MB is cache-enabled */
	{ CONFIG_SYS_FSL_IFC_BASE1, CONFIG_SYS_FSL_IFC_BASE1,
	  CONFIG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
	  CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
	  CONFIG_SYS_FSL_IFC_SIZE1 - CONFIG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FLASH_BASE, CONFIG_SYS_FSL_IFC_BASE1,
	  CONFIG_SYS_FSL_IFC_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD)
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_FSL_IFC
	/* Map IFC region #2 up to CONFIG_SYS_FLASH_BASE for NAND boot */
	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
	  CONFIG_SYS_FLASH_BASE - CONFIG_SYS_FSL_IFC_BASE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#elif defined(CONFIG_FSL_LSCH2)
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
	  CONFIG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
	  CONFIG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD)
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
	{},	/* list terminator */
};

static struct mm_region final_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
	  CONFIG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE2, CONFIG_SYS_FSL_QSPI_BASE2,
	  CONFIG_SYS_FSL_QSPI_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
	  CONFIG_SYS_FSL_IFC_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_MC_BASE, CONFIG_SYS_FSL_MC_BASE,
	  CONFIG_SYS_FSL_MC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_NI_BASE, CONFIG_SYS_FSL_NI_BASE,
	  CONFIG_SYS_FSL_NI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	/* For QBMAN portal, only the first 64MB is cache-enabled */
	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
	  CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  CONFIG_SYS_FSL_QBMAN_SIZE - CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
	  CONFIG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
	  CONFIG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
	  CONFIG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_ARCH_LS2080A
	{ CONFIG_SYS_PCIE4_PHYS_ADDR, CONFIG_SYS_PCIE4_PHYS_ADDR,
	  CONFIG_SYS_PCIE4_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_WRIOP1_BASE, CONFIG_SYS_FSL_WRIOP1_BASE,
	  CONFIG_SYS_FSL_WRIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_AIOP1_BASE, CONFIG_SYS_FSL_AIOP1_BASE,
	  CONFIG_SYS_FSL_AIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_PEBUF_BASE, CONFIG_SYS_FSL_PEBUF_BASE,
	  CONFIG_SYS_FSL_PEBUF_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#elif defined(CONFIG_FSL_LSCH2)
	{ CONFIG_SYS_FSL_BOOTROM_BASE, CONFIG_SYS_FSL_BOOTROM_BASE,
	  CONFIG_SYS_FSL_BOOTROM_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
	  CONFIG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
	  CONFIG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
	  CONFIG_SYS_FSL_QBMAN_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
	  CONFIG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
	  CONFIG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
	  CONFIG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	{},	/* space holder for secure mem */
#endif
	{},
};

struct mm_region *mem_map = early_map;

void cpu_name(char *name)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	unsigned int i, svr, ver;

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
			strcpy(name, cpu_type_list[i].name);

			if (IS_E_PROCESSOR(svr))
				strcat(name, "E");

			sprintf(name + strlen(name), " Rev%d.%d",
				SVR_MAJ(svr), SVR_MIN(svr));
			break;
		}

	if (i == ARRAY_SIZE(cpu_type_list))
		strcpy(name, "unknown");
}

#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * To start MMU before DDR is available, we create MMU table in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover 40-bit address space.
 * We use 4KB granule size, with 40 bits physical address, T0SZ=24.
 * Address space above EARLY_PGTABLE_SIZE (0x5000) is free for other purposes.
 * Note, the debug print in cache_v8.c is not usable for debugging
 * these early MMU tables because UART is not yet available.
 */
static inline void early_mmu_setup(void)
{
	unsigned int el = current_el();

	/* global data is already setup, no allocation yet */
	gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
	gd->arch.tlb_fillptr = gd->arch.tlb_addr;
	gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

	/* Create early page tables */
	setup_pgtables();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
			  get_tcr(el, NULL, NULL) &
			  ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}

static void fix_pcie_mmu_map(void)
{
#ifdef CONFIG_ARCH_LS2080A
	unsigned int i;
	u32 svr, ver;
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	/* Fix PCIE base and size for LS2088A */
	if ((ver == SVR_LS2088A) || (ver == SVR_LS2084A) ||
	    (ver == SVR_LS2048A) || (ver == SVR_LS2044A) ||
	    (ver == SVR_LS2081A) || (ver == SVR_LS2041A)) {
		for (i = 0; i < ARRAY_SIZE(final_map); i++) {
			switch (final_map[i].phys) {
			case CONFIG_SYS_PCIE1_PHYS_ADDR:
				final_map[i].phys = 0x2000000000ULL;
				final_map[i].virt = 0x2000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE2_PHYS_ADDR:
				final_map[i].phys = 0x2800000000ULL;
				final_map[i].virt = 0x2800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE3_PHYS_ADDR:
				final_map[i].phys = 0x3000000000ULL;
				final_map[i].virt = 0x3000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE4_PHYS_ADDR:
				final_map[i].phys = 0x3800000000ULL;
				final_map[i].virt = 0x3800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			default:
				break;
			}
		}
	}
#endif
}

/*
 * The final tables look similar to early tables, but different in detail.
 * These tables are in DRAM. Sub tables are added to enable cache for
 * QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM will not be used for this purpose so gd->arch.secure_ram can't be 0.
 */
static inline void final_mmu_setup(void)
{
	u64 tlb_addr_save = gd->arch.tlb_addr;
	unsigned int el = current_el();
	int index;

	/* fix the final_map before filling in the block entries */
	fix_pcie_mmu_map();

	mem_map = final_map;

	/* Update mapping for DDR to actual size */
	for (index = 0; index < ARRAY_SIZE(final_map) - 2; index++) {
		/*
		 * Find the entry for DDR mapping and update the address and
		 * size. Zero-sized mapping will be skipped when creating MMU
		 * table.
		 */
		switch (final_map[index].virt) {
		case CONFIG_SYS_FSL_DRAM_BASE1:
			final_map[index].virt = gd->bd->bi_dram[0].start;
			final_map[index].phys = gd->bd->bi_dram[0].start;
			final_map[index].size = gd->bd->bi_dram[0].size;
			break;
#ifdef CONFIG_SYS_FSL_DRAM_BASE2
		case CONFIG_SYS_FSL_DRAM_BASE2:
#if (CONFIG_NR_DRAM_BANKS >= 2)
			final_map[index].virt = gd->bd->bi_dram[1].start;
			final_map[index].phys = gd->bd->bi_dram[1].start;
			final_map[index].size = gd->bd->bi_dram[1].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
		case CONFIG_SYS_FSL_DRAM_BASE3:
#if (CONFIG_NR_DRAM_BANKS >= 3)
			final_map[index].virt = gd->bd->bi_dram[2].start;
			final_map[index].phys = gd->bd->bi_dram[2].start;
			final_map[index].size = gd->bd->bi_dram[2].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
		default:
			break;
		}
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		if (el == 3) {
			/*
			 * Only use gd->arch.secure_ram if the address is
			 * recalculated. Align to 4KB for MMU table.
			 */
			/* put page tables in secure ram */
			index = ARRAY_SIZE(final_map) - 2;
			gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
			final_map[index].virt = gd->arch.secure_ram & ~0x3;
			final_map[index].phys = final_map[index].virt;
			final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
			final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
			tlb_addr_save = gd->arch.tlb_addr;
		} else {
			/* Use allocated (board_f.c) memory for TLB */
			tlb_addr_save = gd->arch.tlb_allocated;
			gd->arch.tlb_addr = tlb_addr_save;
		}
	}
#endif

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr_save;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	setup_pgtables();
	gd->arch.tlb_addr = tlb_addr_save;

	/* Disable cache and MMU */
	dcache_disable();	/* TLBs are invalidated */
	invalidate_icache_all();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}

u64 get_page_table_size(void)
{
	return 0x10000;
}

int arch_cpu_init(void)
{
	/*
	 * This function is called before U-Boot relocates itself to speed up
	 * booting. It is not necessary to run if performance is not critical.
	 * Skip if MMU is already enabled by SPL or other means.
	 */
	if (get_sctlr() & CR_M)
		return 0;

	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);
	return 0;
}

void mmu_setup(void)
{
	final_mmu_setup();
}

/*
 * This function is called from common/board_r.c.
 * It recreates MMU table in main memory.
 */
void enable_caches(void)
{
	mmu_setup();
	__asm_invalidate_tlb_all();
	icache_enable();
	dcache_enable();
}
#endif

u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = 0;

	type = gur_in32(&gur->tp_ityp[idx]);
	if (type & TP_ITYP_AV)
		return type;

	return 0;
}

u32 cpu_pos_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
				mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

/*
 * Return the number of cores on this SOC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}

int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}

u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}

#ifndef CONFIG_FSL_LSCH3
uint get_svr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	return gur_in32(&gur->svr);
}
#endif

#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw, svr = gur_in32(&gur->svr);

	puts("SoC: ");

	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, svr);
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n       ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz  ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" :
		       (type == TY_ITYP_VER_A72 ? "A72" : "   "))),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	/* Display platform clock as Bus frequency. */
	printf("\n       Bus:      %-4s MHz  ",
	       strmhz(buf, sysinfo.freq_systembus / CONFIG_SYS_FSL_PCLK_DIV));
	printf("DDR:      %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
	printf("  FMAN:     %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
	if (soc_has_dp_ddr()) {
		printf("     DP-DDR:  %-4s MT/s",
		       strmhz(buf, sysinfo.freq_ddrbus2));
	}
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);
		if ((i % 4) == 0)
			printf("\n       %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif

#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(bd_t *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(bd_t *bis)
{
	int error = 0;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
	fm_standard_init(bis);
#endif
	return error;
}

static inline int check_psci(void)
{
	unsigned int psci_ver;

	psci_ver = sec_firmware_support_psci_version();
	if (psci_ver == PSCI_INVALID_VER)
		return 1;

	return 0;
}

static void config_core_prefetch(void)
{
	char *buf = NULL;
	char buffer[HWCONFIG_BUFFER_SIZE];
	const char *prefetch_arg = NULL;
	size_t arglen;
	unsigned int mask;
	struct pt_regs regs;

	if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
		buf = buffer;

	prefetch_arg = hwconfig_subarg_f("core_prefetch", "disable",
					 &arglen, buf);

	if (prefetch_arg) {
		mask = simple_strtoul(prefetch_arg, NULL, 0) & 0xff;
		if (mask & 0x1) {
			printf("Core0 prefetch can't be disabled\n");
			return;
		}

#define SIP_PREFETCH_DISABLE_64 0xC200FF13
		regs.regs[0] = SIP_PREFETCH_DISABLE_64;
		regs.regs[1] = mask;
		smc_call(&regs);

		if (regs.regs[0])
			printf("Prefetch disable config failed for mask ");
		else
			printf("Prefetch disable config passed for mask ");
		printf("0x%x\n", mask);
	}
}

int arch_early_init_r(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	u32 svr_dev_id;
	/*
	 * erratum A009635 is valid only for LS2080A SoC and
	 * its personalities
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		erratum_a009635();
#endif
#if defined(CONFIG_SYS_FSL_ERRATUM_A009942) && defined(CONFIG_SYS_FSL_DDR)
	erratum_a009942_check_cpo();
#endif
	if (check_psci()) {
		debug("PSCI: PSCI does not exist.\n");

		/* if PSCI does not exist, boot secondary cores here */
		if (fsl_layerscape_wake_seconday_cores())
			printf("Did not wake secondary cores\n");
	}

#ifdef CONFIG_SYS_FSL_HAS_RGMII
	fsl_rgmii_init();
#endif

	config_core_prefetch();

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_FMAN_ENET
	fman_enet_init();
#endif
#ifdef CONFIG_SYS_DPAA_QBMAN
	setup_qbman_portals();
#endif
	return 0;
}

int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A)
	u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
	u32 svr_dev_id;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with accurate clock frequency */
	if (current_el() == 3)
		asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/* Enable timebase for all clusters.
	 * It is safe to do so even if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A)
	/*
	 * In certain Layerscape SoCs, the clock for each core's watchdog
	 * has an enable bit in the PMU Physical Core Time Base Enable
	 * Register (PCTBENR), which allows the watchdog to operate.
	 */
	setbits_le32(pctbenr, 0xff);
	/*
	 * For LS2080A SoC and its personalities, timer controller
	 * offset is different
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR;

#endif

	/* Enable clock for timer
	 * This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}

__efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;

void __efi_runtime reset_cpu(ulong addr)
{
	u32 val;

	/* Raise RESET_REQ_B */
	val = scfg_in32(rstcr);
	val |= 0x02;
	scfg_out32(rstcr, val);
}

#ifdef CONFIG_EFI_LOADER

void __efi_runtime EFIAPI efi_reset_system(
		       enum efi_reset_type reset_type,
		       efi_status_t reset_status,
		       unsigned long data_size, void *reset_data)
{
	switch (reset_type) {
	case EFI_RESET_COLD:
	case EFI_RESET_WARM:
	case EFI_RESET_PLATFORM_SPECIFIC:
		reset_cpu(0);
		break;
	case EFI_RESET_SHUTDOWN:
		/* Nothing we can do */
		break;
	}

	while (1) { }
}

efi_status_t efi_reset_system_init(void)
{
	return efi_add_runtime_mmio(&rstcr, sizeof(*rstcr));
}

#endif

/*
 * Calculate reserved memory with the given memory bank.
 * Return aligned memory size on success.
 * Return (ram_size + needed size) for failure.
 */
phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
	phys_size_t ram_top = ram_size;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	ram_top = mc_get_dram_block_size();
	if (ram_top > ram_size)
		return ram_size + ram_top;

	ram_top = ram_size - ram_top;
	/* The start address of MC reserved memory needs to be aligned. */
	ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

	return ram_size - ram_top;
}

phys_size_t get_effective_memsize(void)
{
	phys_size_t ea_size, rem = 0;

	/*
	 * For ARMv8 SoCs, DDR memory is split into two or three regions. The
	 * first region is 2GB space at 0x8000_0000. Secure memory needs to be
	 * allocated from the first region. If the memory extends to the second
	 * region (or the third region if applicable), Management Complex (MC)
	 * memory should be put into the highest region, i.e. the end of DDR
	 * memory.
	 * CONFIG_MAX_MEM_MAPPED is set to the size of the first region so
	 * U-Boot doesn't relocate itself into higher address. Should DDR be
	 * configured to skip the first region, this function needs to be
	 * adjusted.
	 */
	if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) {
		ea_size = CONFIG_MAX_MEM_MAPPED;
		rem = gd->ram_size - ea_size;
	} else {
		ea_size = gd->ram_size;
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	/* Check if we have enough space for secure memory */
	if (ea_size > CONFIG_SYS_MEM_RESERVE_SECURE)
		ea_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	else
		printf("Error: Not enough space for secure memory.\n");
#endif
	/* Check if we have enough memory for MC */
	if (rem < board_reserve_ram_top(rem)) {
		/* Not enough memory in high region to reserve */
		if (ea_size > board_reserve_ram_top(ea_size))
			ea_size -= board_reserve_ram_top(ea_size);
		else
			printf("Error: Not enough space for reserved memory.\n");
	}

	return ea_size;
}

int dram_init_banksize(void)
{
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
	phys_size_t dp_ddr_size;
#endif

	/*
	 * gd->ram_size has the total size of DDR memory, less reserved secure
	 * memory. The DDR extends from low region to high region(s) presuming
	 * no hole is created with DDR configuration. gd->arch.secure_ram tracks
	 * the location of secure memory. gd->arch.resv_ram tracks the location
	 * of reserved memory for Management Complex (MC). Because gd->ram_size
	 * is reduced by this function if secure memory is reserved, checking
	 * gd->arch.secure_ram should be done to avoid running it repeatedly.
	 */

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		debug("No need to run again, skip %s\n", __func__);

		return 0;
	}
#endif

	gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE;
	if (gd->ram_size > CONFIG_SYS_DDR_BLOCK1_SIZE) {
		gd->bd->bi_dram[0].size = CONFIG_SYS_DDR_BLOCK1_SIZE;
		gd->bd->bi_dram[1].start = CONFIG_SYS_DDR_BLOCK2_BASE;
		gd->bd->bi_dram[1].size = gd->ram_size -
					  CONFIG_SYS_DDR_BLOCK1_SIZE;
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
		if (gd->bd->bi_dram[1].size > CONFIG_SYS_DDR_BLOCK2_SIZE) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DDR_BLOCK3_BASE;
			gd->bd->bi_dram[2].size = gd->bd->bi_dram[1].size -
						  CONFIG_SYS_DDR_BLOCK2_SIZE;
			gd->bd->bi_dram[1].size = CONFIG_SYS_DDR_BLOCK2_SIZE;
		}
#endif
	} else {
		gd->bd->bi_dram[0].size = gd->ram_size;
	}
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->bd->bi_dram[0].size >
				CONFIG_SYS_MEM_RESERVE_SECURE) {
		gd->bd->bi_dram[0].size -=
				CONFIG_SYS_MEM_RESERVE_SECURE;
		gd->arch.secure_ram = gd->bd->bi_dram[0].start +
				      gd->bd->bi_dram[0].size;
		gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
		gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	}
#endif	/* CONFIG_SYS_MEM_RESERVE_SECURE */

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
			    gd->bd->bi_dram[2].size -
			    board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
				gd->bd->bi_dram[1].size -
				board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
				gd->bd->bi_dram[0].size -
				board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_FSL_MC_ENET */

#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
	if (soc_has_dp_ddr()) {
		/* initialize DP-DDR here */
		puts("DP-DDR: ");
		/*
		 * The DDR controller uses 0 as the base address for binding.
		 * It is mapped to CONFIG_SYS_DP_DDR_BASE for the core to access.
		 */
		dp_ddr_size = fsl_other_ddr_sdram(CONFIG_SYS_DP_DDR_BASE_PHY,
					  CONFIG_DP_DDR_CTRL,
					  CONFIG_DP_DDR_NUM_CTRLS,
					  CONFIG_DP_DDR_DIMM_SLOTS_PER_CTLR,
					  NULL, NULL, NULL);
		if (dp_ddr_size) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DP_DDR_BASE;
			gd->bd->bi_dram[2].size = dp_ddr_size;
		} else {
			puts("Not detected");
		}
	}
#endif

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	debug("%s is called. gd->ram_size is reduced to %lu\n",
	      __func__, (ulong)gd->ram_size);
#endif

	return 0;
}

#if CONFIG_IS_ENABLED(EFI_LOADER)
void efi_add_known_memory(void)
{
	int i;
	phys_addr_t ram_start, start;
	phys_size_t ram_size;
	u64 pages;

	/* Add RAM */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
		if (i == 2)
			continue;	/* skip DP-DDR */
#endif
		ram_start = gd->bd->bi_dram[i].start;
		ram_size = gd->bd->bi_dram[i].size;
#ifdef CONFIG_RESV_RAM
		if (gd->arch.resv_ram >= ram_start &&
		    gd->arch.resv_ram < ram_start + ram_size)
			ram_size = gd->arch.resv_ram - ram_start;
#endif
		start = (ram_start + EFI_PAGE_MASK) & ~EFI_PAGE_MASK;
		pages = (ram_size + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT;

		efi_add_memory_map(start, pages, EFI_CONVENTIONAL_MEMORY,
				   false);
	}
}
#endif

/*
 * Before DDR size is known, the early MMU tables have DDR mapped as device
 * memory to avoid speculative access. To relocate U-Boot to DDR, "normal
 * memory" needs to be set for these mappings.
 * If a special case configures DDR with holes in the mapping, the holes need
 * to be marked as invalid. This is not implemented in this function.
 */
void update_early_mmu_table(void)
{
	if (!gd->arch.tlb_addr)
		return;

	if (gd->ram_size <= CONFIG_SYS_FSL_DRAM_SIZE1) {
		mmu_change_region_attr(
					CONFIG_SYS_SDRAM_BASE,
					gd->ram_size,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
	} else {
		mmu_change_region_attr(
					CONFIG_SYS_SDRAM_BASE,
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#ifndef CONFIG_SYS_DDR_BLOCK2_SIZE
#error "Missing CONFIG_SYS_DDR_BLOCK2_SIZE"
#endif
		if (gd->ram_size - CONFIG_SYS_DDR_BLOCK1_SIZE >
		    CONFIG_SYS_DDR_BLOCK2_SIZE) {
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK3_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE -
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
		} else
#endif
		{
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
		}
	}
}

__weak int dram_init(void)
{
	fsl_initdram();
#if !defined(CONFIG_SPL) || defined(CONFIG_SPL_BUILD)
	/* This will break-before-make MMU for DDR */
	update_early_mmu_table();
#endif

	return 0;
}