// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2017 NXP
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 */

#include <common.h>
#include <fsl_ddr_sdram.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <fm_eth.h>
#include <asm/armv8/mmu.h>
#include <asm/io.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#include <fsl_immap.h>
#include <asm/arch/mp.h>
#include <efi_loader.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#include <asm/armv8/sec_firmware.h>
#ifdef CONFIG_SYS_FSL_DDR
#include <fsl_ddr.h>
#endif
#include <asm/arch/clock.h>
#include <hwconfig.h>
#include <fsl_qbman.h>

#ifdef CONFIG_TFABOOT
#include <environment.h>
#ifdef CONFIG_CHAIN_OF_TRUST
#include <fsl_validate.h>
#endif
#endif

DECLARE_GLOBAL_DATA_PTR;

static struct cpu_type cpu_type_list[] = {
    CPU_TYPE_ENTRY(LS2080A, LS2080A, 8),
    CPU_TYPE_ENTRY(LS2085A, LS2085A, 8),
    CPU_TYPE_ENTRY(LS2045A, LS2045A, 4),
    CPU_TYPE_ENTRY(LS2088A, LS2088A, 8),
    CPU_TYPE_ENTRY(LS2084A, LS2084A, 8),
    CPU_TYPE_ENTRY(LS2048A, LS2048A, 4),
    CPU_TYPE_ENTRY(LS2044A, LS2044A, 4),
    CPU_TYPE_ENTRY(LS2081A, LS2081A, 8),
    CPU_TYPE_ENTRY(LS2041A, LS2041A, 4),
    CPU_TYPE_ENTRY(LS1043A, LS1043A, 4),
    CPU_TYPE_ENTRY(LS1043A, LS1043A_P23, 4),
    CPU_TYPE_ENTRY(LS1023A, LS1023A, 2),
    CPU_TYPE_ENTRY(LS1023A, LS1023A_P23, 2),
    CPU_TYPE_ENTRY(LS1046A, LS1046A, 4),
    CPU_TYPE_ENTRY(LS1026A, LS1026A, 2),
    CPU_TYPE_ENTRY(LS2040A, LS2040A, 4),
    CPU_TYPE_ENTRY(LS1012A, LS1012A, 1),
    CPU_TYPE_ENTRY(LS1088A, LS1088A, 8),
    CPU_TYPE_ENTRY(LS1084A, LS1084A, 8),
    CPU_TYPE_ENTRY(LS1048A, LS1048A, 4),
    CPU_TYPE_ENTRY(LS1044A, LS1044A, 4),
    CPU_TYPE_ENTRY(LX2160A, LX2160A, 16),
    CPU_TYPE_ENTRY(LX2120A, LX2120A, 12),
    CPU_TYPE_ENTRY(LX2080A, LX2080A, 8),
};

#define EARLY_PGTABLE_SIZE 0x5000
static struct mm_region early_map[] = {
#ifdef CONFIG_FSL_LSCH3
    { CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
      CONFIG_SYS_FSL_CCSR_SIZE,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
      PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
    },
    { CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
      SYS_FSL_OCRAM_SPACE_SIZE,
      PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
    },
    { CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
      CONFIG_SYS_FSL_QSPI_SIZE1,
      PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
    },
#ifdef CONFIG_FSL_IFC
    /* For IFC Region #1, only the first 4MB is cache-enabled */
    { CONFIG_SYS_FSL_IFC_BASE1, CONFIG_SYS_FSL_IFC_BASE1,
      CONFIG_SYS_FSL_IFC_SIZE1_1,
      PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
    },
    { CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
      CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
      CONFIG_SYS_FSL_IFC_SIZE1 - CONFIG_SYS_FSL_IFC_SIZE1_1,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
    },
    { CONFIG_SYS_FLASH_BASE, CONFIG_SYS_FSL_IFC_BASE1,
      CONFIG_SYS_FSL_IFC_SIZE1,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
    },
#endif
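    /*
     * Note: each entry below is an identity mapping of
     * { virt, phys, size, attrs }; the empty {} entry at the end of the
     * array terminates the list. Until DDR is initialized, the DRAM
     * windows may be mapped as nGnRnE device memory with PXN/UXN so that
     * no speculative instruction fetch or data access can reach
     * uninitialized DDR.
     */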
    { CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
      CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
    (defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
      PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else    /* Start with nGnRnE and PXN and UXN to prevent speculative access */
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
      PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
    },
#ifdef CONFIG_FSL_IFC
    /* Map IFC region #2 up to CONFIG_SYS_FLASH_BASE for NAND boot */
    { CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
      CONFIG_SYS_FLASH_BASE - CONFIG_SYS_FSL_IFC_BASE2,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
    },
#endif
    { CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
      CONFIG_SYS_FSL_DCSR_SIZE,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
      PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
    },
    { CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
      CONFIG_SYS_FSL_DRAM_SIZE2,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
      PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
    },
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
    { CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
      CONFIG_SYS_FSL_DRAM_SIZE3,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
      PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
    },
#endif
#elif defined(CONFIG_FSL_LSCH2)
    { CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
      CONFIG_SYS_FSL_CCSR_SIZE,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
      PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
    },
    { CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
      SYS_FSL_OCRAM_SPACE_SIZE,
      PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
    },
    { CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
      CONFIG_SYS_FSL_DCSR_SIZE,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
      PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
    },
    { CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
      CONFIG_SYS_FSL_QSPI_SIZE,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
    },
#ifdef CONFIG_FSL_IFC
    { CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
      CONFIG_SYS_FSL_IFC_SIZE,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
    },
#endif
    { CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
      CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
    (defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
      PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else    /* Start with nGnRnE and PXN and UXN to prevent speculative access */
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
      PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
    },
    { CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
      CONFIG_SYS_FSL_DRAM_SIZE2,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
      PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
    },
#endif
    {},    /* list terminator */
};

static struct mm_region final_map[] = {
#ifdef CONFIG_FSL_LSCH3
    { CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
      CONFIG_SYS_FSL_CCSR_SIZE,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
      PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
    },
    { CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
      SYS_FSL_OCRAM_SPACE_SIZE,
      PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
    },
    { CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
      CONFIG_SYS_FSL_DRAM_SIZE1,
      PTE_BLOCK_MEMTYPE(MT_NORMAL) |
      PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
    },
    { CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
      CONFIG_SYS_FSL_QSPI_SIZE1,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
      PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
    },
    { CONFIG_SYS_FSL_QSPI_BASE2, CONFIG_SYS_FSL_QSPI_BASE2,
      CONFIG_SYS_FSL_QSPI_SIZE2,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
      PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
    },
#ifdef CONFIG_FSL_IFC
    { CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
      CONFIG_SYS_FSL_IFC_SIZE2,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
      PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
    },
#endif
    { CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
      CONFIG_SYS_FSL_DCSR_SIZE,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
      PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
    },
    { CONFIG_SYS_FSL_MC_BASE, CONFIG_SYS_FSL_MC_BASE,
      CONFIG_SYS_FSL_MC_SIZE,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
      PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
    },
    { CONFIG_SYS_FSL_NI_BASE, CONFIG_SYS_FSL_NI_BASE,
      CONFIG_SYS_FSL_NI_SIZE,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
      PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
    },
    /* For QBMAN portal, only the first 64MB is cache-enabled */
    { CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
      CONFIG_SYS_FSL_QBMAN_SIZE_1,
      PTE_BLOCK_MEMTYPE(MT_NORMAL) |
      PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN | PTE_BLOCK_NS
    },
    { CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
      CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
      CONFIG_SYS_FSL_QBMAN_SIZE - CONFIG_SYS_FSL_QBMAN_SIZE_1,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
      PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
    },
    { CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
      CONFIG_SYS_PCIE1_PHYS_SIZE,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
      PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
    },
    { CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
      CONFIG_SYS_PCIE2_PHYS_SIZE,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
      PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
    },
    { CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
      CONFIG_SYS_PCIE3_PHYS_SIZE,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
      PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
    },
#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LX2160A)
    { CONFIG_SYS_PCIE4_PHYS_ADDR, CONFIG_SYS_PCIE4_PHYS_ADDR,
      CONFIG_SYS_PCIE4_PHYS_SIZE,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
      PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
    },
#endif
    { CONFIG_SYS_FSL_WRIOP1_BASE, CONFIG_SYS_FSL_WRIOP1_BASE,
      CONFIG_SYS_FSL_WRIOP1_SIZE,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
      PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
    },
    { CONFIG_SYS_FSL_AIOP1_BASE, CONFIG_SYS_FSL_AIOP1_BASE,
      CONFIG_SYS_FSL_AIOP1_SIZE,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
      PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
    },
    { CONFIG_SYS_FSL_PEBUF_BASE, CONFIG_SYS_FSL_PEBUF_BASE,
      CONFIG_SYS_FSL_PEBUF_SIZE,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
      PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
    },
    { CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
      CONFIG_SYS_FSL_DRAM_SIZE2,
      PTE_BLOCK_MEMTYPE(MT_NORMAL) |
      PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
    },
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
    { CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
      CONFIG_SYS_FSL_DRAM_SIZE3,
      PTE_BLOCK_MEMTYPE(MT_NORMAL) |
      PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
    },
#endif
#elif defined(CONFIG_FSL_LSCH2)
    { CONFIG_SYS_FSL_BOOTROM_BASE, CONFIG_SYS_FSL_BOOTROM_BASE,
      CONFIG_SYS_FSL_BOOTROM_SIZE,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
      PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
    },
    { CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
      CONFIG_SYS_FSL_CCSR_SIZE,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
      PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
    },
    { CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
      SYS_FSL_OCRAM_SPACE_SIZE,
      PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
    },
    { CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
      CONFIG_SYS_FSL_DCSR_SIZE,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
      PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
    },
    { CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
      CONFIG_SYS_FSL_QSPI_SIZE,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
      PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
    },
#ifdef CONFIG_FSL_IFC
    { CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
      CONFIG_SYS_FSL_IFC_SIZE,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
    },
#endif
    { CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
      CONFIG_SYS_FSL_DRAM_SIZE1,
      PTE_BLOCK_MEMTYPE(MT_NORMAL) |
      PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
    },
    { CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
      CONFIG_SYS_FSL_QBMAN_SIZE,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
      PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
    },
    { CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
      CONFIG_SYS_FSL_DRAM_SIZE2,
      PTE_BLOCK_MEMTYPE(MT_NORMAL) |
      PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
    },
    { CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
      CONFIG_SYS_PCIE1_PHYS_SIZE,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
      PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
    },
    { CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
      CONFIG_SYS_PCIE2_PHYS_SIZE,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
      PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
    },
    { CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
      CONFIG_SYS_PCIE3_PHYS_SIZE,
      PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
      PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
    },
    { CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
      CONFIG_SYS_FSL_DRAM_SIZE3,
      PTE_BLOCK_MEMTYPE(MT_NORMAL) |
      PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
    },
#endif
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
    {},    /* space holder for secure mem */
#endif
    {},
};

struct mm_region *mem_map = early_map;

void cpu_name(char *name)
{
    struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
    unsigned int i, svr, ver;

    svr = gur_in32(&gur->svr);
    ver = SVR_SOC_VER(svr);

    for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
        if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
            strcpy(name, cpu_type_list[i].name);
#ifdef CONFIG_ARCH_LX2160A
            if (IS_C_PROCESSOR(svr))
                strcat(name, "C");
#endif
            if (IS_E_PROCESSOR(svr))
                strcat(name, "E");

            sprintf(name + strlen(name), " Rev%d.%d",
                    SVR_MAJ(svr), SVR_MIN(svr));
            break;
        }

    if (i == ARRAY_SIZE(cpu_type_list))
        strcpy(name, "unknown");
}

#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * To start the MMU before DDR is available, we create an MMU table in
 * SRAM. The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use
 * three levels of translation tables here to cover the 40-bit address
 * space, with a 4KB granule size and T0SZ=24 for 40 bits of physical
 * address. Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for
 * other purposes.
 * Note, the debug print in cache_v8.c is not usable for debugging
 * these early MMU tables because UART is not yet available.
 */
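/*
 * Back-of-the-envelope numbers for the comment above (a sketch, not a
 * normative description): T0SZ = 24 gives a 64 - 24 = 40-bit input
 * address, i.e. a 2^40 = 1TB space. With a 4KB granule, a level-1 block
 * entry maps 1GB and a level-2 block entry maps 2MB, and
 * EARLY_PGTABLE_SIZE (0x5000) leaves room for five 4KB table pages in
 * OCRAM.
 */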
static inline void early_mmu_setup(void)
{
    unsigned int el = current_el();

    /* global data is already setup, no allocation yet */
    if (el == 3)
        gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
    else
        gd->arch.tlb_addr = CONFIG_SYS_DDR_SDRAM_BASE;
    gd->arch.tlb_fillptr = gd->arch.tlb_addr;
    gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

    /* Create early page tables */
    setup_pgtables();

    /* point TTBR to the new table */
    set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
                      get_tcr(el, NULL, NULL) &
                      ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
                      MEMORY_ATTRIBUTES);

    set_sctlr(get_sctlr() | CR_M);
}

static void fix_pcie_mmu_map(void)
{
#ifdef CONFIG_ARCH_LS2080A
    unsigned int i;
    u32 svr, ver;
    struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

    svr = gur_in32(&gur->svr);
    ver = SVR_SOC_VER(svr);

    /* Fix PCIE base and size for LS2088A */
    if ((ver == SVR_LS2088A) || (ver == SVR_LS2084A) ||
        (ver == SVR_LS2048A) || (ver == SVR_LS2044A) ||
        (ver == SVR_LS2081A) || (ver == SVR_LS2041A)) {
        for (i = 0; i < ARRAY_SIZE(final_map); i++) {
            switch (final_map[i].phys) {
            case CONFIG_SYS_PCIE1_PHYS_ADDR:
                final_map[i].phys = 0x2000000000ULL;
                final_map[i].virt = 0x2000000000ULL;
                final_map[i].size = 0x800000000ULL;
                break;
            case CONFIG_SYS_PCIE2_PHYS_ADDR:
                final_map[i].phys = 0x2800000000ULL;
                final_map[i].virt = 0x2800000000ULL;
                final_map[i].size = 0x800000000ULL;
                break;
            case CONFIG_SYS_PCIE3_PHYS_ADDR:
                final_map[i].phys = 0x3000000000ULL;
                final_map[i].virt = 0x3000000000ULL;
                final_map[i].size = 0x800000000ULL;
                break;
            case CONFIG_SYS_PCIE4_PHYS_ADDR:
                final_map[i].phys = 0x3800000000ULL;
                final_map[i].virt = 0x3800000000ULL;
                final_map[i].size = 0x800000000ULL;
                break;
            default:
                break;
            }
        }
    }
#endif
}
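/*
 * A note on the remapping above (inferred from this code, not from a
 * reference manual): the LS2088A-family personalities are built with
 * CONFIG_ARCH_LS2080A but use a larger PCIe address map, one
 * 0x8_0000_0000-byte (32GB) window per controller starting at
 * 0x20_0000_0000, so the PCIe entries in final_map are patched at
 * runtime based on the SVR rather than selected at compile time.
 */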
/*
 * The final tables look similar to early tables, but different in detail.
 * These tables are in DRAM. Sub tables are added to enable cache for
 * QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM will not be used for this purpose so gd->arch.secure_ram can't be 0.
 */
static inline void final_mmu_setup(void)
{
    u64 tlb_addr_save = gd->arch.tlb_addr;
    unsigned int el = current_el();
    int index;

    /* fix the final_map before filling in the block entries */
    fix_pcie_mmu_map();

    mem_map = final_map;

    /* Update mapping for DDR to actual size */
    for (index = 0; index < ARRAY_SIZE(final_map) - 2; index++) {
        /*
         * Find the entry for the DDR mapping and update the address
         * and size. A zero-sized mapping will be skipped when the MMU
         * table is created.
         */
        switch (final_map[index].virt) {
        case CONFIG_SYS_FSL_DRAM_BASE1:
            final_map[index].virt = gd->bd->bi_dram[0].start;
            final_map[index].phys = gd->bd->bi_dram[0].start;
            final_map[index].size = gd->bd->bi_dram[0].size;
            break;
#ifdef CONFIG_SYS_FSL_DRAM_BASE2
        case CONFIG_SYS_FSL_DRAM_BASE2:
#if (CONFIG_NR_DRAM_BANKS >= 2)
            final_map[index].virt = gd->bd->bi_dram[1].start;
            final_map[index].phys = gd->bd->bi_dram[1].start;
            final_map[index].size = gd->bd->bi_dram[1].size;
#else
            final_map[index].size = 0;
#endif
            break;
#endif
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
        case CONFIG_SYS_FSL_DRAM_BASE3:
#if (CONFIG_NR_DRAM_BANKS >= 3)
            final_map[index].virt = gd->bd->bi_dram[2].start;
            final_map[index].phys = gd->bd->bi_dram[2].start;
            final_map[index].size = gd->bd->bi_dram[2].size;
#else
            final_map[index].size = 0;
#endif
            break;
#endif
        default:
            break;
        }
    }

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
    if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
        if (el == 3) {
            /*
             * Only use gd->arch.secure_ram if the address is
             * recalculated. Align to 4KB for the MMU table.
             */
            /* put page tables in secure ram */
            index = ARRAY_SIZE(final_map) - 2;
            gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
            final_map[index].virt = gd->arch.secure_ram & ~0x3;
            final_map[index].phys = final_map[index].virt;
            final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
            final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
            gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
            tlb_addr_save = gd->arch.tlb_addr;
        } else {
            /* Use allocated (board_f.c) memory for TLB */
            tlb_addr_save = gd->arch.tlb_allocated;
            gd->arch.tlb_addr = tlb_addr_save;
        }
    }
#endif

    /* Reset the fill ptr */
    gd->arch.tlb_fillptr = tlb_addr_save;

    /* Create normal system page tables */
    setup_pgtables();

    /* Create emergency page tables */
    gd->arch.tlb_addr = gd->arch.tlb_fillptr;
    gd->arch.tlb_emerg = gd->arch.tlb_addr;
    setup_pgtables();
    gd->arch.tlb_addr = tlb_addr_save;

    /* Disable cache and MMU */
    dcache_disable();    /* TLBs are invalidated */
    invalidate_icache_all();

    /* point TTBR to the new table */
    set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
                      MEMORY_ATTRIBUTES);

    set_sctlr(get_sctlr() | CR_M);
}

u64 get_page_table_size(void)
{
    return 0x10000;
}

int arch_cpu_init(void)
{
    /*
     * This function is called before U-Boot relocates itself, to speed
     * up the boot. It is not necessary to run if performance is not
     * critical. Skip it if the MMU is already enabled by SPL or other
     * means.
     */
    if (get_sctlr() & CR_M)
        return 0;

    icache_enable();
    __asm_invalidate_dcache_all();
    __asm_invalidate_tlb_all();
    early_mmu_setup();
    set_sctlr(get_sctlr() | CR_C);
    return 0;
}

void mmu_setup(void)
{
    final_mmu_setup();
}

/*
 * This function is called from common/board_r.c.
 * It recreates the MMU table in main memory.
 */
void enable_caches(void)
{
    mmu_setup();
    __asm_invalidate_tlb_all();
    icache_enable();
    dcache_enable();
}
#endif /* CONFIG_SYS_DCACHE_OFF */
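/*
 * With TF-A boot (CONFIG_TFABOOT) one U-Boot image may be started from
 * several boot media, so the boot source is not fixed at build time.
 * The helpers below decode the RCW_SRC field of the PORSR1 register to
 * recover it at runtime; env_get_location() then uses the result to
 * decide where the environment is stored.
 */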
#ifdef CONFIG_TFABOOT
enum boot_src __get_boot_src(u32 porsr1)
{
    enum boot_src src = BOOT_SOURCE_RESERVED;
    u32 rcw_src = (porsr1 & RCW_SRC_MASK) >> RCW_SRC_BIT;
#if !defined(CONFIG_NXP_LSCH3_2)
    u32 val;
#endif
    debug("%s: rcw_src 0x%x\n", __func__, rcw_src);

#if defined(CONFIG_FSL_LSCH3)
#if defined(CONFIG_NXP_LSCH3_2)
    switch (rcw_src) {
    case RCW_SRC_SDHC1_VAL:
        src = BOOT_SOURCE_SD_MMC;
        break;
    case RCW_SRC_SDHC2_VAL:
        src = BOOT_SOURCE_SD_MMC2;
        break;
    case RCW_SRC_I2C1_VAL:
        src = BOOT_SOURCE_I2C1_EXTENDED;
        break;
    case RCW_SRC_FLEXSPI_NAND2K_VAL:
        src = BOOT_SOURCE_XSPI_NAND;
        break;
    case RCW_SRC_FLEXSPI_NAND4K_VAL:
        src = BOOT_SOURCE_XSPI_NAND;
        break;
    case RCW_SRC_RESERVED_1_VAL:
        src = BOOT_SOURCE_RESERVED;
        break;
    case RCW_SRC_FLEXSPI_NOR_24B:
        src = BOOT_SOURCE_XSPI_NOR;
        break;
    default:
        src = BOOT_SOURCE_RESERVED;
    }
#else
    val = rcw_src & RCW_SRC_TYPE_MASK;
    if (val == RCW_SRC_NOR_VAL) {
        val = rcw_src & NOR_TYPE_MASK;

        switch (val) {
        case NOR_16B_VAL:
        case NOR_32B_VAL:
            src = BOOT_SOURCE_IFC_NOR;
            break;
        default:
            src = BOOT_SOURCE_RESERVED;
        }
    } else {
        /* RCW SRC Serial Flash */
        val = rcw_src & RCW_SRC_SERIAL_MASK;
        switch (val) {
        case RCW_SRC_QSPI_VAL:
            /* RCW SRC Serial NOR (QSPI) */
            src = BOOT_SOURCE_QSPI_NOR;
            break;
        case RCW_SRC_SD_CARD_VAL:
            /* RCW SRC SD Card */
            src = BOOT_SOURCE_SD_MMC;
            break;
        case RCW_SRC_EMMC_VAL:
            /* RCW SRC EMMC */
            src = BOOT_SOURCE_SD_MMC;
            break;
        case RCW_SRC_I2C1_VAL:
            /* RCW SRC I2C1 Extended */
            src = BOOT_SOURCE_I2C1_EXTENDED;
            break;
        default:
            src = BOOT_SOURCE_RESERVED;
        }
    }
#endif
#elif defined(CONFIG_FSL_LSCH2)
    /* RCW SRC NAND */
    val = rcw_src & RCW_SRC_NAND_MASK;
    if (val == RCW_SRC_NAND_VAL) {
        val = rcw_src & NAND_RESERVED_MASK;
        if (val != NAND_RESERVED_1 && val != NAND_RESERVED_2)
            src = BOOT_SOURCE_IFC_NAND;
    } else {
        /* RCW SRC NOR */
        val = rcw_src & RCW_SRC_NOR_MASK;
        if (val == NOR_8B_VAL || val == NOR_16B_VAL) {
            src = BOOT_SOURCE_IFC_NOR;
        } else {
            switch (rcw_src) {
            case QSPI_VAL1:
            case QSPI_VAL2:
                src = BOOT_SOURCE_QSPI_NOR;
                break;
            case SD_VAL:
                src = BOOT_SOURCE_SD_MMC;
                break;
            default:
                src = BOOT_SOURCE_RESERVED;
            }
        }
    }
#endif

    if (CONFIG_IS_ENABLED(SYS_FSL_ERRATUM_A010539) && !rcw_src)
        src = BOOT_SOURCE_QSPI_NOR;

    debug("%s: src 0x%x\n", __func__, src);
    return src;
}

enum boot_src get_boot_src(void)
{
    struct pt_regs regs;
    u32 porsr1 = 0;

#if defined(CONFIG_FSL_LSCH3)
    u32 __iomem *dcfg_ccsr = (u32 __iomem *)DCFG_BASE;
#elif defined(CONFIG_FSL_LSCH2)
    struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
#endif

    if (current_el() == 2) {
        regs.regs[0] = SIP_SVC_RCW;

        smc_call(&regs);
        if (!regs.regs[0])
            porsr1 = regs.regs[1];
    }

    if (current_el() == 3 || !porsr1) {
#ifdef CONFIG_FSL_LSCH3
        porsr1 = in_le32(dcfg_ccsr + DCFG_PORSR1 / 4);
#elif defined(CONFIG_FSL_LSCH2)
        porsr1 = in_be32(&gur->porsr1);
#endif
    }

    debug("%s: porsr1 0x%x\n", __func__, porsr1);

    return __get_boot_src(porsr1);
}
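/*
 * Note on get_boot_src() above: at EL2 (running under TF-A) PORSR1 is
 * obtained through the SIP_SVC_RCW SMC; at EL3, or if the SMC returns
 * no data, it is read directly from the DCFG/GUTS register block.
 */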
#ifdef CONFIG_ENV_IS_IN_MMC
int mmc_get_env_dev(void)
{
    enum boot_src src = get_boot_src();
    int dev = CONFIG_SYS_MMC_ENV_DEV;

    switch (src) {
    case BOOT_SOURCE_SD_MMC:
        dev = 0;
        break;
    case BOOT_SOURCE_SD_MMC2:
        dev = 1;
        break;
    default:
        break;
    }

    return dev;
}
#endif

enum env_location env_get_location(enum env_operation op, int prio)
{
    enum boot_src src = get_boot_src();
    enum env_location env_loc = ENVL_NOWHERE;

    if (prio)
        return ENVL_UNKNOWN;

#ifdef CONFIG_CHAIN_OF_TRUST
    /*
     * Check the boot mode.
     * If the boot mode is secure, return ENVL_NOWHERE.
     */
    if (fsl_check_boot_mode_secure() == 1)
        goto done;
#endif

    switch (src) {
    case BOOT_SOURCE_IFC_NOR:
        env_loc = ENVL_FLASH;
        break;
    case BOOT_SOURCE_QSPI_NOR:
        /* FALLTHROUGH */
    case BOOT_SOURCE_XSPI_NOR:
        env_loc = ENVL_SPI_FLASH;
        break;
    case BOOT_SOURCE_IFC_NAND:
        /* FALLTHROUGH */
    case BOOT_SOURCE_QSPI_NAND:
        /* FALLTHROUGH */
    case BOOT_SOURCE_XSPI_NAND:
        env_loc = ENVL_NAND;
        break;
    case BOOT_SOURCE_SD_MMC:
        /* FALLTHROUGH */
    case BOOT_SOURCE_SD_MMC2:
        env_loc = ENVL_MMC;
        break;
    case BOOT_SOURCE_I2C1_EXTENDED:
        /* FALLTHROUGH */
    default:
        break;
    }

#ifdef CONFIG_CHAIN_OF_TRUST
done:
#endif
    return env_loc;
}
#endif /* CONFIG_TFABOOT */

u32 initiator_type(u32 cluster, int init_id)
{
    struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
    u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
    u32 type = 0;

    type = gur_in32(&gur->tp_ityp[idx]);
    if (type & TP_ITYP_AV)
        return type;

    return 0;
}

u32 cpu_pos_mask(void)
{
    struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
    int i = 0;
    u32 cluster, type, mask = 0;

    do {
        int j;

        cluster = gur_in32(&gur->tp_cluster[i].lower);
        for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
            type = initiator_type(cluster, j);
            if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
                mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
        }
        i++;
    } while ((cluster & TP_CLUSTER_EOC) == 0x0);

    return mask;
}

u32 cpu_mask(void)
{
    struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
    int i = 0, count = 0;
    u32 cluster, type, mask = 0;

    do {
        int j;

        cluster = gur_in32(&gur->tp_cluster[i].lower);
        for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
            type = initiator_type(cluster, j);
            if (type) {
                if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
                    mask |= 1 << count;
                count++;
            }
        }
        i++;
    } while ((cluster & TP_CLUSTER_EOC) == 0x0);

    return mask;
}
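/*
 * A note on the topology walk above (inferred from the code): each
 * tp_cluster[i].lower word packs one initiator index per byte, and
 * initiator_type() looks that index up in gur->tp_ityp[], returning 0
 * for entries without the "available" bit set. cpu_pos_mask() reports
 * physical core positions (holes preserved), while cpu_mask() numbers
 * the available ARM cores sequentially. TP_CLUSTER_EOC in the cluster
 * word marks the last cluster.
 */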
/*
 * Return the number of cores on this SOC.
 */
int cpu_numcores(void)
{
    return hweight32(cpu_mask());
}

int fsl_qoriq_core_to_cluster(unsigned int core)
{
    struct ccsr_gur __iomem *gur =
        (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
    int i = 0, count = 0;
    u32 cluster;

    do {
        int j;

        cluster = gur_in32(&gur->tp_cluster[i].lower);
        for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
            if (initiator_type(cluster, j)) {
                if (count == core)
                    return i;
                count++;
            }
        }
        i++;
    } while ((cluster & TP_CLUSTER_EOC) == 0x0);

    return -1;    /* cannot identify the cluster */
}

u32 fsl_qoriq_core_to_type(unsigned int core)
{
    struct ccsr_gur __iomem *gur =
        (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
    int i = 0, count = 0;
    u32 cluster, type;

    do {
        int j;

        cluster = gur_in32(&gur->tp_cluster[i].lower);
        for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
            type = initiator_type(cluster, j);
            if (type) {
                if (count == core)
                    return type;
                count++;
            }
        }
        i++;
    } while ((cluster & TP_CLUSTER_EOC) == 0x0);

    return -1;    /* cannot identify the cluster */
}

#ifndef CONFIG_FSL_LSCH3
uint get_svr(void)
{
    struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

    return gur_in32(&gur->svr);
}
#endif

#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
    struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
    struct sys_info sysinfo;
    char buf[32];
    unsigned int i, core;
    u32 type, rcw, svr = gur_in32(&gur->svr);

    puts("SoC: ");

    cpu_name(buf);
    printf(" %s (0x%x)\n", buf, svr);
    memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
    get_sys_info(&sysinfo);
    puts("Clock Configuration:");
    for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
        if (!(i % 3))
            puts("\n ");
        type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
        printf("CPU%d(%s):%-4s MHz ", core,
               type == TY_ITYP_VER_A7 ? "A7 " :
               (type == TY_ITYP_VER_A53 ? "A53" :
               (type == TY_ITYP_VER_A57 ? "A57" :
               (type == TY_ITYP_VER_A72 ? "A72" : " "))),
               strmhz(buf, sysinfo.freq_processor[core]));
    }
    /* Display platform clock as Bus frequency. */
    printf("\n Bus: %-4s MHz ",
           strmhz(buf, sysinfo.freq_systembus / CONFIG_SYS_FSL_PCLK_DIV));
    printf("DDR: %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
    printf(" FMAN: %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
    if (soc_has_dp_ddr()) {
        printf(" DP-DDR: %-4s MT/s",
               strmhz(buf, sysinfo.freq_ddrbus2));
    }
#endif
    puts("\n");
    /*
     * Display the RCW, so that no one gets confused as to what RCW
     * we're actually using for this boot.
     */
    puts("Reset Configuration Word (RCW):");
    for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
        rcw = gur_in32(&gur->rcwsr[i]);
        if ((i % 4) == 0)
            printf("\n %08x:", i * 4);
        printf(" %08x", rcw);
    }
    puts("\n");

    return 0;
}
#endif

#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(bd_t *bis)
{
    return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(bd_t *bis)
{
    int error = 0;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
    error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
    fm_standard_init(bis);
#endif
    return error;
}

static inline int check_psci(void)
{
    unsigned int psci_ver;

    psci_ver = sec_firmware_support_psci_version();
    if (psci_ver == PSCI_INVALID_VER)
        return 1;

    return 0;
}

static void config_core_prefetch(void)
{
    char *buf = NULL;
    char buffer[HWCONFIG_BUFFER_SIZE];
    const char *prefetch_arg = NULL;
    size_t arglen;
    unsigned int mask;
    struct pt_regs regs;

    if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
        buf = buffer;

    prefetch_arg = hwconfig_subarg_f("core_prefetch", "disable",
                                     &arglen, buf);
    if (prefetch_arg) {
        mask = simple_strtoul(prefetch_arg, NULL, 0) & 0xff;
        if (mask & 0x1) {
            printf("Core0 prefetch can't be disabled\n");
            return;
        }

#define SIP_PREFETCH_DISABLE_64 0xC200FF13
        regs.regs[0] = SIP_PREFETCH_DISABLE_64;
        regs.regs[1] = mask;
        smc_call(&regs);

        if (regs.regs[0])
            printf("Prefetch disable config failed for mask ");
        else
            printf("Prefetch disable config passed for mask ");
        printf("0x%x\n", mask);
    }
}
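/*
 * Usage sketch for config_core_prefetch() (hedged; the option name and
 * parsing follow the code above): setting
 *     hwconfig=core_prefetch:disable=0x6
 * in the environment asks the SIP_PREFETCH_DISABLE_64 service to
 * disable hardware prefetch on cores 1 and 2 (bit i selects core i).
 * A mask with bit 0 set is rejected, since core 0's prefetch cannot be
 * disabled.
 */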
int arch_early_init_r(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
    u32 svr_dev_id;
    /*
     * Erratum A009635 is valid only for the LS2080A SoC and
     * its personalities.
     */
    svr_dev_id = get_svr();
    if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
        erratum_a009635();
#endif
#if defined(CONFIG_SYS_FSL_ERRATUM_A009942) && defined(CONFIG_SYS_FSL_DDR)
    erratum_a009942_check_cpo();
#endif
    if (check_psci()) {
        debug("PSCI: PSCI does not exist.\n");

        /* if PSCI does not exist, boot secondary cores here */
        if (fsl_layerscape_wake_seconday_cores())
            printf("Did not wake secondary cores\n");
    }

#ifdef CONFIG_SYS_FSL_HAS_RGMII
    fsl_rgmii_init();
#endif

    config_core_prefetch();

#ifdef CONFIG_SYS_HAS_SERDES
    fsl_serdes_init();
#endif
#ifdef CONFIG_FMAN_ENET
    fman_enet_init();
#endif
#ifdef CONFIG_SYS_DPAA_QBMAN
    setup_qbman_portals();
#endif
    return 0;
}

int timer_init(void)
{
    u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
    u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A)
    u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
    u32 svr_dev_id;
#endif
#ifdef COUNTER_FREQUENCY_REAL
    unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

    /* Update with accurate clock frequency */
    if (current_el() == 3)
        asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
    /*
     * Enable the timebase for all clusters.
     * It is safe to do so even if some clusters are not enabled.
     */
    out_le32(cltbenr, 0xf);
#endif

#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A)
    /*
     * In certain Layerscape SoCs, the clock for each core's time base
     * has an enable bit in the PMU Physical Core Time Base Enable
     * Register (PCTBENR), which allows the watchdog to operate.
     */
    setbits_le32(pctbenr, 0xff);
    /*
     * For the LS2080A SoC and its personalities, the timer controller
     * offset is different.
     */
    svr_dev_id = get_svr();
    if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
        cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR;
#endif

    /*
     * Enable the clock for the timer.
     * This is a global setting.
     */
    out_le32(cntcr, 0x1);

    return 0;
}

__efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;

void __efi_runtime reset_cpu(ulong addr)
{
    u32 val;

#ifdef CONFIG_ARCH_LX2160A
    val = in_le32(rstcr);
    val |= 0x01;
    out_le32(rstcr, val);
#else
    /* Raise RESET_REQ_B */
    val = scfg_in32(rstcr);
    val |= 0x02;
    scfg_out32(rstcr, val);
#endif
}

#ifdef CONFIG_EFI_LOADER

void __efi_runtime EFIAPI efi_reset_system(
    enum efi_reset_type reset_type,
    efi_status_t reset_status,
    unsigned long data_size, void *reset_data)
{
    switch (reset_type) {
    case EFI_RESET_COLD:
    case EFI_RESET_WARM:
    case EFI_RESET_PLATFORM_SPECIFIC:
        reset_cpu(0);
        break;
    case EFI_RESET_SHUTDOWN:
        /* Nothing we can do */
        break;
    }

    while (1) { }
}

efi_status_t efi_reset_system_init(void)
{
    return efi_add_runtime_mmio(&rstcr, sizeof(*rstcr));
}

#endif

/*
 * Calculate the memory size to reserve from the top of the given bank.
 * Return the aligned size of the reserved memory on success.
 * Return (ram_size + needed size) on failure.
 */
phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
    phys_size_t ram_top = ram_size;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
    ram_top = mc_get_dram_block_size();
    if (ram_top > ram_size)
        return ram_size + ram_top;

    ram_top = ram_size - ram_top;
    /* The start address of MC reserved memory needs to be aligned. */
    ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

    return ram_size - ram_top;
}
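/*
 * Worked example for board_reserve_ram_top() (illustrative numbers, not
 * from any particular board): with ram_size = 0x8000_0000 (2GB), an MC
 * DRAM block size of 0x2000_0000 (512MB) and
 * CONFIG_SYS_MC_RSV_MEM_ALIGN = 0x2000_0000, ram_top becomes
 * 0x6000_0000 after alignment and the function returns 0x2000_0000,
 * i.e. 512MB reserved at the top of the bank for the MC.
 */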
phys_size_t get_effective_memsize(void)
{
    phys_size_t ea_size, rem = 0;

    /*
     * For ARMv8 SoCs, DDR memory is split into two or three regions. The
     * first region is a 2GB space at 0x8000_0000. Secure memory needs to
     * be allocated from the first region. If the memory extends to the
     * second region (or the third region if applicable), Management
     * Complex (MC) memory should be put into the highest region, i.e.
     * the end of DDR memory. CONFIG_MAX_MEM_MAPPED is set to the size of
     * the first region so U-Boot doesn't relocate itself into a higher
     * address. Should DDR be configured to skip the first region, this
     * function needs to be adjusted.
     */
    if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) {
        ea_size = CONFIG_MAX_MEM_MAPPED;
        rem = gd->ram_size - ea_size;
    } else {
        ea_size = gd->ram_size;
    }

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
    /* Check if we have enough space for secure memory */
    if (ea_size > CONFIG_SYS_MEM_RESERVE_SECURE)
        ea_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
    else
        printf("Error: Not enough space for secure memory.\n");
#endif
    /* Check if we have enough memory for MC */
    if (rem < board_reserve_ram_top(rem)) {
        /* Not enough memory in high region to reserve */
        if (ea_size > board_reserve_ram_top(ea_size))
            ea_size -= board_reserve_ram_top(ea_size);
        else
            printf("Error: Not enough space for reserved memory.\n");
    }

    return ea_size;
}

#ifdef CONFIG_TFABOOT
phys_size_t tfa_get_dram_size(void)
{
    struct pt_regs regs;
    phys_size_t dram_size = 0;

    regs.regs[0] = SMC_DRAM_BANK_INFO;
    regs.regs[1] = -1;

    smc_call(&regs);
    if (regs.regs[0])
        return 0;

    dram_size = regs.regs[1];
    return dram_size;
}

static int tfa_dram_init_banksize(void)
{
    int i = 0, ret = 0;
    struct pt_regs regs;
    phys_size_t dram_size = tfa_get_dram_size();

    debug("dram_size %llx\n", dram_size);

    if (!dram_size)
        return -EINVAL;

    do {
        regs.regs[0] = SMC_DRAM_BANK_INFO;
        regs.regs[1] = i;

        smc_call(&regs);
        if (regs.regs[0]) {
            ret = -EINVAL;
            break;
        }

        debug("bank[%d]: start %lx, size %lx\n", i, regs.regs[1],
              regs.regs[2]);
        gd->bd->bi_dram[i].start = regs.regs[1];
        gd->bd->bi_dram[i].size = regs.regs[2];

        dram_size -= gd->bd->bi_dram[i].size;

        i++;
    } while (dram_size);

    if (i > 0)
        ret = 0;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
    /* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
    if (gd->bd->bi_dram[2].size >=
        board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
        gd->arch.resv_ram = gd->bd->bi_dram[2].start +
                            gd->bd->bi_dram[2].size -
                            board_reserve_ram_top(gd->bd->bi_dram[2].size);
    } else
#endif
    {
        if (gd->bd->bi_dram[1].size >=
            board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
            gd->arch.resv_ram = gd->bd->bi_dram[1].start +
                                gd->bd->bi_dram[1].size -
                                board_reserve_ram_top(gd->bd->bi_dram[1].size);
        } else if (gd->bd->bi_dram[0].size >
                   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
            gd->arch.resv_ram = gd->bd->bi_dram[0].start +
                                gd->bd->bi_dram[0].size -
                                board_reserve_ram_top(gd->bd->bi_dram[0].size);
        }
    }
#endif /* CONFIG_FSL_MC_ENET */

    return ret;
}
#endif
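/*
 * Convention of the SMC_DRAM_BANK_INFO service as used above (read from
 * this file, not from a TF-A specification): calling with x1 == -1
 * returns the total DRAM size in x1; calling with x1 == bank index
 * returns that bank's start address in x1 and its size in x2; a
 * non-zero x0 on return indicates failure.
 */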
int dram_init_banksize(void)
{
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
    phys_size_t dp_ddr_size;
#endif

#ifdef CONFIG_TFABOOT
    if (!tfa_dram_init_banksize())
        return 0;
#endif
    /*
     * gd->ram_size has the total size of DDR memory, less reserved secure
     * memory. The DDR extends from the low region to the high region(s)
     * presuming no hole is created by the DDR configuration.
     * gd->arch.secure_ram tracks the location of secure memory.
     * gd->arch.resv_ram tracks the location of the memory reserved for
     * the Management Complex (MC). Because gd->ram_size is reduced by
     * this function if secure memory is reserved, gd->arch.secure_ram
     * should be checked to avoid running it repeatedly.
     */

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
    if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
        debug("No need to run again, skip %s\n", __func__);

        return 0;
    }
#endif

    gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE;
    if (gd->ram_size > CONFIG_SYS_DDR_BLOCK1_SIZE) {
        gd->bd->bi_dram[0].size = CONFIG_SYS_DDR_BLOCK1_SIZE;
        gd->bd->bi_dram[1].start = CONFIG_SYS_DDR_BLOCK2_BASE;
        gd->bd->bi_dram[1].size = gd->ram_size -
                                  CONFIG_SYS_DDR_BLOCK1_SIZE;
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
        if (gd->bd->bi_dram[1].size > CONFIG_SYS_DDR_BLOCK2_SIZE) {
            gd->bd->bi_dram[2].start = CONFIG_SYS_DDR_BLOCK3_BASE;
            gd->bd->bi_dram[2].size = gd->bd->bi_dram[1].size -
                                      CONFIG_SYS_DDR_BLOCK2_SIZE;
            gd->bd->bi_dram[1].size = CONFIG_SYS_DDR_BLOCK2_SIZE;
        }
#endif
    } else {
        gd->bd->bi_dram[0].size = gd->ram_size;
    }
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
    if (gd->bd->bi_dram[0].size >
        CONFIG_SYS_MEM_RESERVE_SECURE) {
        gd->bd->bi_dram[0].size -=
            CONFIG_SYS_MEM_RESERVE_SECURE;
        gd->arch.secure_ram = gd->bd->bi_dram[0].start +
                              gd->bd->bi_dram[0].size;
        gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
        gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
    }
#endif /* CONFIG_SYS_MEM_RESERVE_SECURE */

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
    /* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
    if (gd->bd->bi_dram[2].size >=
        board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
        gd->arch.resv_ram = gd->bd->bi_dram[2].start +
                            gd->bd->bi_dram[2].size -
                            board_reserve_ram_top(gd->bd->bi_dram[2].size);
    } else
#endif
    {
        if (gd->bd->bi_dram[1].size >=
            board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
            gd->arch.resv_ram = gd->bd->bi_dram[1].start +
                                gd->bd->bi_dram[1].size -
                                board_reserve_ram_top(gd->bd->bi_dram[1].size);
        } else if (gd->bd->bi_dram[0].size >
                   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
            gd->arch.resv_ram = gd->bd->bi_dram[0].start +
                                gd->bd->bi_dram[0].size -
                                board_reserve_ram_top(gd->bd->bi_dram[0].size);
        }
    }
#endif /* CONFIG_FSL_MC_ENET */

#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
    if (soc_has_dp_ddr()) {
        /* initialize DP-DDR here */
        puts("DP-DDR: ");
        /*
         * The DDR controller uses 0 as the base address for binding.
         * It is mapped to CONFIG_SYS_DP_DDR_BASE for the cores to access.
         */
        dp_ddr_size = fsl_other_ddr_sdram(CONFIG_SYS_DP_DDR_BASE_PHY,
                                          CONFIG_DP_DDR_CTRL,
                                          CONFIG_DP_DDR_NUM_CTRLS,
                                          CONFIG_DP_DDR_DIMM_SLOTS_PER_CTLR,
                                          NULL, NULL, NULL);
        if (dp_ddr_size) {
            gd->bd->bi_dram[2].start = CONFIG_SYS_DP_DDR_BASE;
            gd->bd->bi_dram[2].size = dp_ddr_size;
        } else {
            puts("Not detected");
        }
    }
#endif

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
    debug("%s is called. gd->ram_size is reduced to %lu\n",
          __func__, (ulong)gd->ram_size);
#endif

    return 0;
}
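/*
 * Worked example for dram_init_banksize() (illustrative config values,
 * assuming no third DDR block and no secure-memory carve-out): with
 * gd->ram_size = 6GB, CONFIG_SYS_SDRAM_BASE = 0x8000_0000,
 * CONFIG_SYS_DDR_BLOCK1_SIZE = 2GB and CONFIG_SYS_DDR_BLOCK2_BASE =
 * 0x80_8000_0000, bank 0 is 2GB at 0x8000_0000 and bank 1 is the
 * remaining 4GB at 0x80_8000_0000.
 */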
#if CONFIG_IS_ENABLED(EFI_LOADER)
void efi_add_known_memory(void)
{
    int i;
    phys_addr_t ram_start, start;
    phys_size_t ram_size;
    u64 pages;

    /* Add RAM */
    for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
        if (i == 2)
            continue;    /* skip DP-DDR */
#endif
        ram_start = gd->bd->bi_dram[i].start;
        ram_size = gd->bd->bi_dram[i].size;
#ifdef CONFIG_RESV_RAM
        if (gd->arch.resv_ram >= ram_start &&
            gd->arch.resv_ram < ram_start + ram_size)
            ram_size = gd->arch.resv_ram - ram_start;
#endif
        start = (ram_start + EFI_PAGE_MASK) & ~EFI_PAGE_MASK;
        pages = (ram_size + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT;

        efi_add_memory_map(start, pages, EFI_CONVENTIONAL_MEMORY,
                           false);
    }
}
#endif

/*
 * Before the DDR size is known, the early MMU tables have DDR mapped as
 * device memory to avoid speculative access. To relocate U-Boot to DDR,
 * "normal memory" needs to be set for these mappings.
 * If a special case configures DDR with holes in the mapping, the holes
 * need to be marked as invalid. This is not implemented in this function.
 */
void update_early_mmu_table(void)
{
    if (!gd->arch.tlb_addr)
        return;

    if (gd->ram_size <= CONFIG_SYS_FSL_DRAM_SIZE1) {
        mmu_change_region_attr(
            CONFIG_SYS_SDRAM_BASE,
            gd->ram_size,
            PTE_BLOCK_MEMTYPE(MT_NORMAL) |
            PTE_BLOCK_OUTER_SHARE |
            PTE_BLOCK_NS |
            PTE_TYPE_VALID);
    } else {
        mmu_change_region_attr(
            CONFIG_SYS_SDRAM_BASE,
            CONFIG_SYS_DDR_BLOCK1_SIZE,
            PTE_BLOCK_MEMTYPE(MT_NORMAL) |
            PTE_BLOCK_OUTER_SHARE |
            PTE_BLOCK_NS |
            PTE_TYPE_VALID);
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#ifndef CONFIG_SYS_DDR_BLOCK2_SIZE
#error "Missing CONFIG_SYS_DDR_BLOCK2_SIZE"
#endif
        if (gd->ram_size - CONFIG_SYS_DDR_BLOCK1_SIZE >
            CONFIG_SYS_DDR_BLOCK2_SIZE) {
            mmu_change_region_attr(
                CONFIG_SYS_DDR_BLOCK2_BASE,
                CONFIG_SYS_DDR_BLOCK2_SIZE,
                PTE_BLOCK_MEMTYPE(MT_NORMAL) |
                PTE_BLOCK_OUTER_SHARE |
                PTE_BLOCK_NS |
                PTE_TYPE_VALID);
            mmu_change_region_attr(
                CONFIG_SYS_DDR_BLOCK3_BASE,
                gd->ram_size -
                CONFIG_SYS_DDR_BLOCK1_SIZE -
                CONFIG_SYS_DDR_BLOCK2_SIZE,
                PTE_BLOCK_MEMTYPE(MT_NORMAL) |
                PTE_BLOCK_OUTER_SHARE |
                PTE_BLOCK_NS |
                PTE_TYPE_VALID);
        } else
#endif
        {
            mmu_change_region_attr(
                CONFIG_SYS_DDR_BLOCK2_BASE,
                gd->ram_size -
                CONFIG_SYS_DDR_BLOCK1_SIZE,
                PTE_BLOCK_MEMTYPE(MT_NORMAL) |
                PTE_BLOCK_OUTER_SHARE |
                PTE_BLOCK_NS |
                PTE_TYPE_VALID);
        }
    }
}

__weak int dram_init(void)
{
    fsl_initdram();
#if (!defined(CONFIG_SPL) && !defined(CONFIG_TFABOOT)) || \
    defined(CONFIG_SPL_BUILD)
    /* This will break-before-make the MMU mappings for DDR */
    update_early_mmu_table();
#endif

    return 0;
}