1 /* 2 * Copyright 2014-2015 Freescale Semiconductor, Inc. 3 * 4 * SPDX-License-Identifier: GPL-2.0+ 5 */ 6 7 #include <common.h> 8 #include <asm/io.h> 9 #include <linux/errno.h> 10 #include <asm/system.h> 11 #include <asm/armv8/mmu.h> 12 #include <asm/io.h> 13 #include <asm/arch/fsl_serdes.h> 14 #include <asm/arch/soc.h> 15 #include <asm/arch/cpu.h> 16 #include <asm/arch/speed.h> 17 #ifdef CONFIG_MP 18 #include <asm/arch/mp.h> 19 #endif 20 #include <efi_loader.h> 21 #include <fm_eth.h> 22 #include <fsl-mc/fsl_mc.h> 23 #ifdef CONFIG_FSL_ESDHC 24 #include <fsl_esdhc.h> 25 #endif 26 #ifdef CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT 27 #include <asm/armv8/sec_firmware.h> 28 #endif 29 #ifdef CONFIG_SYS_FSL_DDR 30 #include <fsl_ddr.h> 31 #endif 32 33 DECLARE_GLOBAL_DATA_PTR; 34 35 struct mm_region *mem_map = early_map; 36 37 void cpu_name(char *name) 38 { 39 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR); 40 unsigned int i, svr, ver; 41 42 svr = gur_in32(&gur->svr); 43 ver = SVR_SOC_VER(svr); 44 45 for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++) 46 if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) { 47 strcpy(name, cpu_type_list[i].name); 48 49 if (IS_E_PROCESSOR(svr)) 50 strcat(name, "E"); 51 52 sprintf(name + strlen(name), " Rev%d.%d", 53 SVR_MAJ(svr), SVR_MIN(svr)); 54 break; 55 } 56 57 if (i == ARRAY_SIZE(cpu_type_list)) 58 strcpy(name, "unknown"); 59 } 60 61 #ifndef CONFIG_SYS_DCACHE_OFF 62 /* 63 * To start MMU before DDR is available, we create MMU table in SRAM. 64 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three 65 * levels of translation tables here to cover 40-bit address space. 66 * We use 4KB granule size, with 40 bits physical address, T0SZ=24 67 * Address above EARLY_PGTABLE_SIZE (0x5000) is free for other purpose. 68 * Note, the debug print in cache_v8.c is not usable for debugging 69 * these early MMU tables because UART is not yet available. 70 */ 71 static inline void early_mmu_setup(void) 72 { 73 unsigned int el = current_el(); 74 75 /* global data is already setup, no allocation yet */ 76 gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE; 77 gd->arch.tlb_fillptr = gd->arch.tlb_addr; 78 gd->arch.tlb_size = EARLY_PGTABLE_SIZE; 79 80 /* Create early page tables */ 81 setup_pgtables(); 82 83 /* point TTBR to the new table */ 84 set_ttbr_tcr_mair(el, gd->arch.tlb_addr, 85 get_tcr(el, NULL, NULL) & 86 ~(TCR_ORGN_MASK | TCR_IRGN_MASK), 87 MEMORY_ATTRIBUTES); 88 89 set_sctlr(get_sctlr() | CR_M); 90 } 91 92 /* 93 * The final tables look similar to early tables, but different in detail. 94 * These tables are in DRAM. Sub tables are added to enable cache for 95 * QBMan and OCRAM. 96 * 97 * Put the MMU table in secure memory if gd->arch.secure_ram is valid. 98 * OCRAM will be not used for this purpose so gd->arch.secure_ram can't be 0. 99 */ 100 static inline void final_mmu_setup(void) 101 { 102 u64 tlb_addr_save = gd->arch.tlb_addr; 103 unsigned int el = current_el(); 104 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE 105 int index; 106 #endif 107 108 mem_map = final_map; 109 110 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE 111 if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) { 112 if (el == 3) { 113 /* 114 * Only use gd->arch.secure_ram if the address is 115 * recalculated. Align to 4KB for MMU table. 116 */ 117 /* put page tables in secure ram */ 118 index = ARRAY_SIZE(final_map) - 2; 119 gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff; 120 final_map[index].virt = gd->arch.secure_ram & ~0x3; 121 final_map[index].phys = final_map[index].virt; 122 final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE; 123 final_map[index].attrs = PTE_BLOCK_OUTER_SHARE; 124 gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED; 125 tlb_addr_save = gd->arch.tlb_addr; 126 } else { 127 /* Use allocated (board_f.c) memory for TLB */ 128 tlb_addr_save = gd->arch.tlb_allocated; 129 gd->arch.tlb_addr = tlb_addr_save; 130 } 131 } 132 #endif 133 134 /* Reset the fill ptr */ 135 gd->arch.tlb_fillptr = tlb_addr_save; 136 137 /* Create normal system page tables */ 138 setup_pgtables(); 139 140 /* Create emergency page tables */ 141 gd->arch.tlb_addr = gd->arch.tlb_fillptr; 142 gd->arch.tlb_emerg = gd->arch.tlb_addr; 143 setup_pgtables(); 144 gd->arch.tlb_addr = tlb_addr_save; 145 146 /* flush new MMU table */ 147 flush_dcache_range(gd->arch.tlb_addr, 148 gd->arch.tlb_addr + gd->arch.tlb_size); 149 150 /* point TTBR to the new table */ 151 set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL), 152 MEMORY_ATTRIBUTES); 153 /* 154 * EL3 MMU is already enabled, just need to invalidate TLB to load the 155 * new table. The new table is compatible with the current table, if 156 * MMU somehow walks through the new table before invalidation TLB, 157 * it still works. So we don't need to turn off MMU here. 158 * When EL2 MMU table is created by calling this function, MMU needs 159 * to be enabled. 160 */ 161 set_sctlr(get_sctlr() | CR_M); 162 } 163 164 u64 get_page_table_size(void) 165 { 166 return 0x10000; 167 } 168 169 int arch_cpu_init(void) 170 { 171 icache_enable(); 172 __asm_invalidate_dcache_all(); 173 __asm_invalidate_tlb_all(); 174 early_mmu_setup(); 175 set_sctlr(get_sctlr() | CR_C); 176 return 0; 177 } 178 179 void mmu_setup(void) 180 { 181 final_mmu_setup(); 182 } 183 184 /* 185 * This function is called from common/board_r.c. 186 * It recreates MMU table in main memory. 187 */ 188 void enable_caches(void) 189 { 190 mmu_setup(); 191 __asm_invalidate_tlb_all(); 192 icache_enable(); 193 dcache_enable(); 194 } 195 #endif 196 197 u32 initiator_type(u32 cluster, int init_id) 198 { 199 struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR); 200 u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK; 201 u32 type = 0; 202 203 type = gur_in32(&gur->tp_ityp[idx]); 204 if (type & TP_ITYP_AV) 205 return type; 206 207 return 0; 208 } 209 210 u32 cpu_pos_mask(void) 211 { 212 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR); 213 int i = 0; 214 u32 cluster, type, mask = 0; 215 216 do { 217 int j; 218 219 cluster = gur_in32(&gur->tp_cluster[i].lower); 220 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) { 221 type = initiator_type(cluster, j); 222 if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)) 223 mask |= 1 << (i * TP_INIT_PER_CLUSTER + j); 224 } 225 i++; 226 } while ((cluster & TP_CLUSTER_EOC) == 0x0); 227 228 return mask; 229 } 230 231 u32 cpu_mask(void) 232 { 233 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR); 234 int i = 0, count = 0; 235 u32 cluster, type, mask = 0; 236 237 do { 238 int j; 239 240 cluster = gur_in32(&gur->tp_cluster[i].lower); 241 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) { 242 type = initiator_type(cluster, j); 243 if (type) { 244 if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM) 245 mask |= 1 << count; 246 count++; 247 } 248 } 249 i++; 250 } while ((cluster & TP_CLUSTER_EOC) == 0x0); 251 252 return mask; 253 } 254 255 /* 256 * Return the number of cores on this SOC. 257 */ 258 int cpu_numcores(void) 259 { 260 return hweight32(cpu_mask()); 261 } 262 263 int fsl_qoriq_core_to_cluster(unsigned int core) 264 { 265 struct ccsr_gur __iomem *gur = 266 (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR); 267 int i = 0, count = 0; 268 u32 cluster; 269 270 do { 271 int j; 272 273 cluster = gur_in32(&gur->tp_cluster[i].lower); 274 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) { 275 if (initiator_type(cluster, j)) { 276 if (count == core) 277 return i; 278 count++; 279 } 280 } 281 i++; 282 } while ((cluster & TP_CLUSTER_EOC) == 0x0); 283 284 return -1; /* cannot identify the cluster */ 285 } 286 287 u32 fsl_qoriq_core_to_type(unsigned int core) 288 { 289 struct ccsr_gur __iomem *gur = 290 (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR); 291 int i = 0, count = 0; 292 u32 cluster, type; 293 294 do { 295 int j; 296 297 cluster = gur_in32(&gur->tp_cluster[i].lower); 298 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) { 299 type = initiator_type(cluster, j); 300 if (type) { 301 if (count == core) 302 return type; 303 count++; 304 } 305 } 306 i++; 307 } while ((cluster & TP_CLUSTER_EOC) == 0x0); 308 309 return -1; /* cannot identify the cluster */ 310 } 311 312 #ifndef CONFIG_FSL_LSCH3 313 uint get_svr(void) 314 { 315 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR); 316 317 return gur_in32(&gur->svr); 318 } 319 #endif 320 321 #ifdef CONFIG_DISPLAY_CPUINFO 322 int print_cpuinfo(void) 323 { 324 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR); 325 struct sys_info sysinfo; 326 char buf[32]; 327 unsigned int i, core; 328 u32 type, rcw, svr = gur_in32(&gur->svr); 329 330 puts("SoC: "); 331 332 cpu_name(buf); 333 printf(" %s (0x%x)\n", buf, svr); 334 memset((u8 *)buf, 0x00, ARRAY_SIZE(buf)); 335 get_sys_info(&sysinfo); 336 puts("Clock Configuration:"); 337 for_each_cpu(i, core, cpu_numcores(), cpu_mask()) { 338 if (!(i % 3)) 339 puts("\n "); 340 type = TP_ITYP_VER(fsl_qoriq_core_to_type(core)); 341 printf("CPU%d(%s):%-4s MHz ", core, 342 type == TY_ITYP_VER_A7 ? "A7 " : 343 (type == TY_ITYP_VER_A53 ? "A53" : 344 (type == TY_ITYP_VER_A57 ? "A57" : 345 (type == TY_ITYP_VER_A72 ? "A72" : " "))), 346 strmhz(buf, sysinfo.freq_processor[core])); 347 } 348 /* Display platform clock as Bus frequency. */ 349 printf("\n Bus: %-4s MHz ", 350 strmhz(buf, sysinfo.freq_systembus / CONFIG_SYS_FSL_PCLK_DIV)); 351 printf("DDR: %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus)); 352 #ifdef CONFIG_SYS_DPAA_FMAN 353 printf(" FMAN: %-4s MHz", strmhz(buf, sysinfo.freq_fman[0])); 354 #endif 355 #ifdef CONFIG_SYS_FSL_HAS_DP_DDR 356 if (soc_has_dp_ddr()) { 357 printf(" DP-DDR: %-4s MT/s", 358 strmhz(buf, sysinfo.freq_ddrbus2)); 359 } 360 #endif 361 puts("\n"); 362 363 /* 364 * Display the RCW, so that no one gets confused as to what RCW 365 * we're actually using for this boot. 366 */ 367 puts("Reset Configuration Word (RCW):"); 368 for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) { 369 rcw = gur_in32(&gur->rcwsr[i]); 370 if ((i % 4) == 0) 371 printf("\n %08x:", i * 4); 372 printf(" %08x", rcw); 373 } 374 puts("\n"); 375 376 return 0; 377 } 378 #endif 379 380 #ifdef CONFIG_FSL_ESDHC 381 int cpu_mmc_init(bd_t *bis) 382 { 383 return fsl_esdhc_mmc_init(bis); 384 } 385 #endif 386 387 int cpu_eth_init(bd_t *bis) 388 { 389 int error = 0; 390 391 #ifdef CONFIG_FSL_MC_ENET 392 error = fsl_mc_ldpaa_init(bis); 393 #endif 394 #ifdef CONFIG_FMAN_ENET 395 fm_standard_init(bis); 396 #endif 397 return error; 398 } 399 400 int arch_early_init_r(void) 401 { 402 #ifdef CONFIG_MP 403 int rv = 1; 404 u32 psci_ver = 0xffffffff; 405 #endif 406 407 #ifdef CONFIG_SYS_FSL_ERRATUM_A009635 408 erratum_a009635(); 409 #endif 410 #if defined(CONFIG_SYS_FSL_ERRATUM_A009942) && defined(CONFIG_SYS_FSL_DDR) 411 erratum_a009942_check_cpo(); 412 #endif 413 #ifdef CONFIG_MP 414 #if defined(CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT) && \ 415 defined(CONFIG_SEC_FIRMWARE_ARMV8_PSCI) 416 /* Check the psci version to determine if the psci is supported */ 417 psci_ver = sec_firmware_support_psci_version(); 418 #endif 419 if (psci_ver == 0xffffffff) { 420 rv = fsl_layerscape_wake_seconday_cores(); 421 if (rv) 422 printf("Did not wake secondary cores\n"); 423 } 424 #endif 425 426 #ifdef CONFIG_SYS_HAS_SERDES 427 fsl_serdes_init(); 428 #endif 429 #ifdef CONFIG_FMAN_ENET 430 fman_enet_init(); 431 #endif 432 return 0; 433 } 434 435 int timer_init(void) 436 { 437 u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR; 438 #ifdef CONFIG_FSL_LSCH3 439 u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR; 440 #endif 441 #ifdef CONFIG_LS2080A 442 u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET; 443 u32 svr_dev_id; 444 #endif 445 #ifdef COUNTER_FREQUENCY_REAL 446 unsigned long cntfrq = COUNTER_FREQUENCY_REAL; 447 448 /* Update with accurate clock frequency */ 449 asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory"); 450 #endif 451 452 #ifdef CONFIG_FSL_LSCH3 453 /* Enable timebase for all clusters. 454 * It is safe to do so even some clusters are not enabled. 455 */ 456 out_le32(cltbenr, 0xf); 457 #endif 458 459 #ifdef CONFIG_LS2080A 460 /* 461 * In certain Layerscape SoCs, the clock for each core's 462 * has an enable bit in the PMU Physical Core Time Base Enable 463 * Register (PCTBENR), which allows the watchdog to operate. 464 */ 465 setbits_le32(pctbenr, 0xff); 466 /* 467 * For LS2080A SoC and its personalities, timer controller 468 * offset is different 469 */ 470 svr_dev_id = get_svr() >> 16; 471 if (svr_dev_id == SVR_DEV_LS2080A) 472 cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR; 473 474 #endif 475 476 /* Enable clock for timer 477 * This is a global setting. 478 */ 479 out_le32(cntcr, 0x1); 480 481 return 0; 482 } 483 484 __efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR; 485 486 void __efi_runtime reset_cpu(ulong addr) 487 { 488 u32 val; 489 490 /* Raise RESET_REQ_B */ 491 val = scfg_in32(rstcr); 492 val |= 0x02; 493 scfg_out32(rstcr, val); 494 } 495 496 #ifdef CONFIG_EFI_LOADER 497 498 void __efi_runtime EFIAPI efi_reset_system( 499 enum efi_reset_type reset_type, 500 efi_status_t reset_status, 501 unsigned long data_size, void *reset_data) 502 { 503 switch (reset_type) { 504 case EFI_RESET_COLD: 505 case EFI_RESET_WARM: 506 reset_cpu(0); 507 break; 508 case EFI_RESET_SHUTDOWN: 509 /* Nothing we can do */ 510 break; 511 } 512 513 while (1) { } 514 } 515 516 void efi_reset_system_init(void) 517 { 518 efi_add_runtime_mmio(&rstcr, sizeof(*rstcr)); 519 } 520 521 #endif 522 523 phys_size_t board_reserve_ram_top(phys_size_t ram_size) 524 { 525 phys_size_t ram_top = ram_size; 526 527 #ifdef CONFIG_SYS_MEM_TOP_HIDE 528 #error CONFIG_SYS_MEM_TOP_HIDE not to be used together with this function 529 #endif 530 531 /* Carve the MC private DRAM block from the end of DRAM */ 532 #ifdef CONFIG_FSL_MC_ENET 533 ram_top -= mc_get_dram_block_size(); 534 ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1); 535 #endif 536 537 return ram_top; 538 } 539