1 /* 2 * Copyright 2014-2015 Freescale Semiconductor, Inc. 3 * 4 * SPDX-License-Identifier: GPL-2.0+ 5 */ 6 7 #include <common.h> 8 #include <asm/io.h> 9 #include <linux/errno.h> 10 #include <asm/system.h> 11 #include <asm/armv8/mmu.h> 12 #include <asm/io.h> 13 #include <asm/arch/fsl_serdes.h> 14 #include <asm/arch/soc.h> 15 #include <asm/arch/cpu.h> 16 #include <asm/arch/speed.h> 17 #ifdef CONFIG_MP 18 #include <asm/arch/mp.h> 19 #endif 20 #include <efi_loader.h> 21 #include <fm_eth.h> 22 #include <fsl-mc/fsl_mc.h> 23 #ifdef CONFIG_FSL_ESDHC 24 #include <fsl_esdhc.h> 25 #endif 26 #ifdef CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT 27 #include <asm/armv8/sec_firmware.h> 28 #endif 29 30 DECLARE_GLOBAL_DATA_PTR; 31 32 struct mm_region *mem_map = early_map; 33 34 void cpu_name(char *name) 35 { 36 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR); 37 unsigned int i, svr, ver; 38 39 svr = gur_in32(&gur->svr); 40 ver = SVR_SOC_VER(svr); 41 42 for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++) 43 if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) { 44 strcpy(name, cpu_type_list[i].name); 45 46 if (IS_E_PROCESSOR(svr)) 47 strcat(name, "E"); 48 49 sprintf(name + strlen(name), " Rev%d.%d", 50 SVR_MAJ(svr), SVR_MIN(svr)); 51 break; 52 } 53 54 if (i == ARRAY_SIZE(cpu_type_list)) 55 strcpy(name, "unknown"); 56 } 57 58 #ifndef CONFIG_SYS_DCACHE_OFF 59 /* 60 * To start MMU before DDR is available, we create MMU table in SRAM. 61 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three 62 * levels of translation tables here to cover 40-bit address space. 63 * We use 4KB granule size, with 40 bits physical address, T0SZ=24 64 * Address above EARLY_PGTABLE_SIZE (0x5000) is free for other purpose. 65 * Note, the debug print in cache_v8.c is not usable for debugging 66 * these early MMU tables because UART is not yet available. 67 */ 68 static inline void early_mmu_setup(void) 69 { 70 unsigned int el = current_el(); 71 72 /* global data is already setup, no allocation yet */ 73 gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE; 74 gd->arch.tlb_fillptr = gd->arch.tlb_addr; 75 gd->arch.tlb_size = EARLY_PGTABLE_SIZE; 76 77 /* Create early page tables */ 78 setup_pgtables(); 79 80 /* point TTBR to the new table */ 81 set_ttbr_tcr_mair(el, gd->arch.tlb_addr, 82 get_tcr(el, NULL, NULL) & 83 ~(TCR_ORGN_MASK | TCR_IRGN_MASK), 84 MEMORY_ATTRIBUTES); 85 86 set_sctlr(get_sctlr() | CR_M); 87 } 88 89 /* 90 * The final tables look similar to early tables, but different in detail. 91 * These tables are in DRAM. Sub tables are added to enable cache for 92 * QBMan and OCRAM. 93 * 94 * Put the MMU table in secure memory if gd->arch.secure_ram is valid. 95 * OCRAM will be not used for this purpose so gd->arch.secure_ram can't be 0. 96 */ 97 static inline void final_mmu_setup(void) 98 { 99 u64 tlb_addr_save = gd->arch.tlb_addr; 100 unsigned int el = current_el(); 101 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE 102 int index; 103 #endif 104 105 mem_map = final_map; 106 107 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE 108 if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) { 109 if (el == 3) { 110 /* 111 * Only use gd->arch.secure_ram if the address is 112 * recalculated. Align to 4KB for MMU table. 113 */ 114 /* put page tables in secure ram */ 115 index = ARRAY_SIZE(final_map) - 2; 116 gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff; 117 final_map[index].virt = gd->arch.secure_ram & ~0x3; 118 final_map[index].phys = final_map[index].virt; 119 final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE; 120 final_map[index].attrs = PTE_BLOCK_OUTER_SHARE; 121 gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED; 122 tlb_addr_save = gd->arch.tlb_addr; 123 } else { 124 /* Use allocated (board_f.c) memory for TLB */ 125 tlb_addr_save = gd->arch.tlb_allocated; 126 gd->arch.tlb_addr = tlb_addr_save; 127 } 128 } 129 #endif 130 131 /* Reset the fill ptr */ 132 gd->arch.tlb_fillptr = tlb_addr_save; 133 134 /* Create normal system page tables */ 135 setup_pgtables(); 136 137 /* Create emergency page tables */ 138 gd->arch.tlb_addr = gd->arch.tlb_fillptr; 139 gd->arch.tlb_emerg = gd->arch.tlb_addr; 140 setup_pgtables(); 141 gd->arch.tlb_addr = tlb_addr_save; 142 143 /* flush new MMU table */ 144 flush_dcache_range(gd->arch.tlb_addr, 145 gd->arch.tlb_addr + gd->arch.tlb_size); 146 147 /* point TTBR to the new table */ 148 set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL), 149 MEMORY_ATTRIBUTES); 150 /* 151 * EL3 MMU is already enabled, just need to invalidate TLB to load the 152 * new table. The new table is compatible with the current table, if 153 * MMU somehow walks through the new table before invalidation TLB, 154 * it still works. So we don't need to turn off MMU here. 155 * When EL2 MMU table is created by calling this function, MMU needs 156 * to be enabled. 157 */ 158 set_sctlr(get_sctlr() | CR_M); 159 } 160 161 u64 get_page_table_size(void) 162 { 163 return 0x10000; 164 } 165 166 int arch_cpu_init(void) 167 { 168 icache_enable(); 169 __asm_invalidate_dcache_all(); 170 __asm_invalidate_tlb_all(); 171 early_mmu_setup(); 172 set_sctlr(get_sctlr() | CR_C); 173 return 0; 174 } 175 176 void mmu_setup(void) 177 { 178 final_mmu_setup(); 179 } 180 181 /* 182 * This function is called from common/board_r.c. 183 * It recreates MMU table in main memory. 184 */ 185 void enable_caches(void) 186 { 187 mmu_setup(); 188 __asm_invalidate_tlb_all(); 189 icache_enable(); 190 dcache_enable(); 191 } 192 #endif 193 194 u32 initiator_type(u32 cluster, int init_id) 195 { 196 struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR); 197 u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK; 198 u32 type = 0; 199 200 type = gur_in32(&gur->tp_ityp[idx]); 201 if (type & TP_ITYP_AV) 202 return type; 203 204 return 0; 205 } 206 207 u32 cpu_pos_mask(void) 208 { 209 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR); 210 int i = 0; 211 u32 cluster, type, mask = 0; 212 213 do { 214 int j; 215 216 cluster = gur_in32(&gur->tp_cluster[i].lower); 217 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) { 218 type = initiator_type(cluster, j); 219 if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)) 220 mask |= 1 << (i * TP_INIT_PER_CLUSTER + j); 221 } 222 i++; 223 } while ((cluster & TP_CLUSTER_EOC) == 0x0); 224 225 return mask; 226 } 227 228 u32 cpu_mask(void) 229 { 230 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR); 231 int i = 0, count = 0; 232 u32 cluster, type, mask = 0; 233 234 do { 235 int j; 236 237 cluster = gur_in32(&gur->tp_cluster[i].lower); 238 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) { 239 type = initiator_type(cluster, j); 240 if (type) { 241 if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM) 242 mask |= 1 << count; 243 count++; 244 } 245 } 246 i++; 247 } while ((cluster & TP_CLUSTER_EOC) == 0x0); 248 249 return mask; 250 } 251 252 /* 253 * Return the number of cores on this SOC. 254 */ 255 int cpu_numcores(void) 256 { 257 return hweight32(cpu_mask()); 258 } 259 260 int fsl_qoriq_core_to_cluster(unsigned int core) 261 { 262 struct ccsr_gur __iomem *gur = 263 (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR); 264 int i = 0, count = 0; 265 u32 cluster; 266 267 do { 268 int j; 269 270 cluster = gur_in32(&gur->tp_cluster[i].lower); 271 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) { 272 if (initiator_type(cluster, j)) { 273 if (count == core) 274 return i; 275 count++; 276 } 277 } 278 i++; 279 } while ((cluster & TP_CLUSTER_EOC) == 0x0); 280 281 return -1; /* cannot identify the cluster */ 282 } 283 284 u32 fsl_qoriq_core_to_type(unsigned int core) 285 { 286 struct ccsr_gur __iomem *gur = 287 (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR); 288 int i = 0, count = 0; 289 u32 cluster, type; 290 291 do { 292 int j; 293 294 cluster = gur_in32(&gur->tp_cluster[i].lower); 295 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) { 296 type = initiator_type(cluster, j); 297 if (type) { 298 if (count == core) 299 return type; 300 count++; 301 } 302 } 303 i++; 304 } while ((cluster & TP_CLUSTER_EOC) == 0x0); 305 306 return -1; /* cannot identify the cluster */ 307 } 308 309 #ifndef CONFIG_FSL_LSCH3 310 uint get_svr(void) 311 { 312 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR); 313 314 return gur_in32(&gur->svr); 315 } 316 #endif 317 318 #ifdef CONFIG_DISPLAY_CPUINFO 319 int print_cpuinfo(void) 320 { 321 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR); 322 struct sys_info sysinfo; 323 char buf[32]; 324 unsigned int i, core; 325 u32 type, rcw, svr = gur_in32(&gur->svr); 326 327 puts("SoC: "); 328 329 cpu_name(buf); 330 printf(" %s (0x%x)\n", buf, svr); 331 memset((u8 *)buf, 0x00, ARRAY_SIZE(buf)); 332 get_sys_info(&sysinfo); 333 puts("Clock Configuration:"); 334 for_each_cpu(i, core, cpu_numcores(), cpu_mask()) { 335 if (!(i % 3)) 336 puts("\n "); 337 type = TP_ITYP_VER(fsl_qoriq_core_to_type(core)); 338 printf("CPU%d(%s):%-4s MHz ", core, 339 type == TY_ITYP_VER_A7 ? "A7 " : 340 (type == TY_ITYP_VER_A53 ? "A53" : 341 (type == TY_ITYP_VER_A57 ? "A57" : 342 (type == TY_ITYP_VER_A72 ? "A72" : " "))), 343 strmhz(buf, sysinfo.freq_processor[core])); 344 } 345 printf("\n Bus: %-4s MHz ", 346 strmhz(buf, sysinfo.freq_systembus)); 347 printf("DDR: %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus)); 348 #ifdef CONFIG_SYS_DPAA_FMAN 349 printf(" FMAN: %-4s MHz", strmhz(buf, sysinfo.freq_fman[0])); 350 #endif 351 #ifdef CONFIG_SYS_FSL_HAS_DP_DDR 352 if (soc_has_dp_ddr()) { 353 printf(" DP-DDR: %-4s MT/s", 354 strmhz(buf, sysinfo.freq_ddrbus2)); 355 } 356 #endif 357 puts("\n"); 358 359 /* 360 * Display the RCW, so that no one gets confused as to what RCW 361 * we're actually using for this boot. 362 */ 363 puts("Reset Configuration Word (RCW):"); 364 for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) { 365 rcw = gur_in32(&gur->rcwsr[i]); 366 if ((i % 4) == 0) 367 printf("\n %08x:", i * 4); 368 printf(" %08x", rcw); 369 } 370 puts("\n"); 371 372 return 0; 373 } 374 #endif 375 376 #ifdef CONFIG_FSL_ESDHC 377 int cpu_mmc_init(bd_t *bis) 378 { 379 return fsl_esdhc_mmc_init(bis); 380 } 381 #endif 382 383 int cpu_eth_init(bd_t *bis) 384 { 385 int error = 0; 386 387 #ifdef CONFIG_FSL_MC_ENET 388 error = fsl_mc_ldpaa_init(bis); 389 #endif 390 #ifdef CONFIG_FMAN_ENET 391 fm_standard_init(bis); 392 #endif 393 return error; 394 } 395 396 int arch_early_init_r(void) 397 { 398 #ifdef CONFIG_MP 399 int rv = 1; 400 u32 psci_ver = 0xffffffff; 401 #endif 402 403 #ifdef CONFIG_SYS_FSL_ERRATUM_A009635 404 erratum_a009635(); 405 #endif 406 407 #ifdef CONFIG_MP 408 #if defined(CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT) && defined(CONFIG_ARMV8_PSCI) 409 /* Check the psci version to determine if the psci is supported */ 410 psci_ver = sec_firmware_support_psci_version(); 411 #endif 412 if (psci_ver == 0xffffffff) { 413 rv = fsl_layerscape_wake_seconday_cores(); 414 if (rv) 415 printf("Did not wake secondary cores\n"); 416 } 417 #endif 418 419 #ifdef CONFIG_SYS_HAS_SERDES 420 fsl_serdes_init(); 421 #endif 422 #ifdef CONFIG_FMAN_ENET 423 fman_enet_init(); 424 #endif 425 return 0; 426 } 427 428 int timer_init(void) 429 { 430 u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR; 431 #ifdef CONFIG_FSL_LSCH3 432 u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR; 433 #endif 434 #ifdef CONFIG_LS2080A 435 u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET; 436 u32 svr_dev_id; 437 #endif 438 #ifdef COUNTER_FREQUENCY_REAL 439 unsigned long cntfrq = COUNTER_FREQUENCY_REAL; 440 441 /* Update with accurate clock frequency */ 442 asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory"); 443 #endif 444 445 #ifdef CONFIG_FSL_LSCH3 446 /* Enable timebase for all clusters. 447 * It is safe to do so even some clusters are not enabled. 448 */ 449 out_le32(cltbenr, 0xf); 450 #endif 451 452 #ifdef CONFIG_LS2080A 453 /* 454 * In certain Layerscape SoCs, the clock for each core's 455 * has an enable bit in the PMU Physical Core Time Base Enable 456 * Register (PCTBENR), which allows the watchdog to operate. 457 */ 458 setbits_le32(pctbenr, 0xff); 459 /* 460 * For LS2080A SoC and its personalities, timer controller 461 * offset is different 462 */ 463 svr_dev_id = get_svr() >> 16; 464 if (svr_dev_id == SVR_DEV_LS2080A) 465 cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR; 466 467 #endif 468 469 /* Enable clock for timer 470 * This is a global setting. 471 */ 472 out_le32(cntcr, 0x1); 473 474 return 0; 475 } 476 477 __efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR; 478 479 void __efi_runtime reset_cpu(ulong addr) 480 { 481 u32 val; 482 483 /* Raise RESET_REQ_B */ 484 val = scfg_in32(rstcr); 485 val |= 0x02; 486 scfg_out32(rstcr, val); 487 } 488 489 #ifdef CONFIG_EFI_LOADER 490 491 void __efi_runtime EFIAPI efi_reset_system( 492 enum efi_reset_type reset_type, 493 efi_status_t reset_status, 494 unsigned long data_size, void *reset_data) 495 { 496 switch (reset_type) { 497 case EFI_RESET_COLD: 498 case EFI_RESET_WARM: 499 reset_cpu(0); 500 break; 501 case EFI_RESET_SHUTDOWN: 502 /* Nothing we can do */ 503 break; 504 } 505 506 while (1) { } 507 } 508 509 void efi_reset_system_init(void) 510 { 511 efi_add_runtime_mmio(&rstcr, sizeof(*rstcr)); 512 } 513 514 #endif 515 516 phys_size_t board_reserve_ram_top(phys_size_t ram_size) 517 { 518 phys_size_t ram_top = ram_size; 519 520 #ifdef CONFIG_SYS_MEM_TOP_HIDE 521 #error CONFIG_SYS_MEM_TOP_HIDE not to be used together with this function 522 #endif 523 524 /* Carve the MC private DRAM block from the end of DRAM */ 525 #ifdef CONFIG_FSL_MC_ENET 526 ram_top -= mc_get_dram_block_size(); 527 ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1); 528 #endif 529 530 return ram_top; 531 } 532