/*
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <asm/io.h>
#include <asm/errno.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#ifdef CONFIG_MP
#include <asm/arch/mp.h>
#endif
#include <fm_eth.h>
#include <fsl_debug_server.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#ifdef CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT
#include <asm/armv8/sec_firmware.h>
#endif

DECLARE_GLOBAL_DATA_PTR;

struct mm_region *mem_map = early_map;

void cpu_name(char *name)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	unsigned int i, svr, ver;

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
			strcpy(name, cpu_type_list[i].name);

			if (IS_E_PROCESSOR(svr))
				strcat(name, "E");
			break;
		}

	if (i == ARRAY_SIZE(cpu_type_list))
		strcpy(name, "unknown");
}

#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * To start the MMU before DDR is available, we create MMU tables in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover the 40-bit address space.
 * We use a 4KB granule size, with 40-bit physical addresses, so T0SZ=24.
 * Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for other purposes.
 * Note that the debug print in cache_v8.c is not usable for debugging
 * these early MMU tables because the UART is not yet available.
 */
static inline void early_mmu_setup(void)
{
	unsigned int el = current_el();

	/* global data is already set up, no allocation yet */
	gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
	gd->arch.tlb_fillptr = gd->arch.tlb_addr;
	gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

	/* Create early page tables */
	setup_pgtables();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
			  get_tcr(el, NULL, NULL) &
			  ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}
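/*
 * Illustrative note (editor's addition, not in the original file): with a
 * 4KB granule, T0SZ = 24 gives a 64 - 24 = 40-bit virtual address space,
 * matching the 40-bit physical address width above, and EARLY_PGTABLE_SIZE
 * (0x5000) budgets five 4KB pages for the early tables. A minimal sketch
 * of that size check, assuming the same constants; kept inactive so it
 * cannot affect the build:
 */
#if 0
static void early_pgtable_sanity(void)
{
	/* 0x5000 bytes == five 4KB translation-table pages */
	BUILD_BUG_ON(EARLY_PGTABLE_SIZE != 5 * 4096);
}
#endif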
/*
 * The final tables look similar to the early tables, but differ in detail.
 * These tables are in DRAM. Sub-tables are added to enable caching for
 * QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM will not be used for this purpose, so gd->arch.secure_ram can't be 0.
 */
static inline void final_mmu_setup(void)
{
	u64 tlb_addr_save = gd->arch.tlb_addr;
	unsigned int el = current_el();
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	int index;
#endif

	mem_map = final_map;

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		if (el == 3) {
			/*
			 * Only use gd->arch.secure_ram if the address is
			 * recalculated. Align to 4KB for the MMU table.
			 */
			/* put page tables in secure ram */
			index = ARRAY_SIZE(final_map) - 2;
			gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
			final_map[index].virt = gd->arch.secure_ram & ~0x3;
			final_map[index].phys = final_map[index].virt;
			final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
			final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
			tlb_addr_save = gd->arch.tlb_addr;
		} else {
			/* Use allocated (board_f.c) memory for the TLB */
			tlb_addr_save = gd->arch.tlb_allocated;
			gd->arch.tlb_addr = tlb_addr_save;
		}
	}
#endif

	/* Reset the fill pointer */
	gd->arch.tlb_fillptr = tlb_addr_save;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	setup_pgtables();
	gd->arch.tlb_addr = tlb_addr_save;

	/* flush the new MMU table */
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);
	/*
	 * The EL3 MMU is already enabled; we only need to invalidate the TLB
	 * to load the new table. The new table is compatible with the current
	 * one, so even if the MMU somehow walks through the new table before
	 * the TLB is invalidated, it still works. We therefore don't need to
	 * turn off the MMU here.
	 * When the EL2 MMU table is created by calling this function, the MMU
	 * needs to be enabled.
	 */
	set_sctlr(get_sctlr() | CR_M);
}

u64 get_page_table_size(void)
{
	return 0x10000;
}

int arch_cpu_init(void)
{
	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);
	return 0;
}

void mmu_setup(void)
{
	final_mmu_setup();
}

/*
 * This function is called from common/board_r.c.
 * It recreates the MMU table in main memory.
 */
void enable_caches(void)
{
	mmu_setup();
	__asm_invalidate_tlb_all();
	icache_enable();
	dcache_enable();
}
#endif

static inline u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = gur_in32(&gur->tp_ityp[idx]);

	if (type & TP_ITYP_AV)
		return type;

	return 0;
}

u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}
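/*
 * Illustrative example (editor's addition, not in the original file): each
 * TP cluster register packs one initiator index per byte, and
 * initiator_type() uses that index to look up tp_ityp[]. A minimal sketch
 * of a caller walking the mask returned above; the helper name is
 * hypothetical and the block is kept inactive so it cannot affect the
 * build:
 */
#if 0
static void list_arm_cores(void)
{
	u32 mask = cpu_mask();
	int core;

	/* each set bit in the mask is one available ARM core */
	for (core = 0; core < 32; core++)
		if (mask & (1 << core))
			printf("core %d is an ARM initiator\n", core);
}
#endif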
/*
 * Return the number of cores on this SoC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}

int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}

u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the core type */
}

uint get_svr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	return gur_in32(&gur->svr);
}
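/*
 * Illustrative example (editor's addition, not in the original file): the
 * SVR value read above packs the SoC version and silicon revision. A
 * minimal sketch of decoding it, assuming the SVR_MAJ()/SVR_MIN() macros
 * used elsewhere in the fsl-layerscape code; kept inactive so it cannot
 * affect the build:
 */
#if 0
static void print_soc_revision(void)
{
	u32 svr = get_svr();

	/* the low byte holds the revision: a SVR ending in 0x10 is Rev1.0 */
	printf("Rev%d.%d\n", SVR_MAJ(svr), SVR_MIN(svr));
}
#endif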
#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw, svr = gur_in32(&gur->svr);

	puts("SoC: ");

	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, svr);
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n       ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz  ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" :
		       (type == TY_ITYP_VER_A72 ? "A72" : "   "))),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	printf("\n       Bus:      %-4s MHz  ",
	       strmhz(buf, sysinfo.freq_systembus));
	printf("DDR:      %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
	printf("  FMAN:     %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
	if (soc_has_dp_ddr()) {
		printf("  DP-DDR:   %-4s MT/s",
		       strmhz(buf, sysinfo.freq_ddrbus2));
	}
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);
		if ((i % 4) == 0)
			printf("\n       %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif

#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(bd_t *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(bd_t *bis)
{
	int error = 0;

#ifdef CONFIG_FSL_MC_ENET
	error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
	fm_standard_init(bis);
#endif
	return error;
}

int arch_early_init_r(void)
{
#ifdef CONFIG_MP
	int rv = 1;
	u32 psci_ver = 0xffffffff;
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	erratum_a009635();
#endif

#ifdef CONFIG_MP
#if defined(CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT) && defined(CONFIG_ARMV8_PSCI)
	/* Check the PSCI version to determine whether PSCI is supported */
	psci_ver = sec_firmware_support_psci_version();
#endif
	if (psci_ver == 0xffffffff) {
		rv = fsl_layerscape_wake_seconday_cores();
		if (rv)
			printf("Did not wake secondary cores\n");
	}
#endif

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_FMAN_ENET
	fman_enet_init();
#endif
	return 0;
}

int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#ifdef CONFIG_LS2080A
	u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with the accurate clock frequency */
	asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/*
	 * Enable the timebase for all clusters.
	 * It is safe to do so even if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

#ifdef CONFIG_LS2080A
	/*
	 * In certain Layerscape SoCs, the clock for each core's watchdog
	 * has an enable bit in the PMU Physical Core Time Base Enable
	 * Register (PCTBENR), which allows the watchdog to operate.
	 */
	setbits_le32(pctbenr, 0xff);
#endif

	/*
	 * Enable the clock for the timer.
	 * This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}

void reset_cpu(ulong addr)
{
	u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;
	u32 val;

	/* Raise RESET_REQ_B */
	val = scfg_in32(rstcr);
	val |= 0x02;
	scfg_out32(rstcr, val);
}

phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
	phys_size_t ram_top = ram_size;

#ifdef CONFIG_SYS_MEM_TOP_HIDE
#error CONFIG_SYS_MEM_TOP_HIDE must not be used together with this function
#endif

	/* Carve the Debug Server private DRAM block from the end of DRAM */
#ifdef CONFIG_FSL_DEBUG_SERVER
	ram_top -= debug_server_get_dram_block_size();
#endif

	/* Carve the MC private DRAM block from the end of DRAM */
#ifdef CONFIG_FSL_MC_ENET
	ram_top -= mc_get_dram_block_size();
	ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

	return ram_top;
}
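/*
 * Worked example (editor's addition, not in the original file): the mask
 * in board_reserve_ram_top() rounds ram_top down to a
 * CONFIG_SYS_MC_RSV_MEM_ALIGN boundary. With hypothetical values of a
 * 2GB DRAM top at 0x80000000, a 0x20300000 MC block and 512MB
 * (0x20000000) alignment:
 *
 *	ram_top  = 0x80000000 - 0x20300000 = 0x5fd00000
 *	ram_top &= ~(0x20000000 - 1)       = 0x40000000
 *
 * so the reservation effectively grows until its base is aligned.
 */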