/*
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#ifdef CONFIG_MP
#include <asm/arch/mp.h>
#endif
#include <fm_eth.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#ifdef CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT
#include <asm/armv8/sec_firmware.h>
#endif

DECLARE_GLOBAL_DATA_PTR;

struct mm_region *mem_map = early_map;

void cpu_name(char *name)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	unsigned int i, svr, ver;

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
			strcpy(name, cpu_type_list[i].name);

			if (IS_E_PROCESSOR(svr))
				strcat(name, "E");

			sprintf(name + strlen(name), " Rev%d.%d",
				SVR_MAJ(svr), SVR_MIN(svr));
			break;
		}

	if (i == ARRAY_SIZE(cpu_type_list))
		strcpy(name, "unknown");
}

#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * To start the MMU before DDR is available, we create the MMU tables in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover the 40-bit address space.
 * We use a 4KB granule size, with 40 bits of physical address, so T0SZ=24.
 * Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for other purposes.
 * Note: the debug print in cache_v8.c is not usable for debugging
 * these early MMU tables because the UART is not yet available.
 */
static inline void early_mmu_setup(void)
{
	unsigned int el = current_el();

	/* global data is already set up, no allocation yet */
	gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
	gd->arch.tlb_fillptr = gd->arch.tlb_addr;
	gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

	/* Create early page tables */
	setup_pgtables();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
			  get_tcr(el, NULL, NULL) &
			  ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}
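/*
 * For reference, a worked example of the geometry described above (a
 * sketch only; the level at which blocks are actually mapped is up to
 * setup_pgtables() and the entries in early_map):
 *
 *   T0SZ = 24  ->  VA span = 2^(64 - 24) = 2^40 = 1 TB
 *
 * With a 4KB granule each table level resolves 9 address bits, so a
 * 40-bit space walks three levels before reaching a block or page:
 *   level 0: bit  39       (only 2 entries used)
 *   level 1: bits 38..30   (1GB blocks possible)
 *   level 2: bits 29..21   (2MB blocks possible)
 */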
/*
 * The final tables look similar to the early tables, but differ in detail.
 * These tables are in DRAM. Sub-tables are added to enable caching for
 * QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM will not be used for this purpose, so gd->arch.secure_ram can't be 0.
 */
static inline void final_mmu_setup(void)
{
	u64 tlb_addr_save = gd->arch.tlb_addr;
	unsigned int el = current_el();
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	int index;
#endif

	mem_map = final_map;

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		if (el == 3) {
			/*
			 * Only use gd->arch.secure_ram if the address is
			 * recalculated. Align to 4KB for the MMU table.
			 */
			/* put the page tables in secure RAM */
			index = ARRAY_SIZE(final_map) - 2;
			gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
			final_map[index].virt = gd->arch.secure_ram & ~0x3;
			final_map[index].phys = final_map[index].virt;
			final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
			final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
			tlb_addr_save = gd->arch.tlb_addr;
		} else {
			/* Use memory allocated in board_f.c for the TLB */
			tlb_addr_save = gd->arch.tlb_allocated;
			gd->arch.tlb_addr = tlb_addr_save;
		}
	}
#endif

	/* Reset the fill pointer */
	gd->arch.tlb_fillptr = tlb_addr_save;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	setup_pgtables();
	gd->arch.tlb_addr = tlb_addr_save;

	/* flush the new MMU table */
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);
	/*
	 * The EL3 MMU is already enabled; we just need to invalidate the TLB
	 * to load the new table. The new table is compatible with the current
	 * one, so even if the MMU walks through the new table before the TLB
	 * is invalidated, it still works. Hence we don't need to turn off the
	 * MMU here. When the EL2 MMU table is created by calling this
	 * function, the MMU needs to be enabled.
	 */
	set_sctlr(get_sctlr() | CR_M);
}

u64 get_page_table_size(void)
{
	return 0x10000;
}

int arch_cpu_init(void)
{
	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);
	return 0;
}

void mmu_setup(void)
{
	final_mmu_setup();
}

/*
 * This function is called from common/board_r.c.
 * It recreates the MMU table in main memory.
 */
void enable_caches(void)
{
	mmu_setup();
	__asm_invalidate_tlb_all();
	icache_enable();
	dcache_enable();
}
#endif
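/*
 * Core topology walk. Each TP cluster word packs up to
 * TP_INIT_PER_CLUSTER one-byte initiator indices; initiator_type()
 * below extracts byte 'init_id' and uses it to index gur->tp_ityp[],
 * whose entry carries an availability bit (TP_ITYP_AV), a type field
 * (TP_ITYP_TYPE, e.g. ARM core) and a version field (TP_ITYP_VER).
 * Worked example with illustrative values: cluster = 0x00000100 and
 * init_id = 1 gives idx = (0x00000100 >> 8) & TP_CLUSTER_INIT_MASK = 1,
 * i.e. the second tp_ityp[] entry.
 */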
static inline u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = gur_in32(&gur->tp_ityp[idx]);

	/* Only report initiators flagged as available */
	if (type & TP_ITYP_AV)
		return type;

	return 0;
}

u32 cpu_pos_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
				mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

/*
 * Return the number of cores on this SoC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}

int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}

u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the core type */
}

uint get_svr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	return gur_in32(&gur->svr);
}
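/*
 * Illustrative sketch (not called by the boot flow): how the accessors
 * above can be combined to enumerate ARM cores. Assumes only the
 * helpers defined in this file; the 32-core bound matches the width of
 * the cpu_mask() bitmap.
 */
static inline void example_dump_core_topology(void)
{
	u32 mask = cpu_mask();
	unsigned int core;

	for (core = 0; core < 32; core++) {
		if (!(mask & (1U << core)))
			continue;
		printf("core %u: cluster %d, type 0x%x\n", core,
		       fsl_qoriq_core_to_cluster(core),
		       fsl_qoriq_core_to_type(core));
	}
}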
#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw, svr = gur_in32(&gur->svr);

	puts("SoC: ");

	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, svr);
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n       ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz  ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" :
		       (type == TY_ITYP_VER_A72 ? "A72" : "   "))),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	printf("\n       Bus:      %-4s MHz  ",
	       strmhz(buf, sysinfo.freq_systembus));
	printf("DDR:      %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
	printf("  FMAN:     %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
	if (soc_has_dp_ddr()) {
		printf("     DP-DDR:   %-4s MT/s",
		       strmhz(buf, sysinfo.freq_ddrbus2));
	}
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);
		if ((i % 4) == 0)
			printf("\n       %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif

#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(bd_t *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(bd_t *bis)
{
	int error = 0;

#ifdef CONFIG_FSL_MC_ENET
	error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
	fm_standard_init(bis);
#endif
	return error;
}

int arch_early_init_r(void)
{
#ifdef CONFIG_MP
	int rv = 1;
	u32 psci_ver = 0xffffffff;
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	erratum_a009635();
#endif

#ifdef CONFIG_MP
#if defined(CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT) && defined(CONFIG_ARMV8_PSCI)
	/* Check the PSCI version to determine whether PSCI is supported */
	psci_ver = sec_firmware_support_psci_version();
#endif
	if (psci_ver == 0xffffffff) {
		rv = fsl_layerscape_wake_secondary_cores();
		if (rv)
			printf("Did not wake secondary cores\n");
	}
#endif

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_FMAN_ENET
	fman_enet_init();
#endif
	return 0;
}

int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#ifdef CONFIG_LS2080A
	u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with the accurate clock frequency */
	asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/*
	 * Enable the timebase for all clusters. It is safe to do so even
	 * if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

#ifdef CONFIG_LS2080A
	/*
	 * In certain Layerscape SoCs, the clock for each core's watchdog
	 * has an enable bit in the PMU Physical Core Time Base Enable
	 * Register (PCTBENR), which allows the watchdog to operate.
	 */
	setbits_le32(pctbenr, 0xff);
#endif

	/*
	 * Enable the clock for the timer. This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}
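/*
 * Illustrative sketch (hypothetical helper, not part of the boot flow):
 * reading back the counter frequency programmed above. CNTFRQ_EL0 is
 * generally readable once set; this assumes we run at EL2/EL3 during
 * init, where no trap intervenes.
 */
static inline unsigned long example_read_cntfrq(void)
{
	unsigned long cntfrq;

	asm volatile("mrs %0, cntfrq_el0" : "=r" (cntfrq));
	return cntfrq;
}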
void reset_cpu(ulong addr)
{
	u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;
	u32 val;

	/* Raise RESET_REQ_B */
	val = scfg_in32(rstcr);
	val |= 0x02;
	scfg_out32(rstcr, val);
}

phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
	phys_size_t ram_top = ram_size;

#ifdef CONFIG_SYS_MEM_TOP_HIDE
#error CONFIG_SYS_MEM_TOP_HIDE not to be used together with this function
#endif

	/* Carve the MC private DRAM block from the end of DRAM */
#ifdef CONFIG_FSL_MC_ENET
	ram_top -= mc_get_dram_block_size();
	ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

	return ram_top;
}
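/*
 * Worked example for the carve-out above (illustrative numbers): with
 * ram_size = 0x80000000 (2 GB), an MC block size of 0x20000000 (512 MB)
 * and CONFIG_SYS_MC_RSV_MEM_ALIGN = 0x20000000, ram_top becomes
 * 0x80000000 - 0x20000000 = 0x60000000, which the alignment mask leaves
 * unchanged; U-Boot then stops using DRAM above 0x60000000 and the MC
 * firmware owns the reserved block.
 */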