/*
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#ifdef CONFIG_MP
#include <asm/arch/mp.h>
#endif
#include <fm_eth.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#ifdef CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT
#include <asm/armv8/sec_firmware.h>
#endif

DECLARE_GLOBAL_DATA_PTR;

struct mm_region *mem_map = early_map;

void cpu_name(char *name)
{
        struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
        unsigned int i, svr, ver;

        svr = gur_in32(&gur->svr);
        ver = SVR_SOC_VER(svr);

        for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
                if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
                        strcpy(name, cpu_type_list[i].name);

                        if (IS_E_PROCESSOR(svr))
                                strcat(name, "E");
                        break;
                }

        if (i == ARRAY_SIZE(cpu_type_list))
                strcpy(name, "unknown");
}

#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * To start the MMU before DDR is available, we create the MMU table in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover the 40-bit address space.
 * We use a 4KB granule size, with 40 bits of physical address, T0SZ=24.
 * Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for other purposes.
 * Note, the debug print in cache_v8.c is not usable for debugging
 * these early MMU tables because the UART is not yet available.
 */
static inline void early_mmu_setup(void)
{
        unsigned int el = current_el();

        /* global data is already set up, no allocation yet */
        gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
        gd->arch.tlb_fillptr = gd->arch.tlb_addr;
        gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

        /* Create early page tables */
        setup_pgtables();

        /* point TTBR to the new table */
        set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
                          get_tcr(el, NULL, NULL) &
                          ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
                          MEMORY_ATTRIBUTES);

        set_sctlr(get_sctlr() | CR_M);
}

/*
 * The final tables look similar to the early tables, but differ in detail.
 * These tables are in DRAM. Sub tables are added to enable caching for
 * QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM will not be used for this purpose, so gd->arch.secure_ram can't be 0.
 */
static inline void final_mmu_setup(void)
{
        u64 tlb_addr_save = gd->arch.tlb_addr;
        unsigned int el = current_el();
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
        int index;
#endif

        mem_map = final_map;

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
        if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
                if (el == 3) {
                        /*
                         * Only use gd->arch.secure_ram if the address is
                         * recalculated. Align to 4KB for the MMU table.
                         */
                        /* put page tables in secure RAM */
                        index = ARRAY_SIZE(final_map) - 2;
                        gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
                        final_map[index].virt = gd->arch.secure_ram & ~0x3;
                        final_map[index].phys = final_map[index].virt;
                        final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
                        final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
                        gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
                        tlb_addr_save = gd->arch.tlb_addr;
                } else {
                        /* Use allocated (board_f.c) memory for TLB */
                        tlb_addr_save = gd->arch.tlb_allocated;
                        gd->arch.tlb_addr = tlb_addr_save;
                }
        }
#endif

        /* Reset the fill ptr */
        gd->arch.tlb_fillptr = tlb_addr_save;

        /* Create normal system page tables */
        setup_pgtables();

        /* Create emergency page tables */
        gd->arch.tlb_addr = gd->arch.tlb_fillptr;
        gd->arch.tlb_emerg = gd->arch.tlb_addr;
        setup_pgtables();
        gd->arch.tlb_addr = tlb_addr_save;

        /* flush new MMU table */
        flush_dcache_range(gd->arch.tlb_addr,
                           gd->arch.tlb_addr + gd->arch.tlb_size);

        /* point TTBR to the new table */
        set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
                          MEMORY_ATTRIBUTES);
        /*
         * The EL3 MMU is already enabled; we just need to invalidate the TLB
         * to load the new table. The new table is compatible with the current
         * table, so even if the MMU somehow walks through the new table
         * before the TLB is invalidated, it still works. So we don't need to
         * turn off the MMU here. When the EL2 MMU table is created by calling
         * this function, the MMU needs to be enabled.
         */
        set_sctlr(get_sctlr() | CR_M);
}

/* Size of memory reserved in DRAM for the MMU tables */
u64 get_page_table_size(void)
{
        return 0x10000;
}

int arch_cpu_init(void)
{
        icache_enable();
        __asm_invalidate_dcache_all();
        __asm_invalidate_tlb_all();
        early_mmu_setup();
        set_sctlr(get_sctlr() | CR_C);
        return 0;
}

void mmu_setup(void)
{
        final_mmu_setup();
}

/*
 * This function is called from common/board_r.c.
 * It recreates the MMU table in main memory.
 */
void enable_caches(void)
{
        mmu_setup();
        __asm_invalidate_tlb_all();
        icache_enable();
        dcache_enable();
}
#endif

/* Return the initiator type at position init_id in a cluster, or 0 if not available */
static inline u32 initiator_type(u32 cluster, int init_id)
{
        struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
        u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
        u32 type = 0;

        type = gur_in32(&gur->tp_ityp[idx]);
        if (type & TP_ITYP_AV)
                return type;

        return 0;
}

u32 cpu_mask(void)
{
        struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
        int i = 0, count = 0;
        u32 cluster, type, mask = 0;

        do {
                int j;

                cluster = gur_in32(&gur->tp_cluster[i].lower);
                for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
                        type = initiator_type(cluster, j);
                        if (type) {
                                if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
                                        mask |= 1 << count;
                                count++;
                        }
                }
                i++;
        } while ((cluster & TP_CLUSTER_EOC) == 0x0);

        return mask;
}

/*
 * Return the number of cores on this SoC.
 */
int cpu_numcores(void)
{
        return hweight32(cpu_mask());
}

int fsl_qoriq_core_to_cluster(unsigned int core)
{
        struct ccsr_gur __iomem *gur =
                (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
        int i = 0, count = 0;
        u32 cluster;

        do {
                int j;

                cluster = gur_in32(&gur->tp_cluster[i].lower);
                for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
                        if (initiator_type(cluster, j)) {
                                if (count == core)
                                        return i;
                                count++;
                        }
                }
                i++;
        } while ((cluster & TP_CLUSTER_EOC) == 0x0);

        return -1;      /* cannot identify the cluster */
}

u32 fsl_qoriq_core_to_type(unsigned int core)
{
        struct ccsr_gur __iomem *gur =
                (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
        int i = 0, count = 0;
        u32 cluster, type;

        do {
                int j;

                cluster = gur_in32(&gur->tp_cluster[i].lower);
                for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
                        type = initiator_type(cluster, j);
                        if (type) {
                                if (count == core)
                                        return type;
                                count++;
                        }
                }
                i++;
        } while ((cluster & TP_CLUSTER_EOC) == 0x0);

        return -1;      /* cannot identify the core type */
}

uint get_svr(void)
{
        struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

        return gur_in32(&gur->svr);
}

#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
        struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
        struct sys_info sysinfo;
        char buf[32];
        unsigned int i, core;
        u32 type, rcw, svr = gur_in32(&gur->svr);

        puts("SoC: ");

        cpu_name(buf);
        printf(" %s (0x%x)\n", buf, svr);
        memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
        get_sys_info(&sysinfo);
        puts("Clock Configuration:");
        for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
                if (!(i % 3))
                        puts("\n ");
                type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
                printf("CPU%d(%s):%-4s MHz ", core,
                       type == TY_ITYP_VER_A7 ? "A7 " :
                       (type == TY_ITYP_VER_A53 ? "A53" :
                       (type == TY_ITYP_VER_A57 ? "A57" :
                       (type == TY_ITYP_VER_A72 ? "A72" : " "))),
                       strmhz(buf, sysinfo.freq_processor[core]));
        }
        printf("\n Bus: %-4s MHz ",
               strmhz(buf, sysinfo.freq_systembus));
        printf("DDR: %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
        printf(" FMAN: %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
        if (soc_has_dp_ddr()) {
                printf(" DP-DDR: %-4s MT/s",
                       strmhz(buf, sysinfo.freq_ddrbus2));
        }
#endif
        puts("\n");

        /*
         * Display the RCW, so that no one gets confused as to what RCW
         * we're actually using for this boot.
         */
        puts("Reset Configuration Word (RCW):");
        for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
                rcw = gur_in32(&gur->rcwsr[i]);
                if ((i % 4) == 0)
                        printf("\n %08x:", i * 4);
                printf(" %08x", rcw);
        }
        puts("\n");

        return 0;
}
#endif

#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(bd_t *bis)
{
        return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(bd_t *bis)
{
        int error = 0;

#ifdef CONFIG_FSL_MC_ENET
        error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
        fm_standard_init(bis);
#endif
        return error;
}

int arch_early_init_r(void)
{
#ifdef CONFIG_MP
        int rv = 1;
        u32 psci_ver = 0xffffffff;
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
        erratum_a009635();
#endif

#ifdef CONFIG_MP
#if defined(CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT) && defined(CONFIG_ARMV8_PSCI)
        /* Check the PSCI version to determine if PSCI is supported */
        psci_ver = sec_firmware_support_psci_version();
#endif
        if (psci_ver == 0xffffffff) {
                rv = fsl_layerscape_wake_seconday_cores();
                if (rv)
                        printf("Did not wake secondary cores\n");
        }
#endif

#ifdef CONFIG_SYS_HAS_SERDES
        fsl_serdes_init();
#endif
#ifdef CONFIG_FMAN_ENET
        fman_enet_init();
#endif
        return 0;
}

int timer_init(void)
{
        u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
        u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#ifdef CONFIG_LS2080A
        u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
#endif
#ifdef COUNTER_FREQUENCY_REAL
        unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

        /* Update with accurate clock frequency */
        asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
        /*
         * Enable the timebase for all clusters.
         * It is safe to do so even if some clusters are not enabled.
         */
        out_le32(cltbenr, 0xf);
#endif

#ifdef CONFIG_LS2080A
        /*
         * In certain Layerscape SoCs, the clock for each core's watchdog
         * has an enable bit in the PMU Physical Core Time Base Enable
         * Register (PCTBENR), which allows the watchdog to operate.
         */
        setbits_le32(pctbenr, 0xff);
#endif

        /*
         * Enable the clock for the timer.
         * This is a global setting.
         */
        out_le32(cntcr, 0x1);

        return 0;
}

void reset_cpu(ulong addr)
{
        u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;
        u32 val;

        /* Raise RESET_REQ_B */
        val = scfg_in32(rstcr);
        val |= 0x02;
        scfg_out32(rstcr, val);
}

phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
        phys_size_t ram_top = ram_size;

#ifdef CONFIG_SYS_MEM_TOP_HIDE
#error CONFIG_SYS_MEM_TOP_HIDE not to be used together with this function
#endif

        /* Carve the MC private DRAM block from the end of DRAM */
#ifdef CONFIG_FSL_MC_ENET
        ram_top -= mc_get_dram_block_size();
        ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

        return ram_top;
}