1 /* 2 * Copyright 2014-2015 Freescale Semiconductor, Inc. 3 * 4 * SPDX-License-Identifier: GPL-2.0+ 5 */ 6 7 #include <common.h> 8 #include <asm/io.h> 9 #include <asm/errno.h> 10 #include <asm/system.h> 11 #include <asm/armv8/mmu.h> 12 #include <asm/io.h> 13 #include <asm/arch/fsl_serdes.h> 14 #include <asm/arch/soc.h> 15 #include <asm/arch/cpu.h> 16 #include <asm/arch/speed.h> 17 #ifdef CONFIG_MP 18 #include <asm/arch/mp.h> 19 #endif 20 #include <fm_eth.h> 21 #include <fsl_debug_server.h> 22 #include <fsl-mc/fsl_mc.h> 23 #ifdef CONFIG_FSL_ESDHC 24 #include <fsl_esdhc.h> 25 #endif 26 27 DECLARE_GLOBAL_DATA_PTR; 28 29 void cpu_name(char *name) 30 { 31 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR); 32 unsigned int i, svr, ver; 33 34 svr = gur_in32(&gur->svr); 35 ver = SVR_SOC_VER(svr); 36 37 for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++) 38 if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) { 39 strcpy(name, cpu_type_list[i].name); 40 41 if (IS_E_PROCESSOR(svr)) 42 strcat(name, "E"); 43 break; 44 } 45 46 if (i == ARRAY_SIZE(cpu_type_list)) 47 strcpy(name, "unknown"); 48 } 49 50 #ifndef CONFIG_SYS_DCACHE_OFF 51 /* 52 * Set the block entries according to the information of the table. 53 */ 54 static int set_block_entry(const struct sys_mmu_table *list, 55 struct table_info *table) 56 { 57 u64 block_size = 0, block_shift = 0; 58 u64 block_addr, index; 59 int j; 60 61 if (table->entry_size == BLOCK_SIZE_L1) { 62 block_size = BLOCK_SIZE_L1; 63 block_shift = SECTION_SHIFT_L1; 64 } else if (table->entry_size == BLOCK_SIZE_L2) { 65 block_size = BLOCK_SIZE_L2; 66 block_shift = SECTION_SHIFT_L2; 67 } else { 68 return -EINVAL; 69 } 70 71 block_addr = list->phys_addr; 72 index = (list->virt_addr - table->table_base) >> block_shift; 73 74 for (j = 0; j < (list->size >> block_shift); j++) { 75 set_pgtable_section(table->ptr, 76 index, 77 block_addr, 78 list->memory_type, 79 list->attribute); 80 block_addr += block_size; 81 index++; 82 } 83 84 return 0; 85 } 86 87 /* 88 * Find the corresponding table entry for the list. 89 */ 90 static int find_table(const struct sys_mmu_table *list, 91 struct table_info *table, u64 *level0_table) 92 { 93 u64 index = 0, level = 0; 94 u64 *level_table = level0_table; 95 u64 temp_base = 0, block_size = 0, block_shift = 0; 96 97 while (level < 3) { 98 if (level == 0) { 99 block_size = BLOCK_SIZE_L0; 100 block_shift = SECTION_SHIFT_L0; 101 } else if (level == 1) { 102 block_size = BLOCK_SIZE_L1; 103 block_shift = SECTION_SHIFT_L1; 104 } else if (level == 2) { 105 block_size = BLOCK_SIZE_L2; 106 block_shift = SECTION_SHIFT_L2; 107 } 108 109 index = 0; 110 while (list->virt_addr >= temp_base) { 111 index++; 112 temp_base += block_size; 113 } 114 115 temp_base -= block_size; 116 117 if ((level_table[index - 1] & PMD_TYPE_MASK) == 118 PMD_TYPE_TABLE) { 119 level_table = (u64 *)(level_table[index - 1] & 120 ~PMD_TYPE_MASK); 121 level++; 122 continue; 123 } else { 124 if (level == 0) 125 return -EINVAL; 126 127 if ((list->phys_addr + list->size) > 128 (temp_base + block_size * NUM_OF_ENTRY)) 129 return -EINVAL; 130 131 /* 132 * Check the address and size of the list member is 133 * aligned with the block size. 134 */ 135 if (((list->phys_addr & (block_size - 1)) != 0) || 136 ((list->size & (block_size - 1)) != 0)) 137 return -EINVAL; 138 139 table->ptr = level_table; 140 table->table_base = temp_base - 141 ((index - 1) << block_shift); 142 table->entry_size = block_size; 143 144 return 0; 145 } 146 } 147 return -EINVAL; 148 } 149 150 /* 151 * To start MMU before DDR is available, we create MMU table in SRAM. 152 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three 153 * levels of translation tables here to cover 40-bit address space. 154 * We use 4KB granule size, with 40 bits physical address, T0SZ=24 155 * Level 0 IA[39], table address @0 156 * Level 1 IA[38:30], table address @0x1000, 0x2000 157 * Level 2 IA[29:21], table address @0x3000, 0x4000 158 * Address above 0x5000 is free for other purpose. 159 */ 160 static inline void early_mmu_setup(void) 161 { 162 unsigned int el, i; 163 u64 *level0_table = (u64 *)CONFIG_SYS_FSL_OCRAM_BASE; 164 u64 *level1_table0 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x1000); 165 u64 *level1_table1 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x2000); 166 u64 *level2_table0 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x3000); 167 u64 *level2_table1 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x4000); 168 169 struct table_info table = {level0_table, 0, BLOCK_SIZE_L0}; 170 171 /* Invalidate all table entries */ 172 memset(level0_table, 0, 0x5000); 173 174 /* Fill in the table entries */ 175 set_pgtable_table(level0_table, 0, level1_table0); 176 set_pgtable_table(level0_table, 1, level1_table1); 177 set_pgtable_table(level1_table0, 0, level2_table0); 178 179 #ifdef CONFIG_FSL_LSCH3 180 set_pgtable_table(level1_table0, 181 CONFIG_SYS_FLASH_BASE >> SECTION_SHIFT_L1, 182 level2_table1); 183 #elif defined(CONFIG_FSL_LSCH2) 184 set_pgtable_table(level1_table0, 1, level2_table1); 185 #endif 186 /* Find the table and fill in the block entries */ 187 for (i = 0; i < ARRAY_SIZE(early_mmu_table); i++) { 188 if (find_table(&early_mmu_table[i], 189 &table, level0_table) == 0) { 190 /* 191 * If find_table() returns error, it cannot be dealt 192 * with here. Breakpoint can be added for debugging. 193 */ 194 set_block_entry(&early_mmu_table[i], &table); 195 /* 196 * If set_block_entry() returns error, it cannot be 197 * dealt with here too. 198 */ 199 } 200 } 201 202 el = current_el(); 203 204 set_ttbr_tcr_mair(el, (u64)level0_table, LAYERSCAPE_TCR, 205 MEMORY_ATTRIBUTES); 206 set_sctlr(get_sctlr() | CR_M); 207 } 208 209 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE 210 /* 211 * Called from final mmu setup. The phys_addr is new, non-existing 212 * address. A new sub table is created @level2_table_secure to cover 213 * size of CONFIG_SYS_MEM_RESERVE_SECURE memory. 214 */ 215 static inline int final_secure_ddr(u64 *level0_table, 216 u64 *level2_table_secure, 217 phys_addr_t phys_addr) 218 { 219 int ret = -EINVAL; 220 struct table_info table = {}; 221 struct sys_mmu_table ddr_entry = { 222 0, 0, BLOCK_SIZE_L1, MT_NORMAL, 223 PMD_SECT_OUTER_SHARE | PMD_SECT_NS 224 }; 225 u64 index; 226 227 /* Need to create a new table */ 228 ddr_entry.virt_addr = phys_addr & ~(BLOCK_SIZE_L1 - 1); 229 ddr_entry.phys_addr = phys_addr & ~(BLOCK_SIZE_L1 - 1); 230 ret = find_table(&ddr_entry, &table, level0_table); 231 if (ret) 232 return ret; 233 index = (ddr_entry.virt_addr - table.table_base) >> SECTION_SHIFT_L1; 234 set_pgtable_table(table.ptr, index, level2_table_secure); 235 table.ptr = level2_table_secure; 236 table.table_base = ddr_entry.virt_addr; 237 table.entry_size = BLOCK_SIZE_L2; 238 ret = set_block_entry(&ddr_entry, &table); 239 if (ret) { 240 printf("MMU error: could not fill non-secure ddr block entries\n"); 241 return ret; 242 } 243 ddr_entry.virt_addr = phys_addr; 244 ddr_entry.phys_addr = phys_addr; 245 ddr_entry.size = CONFIG_SYS_MEM_RESERVE_SECURE; 246 ddr_entry.attribute = PMD_SECT_OUTER_SHARE; 247 ret = find_table(&ddr_entry, &table, level0_table); 248 if (ret) { 249 printf("MMU error: could not find secure ddr table\n"); 250 return ret; 251 } 252 ret = set_block_entry(&ddr_entry, &table); 253 if (ret) 254 printf("MMU error: could not set secure ddr block entry\n"); 255 256 return ret; 257 } 258 #endif 259 260 /* 261 * The final tables look similar to early tables, but different in detail. 262 * These tables are in DRAM. Sub tables are added to enable cache for 263 * QBMan and OCRAM. 264 * 265 * Put the MMU table in secure memory if gd->secure_ram is valid. 266 * OCRAM will be not used for this purpose so gd->secure_ram can't be 0. 267 * 268 * Level 1 table 0 contains 512 entries for each 1GB from 0 to 512GB. 269 * Level 1 table 1 contains 512 entries for each 1GB from 512GB to 1TB. 270 * Level 2 table 0 contains 512 entries for each 2MB from 0 to 1GB. 271 * 272 * For LSCH3: 273 * Level 2 table 1 contains 512 entries for each 2MB from 32GB to 33GB. 274 * For LSCH2: 275 * Level 2 table 1 contains 512 entries for each 2MB from 1GB to 2GB. 276 * Level 2 table 2 contains 512 entries for each 2MB from 20GB to 21GB. 277 */ 278 static inline void final_mmu_setup(void) 279 { 280 unsigned int el = current_el(); 281 unsigned int i; 282 u64 *level0_table = (u64 *)gd->arch.tlb_addr; 283 u64 *level1_table0; 284 u64 *level1_table1; 285 u64 *level2_table0; 286 u64 *level2_table1; 287 #ifdef CONFIG_FSL_LSCH2 288 u64 *level2_table2; 289 #endif 290 struct table_info table = {NULL, 0, BLOCK_SIZE_L0}; 291 292 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE 293 u64 *level2_table_secure; 294 295 if (el == 3) { 296 /* 297 * Only use gd->secure_ram if the address is recalculated 298 * Align to 4KB for MMU table 299 */ 300 if (gd->secure_ram & MEM_RESERVE_SECURE_MAINTAINED) 301 level0_table = (u64 *)(gd->secure_ram & ~0xfff); 302 else 303 printf("MMU warning: gd->secure_ram is not maintained, disabled.\n"); 304 } 305 #endif 306 level1_table0 = level0_table + 512; 307 level1_table1 = level1_table0 + 512; 308 level2_table0 = level1_table1 + 512; 309 level2_table1 = level2_table0 + 512; 310 #ifdef CONFIG_FSL_LSCH2 311 level2_table2 = level2_table1 + 512; 312 #endif 313 table.ptr = level0_table; 314 315 /* Invalidate all table entries */ 316 memset(level0_table, 0, PGTABLE_SIZE); 317 318 /* Fill in the table entries */ 319 set_pgtable_table(level0_table, 0, level1_table0); 320 set_pgtable_table(level0_table, 1, level1_table1); 321 set_pgtable_table(level1_table0, 0, level2_table0); 322 #ifdef CONFIG_FSL_LSCH3 323 set_pgtable_table(level1_table0, 324 CONFIG_SYS_FSL_QBMAN_BASE >> SECTION_SHIFT_L1, 325 level2_table1); 326 #elif defined(CONFIG_FSL_LSCH2) 327 set_pgtable_table(level1_table0, 1, level2_table1); 328 set_pgtable_table(level1_table0, 329 CONFIG_SYS_FSL_QBMAN_BASE >> SECTION_SHIFT_L1, 330 level2_table2); 331 #endif 332 333 /* Find the table and fill in the block entries */ 334 for (i = 0; i < ARRAY_SIZE(final_mmu_table); i++) { 335 if (find_table(&final_mmu_table[i], 336 &table, level0_table) == 0) { 337 if (set_block_entry(&final_mmu_table[i], 338 &table) != 0) { 339 printf("MMU error: could not set block entry for %p\n", 340 &final_mmu_table[i]); 341 } 342 343 } else { 344 printf("MMU error: could not find the table for %p\n", 345 &final_mmu_table[i]); 346 } 347 } 348 /* Set the secure memory to secure in MMU */ 349 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE 350 if (el == 3 && gd->secure_ram & MEM_RESERVE_SECURE_MAINTAINED) { 351 #ifdef CONFIG_FSL_LSCH3 352 level2_table_secure = level2_table1 + 512; 353 #elif defined(CONFIG_FSL_LSCH2) 354 level2_table_secure = level2_table2 + 512; 355 #endif 356 if (!final_secure_ddr(level0_table, 357 level2_table_secure, 358 gd->secure_ram & ~0x3)) { 359 gd->secure_ram |= MEM_RESERVE_SECURE_SECURED; 360 debug("Now MMU table is in secured memory at 0x%llx\n", 361 gd->secure_ram & ~0x3); 362 } else { 363 printf("MMU warning: Failed to secure DDR\n"); 364 } 365 } 366 #endif 367 368 /* flush new MMU table */ 369 flush_dcache_range((ulong)level0_table, 370 (ulong)level0_table + gd->arch.tlb_size); 371 372 #ifdef CONFIG_SYS_DPAA_FMAN 373 flush_dcache_all(); 374 #endif 375 /* point TTBR to the new table */ 376 set_ttbr_tcr_mair(el, (u64)level0_table, LAYERSCAPE_TCR_FINAL, 377 MEMORY_ATTRIBUTES); 378 /* 379 * MMU is already enabled, just need to invalidate TLB to load the 380 * new table. The new table is compatible with the current table, if 381 * MMU somehow walks through the new table before invalidation TLB, 382 * it still works. So we don't need to turn off MMU here. 383 */ 384 } 385 386 int arch_cpu_init(void) 387 { 388 icache_enable(); 389 __asm_invalidate_dcache_all(); 390 __asm_invalidate_tlb_all(); 391 early_mmu_setup(); 392 set_sctlr(get_sctlr() | CR_C); 393 return 0; 394 } 395 396 /* 397 * This function is called from lib/board.c. 398 * It recreates MMU table in main memory. MMU and d-cache are enabled earlier. 399 * There is no need to disable d-cache for this operation. 400 */ 401 void enable_caches(void) 402 { 403 final_mmu_setup(); 404 __asm_invalidate_tlb_all(); 405 } 406 #endif 407 408 static inline u32 initiator_type(u32 cluster, int init_id) 409 { 410 struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR); 411 u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK; 412 u32 type = 0; 413 414 type = gur_in32(&gur->tp_ityp[idx]); 415 if (type & TP_ITYP_AV) 416 return type; 417 418 return 0; 419 } 420 421 u32 cpu_mask(void) 422 { 423 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR); 424 int i = 0, count = 0; 425 u32 cluster, type, mask = 0; 426 427 do { 428 int j; 429 430 cluster = gur_in32(&gur->tp_cluster[i].lower); 431 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) { 432 type = initiator_type(cluster, j); 433 if (type) { 434 if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM) 435 mask |= 1 << count; 436 count++; 437 } 438 } 439 i++; 440 } while ((cluster & TP_CLUSTER_EOC) == 0x0); 441 442 return mask; 443 } 444 445 /* 446 * Return the number of cores on this SOC. 447 */ 448 int cpu_numcores(void) 449 { 450 return hweight32(cpu_mask()); 451 } 452 453 int fsl_qoriq_core_to_cluster(unsigned int core) 454 { 455 struct ccsr_gur __iomem *gur = 456 (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR); 457 int i = 0, count = 0; 458 u32 cluster; 459 460 do { 461 int j; 462 463 cluster = gur_in32(&gur->tp_cluster[i].lower); 464 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) { 465 if (initiator_type(cluster, j)) { 466 if (count == core) 467 return i; 468 count++; 469 } 470 } 471 i++; 472 } while ((cluster & TP_CLUSTER_EOC) == 0x0); 473 474 return -1; /* cannot identify the cluster */ 475 } 476 477 u32 fsl_qoriq_core_to_type(unsigned int core) 478 { 479 struct ccsr_gur __iomem *gur = 480 (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR); 481 int i = 0, count = 0; 482 u32 cluster, type; 483 484 do { 485 int j; 486 487 cluster = gur_in32(&gur->tp_cluster[i].lower); 488 for (j = 0; j < TP_INIT_PER_CLUSTER; j++) { 489 type = initiator_type(cluster, j); 490 if (type) { 491 if (count == core) 492 return type; 493 count++; 494 } 495 } 496 i++; 497 } while ((cluster & TP_CLUSTER_EOC) == 0x0); 498 499 return -1; /* cannot identify the cluster */ 500 } 501 502 #ifdef CONFIG_DISPLAY_CPUINFO 503 int print_cpuinfo(void) 504 { 505 struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR); 506 struct sys_info sysinfo; 507 char buf[32]; 508 unsigned int i, core; 509 u32 type, rcw; 510 511 puts("SoC: "); 512 513 cpu_name(buf); 514 printf(" %s (0x%x)\n", buf, gur_in32(&gur->svr)); 515 memset((u8 *)buf, 0x00, ARRAY_SIZE(buf)); 516 get_sys_info(&sysinfo); 517 puts("Clock Configuration:"); 518 for_each_cpu(i, core, cpu_numcores(), cpu_mask()) { 519 if (!(i % 3)) 520 puts("\n "); 521 type = TP_ITYP_VER(fsl_qoriq_core_to_type(core)); 522 printf("CPU%d(%s):%-4s MHz ", core, 523 type == TY_ITYP_VER_A7 ? "A7 " : 524 (type == TY_ITYP_VER_A53 ? "A53" : 525 (type == TY_ITYP_VER_A57 ? "A57" : " ")), 526 strmhz(buf, sysinfo.freq_processor[core])); 527 } 528 printf("\n Bus: %-4s MHz ", 529 strmhz(buf, sysinfo.freq_systembus)); 530 printf("DDR: %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus)); 531 #ifdef CONFIG_SYS_DPAA_FMAN 532 printf(" FMAN: %-4s MHz", strmhz(buf, sysinfo.freq_fman[0])); 533 #endif 534 #ifdef CONFIG_SYS_FSL_HAS_DP_DDR 535 printf(" DP-DDR: %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus2)); 536 #endif 537 puts("\n"); 538 539 /* 540 * Display the RCW, so that no one gets confused as to what RCW 541 * we're actually using for this boot. 542 */ 543 puts("Reset Configuration Word (RCW):"); 544 for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) { 545 rcw = gur_in32(&gur->rcwsr[i]); 546 if ((i % 4) == 0) 547 printf("\n %08x:", i * 4); 548 printf(" %08x", rcw); 549 } 550 puts("\n"); 551 552 return 0; 553 } 554 #endif 555 556 #ifdef CONFIG_FSL_ESDHC 557 int cpu_mmc_init(bd_t *bis) 558 { 559 return fsl_esdhc_mmc_init(bis); 560 } 561 #endif 562 563 int cpu_eth_init(bd_t *bis) 564 { 565 int error = 0; 566 567 #ifdef CONFIG_FSL_MC_ENET 568 error = fsl_mc_ldpaa_init(bis); 569 #endif 570 #ifdef CONFIG_FMAN_ENET 571 fm_standard_init(bis); 572 #endif 573 return error; 574 } 575 576 int arch_early_init_r(void) 577 { 578 #ifdef CONFIG_MP 579 int rv = 1; 580 #endif 581 582 #ifdef CONFIG_SYS_FSL_ERRATUM_A009635 583 erratum_a009635(); 584 #endif 585 586 #ifdef CONFIG_MP 587 rv = fsl_layerscape_wake_seconday_cores(); 588 if (rv) 589 printf("Did not wake secondary cores\n"); 590 #endif 591 592 #ifdef CONFIG_SYS_HAS_SERDES 593 fsl_serdes_init(); 594 #endif 595 #ifdef CONFIG_FMAN_ENET 596 fman_enet_init(); 597 #endif 598 return 0; 599 } 600 601 int timer_init(void) 602 { 603 u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR; 604 #ifdef CONFIG_FSL_LSCH3 605 u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR; 606 #endif 607 #ifdef COUNTER_FREQUENCY_REAL 608 unsigned long cntfrq = COUNTER_FREQUENCY_REAL; 609 610 /* Update with accurate clock frequency */ 611 asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory"); 612 #endif 613 614 #ifdef CONFIG_FSL_LSCH3 615 /* Enable timebase for all clusters. 616 * It is safe to do so even some clusters are not enabled. 617 */ 618 out_le32(cltbenr, 0xf); 619 #endif 620 621 /* Enable clock for timer 622 * This is a global setting. 623 */ 624 out_le32(cntcr, 0x1); 625 626 return 0; 627 } 628 629 void reset_cpu(ulong addr) 630 { 631 u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR; 632 u32 val; 633 634 /* Raise RESET_REQ_B */ 635 val = scfg_in32(rstcr); 636 val |= 0x02; 637 scfg_out32(rstcr, val); 638 } 639 640 phys_size_t board_reserve_ram_top(phys_size_t ram_size) 641 { 642 phys_size_t ram_top = ram_size; 643 644 #ifdef CONFIG_SYS_MEM_TOP_HIDE 645 #error CONFIG_SYS_MEM_TOP_HIDE not to be used together with this function 646 #endif 647 /* Carve the Debug Server private DRAM block from the end of DRAM */ 648 #ifdef CONFIG_FSL_DEBUG_SERVER 649 ram_top -= debug_server_get_dram_block_size(); 650 #endif 651 652 /* Carve the MC private DRAM block from the end of DRAM */ 653 #ifdef CONFIG_FSL_MC_ENET 654 ram_top -= mc_get_dram_block_size(); 655 ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1); 656 #endif 657 658 return ram_top; 659 } 660