/*
 * Copyright 2014 Freescale Semiconductor, Inc.
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <asm/arch/clock.h>
#include <asm/io.h>
#include <asm/arch/immap_ls102xa.h>
#include <asm/cache.h>
#include <asm/system.h>
#include <tsec.h>
#include <netdev.h>
#include <fsl_esdhc.h>

#include "fsl_epu.h"

#define DCSR_RCPM2_BLOCK_OFFSET		0x223000
#define DCSR_RCPM2_CPMFSMCR0		0x400
#define DCSR_RCPM2_CPMFSMSR0		0x404
#define DCSR_RCPM2_CPMFSMCR1		0x414
#define DCSR_RCPM2_CPMFSMSR1		0x418
#define CPMFSMSR_FSM_STATE_MASK		0x7f

DECLARE_GLOBAL_DATA_PTR;

#ifndef CONFIG_SYS_DCACHE_OFF

/*
 * Bit[1] of the descriptor indicates the descriptor type,
 * and bit[0] indicates whether the descriptor is valid.
 */
#define PMD_TYPE_TABLE		0x3
#define PMD_TYPE_SECT		0x1

/* AttrIndx[2:0] */
#define PMD_ATTRINDX(t)		((t) << 2)

/* Section */
#define PMD_SECT_AF		(1 << 10)

#define BLOCK_SIZE_L1		(1UL << 30)
#define BLOCK_SIZE_L2		(1UL << 21)

/* TTBCR flags */
#define TTBCR_EAE		(1 << 31)
#define TTBCR_T0SZ(x)		((x) << 0)
#define TTBCR_T1SZ(x)		((x) << 16)
#define TTBCR_USING_TTBR0	(TTBCR_T0SZ(0) | TTBCR_T1SZ(0))
#define TTBCR_IRGN0_NC		(0 << 8)
#define TTBCR_IRGN0_WBWA	(1 << 8)
#define TTBCR_IRGN0_WT		(2 << 8)
#define TTBCR_IRGN0_WBNWA	(3 << 8)
#define TTBCR_IRGN0_MASK	(3 << 8)
#define TTBCR_ORGN0_NC		(0 << 10)
#define TTBCR_ORGN0_WBWA	(1 << 10)
#define TTBCR_ORGN0_WT		(2 << 10)
#define TTBCR_ORGN0_WBNWA	(3 << 10)
#define TTBCR_ORGN0_MASK	(3 << 10)
#define TTBCR_SHARED_NON	(0 << 12)
#define TTBCR_SHARED_OUTER	(2 << 12)
#define TTBCR_SHARED_INNER	(3 << 12)
#define TTBCR_EPD0		(0 << 7)
#define TTBCR			(TTBCR_SHARED_NON | \
				 TTBCR_ORGN0_NC | \
				 TTBCR_IRGN0_NC | \
				 TTBCR_USING_TTBR0 | \
				 TTBCR_EAE)

/*
 * Memory region attributes for LPAE (defined in pgtable):
 *
 * n = AttrIndx[2:0]
 *
 *			n	MAIR
 * UNCACHED		000	00000000
 * BUFFERABLE		001	01000100
 * DEV_WC		001	01000100
 * WRITETHROUGH		010	10101010
 * WRITEBACK		011	11101110
 * DEV_CACHED		011	11101110
 * DEV_SHARED		100	00000100
 * DEV_NONSHARED	100	00000100
 * unused		101
 * unused		110
 * WRITEALLOC		111	11111111
 */
#define MT_MAIR0		0xeeaa4400
#define MT_MAIR1		0xff000004
#define MT_STRONLY_ORDER	0
#define MT_NORMAL_NC		1
#define MT_DEVICE_MEM		4
#define MT_NORMAL		7

/* The phy_addr must be aligned to 4KB */
static inline void set_pgtable(u32 *page_table, u32 index, u32 phy_addr)
{
	u32 value = phy_addr | PMD_TYPE_TABLE;

	page_table[2 * index] = value;
	page_table[2 * index + 1] = 0;
}

/* The phy_addr must be aligned to 4KB */
static inline void set_pgsection(u32 *page_table, u32 index, u64 phy_addr,
				 u32 memory_type)
{
	u64 value;

	value = phy_addr | PMD_TYPE_SECT | PMD_SECT_AF;
	value |= PMD_ATTRINDX(memory_type);
	page_table[2 * index] = value & 0xFFFFFFFF;
	page_table[2 * index + 1] = (value >> 32) & 0xFFFFFFFF;
}
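
/*
 * For illustration: with the macros above, the level 0 entry that
 * mmu_setup() below installs for the first gigabyte of DDR at
 * 0x80000000 (index 2, MT_NORMAL) works out to
 *
 *	0x80000000 | PMD_TYPE_SECT | PMD_SECT_AF | PMD_ATTRINDX(MT_NORMAL)
 *		= 0x80000000 | 0x1 | 0x400 | 0x1c
 *		= 0x8000041d
 *
 * so set_pgsection() stores 0x8000041d in page_table[2 * 2] and 0 in
 * page_table[2 * 2 + 1], the upper word of the 64-bit long descriptor.
 */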

/*
 * Start the MMU after DDR is available; we create the MMU table in DRAM.
 * The base address of the TTLB is gd->arch.tlb_addr. We use two
 * levels of translation tables here to cover the 40-bit address space.
 *
 * The TTLBs are located at PHY 2G~4G.
 *
 * VA mapping:
 *
 *  -------  <---- 0GB
 * |       |
 * |       |
 * |-------| <---- 0x24000000
 * |///////| ===> 192MB VA map for PCIe1 with offset 0x40_0000_0000
 * |-------| <---- 0x30000000
 * |       |
 * |-------| <---- 0x34000000
 * |///////| ===> 192MB VA map for PCIe2 with offset 0x48_0000_0000
 * |-------| <---- 0x40000000
 * |       |
 * |-------| <---- 0x80000000 DDR0 space start
 * |\\\\\\\|
 * |\\\\\\\| ===> 2GB VA map for 2GB DDR0 Memory space
 * |\\\\\\\|
 *  -------  <---- 4GB DDR0 space end
 */
static void mmu_setup(void)
{
	u32 *level0_table = (u32 *)gd->arch.tlb_addr;
	u32 *level1_table = (u32 *)(gd->arch.tlb_addr + 0x1000);
	u64 va_start = 0;
	u32 reg;
	int i;

	/* Level 0 Table 2-3 are used to map DDR */
	set_pgsection(level0_table, 3, 3 * BLOCK_SIZE_L1, MT_NORMAL);
	set_pgsection(level0_table, 2, 2 * BLOCK_SIZE_L1, MT_NORMAL);
	/* Level 0 Table 1 is used to map device */
	set_pgsection(level0_table, 1, 1 * BLOCK_SIZE_L1, MT_DEVICE_MEM);
	/* Level 0 Table 0 is used to map device including PCIe MEM */
	set_pgtable(level0_table, 0, (u32)level1_table);

	/* Level 1 has 512 entries */
	for (i = 0; i < 512; i++) {
		/* Mapping for PCIe 1 */
		if (va_start >= CONFIG_SYS_PCIE1_VIRT_ADDR &&
		    va_start < (CONFIG_SYS_PCIE1_VIRT_ADDR +
				CONFIG_SYS_PCIE_MMAP_SIZE))
			set_pgsection(level1_table, i,
				      CONFIG_SYS_PCIE1_PHYS_BASE + va_start,
				      MT_DEVICE_MEM);
		/* Mapping for PCIe 2 */
		else if (va_start >= CONFIG_SYS_PCIE2_VIRT_ADDR &&
			 va_start < (CONFIG_SYS_PCIE2_VIRT_ADDR +
				     CONFIG_SYS_PCIE_MMAP_SIZE))
			set_pgsection(level1_table, i,
				      CONFIG_SYS_PCIE2_PHYS_BASE + va_start,
				      MT_DEVICE_MEM);
		else
			set_pgsection(level1_table, i,
				      va_start,
				      MT_DEVICE_MEM);
		va_start += BLOCK_SIZE_L2;
	}

	asm volatile("dsb sy;isb");
	asm volatile("mcr p15, 0, %0, c2, c0, 2"	/* Write RT to TTBCR */
			: : "r" (TTBCR) : "memory");
	asm volatile("mcrr p15, 0, %0, %1, c2"		/* TTBR 0 */
			: : "r" ((u32)level0_table), "r" (0) : "memory");
	asm volatile("mcr p15, 0, %0, c10, c2, 0"	/* write MAIR 0 */
			: : "r" (MT_MAIR0) : "memory");
	asm volatile("mcr p15, 0, %0, c10, c2, 1"	/* write MAIR 1 */
			: : "r" (MT_MAIR1) : "memory");

	/* Set the access control to all-supervisor */
	asm volatile("mcr p15, 0, %0, c3, c0, 0"
			: : "r" (~0));

	/* Enable the MMU */
	reg = get_cr();
	set_cr(reg | CR_M);
}
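
/*
 * For reference, with the flags selected above mmu_setup() programs
 * TTBCR = TTBCR_EAE = 0x80000000: long-descriptor (LPAE) translation
 * through TTBR0 only with T0SZ = T1SZ = 0, and non-shareable,
 * non-cacheable table walks. MAIR0/MAIR1 are loaded with MT_MAIR0 and
 * MT_MAIR1, which encode the attribute table in the comment above.
 */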

/*
 * This function is called from lib/board.c. It recreates the MMU
 * table in main memory. The MMU and the i/d-caches are enabled here.
 */
void enable_caches(void)
{
	/* Invalidate all TLB */
	mmu_page_table_flush(gd->arch.tlb_addr,
			     gd->arch.tlb_addr + gd->arch.tlb_size);
	/* Set up and enable the MMU */
	mmu_setup();

	/* Invalidate & enable the d-cache */
	invalidate_dcache_all();
	set_cr(get_cr() | CR_C);
}
#endif /* #ifndef CONFIG_SYS_DCACHE_OFF */

#if defined(CONFIG_DISPLAY_CPUINFO)
int print_cpuinfo(void)
{
	char buf1[32], buf2[32];
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	unsigned int svr, major, minor, ver, i;

	svr = in_be32(&gur->svr);
	major = SVR_MAJ(svr);
	minor = SVR_MIN(svr);

	puts("CPU: Freescale LayerScape ");

	ver = SVR_SOC_VER(svr);
	switch (ver) {
	case SOC_VER_SLS1020:
		puts("SLS1020");
		break;
	case SOC_VER_LS1020:
		puts("LS1020");
		break;
	case SOC_VER_LS1021:
		puts("LS1021");
		break;
	case SOC_VER_LS1022:
		puts("LS1022");
		break;
	default:
		puts("Unknown");
		break;
	}

	if (IS_E_PROCESSOR(svr) && (ver != SOC_VER_SLS1020))
		puts("E");

	printf(", Version: %d.%d, (0x%08x)\n", major, minor, svr);

	puts("Clock Configuration:");

	printf("\n CPU0(ARMV7):%-4s MHz, ", strmhz(buf1, gd->cpu_clk));
	printf("\n Bus:%-4s MHz, ", strmhz(buf1, gd->bus_clk));
	printf("DDR:%-4s MHz (%s MT/s data rate), ",
	       strmhz(buf1, gd->mem_clk/2), strmhz(buf2, gd->mem_clk));
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		u32 rcw = in_be32(&gur->rcwsr[i]);

		if ((i % 4) == 0)
			printf("\n %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif

#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(bd_t *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(bd_t *bis)
{
#ifdef CONFIG_TSEC_ENET
	tsec_standard_init(bis);
#endif

	return 0;
}

int arch_cpu_init(void)
{
	void *epu_base = (void *)(CONFIG_SYS_DCSRBAR + EPU_BLOCK_OFFSET);
	void *rcpm2_base =
		(void *)(CONFIG_SYS_DCSRBAR + DCSR_RCPM2_BLOCK_OFFSET);
	u32 state;

	/*
	 * The RCPM FSM states may not be reset after power-on,
	 * so reset them here.
	 */
	state = in_be32(rcpm2_base + DCSR_RCPM2_CPMFSMSR0) &
		CPMFSMSR_FSM_STATE_MASK;
	if (state != 0) {
		out_be32(rcpm2_base + DCSR_RCPM2_CPMFSMCR0, 0x80);
		out_be32(rcpm2_base + DCSR_RCPM2_CPMFSMCR0, 0x0);
	}

	state = in_be32(rcpm2_base + DCSR_RCPM2_CPMFSMSR1) &
		CPMFSMSR_FSM_STATE_MASK;
	if (state != 0) {
		out_be32(rcpm2_base + DCSR_RCPM2_CPMFSMCR1, 0x80);
		out_be32(rcpm2_base + DCSR_RCPM2_CPMFSMCR1, 0x0);
	}
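
	/*
	 * Each write pair above sets and then clears bit 7 (0x80) in the
	 * corresponding CPMFSMCR register, which appears to force that FSM
	 * back to state 0; the registers are only touched when CPMFSMSR
	 * reports a non-zero state.
	 */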

	/*
	 * After wakeup from deep sleep, clear the EPU registers
	 * as early as possible to prevent possible issues. It is
	 * also safe to clear them during a normal boot.
	 */
	fsl_epu_clean(epu_base);

	return 0;
}

#ifdef CONFIG_ARMV7_NONSEC
/* Set the address from which the secondary core starts. */
void smp_set_core_boot_addr(unsigned long addr, int corenr)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	out_be32(&gur->scratchrw[0], addr);
}

/* Release the secondary core from its holdoff state and kick it */
void smp_kick_all_cpus(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	out_be32(&gur->brrl, 0x2);

	/*
	 * On LS1, STANDBYWFE is not captured outside the ARM module in the
	 * SoC, so add a delay to wait for the boot ROM to execute WFE.
	 */
	udelay(1);

	asm volatile("sev");
}
#endif
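
/*
 * Bring-up summary for the secondary core: smp_set_core_boot_addr() ignores
 * corenr and stores the entry point in the single GUR scratchrw[0] register,
 * which is sufficient on this dual-core SoC; smp_kick_all_cpus() then
 * releases core 1 from boot hold-off (brrl = 0x2, presumably the release
 * bit for core 1) and wakes it from its WFE loop with SEV.
 */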