/*
 * (C) Copyright 2014-2015 Freescale Semiconductor
 *
 * SPDX-License-Identifier:	GPL-2.0+
 *
 * Extracted from armv8/start.S
 *
 * AArch64 GNU AS source. Early (pre-relocation) low-level init for
 * Freescale/NXP Layerscape SoCs: GIC base discovery, CCN-504 interconnect
 * setup, SMMU page size, TZPC/TZASC security configuration and OCRAM init.
 */

#include <config.h>
#include <linux/linkage.h>
#include <asm/gic.h>
#include <asm/macro.h>
#include <asm/arch-fsl-layerscape/soc.h>
#ifdef CONFIG_MP
#include <asm/arch/mp.h>
#endif
#ifdef CONFIG_FSL_LSCH3
#include <asm/arch-fsl-layerscape/immap_lsch3.h>
#endif
#include <asm/u-boot.h>

/* Get GIC offset
 * For LS1043a rev1.0, GIC base address align with 4k.
 * For LS1043a rev1.1, if DCFG_GIC400_ALIGN[GIC_ADDR_BIT]
 * is set, GIC base address align with 4K, or else align
 * with 64k.
 * output:
 *	x0: the base address of GICD
 *	x1: the base address of GICC (GICv2 only)
 * clobbers: x2, x3, x4, flags
 */
ENTRY(get_gic_offset)
	ldr	x0, =GICD_BASE
#ifdef CONFIG_GICV2
	ldr	x1, =GICC_BASE
#endif
#ifdef CONFIG_HAS_FEATURE_GIC64K_ALIGN
	ldr	x2, =DCFG_CCSR_SVR
	ldr	w2, [x2]
	rev	w2, w2			/* SVR register is big-endian */
	mov	w3, w2
	ands	w3, w3, #SVR_WO_E << 8	/* mask out the E (endian) bit */
	mov	w4, #SVR_LS1043A << 8
	cmp	w3, w4			/* only LS1043A needs the check */
	b.ne	1f
	ands	w2, w2, #0xff		/* low byte = silicon revision */
	cmp	w2, #REV1_0
	b.eq	1f			/* rev1.0 is always 4K-aligned */
	ldr	x2, =SCFG_GIC400_ALIGN
	ldr	w2, [x2]
	rev	w2, w2
	tbnz	w2, #GIC_ADDR_BIT, 1f	/* bit set -> keep 4K alignment */
	ldr	x0, =GICD_BASE_64K
#ifdef CONFIG_GICV2
	ldr	x1, =GICC_BASE_64K
#endif
1:
#endif
	ret
ENDPROC(get_gic_offset)

/* Wake all secondary CPUs by broadcasting SGI 0 through the GIC. */
ENTRY(smp_kick_all_cpus)
	/* Kick secondary cpus up by SGI 0 interrupt */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	mov	x29, lr			/* Save LR */
	bl	get_gic_offset
	bl	gic_kick_secondary_cpus
	mov	lr, x29			/* Restore LR */
#endif
	ret
ENDPROC(smp_kick_all_cpus)


/*
 * Early platform init, called from start.S before relocation.
 * Runs on all cores; master-only work is skipped on slaves via
 * branch_if_slave/branch_if_master. Clobbers x0-x4 and (via callees)
 * other caller-saved registers; preserves LR in x29.
 */
ENTRY(lowlevel_init)
	mov	x29, lr			/* Save LR */

#ifdef CONFIG_FSL_LSCH3

	/* Set Wuo bit for RN-I 20 */
#ifdef CONFIG_LS2080A
	ldr	x0, =CCI_AUX_CONTROL_BASE(20)
	ldr	x1, =0x00000010
	bl	ccn504_set_aux

	/*
	 * Set forced-order mode in RNI-6, RNI-20
	 * This is required for performance optimization on LS2088A
	 * LS2080A family does not support setting forced-order mode,
	 * so skip this operation for LS2080A family
	 */
	bl	get_svr
	lsr	w0, w0, #16		/* keep SVR device-ID field */
	ldr	w1, =SVR_DEV_LS2080A
	cmp	w0, w1
	b.eq	1f

	ldr	x0, =CCI_AUX_CONTROL_BASE(6)
	ldr	x1, =0x00000020
	bl	ccn504_set_aux
	ldr	x0, =CCI_AUX_CONTROL_BASE(20)
	ldr	x1, =0x00000020
	bl	ccn504_set_aux
1:
#endif

	/* Add fully-coherent masters to DVM domain */
	ldr	x0, =CCI_MN_BASE
	ldr	x1, =CCI_MN_RNF_NODEID_LIST
	ldr	x2, =CCI_MN_DVM_DOMAIN_CTL_SET
	bl	ccn504_add_masters_to_dvm

	/* Set all RN-I ports (0, 2, 6, 12, 16, 20) to QoS of 15 */
	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
#endif

#ifdef SMMU_BASE
	/* Set the SMMU page size in the sACR register */
	ldr	x1, =SMMU_BASE
	ldr	w0, [x1, #0x10]
	orr	w0, w0, #1 << 16	/* set sACR.pagesize to indicate 64K page */
	str	w0, [x1, #0x10]
#endif

	/* Initialize GIC Secure Bank Status */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	branch_if_slave x0, 1f		/* distributor init is master-only */
	bl	get_gic_offset
	bl	gic_init_secure
1:
#ifdef CONFIG_GICV3
	ldr	x0, =GICR_BASE
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	bl	get_gic_offset
	bl	gic_init_secure_percpu
#endif
#endif

	branch_if_master x0, x1, 2f

	/* Slave cores park in the spin table and never return here */
#if defined(CONFIG_MP) && defined(CONFIG_ARMV8_MULTIENTRY)
	ldr	x0, =secondary_boot_func
	blr	x0
#endif

2:
#ifdef CONFIG_FSL_TZPC_BP147
	/* Set Non Secure access for all devices protected via TZPC */
	ldr	x1, =TZPCDECPROT_0_SET_BASE /* Decode Protection-0 Set Reg */
	/*
	 * NOTE(review): w0 is ORed without first loading it from the
	 * register; it holds whatever the code above left there. The Set
	 * register is write-to-set so the extra stale bits may be benign,
	 * but confirm against the TZPC-BP147 programming model.
	 */
	orr	w0, w0, #1 << 3		/* DCFG_RESET is accessible from NS world */
	str	w0, [x1]

	isb
	dsb	sy
#endif

#ifdef CONFIG_FSL_TZASC_400
	/*
	 * LS2080 and its personalities does not support TZASC
	 * So skip TZASC related operations
	 */
	bl	get_svr
	lsr	w0, w0, #16
	ldr	w1, =SVR_DEV_LS2080A
	cmp	w0, w1
	b.eq	1f

	/* Set TZASC so that:
	 * a. We use only Region0 whose global secure write/read is EN
	 * b. We use only Region0 whose NSAID write/read is EN
	 *
	 * NOTE: As per the CCSR map doc, TZASC 3 and TZASC 4 are just
	 * placeholders.
	 */
	ldr	x1, =TZASC_GATE_KEEPER(0)
	ldr	w0, [x1]		/* Filter 0 Gate Keeper Register */
	orr	w0, w0, #1 << 0		/* Set open_request for Filter 0 */
	str	w0, [x1]

	ldr	x1, =TZASC_GATE_KEEPER(1)
	ldr	w0, [x1]		/* Filter 0 Gate Keeper Register */
	orr	w0, w0, #1 << 0		/* Set open_request for Filter 0 */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ATTRIBUTES_0(0)
	ldr	w0, [x1]		/* Region-0 Attributes Register */
	orr	w0, w0, #1 << 31	/* Set Sec global write en, Bit[31] */
	orr	w0, w0, #1 << 30	/* Set Sec global read en, Bit[30] */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ATTRIBUTES_0(1)
	ldr	w0, [x1]		/* Region-1 Attributes Register */
	orr	w0, w0, #1 << 31	/* Set Sec global write en, Bit[31] */
	orr	w0, w0, #1 << 30	/* Set Sec global read en, Bit[30] */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ID_ACCESS_0(0)
	ldr	w0, [x1]		/* Region-0 Access Register */
	mov	w0, #0xFFFFFFFF		/* Set nsaid_wr_en and nsaid_rd_en */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ID_ACCESS_0(1)
	ldr	w0, [x1]		/* Region-1 Attributes Register */
	mov	w0, #0xFFFFFFFF		/* Set nsaid_wr_en and nsaid_rd_en */
	str	w0, [x1]

	isb
	dsb	sy
#endif
1:
#ifdef CONFIG_ARCH_LS1046A
	/* Initialize the L2 RAM latency */
	mrs	x1, S3_1_c11_c0_2	/* CPUECTLR_EL1 (IMPLEMENTATION DEFINED) */
	mov	x0, #0x1C7
	/* Clear L2 Tag RAM latency and L2 Data RAM latency */
	bic	x1, x1, x0
	/* Set L2 data ram latency bits [2:0] */
	orr	x1, x1, #0x2
	/* set L2 tag ram latency bits [8:6] */
	orr	x1, x1, #0x80
	msr	S3_1_c11_c0_2, x1
	isb
#endif

#if defined(CONFIG_FSL_LSCH2) && !defined(CONFIG_SPL_BUILD)
	bl	fsl_ocram_init
#endif

	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)

#if defined(CONFIG_FSL_LSCH2) && !defined(CONFIG_SPL_BUILD)
/* Zero-fill OCRAM and then clear the resulting ECC error status. */
ENTRY(fsl_ocram_init)
	mov	x28, lr			/* Save LR (x29 is in use by caller) */
	bl	fsl_clear_ocram
	bl	fsl_ocram_clear_ecc_err
	mov	lr, x28			/* Restore LR */
	ret
ENDPROC(fsl_ocram_init)

/*
 * Zero-fill the whole OCRAM with 64-bit stores.
 * Clobbers: x0, x1, x2, flags.
 */
ENTRY(fsl_clear_ocram)
/* Clear OCRAM */
	ldr	x0, =CONFIG_SYS_FSL_OCRAM_BASE
	ldr	x1, =(CONFIG_SYS_FSL_OCRAM_BASE + CONFIG_SYS_FSL_OCRAM_SIZE)
	mov	x2, #0
clear_loop:
	str	x2, [x0]
	add	x0, x0, #8
	cmp	x0, x1
	b.lo	clear_loop		/* unsigned compare: loop while x0 < end */
	ret
ENDPROC(fsl_clear_ocram)

/*
 * Write-1-to-clear the OCRAM1/2 single- and multi-bit ECC error status
 * bits left behind by the bulk clear above. Clobbers x0, w1.
 */
ENTRY(fsl_ocram_clear_ecc_err)
	/* OCRAM1/2 ECC status bit */
	mov	w1, #0x60
	ldr	x0, =DCSR_DCFG_SBEESR2
	str	w1, [x0]
	ldr	x0, =DCSR_DCFG_MBEESR2
	str	w1, [x0]
	ret
/* BUGFIX: was ENDPROC(fsl_ocram_init) — mismatched ENTRY/ENDPROC pair */
ENDPROC(fsl_ocram_clear_ecc_err)
#endif

#ifdef CONFIG_FSL_LSCH3
	/* Return the big-endian SVR value in w0. Clobbers x1. */
	.globl	get_svr
get_svr:
	ldr	x1, =FSL_LSCH3_SVR
	ldr	w0, [x1]
	ret

hnf_pstate_poll:
	/* x0 has the desired status, return 0 for success, 1 for timeout
	 * clobber x1, x2, x3, x4, x6, x7
	 */
	mov	x1, x0
	mov	x7, #0			/* flag for timeout */
	mrs	x3, cntpct_el0		/* read timer */
	add	x3, x3, #1200		/* timeout after 100 microseconds */
	mov	x0, #0x18
	movk	x0, #0x420, lsl #16	/* HNF0_PSTATE_STATUS */
	mov	w6, #8			/* HN-F node count */
1:
	ldr	x2, [x0]
	cmp	x2, x1			/* check status */
	b.eq	2f
	mrs	x4, cntpct_el0
	cmp	x4, x3
	b.ls	1b
	mov	x7, #1			/* timeout */
	b	3f
2:
	add	x0, x0, #0x10000	/* move to next node */
	subs	w6, w6, #1
	cbnz	w6, 1b
3:
	mov	x0, x7
	ret

hnf_set_pstate:
	/* x0 has the desired state, clobber x1, x2, x6 */
	mov	x1, x0
	/* power state to SFONLY */
	mov	w6, #8			/* HN-F node count */
	mov	x0, #0x10
	movk	x0, #0x420, lsl #16	/* HNF0_PSTATE_REQ */
1:	/* set pstate to sfonly */
	ldr	x2, [x0]
	and	x2, x2, #0xfffffffffffffffc	/* & HNFPSTAT_MASK */
	orr	x2, x2, x1
	str	x2, [x0]
	add	x0, x0, #0x10000	/* move to next node */
	subs	w6, w6, #1
	cbnz	w6, 1b

	ret

ENTRY(__asm_flush_l3_dcache)
	/*
	 * Flush the CCN-504 L3 by stepping every HN-F through SFONLY
	 * then FAM power states.
	 * Return status in x0
	 *    success 0
	 *    timeout 1 for setting SFONLY, 2 for FAM, 3 for both
	 */
	mov	x29, lr
	mov	x8, #0

	dsb	sy
	mov	x0, #0x1		/* HNFPSTAT_SFONLY */
	bl	hnf_set_pstate

	mov	x0, #0x4		/* SFONLY status */
	bl	hnf_pstate_poll
	cbz	x0, 1f
	mov	x8, #1			/* timeout */
1:
	dsb	sy
	mov	x0, #0x3		/* HNFPSTAT_FAM */
	bl	hnf_set_pstate

	mov	x0, #0xc		/* FAM status */
	bl	hnf_pstate_poll
	cbz	x0, 1f
	add	x8, x8, #0x2
1:
	mov	x0, x8
	mov	lr, x29
	ret
ENDPROC(__asm_flush_l3_dcache)
#endif

#ifdef CONFIG_MP
	/* Keep literals not used by the secondary boot code outside it */
	.ltorg

	/* Using 64 bit alignment since the spin table is accessed as data */
	.align 4
	.global secondary_boot_code
	/* Secondary Boot Code starts here */
secondary_boot_code:
	.global __spin_table
__spin_table:
	.space CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE

	.align 2
ENTRY(secondary_boot_func)
	/*
	 * MPIDR_EL1 Fields:
	 * MPIDR[1:0] = AFF0_CPUID <- Core ID (0,1)
	 * MPIDR[7:2] = AFF0_RES
	 * MPIDR[15:8] = AFF1_CLUSTERID <- Cluster ID (0,1,2,3)
	 * MPIDR[23:16] = AFF2_CLUSTERID
	 * MPIDR[24] = MT
	 * MPIDR[29:25] = RES0
	 * MPIDR[30] = U
	 * MPIDR[31] = ME
	 * MPIDR[39:32] = AFF3
	 *
	 * Linear Processor ID (LPID) calculation from MPIDR_EL1:
	 * (We only use AFF0_CPUID and AFF1_CLUSTERID for now
	 * until AFF2_CLUSTERID and AFF3 have non-zero values)
	 *
	 * LPID = MPIDR[15:8] | MPIDR[1:0]
	 */
	mrs	x0, mpidr_el1
	ubfm	x1, x0, #8, #15		/* x1 = cluster ID */
	ubfm	x2, x0, #0, #1		/* x2 = core ID within cluster */
	orr	x10, x2, x1, lsl #2	/* x10 has LPID */
	ubfm	x9, x0, #0, #15		/* x9 contains MPIDR[15:0] */
	/*
	 * offset of the spin table element for this core from start of spin
	 * table (each elem is padded to 64 bytes)
	 */
	lsl	x1, x10, #6
	ldr	x0, =__spin_table
	/* physical address of this cpus spin table element */
	add	x11, x1, x0

	ldr	x0, =__real_cntfrq
	ldr	x0, [x0]
	msr	cntfrq_el0, x0		/* set with real frequency */
	str	x9, [x11, #16]		/* LPID */
	mov	x4, #1
	str	x4, [x11, #8]		/* STATUS */
	dsb	sy
#if defined(CONFIG_GICV3)
	gic_wait_for_interrupt_m x0
#elif defined(CONFIG_GICV2)
	bl	get_gic_offset
	mov	x0, x1			/* x0 = GICC base for the wait macro */
	gic_wait_for_interrupt_m x0, w1
#endif

slave_cpu:
	wfe
	ldr	x0, [x11]		/* entry point written by master */
	cbz	x0, slave_cpu
#ifndef CONFIG_ARMV8_SWITCH_TO_EL1
	mrs	x1, sctlr_el2
#else
	mrs	x1, sctlr_el1
#endif
	tbz	x1, #25, cpu_is_le	/* SCTLR.EE: data endianness */
	rev	x0, x0			/* BE to LE conversion */
cpu_is_le:
	ldr	x5, [x11, #24]		/* requested architecture (arch field) */
	ldr	x6, =IH_ARCH_DEFAULT
	cmp	x6, x5
	b.eq	1f

#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x4, secondary_switch_to_el1
	ldr	x5, =ES_TO_AARCH64
#else
	ldr	x4, [x11]
	ldr	x5, =ES_TO_AARCH32
#endif
	bl	secondary_switch_to_el2

1:
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x4, secondary_switch_to_el1
#else
	ldr	x4, [x11]
#endif
	ldr	x5, =ES_TO_AARCH64
	bl	secondary_switch_to_el2

ENDPROC(secondary_boot_func)

/* Drop from EL3 to EL2 and jump to x4 (x5 = execution state). */
ENTRY(secondary_switch_to_el2)
	switch_el x6, 1f, 0f, 0f
0:	ret				/* already below EL3: nothing to do */
1:	armv8_switch_to_el2_m x4, x5, x6
ENDPROC(secondary_switch_to_el2)

/* Re-derive this core's spin-table entry, then drop from EL2 to EL1. */
ENTRY(secondary_switch_to_el1)
	mrs	x0, mpidr_el1
	ubfm	x1, x0, #8, #15
	ubfm	x2, x0, #0, #1
	orr	x10, x2, x1, lsl #2	/* x10 has LPID */

	lsl	x1, x10, #6
	ldr	x0, =__spin_table
	/* physical address of this cpus spin table element */
	add	x11, x1, x0

	ldr	x4, [x11]		/* entry point */

	ldr	x5, [x11, #24]		/* requested architecture */
	ldr	x6, =IH_ARCH_DEFAULT
	cmp	x6, x5
	b.eq	2f

	ldr	x5, =ES_TO_AARCH32
	bl	switch_to_el1

2:	ldr	x5, =ES_TO_AARCH64
	/* fall through */
switch_to_el1:
	switch_el x6, 0f, 1f, 0f
0:	ret				/* not at EL2: nothing to do */
1:	armv8_switch_to_el1_m x4, x5, x6
ENDPROC(secondary_switch_to_el1)

	/* Ensure that the literals used by the secondary boot code are
	 * assembled within it (this is required so that we can protect
	 * this area with a single memreserve region
	 */
	.ltorg

	/* 64 bit alignment for elements accessed as data */
	.align 4
	.global __real_cntfrq
__real_cntfrq:
	.quad COUNTER_FREQUENCY
	.globl __secondary_boot_code_size
	.type __secondary_boot_code_size, %object
	/* Secondary Boot Code ends here */
__secondary_boot_code_size:
	.quad .-secondary_boot_code
#endif