/*
 * Copyright 2008-2012 Freescale Semiconductor, Inc.
 * Kumar Gala <kumar.gala@freescale.com>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <mpc85xx.h>
#include <version.h>

#include <ppc_asm.tmpl>
#include <ppc_defs.h>

#include <asm/cache.h>
#include <asm/mmu.h>

/* To boot secondary cpus, we need a place for them to start up.
 * Normally, they start at 0xfffffffc, but that's usually the
 * firmware, and we don't want to have to run the firmware again.
 * Instead, the primary cpu will set the BPTR to point here to
 * this page. We then set up the core, and head to
 * start_secondary. Note that this means that the code below
 * must never exceed 1023 instructions (the branch at the end
 * would then be the 1024th).
 */
	.globl	__secondary_start_page
	.align	12
__secondary_start_page:
/* First do some preliminary setup */
	lis	r3, HID0_EMCP@h		/* enable machine check */
#ifndef CONFIG_E500MC
	ori	r3,r3,HID0_TBEN@l	/* enable Timebase */
#endif
#ifdef CONFIG_PHYS_64BIT
	ori	r3,r3,HID0_ENMAS7@l	/* enable MAS7 updates */
#endif
	mtspr	SPRN_HID0,r3

#ifndef CONFIG_E500MC
	li	r3,(HID1_ASTME|HID1_ABE)@l	/* Addr streaming & broadcast */
	mfspr	r0,PVR
	andi.	r0,r0,0xff
	cmpwi	r0,0x50@l	/* if we are rev 5.0 or greater set MBDD */
	blt	1f
	/* Set MBDD bit also */
	ori	r3, r3, HID1_MBDD@l
1:
	mtspr	SPRN_HID1,r3
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_CPU_A003999
	mfspr	r3,SPRN_HDBCR1
	oris	r3,r3,0x0100
	mtspr	SPRN_HDBCR1,r3
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
	mfspr	r3,SPRN_SVR
	rlwinm	r3,r3,0,0xff
	li	r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV
	cmpw	r3,r4
	beq	1f

#ifdef CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
	li	r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
	cmpw	r3,r4
	beq	1f
#endif

	/* Not a supported revision affected by erratum */
	b	2f

1:	/* Erratum says set bits 55:60 to 001001 */
	msync
	isync
	mfspr	r3,SPRN_HDBCR0
	li	r4,0x48
	rlwimi	r3,r4,0,0x1f8
	mtspr	SPRN_HDBCR0,r3
	isync
2:
#endif

	/* Enable branch prediction */
	lis	r3,BUCSR_ENABLE@h
	ori	r3,r3,BUCSR_ENABLE@l
	mtspr	SPRN_BUCSR,r3

	/* Ensure TB is 0 */
	li	r3,0
	mttbl	r3
	mttbu	r3

	/* Enable/invalidate the I-Cache */
	lis	r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
	ori	r2,r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
	mtspr	SPRN_L1CSR1,r2
1:
	mfspr	r3,SPRN_L1CSR1
	and.	r1,r3,r2
	bne	1b

	lis	r3,(L1CSR1_CPE|L1CSR1_ICE)@h
	ori	r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
	mtspr	SPRN_L1CSR1,r3
	isync
2:
	mfspr	r3,SPRN_L1CSR1
	andi.	r1,r3,L1CSR1_ICE@l
	beq	2b

	/* Enable/invalidate the D-Cache */
	lis	r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@h
	ori	r2,r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@l
	mtspr	SPRN_L1CSR0,r2
1:
	mfspr	r3,SPRN_L1CSR0
	and.	r1,r3,r2
	bne	1b

	lis	r3,(L1CSR0_CPE|L1CSR0_DCE)@h
	ori	r3,r3,(L1CSR0_CPE|L1CSR0_DCE)@l
	mtspr	SPRN_L1CSR0,r3
	isync
2:
	mfspr	r3,SPRN_L1CSR0
	andi.	r1,r3,L1CSR0_DCE@l
	beq	2b

#define toreset(x) (x - __secondary_start_page + 0xfffff000)

	/* get our PIR to figure out our table entry */
	lis	r3,toreset(__spin_table_addr)@h
	ori	r3,r3,toreset(__spin_table_addr)@l
	lwz	r3,0(r3)

	mfspr	r0,SPRN_PIR
#ifdef CONFIG_SYS_FSL_QORIQ_CHASSIS2
/*
 * PIR definition for Chassis 2
 * 0-17   Reserved (logic 0s)
 * 18-19  CHIP_ID          2'b00      - SoC 1
 *                         all others - reserved
 * 20-24  CLUSTER_ID       5'b00000   - CCM 1
 *                         all others - reserved
 * 25-26  CORE_CLUSTER_ID  2'b00 - cluster 1
 *                         2'b01 - cluster 2
 *                         2'b10 - cluster 3
 *                         2'b11 - cluster 4
 * 27-28  CORE_ID          2'b00 - core 0
 *                         2'b01 - core 1
 *                         2'b10 - core 2
 *                         2'b11 - core 3
 * 29-31  THREAD_ID        3'b000 - thread 0
 *                         3'b001 - thread 1
 *
 * Power-on PIR increments threads by 0x01, cores within a cluster by 0x08
 * and clusters by 0x20.
 *
 * We renumber PIR so that all threads in the system are consecutive.
 */

	rlwinm	r8,r0,29,0x03	/* r8 = core within cluster */
	srwi	r10,r0,5	/* r10 = cluster */

	mulli	r5,r10,CONFIG_SYS_FSL_CORES_PER_CLUSTER
	add	r5,r5,r8	/* for spin table index */
	mulli	r4,r5,CONFIG_SYS_FSL_THREADS_PER_CORE	/* for PIR */
#elif defined(CONFIG_E500MC)
	rlwinm	r4,r0,27,27,31
	mr	r5,r4
#else
	mr	r4,r0
	mr	r5,r4
#endif

	/*
	 * r10 has the base address for the entry.
	 * we cannot access it yet before setting up a new TLB
	 */
	slwi	r8,r5,6	/* spin table is padded to 64 bytes */
	add	r10,r3,r8

	mtspr	SPRN_PIR,r4	/* write to PIR register */
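
/*
 * Worked example of the renumbering above (illustrative; the real values
 * of CONFIG_SYS_FSL_CORES_PER_CLUSTER and CONFIG_SYS_FSL_THREADS_PER_CORE
 * come from the platform config): assuming 4 cores per cluster and
 * 2 threads per core, thread 0 of core 1 in cluster 2 powers on with
 * PIR = 0x20 + 0x08 = 0x28. The CHASSIS2 block then extracts
 * core-within-cluster (0x28 >> 3) & 3 = 1 and cluster 0x28 >> 5 = 1,
 * giving spin table index 1*4 + 1 = 5 and renumbered PIR 5*2 = 10, so
 * all threads in the system end up numbered consecutively.
 */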

#ifdef CONFIG_SYS_CACHE_STASHING
	/* set stash id to (coreID) * 2 + 32 + L1 CT (0) */
	slwi	r8,r4,1
	addi	r8,r8,32
	mtspr	L1CSR2,r8
#endif

#if defined(CONFIG_SYS_P4080_ERRATUM_CPU22) || \
	defined(CONFIG_SYS_FSL_ERRATUM_NMG_CPU_A011)
	/*
	 * CPU22 applies to P4080 rev 1.0, 2.0, fixed in 3.0
	 * NMG_CPU_A011 applies to P4080 rev 1.0, 2.0, fixed in 3.0
	 * also applies to P3041 rev 1.0, 1.1, P2041 rev 1.0, 1.1
	 */
	mfspr	r3,SPRN_SVR
	rlwinm	r6,r3,24,~0x800		/* clear E bit */

	lis	r5,SVR_P4080@h
	ori	r5,r5,SVR_P4080@l
	cmpw	r6,r5
	bne	1f

	rlwinm	r3,r3,0,0xf0
	li	r5,0x30
	cmpw	r3,r5
	bge	2f
1:
#ifdef CONFIG_SYS_FSL_ERRATUM_NMG_CPU_A011
	lis	r3,toreset(enable_cpu_a011_workaround)@ha
	lwz	r3,toreset(enable_cpu_a011_workaround)@l(r3)
	cmpwi	r3,0
	beq	2f
#endif
	mfspr	r3,L1CSR2
	oris	r3,r3,(L1CSR2_DCWS)@h
	mtspr	L1CSR2,r3
2:
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_A005812
	/*
	 * A-005812 workaround sets bit 32 of SPR 976 for SoCs running in
	 * write shadow mode. This code should run after other code setting
	 * DCWS.
	 */
	mfspr	r3,L1CSR2
	andis.	r3,r3,(L1CSR2_DCWS)@h
	beq	1f
	mfspr	r3, SPRN_HDBCR0
	oris	r3, r3, 0x8000
	mtspr	SPRN_HDBCR0, r3
1:
#endif

#ifdef CONFIG_BACKSIDE_L2_CACHE
	/* skip L2 setup on P2040/P2040E as they have no L2 */
	mfspr	r3,SPRN_SVR
	rlwinm	r6,r3,24,~0x800		/* clear E bit of SVR */

	lis	r3,SVR_P2040@h
	ori	r3,r3,SVR_P2040@l
	cmpw	r6,r3
	beq	3f

	/* Enable/invalidate the L2 cache */
	msync
	lis	r2,(L2CSR0_L2FI|L2CSR0_L2LFC)@h
	ori	r2,r2,(L2CSR0_L2FI|L2CSR0_L2LFC)@l
	mtspr	SPRN_L2CSR0,r2
1:
	mfspr	r3,SPRN_L2CSR0
	and.	r1,r3,r2
	bne	1b

#ifdef CONFIG_SYS_CACHE_STASHING
	/* set stash id to (coreID) * 2 + 32 + L2 (1) */
	addi	r3,r8,1
	mtspr	SPRN_L2CSR1,r3
#endif

	lis	r3,CONFIG_SYS_INIT_L2CSR0@h
	ori	r3,r3,CONFIG_SYS_INIT_L2CSR0@l
	mtspr	SPRN_L2CSR0,r3
	isync
2:
	mfspr	r3,SPRN_L2CSR0
	andis.	r1,r3,L2CSR0_L2E@h
	beq	2b
#endif
3:
	/* setup mapping for the spin table, WIMGE=0b00100 */
	lis	r13,toreset(__spin_table_addr)@h
	ori	r13,r13,toreset(__spin_table_addr)@l
	lwz	r13,0(r13)
	/* mask by 4K */
	rlwinm	r13,r13,0,0,19

	lis	r11,(MAS0_TLBSEL(1)|MAS0_ESEL(1))@h
	mtspr	SPRN_MAS0,r11
	lis	r11,(MAS1_VALID|MAS1_IPROT)@h
	ori	r11,r11,(MAS1_TS|MAS1_TSIZE(BOOKE_PAGESZ_4K))@l
	mtspr	SPRN_MAS1,r11
	oris	r11,r13,(MAS2_M|MAS2_G)@h
	ori	r11,r13,(MAS2_M|MAS2_G)@l
	mtspr	SPRN_MAS2,r11
	oris	r11,r13,(MAS3_SX|MAS3_SW|MAS3_SR)@h
	ori	r11,r13,(MAS3_SX|MAS3_SW|MAS3_SR)@l
	mtspr	SPRN_MAS3,r11
	li	r11,0
	mtspr	SPRN_MAS7,r11
	tlbwe

	/*
	 * __bootpg_addr has the address of __second_half_boot_page
	 * jump there in AS=1 space with cache enabled
	 */
	lis	r13,toreset(__bootpg_addr)@h
	ori	r13,r13,toreset(__bootpg_addr)@l
	lwz	r11,0(r13)
	mtspr	SPRN_SRR0,r11
	mfmsr	r13
	ori	r12,r13,MSR_IS|MSR_DS@l
	mtspr	SPRN_SRR1,r12
	rfi

	/*
	 * Allocate some space for the SDRAM address of the bootpg.
	 * This variable has to be in the boot page so that it can
	 * be accessed by secondary cores when they come out of reset.
	 */
	.align	L1_CACHE_SHIFT
	.globl	__bootpg_addr
__bootpg_addr:
	.long	0

	.global	__spin_table_addr
__spin_table_addr:
	.long	0

	/*
	 * This variable is set by cpu_init_r() after parsing hwconfig
	 * to enable workaround for erratum NMG_CPU_A011.
	 */
	.align	L1_CACHE_SHIFT
	.global	enable_cpu_a011_workaround
enable_cpu_a011_workaround:
	.long	1

	/* Fill in the empty space. The actual reset vector is
	 * the last word of the page */
__secondary_start_code_end:
	.space	4092 - (__secondary_start_code_end - __secondary_start_page)
__secondary_reset_vector:
	b	__secondary_start_page


/* this is a separate page for the spin table and cacheable boot code */
	.align	L1_CACHE_SHIFT
	.global	__second_half_boot_page
__second_half_boot_page:
#ifdef CONFIG_PPC_SPINTABLE_COMPATIBLE
	lis	r3,(spin_table_compat - __second_half_boot_page)@h
	ori	r3,r3,(spin_table_compat - __second_half_boot_page)@l
	add	r3,r3,r11 /* r11 has the address of __second_half_boot_page */
	lwz	r14,0(r3)
#endif

#define ENTRY_ADDR_UPPER	0
#define ENTRY_ADDR_LOWER	4
#define ENTRY_R3_UPPER		8
#define ENTRY_R3_LOWER		12
#define ENTRY_RESV		16
#define ENTRY_PIR		20
#define ENTRY_SIZE		64
	/*
	 * setup the entry
	 * r10 has the base address of the spin table.
	 * spin table is defined as
	 * struct {
	 *	uint64_t entry_addr;
	 *	uint64_t r3;
	 *	uint32_t rsvd1;
	 *	uint32_t pir;
	 * };
	 * we pad this struct to 64 bytes so each entry is in its own cacheline
	 */
	li	r3,0
	li	r8,1
	mfspr	r4,SPRN_PIR
	stw	r3,ENTRY_ADDR_UPPER(r10)
	stw	r3,ENTRY_R3_UPPER(r10)
	stw	r4,ENTRY_R3_LOWER(r10)
	stw	r3,ENTRY_RESV(r10)
	stw	r4,ENTRY_PIR(r10)
	msync
	stw	r8,ENTRY_ADDR_LOWER(r10)
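
/*
 * The entry is now published with entry_addr_lower = 1, i.e. the "hold"
 * bit (bit 0) set. For illustration, a minimal sketch of the releasing
 * side, assuming an ePAPR-style client (C pseudo-code, not part of this
 * file; spin_table_entry, r3_val and kernel_entry are placeholder names):
 *
 *	struct spin_table_entry {	// one 64-byte-padded slot
 *		uint64_t entry_addr;
 *		uint64_t r3;
 *		uint32_t rsvd1;
 *		uint32_t pir;
 *	};
 *
 *	void release_core(volatile struct spin_table_entry *e,
 *			  uint64_t r3_val, uint64_t kernel_entry)
 *	{
 *		e->r3 = r3_val;		   // handed to the core in r3
 *		__asm__ volatile("msync"); // order the stores above
 *		// Write entry_addr last; a 32-bit client must store the
 *		// upper word first and the lower word (bit 0 clear) last.
 *		e->entry_addr = kernel_entry;
 *	}
 */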

	/* spin waiting for addr */
3:
/*
 * To comply with ePAPR 1.1, the spin table has been moved to cache-enabled
 * memory. An older OS may not work with this change. A patch is waiting to
 * be accepted into the Linux kernel; other OSes need a similar fix to their
 * spin table code. For an OS with old spin table code, this temporary fix
 * can be enabled by setting the environment variable "spin_table_compat".
 * For a new OS, set "spin_table_compat=no". Once Linux is fixed, we can
 * remove this macro and the related code. For now, it is enabled by default.
 */
#ifdef CONFIG_PPC_SPINTABLE_COMPATIBLE
	cmpwi	r14,0
	beq	4f
	dcbf	0, r10
	sync
4:
#endif
	lwz	r4,ENTRY_ADDR_LOWER(r10)
	andi.	r11,r4,1
	bne	3b
	isync

	/* setup IVORs to match fixed offsets */
#include "fixed_ivor.S"

	/* get the upper bits of the addr */
	lwz	r11,ENTRY_ADDR_UPPER(r10)

	/* setup branch addr */
	mtspr	SPRN_SRR0,r4

	/* mark the entry as released */
	li	r8,3
	stw	r8,ENTRY_ADDR_LOWER(r10)

	/* mask by ~64M to setup our tlb we will jump to */
	rlwinm	r12,r4,0,0,5

	/*
	 * setup r3, r4, r5, r6, r7, r8, r9
	 * r3 contains the value to put in the r3 register at secondary cpu
	 * entry. The high 32 bits are ignored on 32-bit chip implementations;
	 * 64-bit chip implementations, however, shall load all 64 bits.
	 */
#ifdef CONFIG_SYS_PPC64
	ld	r3,ENTRY_R3_UPPER(r10)
#else
	lwz	r3,ENTRY_R3_LOWER(r10)
#endif
	li	r4,0
	li	r5,0
	li	r6,0
	lis	r7,(64*1024*1024)@h
	li	r8,0
	li	r9,0

	/* load up the pir */
	lwz	r0,ENTRY_PIR(r10)
	mtspr	SPRN_PIR,r0
	mfspr	r0,SPRN_PIR
	stw	r0,ENTRY_PIR(r10)

	mtspr	IVPR,r12
/*
 * Coming here, we know the cpu has one TLB mapping in TLB1[0]
 * which maps 0xfffff000-0xffffffff one-to-one. We set up a
 * second mapping that maps addr 1:1 for 64M, and then we jump to
 * addr
 */
	lis	r10,(MAS0_TLBSEL(1)|MAS0_ESEL(0))@h
	mtspr	SPRN_MAS0,r10
	lis	r10,(MAS1_VALID|MAS1_IPROT)@h
	ori	r10,r10,(MAS1_TSIZE(BOOKE_PAGESZ_64M))@l
	mtspr	SPRN_MAS1,r10
	/* WIMGE = 0b00000 for now */
	mtspr	SPRN_MAS2,r12
	ori	r12,r12,(MAS3_SX|MAS3_SW|MAS3_SR)
	mtspr	SPRN_MAS3,r12
#ifdef CONFIG_ENABLE_36BIT_PHYS
	mtspr	SPRN_MAS7,r11
#endif
	tlbwe

/* Now we have another mapping for this page, so we jump to that
 * mapping
 */
	mtspr	SPRN_SRR1,r13
	rfi
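
/*
 * Worked example for the mapping above (illustrative address): if the
 * entry point taken from the spin table is 0x4100_0000, the earlier
 * rlwinm leaves r12 = 0x4100_0000 & 0xfc00_0000 = 0x4000_0000, so IVPR
 * and the new 64M TLB1[0] entry are both based at 0x4000_0000, and the
 * rfi lands at 0x4100_0000 through the 1:1 mapping.
 */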

	.align	6
	.globl	__spin_table
__spin_table:
	.space	CONFIG_MAX_CPUS*ENTRY_SIZE

#ifdef CONFIG_PPC_SPINTABLE_COMPATIBLE
	.align	L1_CACHE_SHIFT
	.global	spin_table_compat
spin_table_compat:
	.long	1

#endif

__spin_table_end:
	.space	4096 - (__spin_table_end - __spin_table)
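
/*
 * For reference, an ePAPR client normally finds each core's slot in
 * __spin_table through the device tree. A sketch of such a node
 * (illustrative only; the release address is filled in at boot and
 * 0x0ffff240 is a placeholder):
 *
 *	cpu@1 {
 *		device_type = "cpu";
 *		enable-method = "spin-table";
 *		cpu-release-addr = <0x0 0x0ffff240>;
 *	};
 */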