/*
 * Copyright 2004, 2007-2012 Freescale Semiconductor, Inc.
 * Copyright (C) 2003 Motorola, Inc.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

/* U-Boot Startup Code for Motorola 85xx PowerPC based Embedded Boards
 *
 * The processor starts at 0xfffffffc and the code is first executed in the
 * last 4K page (0xfffff000-0xffffffff) in flash/rom.
 *
 */

#include <asm-offsets.h>
#include <config.h>
#include <mpc85xx.h>
#include <version.h>

#include <ppc_asm.tmpl>
#include <ppc_defs.h>

#include <asm/cache.h>
#include <asm/mmu.h>

#undef	MSR_KERNEL
#define MSR_KERNEL ( MSR_ME )	/* Machine Check */

#if defined(CONFIG_NAND_SPL) || \
	(defined(CONFIG_SPL_BUILD) && defined(CONFIG_SPL_INIT_MINIMAL))
#define MINIMAL_SPL
#endif

#if !defined(CONFIG_SPL) && !defined(CONFIG_SYS_RAMBOOT) && \
	!defined(CONFIG_SECURE_BOOT) && !defined(CONFIG_SRIO_PCIE_BOOT_SLAVE)
#define NOR_BOOT
#endif

/*
 * Set up GOT: Global Offset Table
 *
 * Use r12 to access the GOT
 */
	START_GOT
	GOT_ENTRY(_GOT2_TABLE_)
	GOT_ENTRY(_FIXUP_TABLE_)

#ifndef MINIMAL_SPL
	GOT_ENTRY(_start)
	GOT_ENTRY(_start_of_vectors)
	GOT_ENTRY(_end_of_vectors)
	GOT_ENTRY(transfer_to_handler)
#endif

	GOT_ENTRY(__init_end)
	GOT_ENTRY(__bss_end)
	GOT_ENTRY(__bss_start)
	END_GOT

/*
 * e500 Startup -- after reset only the last 4KB of the effective
 * address space is mapped in the MMU L2 TLB1 Entry0. The .bootpg
 * section is located at THIS LAST page and basically does three
 * things: clear some registers, set up exception tables and
 * add more TLB entries for 'larger spaces' (e.g. the boot rom) to
 * continue the boot procedure.
 *
 * Once the boot rom is mapped by TLB entries we can proceed
 * with normal startup.
 *
 */

	.section .bootpg,"ax"
	.globl _start_e500

_start_e500:
/* Enable debug exception */
	li	r1,MSR_DE
	mtmsr	r1

	/*
	 * If we got an ePAPR device tree pointer passed in as r3, we need that
	 * later in cpu_init_early_f(). Save it to a safe register before we
	 * clobber it so that we can fetch it from there later.
	 */
	mr	r24, r3

#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
	mfspr	r3,SPRN_SVR
	rlwinm	r3,r3,0,0xff
	li	r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV
	cmpw	r3,r4
	beq	1f

#ifdef CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
	li	r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
	cmpw	r3,r4
	beq	1f
#endif

	/* Not a supported revision affected by erratum */
	li	r27,0
	b	2f

1:	li	r27,1	/* Remember for later that we have the erratum */
	/* Erratum says set bits 55:60 to 001001 */
	msync
	isync
	mfspr	r3,SPRN_HDBCR0
	li	r4,0x48
	rlwimi	r3,r4,0,0x1f8
	mtspr	SPRN_HDBCR0,r3
	isync
2:
#endif
#ifdef CONFIG_SYS_FSL_ERRATUM_A005125
	msync
	isync
	mfspr	r3, SPRN_HDBCR0
	oris	r3, r3, 0x0080
	mtspr	SPRN_HDBCR0, r3
#endif


#if defined(CONFIG_SECURE_BOOT) && defined(CONFIG_E500MC)
	/* ISBC uses L2 as stack.
	 * Disable the L2 cache here so that U-Boot can enable it later
	 * as part of its normal flow.
	 */

	/* Check if L2 is enabled */
	mfspr	r3, SPRN_L2CSR0
	lis	r2, L2CSR0_L2E@h
	ori	r2, r2, L2CSR0_L2E@l
	and.	r4, r3, r2
	beq	l2_disabled

	mfspr	r3, SPRN_L2CSR0
	/* Flush L2 cache */
	lis	r2,(L2CSR0_L2FL)@h
	ori	r2, r2, (L2CSR0_L2FL)@l
	or	r3, r2, r3
	sync
	isync
	mtspr	SPRN_L2CSR0,r3
	isync
1:
	mfspr	r3, SPRN_L2CSR0
	and.	r1, r3, r2
	bne	1b

	mfspr	r3, SPRN_L2CSR0
	lis	r2, L2CSR0_L2E@h
	ori	r2, r2, L2CSR0_L2E@l
	andc	r4, r3, r2
	sync
	isync
	mtspr	SPRN_L2CSR0,r4
	isync

l2_disabled:
#endif

/* clear registers/arrays not reset by hardware */

	/* L1 */
	li	r0,2
	mtspr	L1CSR0,r0	/* invalidate d-cache */
	mtspr	L1CSR1,r0	/* invalidate i-cache */

	mfspr	r1,DBSR
	mtspr	DBSR,r1		/* Clear all valid bits */


	.macro	create_tlb1_entry esel ts tsize epn wimg rpn perm phy_high scratch
	lis	\scratch, FSL_BOOKE_MAS0(1, \esel, 0)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@l
	mtspr	MAS0, \scratch
	lis	\scratch, FSL_BOOKE_MAS1(1, 1, 0, \ts, \tsize)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS1(1, 1, 0, \ts, \tsize)@l
	mtspr	MAS1, \scratch
	lis	\scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
	mtspr	MAS2, \scratch
	lis	\scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@l
	mtspr	MAS3, \scratch
	lis	\scratch, \phy_high@h
	ori	\scratch, \scratch, \phy_high@l
	mtspr	MAS7, \scratch
	isync
	msync
	tlbwe
	isync
	.endm

	.macro	create_tlb0_entry esel ts tsize epn wimg rpn perm phy_high scratch
	lis	\scratch, FSL_BOOKE_MAS0(0, \esel, 0)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@l
	mtspr	MAS0, \scratch
	lis	\scratch, FSL_BOOKE_MAS1(1, 0, 0, \ts, \tsize)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS1(1, 0, 0, \ts, \tsize)@l
	mtspr	MAS1, \scratch
	lis	\scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
	mtspr	MAS2, \scratch
	lis	\scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@l
	mtspr	MAS3, \scratch
	lis	\scratch, \phy_high@h
	ori	\scratch, \scratch, \phy_high@l
	mtspr	MAS7, \scratch
	isync
	msync
	tlbwe
	isync
	.endm

	.macro	delete_tlb1_entry esel scratch
	lis	\scratch, FSL_BOOKE_MAS0(1, \esel, 0)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@l
	mtspr	MAS0, \scratch
	li	\scratch, 0
	mtspr	MAS1, \scratch
	isync
	msync
	tlbwe
	isync
	.endm

	.macro	delete_tlb0_entry esel epn wimg scratch
	lis	\scratch, FSL_BOOKE_MAS0(0, \esel, 0)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@l
	mtspr	MAS0, \scratch
	li	\scratch, 0
	mtspr	MAS1, \scratch
	lis	\scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
	mtspr	MAS2, \scratch
	isync
	msync
	tlbwe
	isync
	.endm
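/*
 * Illustrative note (not part of the original flow): a call such as
 *
 *	create_tlb1_entry 15, 1, BOOKE_PAGESZ_1M, 0xfff00000, MAS2_I|MAS2_G, \
 *		0xfff00000, MAS3_SX|MAS3_SW|MAS3_SR, 0, r6
 *
 * selects TLB1 entry 15 (MAS0), marks it valid and protected with TS=1 and a
 * 1M page size (MAS1), sets the effective page number and WIMG bits (MAS2),
 * the real page number and permissions (MAS3) and the upper physical address
 * bits (MAS7), then commits the entry with isync/msync/tlbwe/isync.
 */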
/* Interrupt vectors do not fit in minimal SPL. */
#if !defined(MINIMAL_SPL)
	/* Setup interrupt vectors */
	lis	r1,CONFIG_SYS_MONITOR_BASE@h
	mtspr	IVPR,r1

	lis	r3,(CONFIG_SYS_MONITOR_BASE & 0xffff)@h
	ori	r3,r3,(CONFIG_SYS_MONITOR_BASE & 0xffff)@l

	addi	r4,r3,CriticalInput - _start + _START_OFFSET
	mtspr	IVOR0,r4	/* 0: Critical input */
	addi	r4,r3,MachineCheck - _start + _START_OFFSET
	mtspr	IVOR1,r4	/* 1: Machine check */
	addi	r4,r3,DataStorage - _start + _START_OFFSET
	mtspr	IVOR2,r4	/* 2: Data storage */
	addi	r4,r3,InstStorage - _start + _START_OFFSET
	mtspr	IVOR3,r4	/* 3: Instruction storage */
	addi	r4,r3,ExtInterrupt - _start + _START_OFFSET
	mtspr	IVOR4,r4	/* 4: External interrupt */
	addi	r4,r3,Alignment - _start + _START_OFFSET
	mtspr	IVOR5,r4	/* 5: Alignment */
	addi	r4,r3,ProgramCheck - _start + _START_OFFSET
	mtspr	IVOR6,r4	/* 6: Program check */
	addi	r4,r3,FPUnavailable - _start + _START_OFFSET
	mtspr	IVOR7,r4	/* 7: floating point unavailable */
	addi	r4,r3,SystemCall - _start + _START_OFFSET
	mtspr	IVOR8,r4	/* 8: System call */
	/* 9: Auxiliary processor unavailable (unsupported) */
	addi	r4,r3,Decrementer - _start + _START_OFFSET
	mtspr	IVOR10,r4	/* 10: Decrementer */
	addi	r4,r3,IntervalTimer - _start + _START_OFFSET
	mtspr	IVOR11,r4	/* 11: Interval timer */
	addi	r4,r3,WatchdogTimer - _start + _START_OFFSET
	mtspr	IVOR12,r4	/* 12: Watchdog timer */
	addi	r4,r3,DataTLBError - _start + _START_OFFSET
	mtspr	IVOR13,r4	/* 13: Data TLB error */
	addi	r4,r3,InstructionTLBError - _start + _START_OFFSET
	mtspr	IVOR14,r4	/* 14: Instruction TLB error */
	addi	r4,r3,DebugBreakpoint - _start + _START_OFFSET
	mtspr	IVOR15,r4	/* 15: Debug */
#endif

	/* Clear and set up some registers. */
	li	r0,0x0000
	lis	r1,0xffff
	mtspr	DEC,r0			/* prevent dec exceptions */
	mttbl	r0			/* prevent fit & wdt exceptions */
	mttbu	r0
	mtspr	TSR,r1			/* clear all timer exception status */
	mtspr	TCR,r0			/* disable all */
	mtspr	ESR,r0			/* clear exception syndrome register */
	mtspr	MCSR,r0			/* machine check syndrome register */
	mtxer	r0			/* clear integer exception register */

#ifdef CONFIG_SYS_BOOK3E_HV
	mtspr	MAS8,r0			/* make sure MAS8 is clear */
#endif

	/* Enable Time Base and Select Time Base Clock */
	lis	r0,HID0_EMCP@h		/* Enable machine check */
#if defined(CONFIG_ENABLE_36BIT_PHYS)
	ori	r0,r0,HID0_ENMAS7@l	/* Enable MAS7 */
#endif
#ifndef CONFIG_E500MC
	ori	r0,r0,HID0_TBEN@l	/* Enable Timebase */
#endif
	mtspr	HID0,r0

#ifndef CONFIG_E500MC
	li	r0,(HID1_ASTME|HID1_ABE)@l	/* Addr streaming & broadcast */
	mfspr	r3,PVR
	andi.	r3,r3,0xff
	cmpwi	r3,0x50@l	/* if we are rev 5.0 or greater set MBDD */
	blt	1f
	/* Set MBDD bit also */
	ori	r0, r0, HID1_MBDD@l
1:
	mtspr	HID1,r0
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_CPU_A003999
	mfspr	r3,SPRN_HDBCR1
	oris	r3,r3,0x0100
	mtspr	SPRN_HDBCR1,r3
#endif

	/* Enable Branch Prediction */
#if defined(CONFIG_BTB)
	lis	r0,BUCSR_ENABLE@h
	ori	r0,r0,BUCSR_ENABLE@l
	mtspr	SPRN_BUCSR,r0
#endif

#if defined(CONFIG_SYS_INIT_DBCR)
	lis	r1,0xffff
	ori	r1,r1,0xffff
	mtspr	DBSR,r1			/* Clear all status bits */
	lis	r0,CONFIG_SYS_INIT_DBCR@h	/* DBCR0[IDM] must be set */
	ori	r0,r0,CONFIG_SYS_INIT_DBCR@l
	mtspr	DBCR0,r0
#endif

#ifdef CONFIG_MPC8569
#define CONFIG_SYS_LBC_ADDR (CONFIG_SYS_CCSRBAR_DEFAULT + 0x5000)
#define CONFIG_SYS_LBCR_ADDR (CONFIG_SYS_LBC_ADDR + 0xd0)

	/* MPC8569 Rev.0 silicon needs bit 13 of LBCR set to allow the eLBC to
	 * use an address space of more than 12 bits, and it must be done in
	 * the 4K boot page. So we set this bit here.
	 */

	/* create a temp mapping TLB0[0] for LBCR */
	create_tlb0_entry 0, \
		0, BOOKE_PAGESZ_4K, \
		CONFIG_SYS_LBC_ADDR, MAS2_I|MAS2_G, \
		CONFIG_SYS_LBC_ADDR, MAS3_SW|MAS3_SR, \
		0, r6

	/* Set LBCR register */
	lis	r4,CONFIG_SYS_LBCR_ADDR@h
	ori	r4,r4,CONFIG_SYS_LBCR_ADDR@l

	lis	r5,CONFIG_SYS_LBC_LBCR@h
	ori	r5,r5,CONFIG_SYS_LBC_LBCR@l
	stw	r5,0(r4)
	isync

	/* invalidate this temp TLB */
	lis	r4,CONFIG_SYS_LBC_ADDR@h
	ori	r4,r4,CONFIG_SYS_LBC_ADDR@l
	tlbivax	0,r4
	isync

#endif /* CONFIG_MPC8569 */

/*
 * Search for the TLB that covers the code we're executing, and shrink it
 * so that it covers only this 4K page. That will ensure that any other
 * TLB we create won't interfere with it. We assume that the TLB exists,
 * which is why we don't check the Valid bit of MAS1. We also assume
 * it is in TLB1.
 *
 * This is necessary, for example, when booting from the on-chip ROM,
 * which (oddly) creates a single 4GB TLB that covers CCSR and DDR.
 */
	bl	nexti		/* Find our address */
nexti:	mflr	r1		/* R1 = our PC */
	li	r2, 0
	mtspr	MAS6, r2	/* Assume the current PID and AS are 0 */
	isync
	msync
	tlbsx	0, r1		/* This must succeed */

	mfspr	r14, MAS0	/* Save ESEL for later */
	rlwinm	r14, r14, 16, 0xfff

	/* Set the size of the TLB to 4KB */
	mfspr	r3, MAS1
	li	r2, 0xF80
	andc	r3, r3, r2	/* Clear the TSIZE bits */
	ori	r3, r3, MAS1_TSIZE(BOOKE_PAGESZ_4K)@l
	oris	r3, r3, MAS1_IPROT@h
	mtspr	MAS1, r3

	/*
	 * Set the base address of the TLB to our PC. We assume that
	 * virtual == physical. We also assume that MAS2_EPN == MAS3_RPN.
	 */
	lis	r3, MAS2_EPN@h
	ori	r3, r3, MAS2_EPN@l	/* R3 = MAS2_EPN */

	and	r1, r1, r3	/* Our PC, rounded down to the nearest page */

	mfspr	r2, MAS2
	andc	r2, r2, r3
	or	r2, r2, r1
#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
	cmpwi	r27,0
	beq	1f
	andi.	r15, r2, MAS2_I|MAS2_G	/* save the old I/G for later */
	rlwinm	r2, r2, 0, ~MAS2_I
	ori	r2, r2, MAS2_G
1:
#endif
	mtspr	MAS2, r2	/* Set the EPN to our PC base address */

	mfspr	r2, MAS3
	andc	r2, r2, r3
	or	r2, r2, r1
	mtspr	MAS3, r2	/* Set the RPN to our PC base address */

	isync
	msync
	tlbwe
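/*
 * The shrunk entry keeps us mapped: MAS1[IPROT] was set on it above, and the
 * clean-up loop below explicitly skips the ESEL saved in r14, so only the
 * other TLB entries are invalidated.
 */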
/*
 * Clear out any other TLB entries that may exist, to avoid conflicts.
 * Our TLB entry is in r14.
 */
	li	r0, TLBIVAX_ALL | TLBIVAX_TLB0
	tlbivax	0, r0
	tlbsync

	mfspr	r4, SPRN_TLB1CFG
	rlwinm	r4, r4, 0, TLBnCFG_NENTRY_MASK

	li	r3, 0
	mtspr	MAS1, r3
1:	cmpw	r3, r14
	rlwinm	r5, r3, 16, MAS0_ESEL_MSK
	addi	r3, r3, 1
	beq	2f		/* skip the entry we're executing from */

	oris	r5, r5, MAS0_TLBSEL(1)@h
	mtspr	MAS0, r5

	isync
	tlbwe
	isync
	msync

2:	cmpw	r3, r4
	blt	1b

#if defined(CONFIG_SYS_PPC_E500_DEBUG_TLB) && !defined(MINIMAL_SPL)
/*
 * TLB entry for debugging in AS1.
 * Create a temporary TLB entry in AS0 to handle debug exceptions: on a debug
 * exception the MSR is cleared, i.e. the address space changes to 0, so a
 * TLB entry in AS0 is required to handle a debug exception generated in AS1.
 */

#ifdef NOR_BOOT
/*
 * The TLB entry is created for IVPR + IVOR15 to map to a valid opcode
 * address, because the flash's virtual address range is
 * 0xff800000 - 0xffffffff and this window is outside of the 4K boot window.
 */
	create_tlb1_entry CONFIG_SYS_PPC_E500_DEBUG_TLB, \
		0, BOOKE_PAGESZ_4M, \
		CONFIG_SYS_MONITOR_BASE & 0xffc00000, MAS2_I|MAS2_G, \
		0xffc00000, MAS3_SX|MAS3_SW|MAS3_SR, \
		0, r6

#elif !defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_SECURE_BOOT)
	create_tlb1_entry CONFIG_SYS_PPC_E500_DEBUG_TLB, \
		0, BOOKE_PAGESZ_1M, \
		CONFIG_SYS_MONITOR_BASE, MAS2_I|MAS2_G, \
		CONFIG_SYS_PBI_FLASH_WINDOW, MAS3_SX|MAS3_SW|MAS3_SR, \
		0, r6
#else
/*
 * The TLB entry is created for IVPR + IVOR15 to map to a valid opcode
 * address, because "nexti" will resize the boot TLB to 4K.
 */
	create_tlb1_entry CONFIG_SYS_PPC_E500_DEBUG_TLB, \
		0, BOOKE_PAGESZ_256K, \
		CONFIG_SYS_MONITOR_BASE & 0xfffc0000, MAS2_I, \
		CONFIG_SYS_MONITOR_BASE & 0xfffc0000, MAS3_SX|MAS3_SW|MAS3_SR, \
		0, r6
#endif
#endif

/*
 * Relocate CCSR, if necessary. We relocate CCSR if (obviously) the default
 * location is not where we want it. This typically happens on a 36-bit
 * system, where we want to move CCSR to near the top of 36-bit address space.
 *
 * To move CCSR, we create two temporary TLBs, one for the old location, and
 * another for the new location. On CoreNet systems, we also need to create
 * a special, temporary LAW.
 *
 * As a general rule, TLB0 is used for short-term TLBs, and TLB1 is used for
 * long-term TLBs, so we use TLB0 here.
 */
#if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS)

#if !defined(CONFIG_SYS_CCSRBAR_PHYS_HIGH) || !defined(CONFIG_SYS_CCSRBAR_PHYS_LOW)
#error "CONFIG_SYS_CCSRBAR_PHYS_HIGH and CONFIG_SYS_CCSRBAR_PHYS_LOW must be defined."
#endif

create_ccsr_new_tlb:
	/*
	 * Create a TLB for the new location of CCSR. Register R8 is reserved
	 * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR).
	 */
	lis	r8, CONFIG_SYS_CCSRBAR@h
	ori	r8, r8, CONFIG_SYS_CCSRBAR@l
	lis	r9, (CONFIG_SYS_CCSRBAR + 0x1000)@h
	ori	r9, r9, (CONFIG_SYS_CCSRBAR + 0x1000)@l
	create_tlb0_entry 0, \
		0, BOOKE_PAGESZ_4K, \
		CONFIG_SYS_CCSRBAR, MAS2_I|MAS2_G, \
		CONFIG_SYS_CCSRBAR_PHYS_LOW, MAS3_SW|MAS3_SR, \
		CONFIG_SYS_CCSRBAR_PHYS_HIGH, r3
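/*
 * (The 36-bit physical address of the new CCSR is split across MAS3/MAS7:
 * physical = (CONFIG_SYS_CCSRBAR_PHYS_HIGH << 32) | CONFIG_SYS_CCSRBAR_PHYS_LOW.
 * For example, a board defining these as 0xf / 0xfe000000 would place CCSR at
 * physical 0xf_fe000000 while keeping a 32-bit virtual address in
 * CONFIG_SYS_CCSRBAR.)
 */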
	/*
	 * Create a TLB for the current location of CCSR. Register R9 is
	 * reserved for the virtual address of this TLB
	 * (CONFIG_SYS_CCSRBAR + 0x1000).
	 */
create_ccsr_old_tlb:
	create_tlb0_entry 1, \
		0, BOOKE_PAGESZ_4K, \
		CONFIG_SYS_CCSRBAR + 0x1000, MAS2_I|MAS2_G, \
		CONFIG_SYS_CCSRBAR_DEFAULT, MAS3_SW|MAS3_SR, \
		0, r3 /* The default CCSR address is always a 32-bit number */


	/*
	 * We have a TLB for what we think is the current (old) CCSR. Let's
	 * verify that, otherwise we won't be able to move it.
	 * CONFIG_SYS_CCSRBAR_DEFAULT is always a 32-bit number, so we only
	 * need to compare the lower 32 bits of CCSRBAR on CoreNet systems.
	 */
verify_old_ccsr:
	lis	r0, CONFIG_SYS_CCSRBAR_DEFAULT@h
	ori	r0, r0, CONFIG_SYS_CCSRBAR_DEFAULT@l
#ifdef CONFIG_FSL_CORENET
	lwz	r1, 4(r9)		/* CCSRBARL */
#else
	lwz	r1, 0(r9)		/* CCSRBAR, shifted right by 12 */
	slwi	r1, r1, 12
#endif

	cmpl	0, r0, r1

	/*
	 * If the value we read from CCSRBARL is not what we expect, then
	 * enter an infinite loop. This will at least allow a debugger to
	 * halt execution and examine TLBs, etc. There's no point in going
	 * on.
	 */
infinite_debug_loop:
	bne	infinite_debug_loop

#ifdef CONFIG_FSL_CORENET

#define CCSR_LAWBARH0	(CONFIG_SYS_CCSRBAR + 0x1000)
#define LAW_EN		0x80000000
#define LAW_SIZE_4K	0xb
#define CCSRBAR_LAWAR	(LAW_EN | (0x1e << 20) | LAW_SIZE_4K)
#define CCSRAR_C	0x80000000	/* Commit */

create_temp_law:
	/*
	 * On CoreNet systems, we create the temporary LAW using a special LAW
	 * target ID of 0x1e.  LAWBARH is at offset 0xc00 in CCSR.
	 */
	lis	r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
	ori	r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
	lis	r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h
	ori	r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l
	lis	r2, CCSRBAR_LAWAR@h
	ori	r2, r2, CCSRBAR_LAWAR@l

	stw	r0, 0xc00(r9)	/* LAWBARH0 */
	stw	r1, 0xc04(r9)	/* LAWBARL0 */
	sync
	stw	r2, 0xc08(r9)	/* LAWAR0 */

	/*
	 * Read back from LAWAR to ensure the update is complete.  e500mc
	 * cores also require an isync.
	 */
	lwz	r0, 0xc08(r9)	/* LAWAR0 */
	isync

	/*
	 * Read the current CCSRBARH and CCSRBARL using load word instructions.
	 * Follow this with an isync instruction. This forces any outstanding
	 * accesses to configuration space to completion.
	 */
read_old_ccsrbar:
	lwz	r0, 0(r9)	/* CCSRBARH */
	lwz	r0, 4(r9)	/* CCSRBARL */
	isync

	/*
	 * Write the new values for CCSRBARH and CCSRBARL to their old
	 * locations.  The CCSRBARH has a shadow register. When the CCSRBARH
	 * has a new value written it loads a CCSRBARH shadow register. When
	 * the CCSRBARL is written, the CCSRBARH shadow register contents
	 * along with the CCSRBARL value are loaded into the CCSRBARH and
	 * CCSRBARL registers, respectively.  Follow this with a sync
	 * instruction.
	 */
write_new_ccsrbar:
	lis	r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
	ori	r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
	lis	r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h
	ori	r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l
	lis	r2, CCSRAR_C@h
	ori	r2, r2, CCSRAR_C@l

	stw	r0, 0(r9)	/* Write to CCSRBARH */
	sync			/* Make sure we write to CCSRBARH first */
	stw	r1, 4(r9)	/* Write to CCSRBARL */
	sync
	/*
	 * Write a 1 to the commit bit (C) of CCSRAR at the old location.
	 * Follow this with a sync instruction.
	 */
	stw	r2, 8(r9)
	sync

	/* Delete the temporary LAW */
delete_temp_law:
	li	r1, 0
	stw	r1, 0xc08(r8)
	sync
	stw	r1, 0xc00(r8)
	stw	r1, 0xc04(r8)
	sync

#else /* #ifdef CONFIG_FSL_CORENET */

write_new_ccsrbar:
	/*
	 * Read the current value of CCSRBAR using a load word instruction
	 * followed by an isync. This forces all accesses to configuration
	 * space to complete.
	 */
	sync
	lwz	r0, 0(r9)
	isync

/* CONFIG_SYS_CCSRBAR_PHYS right shifted by 12 */
#define CCSRBAR_PHYS_RS12 ((CONFIG_SYS_CCSRBAR_PHYS_HIGH << 20) | \
			   (CONFIG_SYS_CCSRBAR_PHYS_LOW >> 12))

	/* Write the new value to CCSRBAR. */
	lis	r0, CCSRBAR_PHYS_RS12@h
	ori	r0, r0, CCSRBAR_PHYS_RS12@l
	stw	r0, 0(r9)
	sync

	/*
	 * The manual says to perform a load of an address that does not
	 * access configuration space or the on-chip SRAM using an existing TLB,
	 * but that doesn't appear to be necessary.  We will do the isync,
	 * though.
	 */
	isync

	/*
	 * Read the contents of CCSRBAR from its new location, followed by
	 * another isync.
	 */
	lwz	r0, 0(r8)
	isync

#endif /* #ifdef CONFIG_FSL_CORENET */

	/* Delete the temporary TLBs */
delete_temp_tlbs:
	delete_tlb0_entry 0, CONFIG_SYS_CCSRBAR, MAS2_I|MAS2_G, r3
	delete_tlb0_entry 1, CONFIG_SYS_CCSRBAR + 0x1000, MAS2_I|MAS2_G, r3

#endif /* #if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS) */

#if defined(CONFIG_SYS_FSL_QORIQ_CHASSIS2) && defined(CONFIG_E6500)
create_ccsr_l2_tlb:
	/*
	 * Create a TLB for the MMR location of CCSR
	 * to access L2CSR0 register
	 */
	create_tlb0_entry 0, \
		0, BOOKE_PAGESZ_4K, \
		CONFIG_SYS_CCSRBAR + 0xC20000, MAS2_I|MAS2_G, \
		CONFIG_SYS_CCSRBAR_PHYS_LOW + 0xC20000, MAS3_SW|MAS3_SR, \
		CONFIG_SYS_CCSRBAR_PHYS_HIGH, r3

enable_l2_cluster_l2:
	/* enable L2 cache */
	lis	r3, (CONFIG_SYS_CCSRBAR + 0xC20000)@h
	ori	r3, r3, (CONFIG_SYS_CCSRBAR + 0xC20000)@l
	li	r4, 33	/* stash id */
	stw	r4, 4(r3)
	lis	r4, (L2CSR0_L2FI|L2CSR0_L2LFC)@h
	ori	r4, r4, (L2CSR0_L2FI|L2CSR0_L2LFC)@l
	sync
	stw	r4, 0(r3)	/* invalidate L2 */
1:	sync
	lwz	r0, 0(r3)
	twi	0, r0, 0
	isync
	and.	r1, r0, r4
	bne	1b
	lis	r4, (L2CSR0_L2E|L2CSR0_L2PE)@h
	ori	r4, r4, (L2CSR0_L2REP_MODE)@l
	sync
	stw	r4, 0(r3)	/* enable L2 */
delete_ccsr_l2_tlb:
	delete_tlb0_entry 0, CONFIG_SYS_CCSRBAR + 0xC20000, MAS2_I|MAS2_G, r3
#endif

	/*
	 * Enable the L1. On e6500, this has to be done
	 * after the L2 is up.
	 */

#ifdef CONFIG_SYS_CACHE_STASHING
	/* set stash id to (coreID) * 2 + 32 + L1 CT (0) */
	li	r2,(32 + 0)
	mtspr	L1CSR2,r2
#endif

	/* Enable/invalidate the I-Cache */
	lis	r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
	ori	r2,r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
	mtspr	SPRN_L1CSR1,r2
1:
	mfspr	r3,SPRN_L1CSR1
	and.	r1,r3,r2
	bne	1b

	lis	r3,(L1CSR1_CPE|L1CSR1_ICE)@h
	ori	r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
	mtspr	SPRN_L1CSR1,r3
	isync
2:
	mfspr	r3,SPRN_L1CSR1
	andi.	r1,r3,L1CSR1_ICE@l
	beq	2b

	/* Enable/invalidate the D-Cache */
	lis	r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@h
	ori	r2,r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@l
	mtspr	SPRN_L1CSR0,r2
1:
	mfspr	r3,SPRN_L1CSR0
	and.	r1,r3,r2
	bne	1b

	lis	r3,(L1CSR0_CPE|L1CSR0_DCE)@h
	ori	r3,r3,(L1CSR0_CPE|L1CSR0_DCE)@l
	mtspr	SPRN_L1CSR0,r3
	isync
2:
	mfspr	r3,SPRN_L1CSR0
	andi.	r1,r3,L1CSR0_DCE@l
	beq	2b
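/*
 * Erratum A004510 work-around (only taken on affected revisions, see r27):
 * map CCSR and DCSR with temporary TLB1 entries and a temporary LAW, then
 * program the work-around values through erratum_set_value below, which
 * performs each store from locked I-cache lines.
 */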
#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
#define DCSR_LAWBARH0	(CONFIG_SYS_CCSRBAR + 0x1000)
#define LAW_SIZE_1M	0x13
#define DCSRBAR_LAWAR	(LAW_EN | (0x1d << 20) | LAW_SIZE_1M)

	cmpwi	r27,0
	beq	9f

	/*
	 * Create a TLB entry for CCSR
	 *
	 * We're executing out of TLB1 entry in r14, and that's the only
	 * TLB entry that exists.  To allocate some TLB entries for our
	 * own use, flip a bit high enough that we won't flip it again
	 * via incrementing.
	 */

	xori	r8, r14, 32
	lis	r0, MAS0_TLBSEL(1)@h
	rlwimi	r0, r8, 16, MAS0_ESEL_MSK
	lis	r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@h
	ori	r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@l
	lis	r7, CONFIG_SYS_CCSRBAR@h
	ori	r7, r7, CONFIG_SYS_CCSRBAR@l
	ori	r2, r7, MAS2_I|MAS2_G
	lis	r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@h
	ori	r3, r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@l
	lis	r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
	ori	r4, r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
	mtspr	MAS0, r0
	mtspr	MAS1, r1
	mtspr	MAS2, r2
	mtspr	MAS3, r3
	mtspr	MAS7, r4
	isync
	tlbwe
	isync
	msync

	/* Map DCSR temporarily to physical address zero */
	li	r0, 0
	lis	r3, DCSRBAR_LAWAR@h
	ori	r3, r3, DCSRBAR_LAWAR@l

	stw	r0, 0xc00(r7)	/* LAWBARH0 */
	stw	r0, 0xc04(r7)	/* LAWBARL0 */
	sync
	stw	r3, 0xc08(r7)	/* LAWAR0 */

	/* Read back from LAWAR to ensure the update is complete. */
	lwz	r3, 0xc08(r7)	/* LAWAR0 */
	isync

	/* Create a TLB entry for DCSR at zero */

	addi	r9, r8, 1
	lis	r0, MAS0_TLBSEL(1)@h
	rlwimi	r0, r9, 16, MAS0_ESEL_MSK
	lis	r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@h
	ori	r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@l
	li	r6, 0	/* DCSR effective address */
	ori	r2, r6, MAS2_I|MAS2_G
	li	r3, MAS3_SW|MAS3_SR
	li	r4, 0
	mtspr	MAS0, r0
	mtspr	MAS1, r1
	mtspr	MAS2, r2
	mtspr	MAS3, r3
	mtspr	MAS7, r4
	isync
	tlbwe
	isync
	msync

	/* enable the timebase */
#define CTBENR	0xe2084
	li	r3, 1
	addis	r4, r7, CTBENR@ha
	stw	r3, CTBENR@l(r4)
	lwz	r3, CTBENR@l(r4)
	twi	0,r3,0
	isync

	.macro	erratum_set_ccsr offset value
	addis	r3, r7, \offset@ha
	lis	r4, \value@h
	addi	r3, r3, \offset@l
	ori	r4, r4, \value@l
	bl	erratum_set_value
	.endm

	.macro	erratum_set_dcsr offset value
	addis	r3, r6, \offset@ha
	lis	r4, \value@h
	addi	r3, r3, \offset@l
	ori	r4, r4, \value@l
	bl	erratum_set_value
	.endm
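	/*
	 * Each erratum_set_* invocation below expands to the usual @ha/@l
	 * pair that forms the register address in r3 (relative to r7 for
	 * CCSR, r6 for DCSR) and loads the 32-bit value into r4, then calls
	 * erratum_set_value to perform the store.
	 */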
	erratum_set_dcsr 0xb0e08 0xe0201800
	erratum_set_dcsr 0xb0e18 0xe0201800
	erratum_set_dcsr 0xb0e38 0xe0400000
	erratum_set_dcsr 0xb0008 0x00900000
	erratum_set_dcsr 0xb0e40 0xe00a0000
	erratum_set_ccsr 0x18600 CONFIG_SYS_FSL_CORENET_SNOOPVEC_COREONLY
#ifdef CONFIG_RAMBOOT_PBL
	erratum_set_ccsr 0x10f00 0x495e5000
#else
	erratum_set_ccsr 0x10f00 0x415e5000
#endif
	erratum_set_ccsr 0x11f00 0x415e5000

	/* Make temp mapping uncacheable again, if it was initially */
	bl	2f
2:	mflr	r3
	tlbsx	0, r3
	mfspr	r4, MAS2
	rlwimi	r4, r15, 0, MAS2_I
	rlwimi	r4, r15, 0, MAS2_G
	mtspr	MAS2, r4
	isync
	tlbwe
	isync
	msync

	/* Clear the cache */
	lis	r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
	ori	r3,r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
	sync
	isync
	mtspr	SPRN_L1CSR1,r3
	isync
2:	sync
	mfspr	r4,SPRN_L1CSR1
	and.	r4,r4,r3
	bne	2b

	lis	r3,(L1CSR1_CPE|L1CSR1_ICE)@h
	ori	r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
	sync
	isync
	mtspr	SPRN_L1CSR1,r3
	isync
2:	sync
	mfspr	r4,SPRN_L1CSR1
	and.	r4,r4,r3
	beq	2b

	/* Remove temporary mappings */
	lis	r0, MAS0_TLBSEL(1)@h
	rlwimi	r0, r9, 16, MAS0_ESEL_MSK
	li	r3, 0
	mtspr	MAS0, r0
	mtspr	MAS1, r3
	isync
	tlbwe
	isync
	msync

	li	r3, 0
	stw	r3, 0xc08(r7)	/* LAWAR0 */
	lwz	r3, 0xc08(r7)
	isync

	lis	r0, MAS0_TLBSEL(1)@h
	rlwimi	r0, r8, 16, MAS0_ESEL_MSK
	li	r3, 0
	mtspr	MAS0, r0
	mtspr	MAS1, r3
	isync
	tlbwe
	isync
	msync

	b	9f

	/* r3 = addr, r4 = value, clobbers r5, r11, r12 */
erratum_set_value:
	/* Lock two cache lines into I-Cache */
	sync
	mfspr	r11, SPRN_L1CSR1
	rlwinm	r11, r11, 0, ~L1CSR1_ICUL
	sync
	isync
	mtspr	SPRN_L1CSR1, r11
	isync

	mflr	r12
	bl	5f
5:	mflr	r5
	addi	r5, r5, 2f - 5b
	icbtls	0, 0, r5
	addi	r5, r5, 64

	sync
	mfspr	r11, SPRN_L1CSR1
3:	andi.	r11, r11, L1CSR1_ICUL
	bne	3b

	icbtls	0, 0, r5
	addi	r5, r5, 64

	sync
	mfspr	r11, SPRN_L1CSR1
3:	andi.	r11, r11, L1CSR1_ICUL
	bne	3b

	b	2f
	.align	6
	/* Inside a locked cacheline, wait a while, write, then wait a while */
2:	sync

	mfspr	r5, SPRN_TBRL
	addis	r11, r5, 0x10000@h /* wait 65536 timebase ticks */
4:	mfspr	r5, SPRN_TBRL
	subf.	r5, r5, r11
	bgt	4b

	stw	r4, 0(r3)

	mfspr	r5, SPRN_TBRL
	addis	r11, r5, 0x10000@h /* wait 65536 timebase ticks */
4:	mfspr	r5, SPRN_TBRL
	subf.	r5, r5, r11
	bgt	4b

	sync

	/*
	 * Fill out the rest of this cache line and the next with nops,
	 * to ensure that nothing outside the locked area will be
	 * fetched due to a branch.
	 */
	.rept 19
	nop
	.endr

	sync
	mfspr	r11, SPRN_L1CSR1
	rlwinm	r11, r11, 0, ~L1CSR1_ICUL
	sync
	isync
	mtspr	SPRN_L1CSR1, r11
	isync

	mtlr	r12
	blr

9:
#endif
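/*
 * Create the AS=1 environment used for the early init phase: a temporary
 * mapping of the boot image (TLB1 entry 15) and of the initial stack area
 * (entry 14), then rfi into AS=1 at switch_as, where the L1 D-cache (or
 * L2-as-SRAM) is set up as initial RAM.
 */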
create_init_ram_area:
	lis	r6,FSL_BOOKE_MAS0(1, 15, 0)@h
	ori	r6,r6,FSL_BOOKE_MAS0(1, 15, 0)@l

#ifdef NOR_BOOT
	/* create a temp mapping in AS=1 to the 4M boot window */
	create_tlb1_entry 15, \
		1, BOOKE_PAGESZ_4M, \
		CONFIG_SYS_MONITOR_BASE & 0xffc00000, MAS2_I|MAS2_G, \
		0xffc00000, MAS3_SX|MAS3_SW|MAS3_SR, \
		0, r6

#elif !defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_SECURE_BOOT)
	/* create a temp mapping in AS = 1 for the Flash mapping
	 * created by the PBL for the ISBC code
	 */
	create_tlb1_entry 15, \
		1, BOOKE_PAGESZ_1M, \
		CONFIG_SYS_MONITOR_BASE & 0xfff00000, MAS2_I|MAS2_G, \
		CONFIG_SYS_PBI_FLASH_WINDOW & 0xfff00000, MAS3_SX|MAS3_SW|MAS3_SR, \
		0, r6
#else
	/*
	 * create a temp mapping in AS=1 to the 1M CONFIG_SYS_MONITOR_BASE
	 * space; the main image has been relocated to CONFIG_SYS_MONITOR_BASE
	 * in the second stage.
	 */
	create_tlb1_entry 15, \
		1, BOOKE_PAGESZ_1M, \
		CONFIG_SYS_MONITOR_BASE & 0xfff00000, MAS2_I|MAS2_G, \
		CONFIG_SYS_MONITOR_BASE & 0xfff00000, MAS3_SX|MAS3_SW|MAS3_SR, \
		0, r6
#endif

	/* create a temp mapping in AS=1 to the stack */
#if defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW) && \
    defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH)
	create_tlb1_entry 14, \
		1, BOOKE_PAGESZ_16K, \
		CONFIG_SYS_INIT_RAM_ADDR, 0, \
		CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW, MAS3_SX|MAS3_SW|MAS3_SR, \
		CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH, r6

#else
	create_tlb1_entry 14, \
		1, BOOKE_PAGESZ_16K, \
		CONFIG_SYS_INIT_RAM_ADDR, 0, \
		CONFIG_SYS_INIT_RAM_ADDR, MAS3_SX|MAS3_SW|MAS3_SR, \
		0, r6
#endif

	lis	r6,MSR_IS|MSR_DS|MSR_DE@h
	ori	r6,r6,MSR_IS|MSR_DS|MSR_DE@l
	lis	r7,switch_as@h
	ori	r7,r7,switch_as@l

	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r6
	rfi

switch_as:
/* L1 DCache is used for initial RAM */

	/* Allocate Initial RAM in data cache.
	 */
	lis	r3,CONFIG_SYS_INIT_RAM_ADDR@h
	ori	r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l
	mfspr	r2, L1CFG0
	andi.	r2, r2, 0x1ff
	/* number of lines = cache size * 1024 / (2 * L1 line size),
	 * i.e. lock half of the d-cache as initial RAM */
	slwi	r2, r2, (10 - 1 - L1_CACHE_SHIFT)
	mtctr	r2
	li	r0,0
1:
	dcbz	r0,r3
	dcbtls	0,r0,r3
	addi	r3,r3,CONFIG_SYS_CACHELINE_SIZE
	bdnz	1b

	/* Jump out of the last 4K page and continue to 'normal' start */
#if defined(CONFIG_SYS_RAMBOOT) || defined(CONFIG_SPL)
	/* We assume that we're already running at the address we're linked at */
	b	_start_cont
#else
	/* Calculate absolute address in FLASH and jump there */
	/*--------------------------------------------------------------*/
	lis	r3,CONFIG_SYS_MONITOR_BASE@h
	ori	r3,r3,CONFIG_SYS_MONITOR_BASE@l
	addi	r3,r3,_start_cont - _start + _START_OFFSET
	mtlr	r3
	blr
#endif

	.text
	.globl	_start
_start:
	.long	0x27051956		/* U-BOOT Magic Number */
	.globl	version_string
version_string:
	.ascii U_BOOT_VERSION_STRING, "\0"

	.align	4
	.globl	_start_cont
_start_cont:
	/* Set up the stack in initial RAM; could be L2-as-SRAM or L1 dcache */
	lis	r3,(CONFIG_SYS_INIT_RAM_ADDR)@h
	ori	r3,r3,((CONFIG_SYS_INIT_SP_OFFSET-16)&~0xf)@l /* Align to 16 */
	li	r0,0
	stw	r0,0(r3)	/* Terminate Back Chain */
	stw	r0,+4(r3)	/* NULL return address. */
	mr	r1,r3		/* Transfer to SP(r1) */

	GET_GOT

	/* Pass our potential ePAPR device tree pointer to cpu_init_early_f */
	mr	r3, r24

	bl	cpu_init_early_f

	/* switch back to AS = 0 */
	lis	r3,(MSR_CE|MSR_ME|MSR_DE)@h
	ori	r3,r3,(MSR_CE|MSR_ME|MSR_DE)@l
	mtmsr	r3
	isync

	bl	cpu_init_f
	bl	board_init_f
	isync

	/* NOTREACHED - board_init_f() does not return */

#ifndef MINIMAL_SPL
	. = EXC_OFF_SYS_RESET
	.globl	_start_of_vectors
_start_of_vectors:

/* Critical input. */
	CRIT_EXCEPTION(0x0100, CriticalInput, CritcalInputException)

/* Machine check */
	MCK_EXCEPTION(0x200, MachineCheck, MachineCheckException)

/* Data Storage exception. */
	STD_EXCEPTION(0x0300, DataStorage, UnknownException)

/* Instruction Storage exception. */
	STD_EXCEPTION(0x0400, InstStorage, UnknownException)

/* External Interrupt exception. */
	STD_EXCEPTION(0x0500, ExtInterrupt, ExtIntException)

/* Alignment exception. */
	. = 0x0600
Alignment:
	EXCEPTION_PROLOG(SRR0, SRR1)
	mfspr	r4,DAR
	stw	r4,_DAR(r21)
	mfspr	r5,DSISR
	stw	r5,_DSISR(r21)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_TEMPLATE(Alignment, AlignmentException, MSR_KERNEL, COPY_EE)

/* Program check exception */
	. = 0x0700
ProgramCheck:
	EXCEPTION_PROLOG(SRR0, SRR1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_TEMPLATE(ProgramCheck, ProgramCheckException,
		MSR_KERNEL, COPY_EE)

	/* No FPU on MPC85xx. This exception is not supposed to happen.
	*/
	STD_EXCEPTION(0x0800, FPUnavailable, UnknownException)

	. = 0x0900
/*
 * r0 - SYSCALL number
 * r3-... arguments
 */
SystemCall:
	addis	r11,r0,0	/* get functions table addr */
	ori	r11,r11,0	/* Note: this code is patched in trap_init */
	addis	r12,r0,0	/* get number of functions */
	ori	r12,r12,0

	cmplw	0,r0,r12
	bge	1f

	rlwinm	r0,r0,2,0,31	/* fn_addr = fn_tbl[r0] */
	add	r11,r11,r0
	lwz	r11,0(r11)

	li	r20,0xd00-4	/* Get stack pointer */
	lwz	r12,0(r20)
	subi	r12,r12,12	/* Adjust stack pointer */
	li	r0,0xc00+_end_back-SystemCall
	cmplw	0,r0,r12	/* Check stack overflow */
	bgt	1f
	stw	r12,0(r20)

	mflr	r0
	stw	r0,0(r12)
	mfspr	r0,SRR0
	stw	r0,4(r12)
	mfspr	r0,SRR1
	stw	r0,8(r12)

	li	r12,0xc00+_back-SystemCall
	mtlr	r12
	mtspr	SRR0,r11

1:	SYNC
	rfi
_back:

	mfmsr	r11			/* Disable interrupts */
	li	r12,0
	ori	r12,r12,MSR_EE
	andc	r11,r11,r12
	SYNC				/* Some chip revs need this... */
	mtmsr	r11
	SYNC

	li	r12,0xd00-4		/* restore regs */
	lwz	r12,0(r12)

	lwz	r11,0(r12)
	mtlr	r11
	lwz	r11,4(r12)
	mtspr	SRR0,r11
	lwz	r11,8(r12)
	mtspr	SRR1,r11

	addi	r12,r12,12		/* Adjust stack pointer */
	li	r20,0xd00-4
	stw	r12,0(r20)

	SYNC
	rfi
_end_back:

	STD_EXCEPTION(0x0a00, Decrementer, timer_interrupt)
	STD_EXCEPTION(0x0b00, IntervalTimer, UnknownException)
	STD_EXCEPTION(0x0c00, WatchdogTimer, UnknownException)

	STD_EXCEPTION(0x0d00, DataTLBError, UnknownException)
	STD_EXCEPTION(0x0e00, InstructionTLBError, UnknownException)

	CRIT_EXCEPTION(0x0f00, DebugBreakpoint, DebugException )

	.globl	_end_of_vectors
_end_of_vectors:


	. = . + (0x100 - ( . & 0xff ))	/* align for debug */

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception.
 * Register r21 is a pointer into the trap frame, r1 has the new stack
 * pointer.
 */
	.globl	transfer_to_handler
transfer_to_handler:
	stw	r22,_NIP(r21)
	lis	r22,MSR_POW@h
	andc	r23,r23,r22
	stw	r23,_MSR(r21)
	SAVE_GPR(7, r21)
	SAVE_4GPRS(8, r21)
	SAVE_8GPRS(12, r21)
	SAVE_8GPRS(24, r21)

	mflr	r23
	andi.	r24,r23,0x3f00		/* get vector offset */
	stw	r24,TRAP(r21)
	li	r22,0
	stw	r22,RESULT(r21)
	mtspr	SPRG2,r22		/* r1 is now kernel sp */

	lwz	r24,0(r23)		/* virtual address of handler */
	lwz	r23,4(r23)		/* where to go when done */
	mtspr	SRR0,r24
	mtspr	SRR1,r20
	mtlr	r23
	SYNC
	rfi				/* jump to handler, enable MMU */

int_return:
	mfmsr	r28			/* Disable interrupts */
	li	r4,0
	ori	r4,r4,MSR_EE
	andc	r28,r28,r4
	SYNC				/* Some chip revs need this... */
	mtmsr	r28
	SYNC
	lwz	r2,_CTR(r1)
	lwz	r0,_LINK(r1)
	mtctr	r2
	mtlr	r0
	lwz	r2,_XER(r1)
	lwz	r0,_CCR(r1)
	mtspr	XER,r2
	mtcrf	0xFF,r0
	REST_10GPRS(3, r1)
	REST_10GPRS(13, r1)
	REST_8GPRS(23, r1)
	REST_GPR(31, r1)
	lwz	r2,_NIP(r1)	/* Restore environment */
	lwz	r0,_MSR(r1)
	mtspr	SRR0,r2
	mtspr	SRR1,r0
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	SYNC
	rfi

crit_return:
	mfmsr	r28			/* Disable interrupts */
	li	r4,0
	ori	r4,r4,MSR_EE
	andc	r28,r28,r4
	SYNC				/* Some chip revs need this... */
	mtmsr	r28
	SYNC
	lwz	r2,_CTR(r1)
	lwz	r0,_LINK(r1)
	mtctr	r2
	mtlr	r0
	lwz	r2,_XER(r1)
	lwz	r0,_CCR(r1)
	mtspr	XER,r2
	mtcrf	0xFF,r0
	REST_10GPRS(3, r1)
	REST_10GPRS(13, r1)
	REST_8GPRS(23, r1)
	REST_GPR(31, r1)
	lwz	r2,_NIP(r1)	/* Restore environment */
	lwz	r0,_MSR(r1)
	mtspr	SPRN_CSRR0,r2
	mtspr	SPRN_CSRR1,r0
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	SYNC
	rfci

mck_return:
	mfmsr	r28			/* Disable interrupts */
	li	r4,0
	ori	r4,r4,MSR_EE
	andc	r28,r28,r4
	SYNC				/* Some chip revs need this... */
	mtmsr	r28
	SYNC
	lwz	r2,_CTR(r1)
	lwz	r0,_LINK(r1)
	mtctr	r2
	mtlr	r0
	lwz	r2,_XER(r1)
	lwz	r0,_CCR(r1)
	mtspr	XER,r2
	mtcrf	0xFF,r0
	REST_10GPRS(3, r1)
	REST_10GPRS(13, r1)
	REST_8GPRS(23, r1)
	REST_GPR(31, r1)
	lwz	r2,_NIP(r1)	/* Restore environment */
	lwz	r0,_MSR(r1)
	mtspr	SPRN_MCSRR0,r2
	mtspr	SPRN_MCSRR1,r0
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	SYNC
	rfmci

/* Cache functions.
*/
.globl flush_icache
flush_icache:
.globl invalidate_icache
invalidate_icache:
	mfspr	r0,L1CSR1
	ori	r0,r0,L1CSR1_ICFI
	msync
	isync
	mtspr	L1CSR1,r0
	isync
	blr				/* entire I cache */

.globl invalidate_dcache
invalidate_dcache:
	mfspr	r0,L1CSR0
	ori	r0,r0,L1CSR0_DCFI
	msync
	isync
	mtspr	L1CSR0,r0
	isync
	blr

	.globl	icache_enable
icache_enable:
	mflr	r8
	bl	invalidate_icache
	mtlr	r8
	isync
	mfspr	r4,L1CSR1
	ori	r4,r4,0x0001
	oris	r4,r4,0x0001
	mtspr	L1CSR1,r4
	isync
	blr

	.globl	icache_disable
icache_disable:
	mfspr	r0,L1CSR1
	lis	r3,0
	ori	r3,r3,L1CSR1_ICE
	andc	r0,r0,r3
	mtspr	L1CSR1,r0
	isync
	blr

	.globl	icache_status
icache_status:
	mfspr	r3,L1CSR1
	andi.	r3,r3,L1CSR1_ICE
	blr

	.globl	dcache_enable
dcache_enable:
	mflr	r8
	bl	invalidate_dcache
	mtlr	r8
	isync
	mfspr	r0,L1CSR0
	ori	r0,r0,0x0001
	oris	r0,r0,0x0001
	msync
	isync
	mtspr	L1CSR0,r0
	isync
	blr

	.globl	dcache_disable
dcache_disable:
	mfspr	r3,L1CSR0
	lis	r4,0
	ori	r4,r4,L1CSR0_DCE
	andc	r3,r3,r4
	mtspr	L1CSR0,r3
	isync
	blr

	.globl	dcache_status
dcache_status:
	mfspr	r3,L1CSR0
	andi.	r3,r3,L1CSR0_DCE
	blr

	.globl get_pir
get_pir:
	mfspr	r3,PIR
	blr

	.globl get_pvr
get_pvr:
	mfspr	r3,PVR
	blr

	.globl get_svr
get_svr:
	mfspr	r3,SVR
	blr

	.globl wr_tcr
wr_tcr:
	mtspr	TCR,r3
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 in8 */
/* Description:	 Input 8 bits */
/*------------------------------------------------------------------------------- */
	.globl	in8
in8:
	lbz	r3,0x0000(r3)
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 out8 */
/* Description:	 Output 8 bits */
/*------------------------------------------------------------------------------- */
	.globl	out8
out8:
	stb	r4,0x0000(r3)
	sync
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 out16 */
/* Description:	 Output 16 bits */
/*------------------------------------------------------------------------------- */
	.globl	out16
out16:
	sth	r4,0x0000(r3)
	sync
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 out16r */
/* Description:	 Byte reverse and output 16 bits */
/*------------------------------------------------------------------------------- */
	.globl	out16r
out16r:
	sthbrx	r4,r0,r3
	sync
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 out32 */
/* Description:	 Output 32 bits */
/*------------------------------------------------------------------------------- */
	.globl	out32
out32:
	stw	r4,0x0000(r3)
	sync
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 out32r */
/* Description:	 Byte reverse and output 32 bits */
/*------------------------------------------------------------------------------- */
	.globl	out32r
out32r:
	stwbrx	r4,r0,r3
	sync
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 in16 */
/* Description:	 Input 16 bits */
/*------------------------------------------------------------------------------- */
	.globl	in16
in16:
	lhz	r3,0x0000(r3)
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 in16r */
/* Description:	 Input 16 bits and byte reverse */
/*------------------------------------------------------------------------------- */
	.globl	in16r
in16r:
	lhbrx	r3,r0,r3
	blr
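/*
 * Note: the inN/outN accessors in this block are plain big-endian MMIO
 * helpers; the "r" variants (in16r, out16r, out32r, in32r) byte-reverse for
 * little-endian device registers, and every store is followed by a sync to
 * order the access on the bus.
 */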
/*------------------------------------------------------------------------------- */
/* Function:	 in32 */
/* Description:	 Input 32 bits */
/*------------------------------------------------------------------------------- */
	.globl	in32
in32:
	lwz	r3,0x0000(r3)
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 in32r */
/* Description:	 Input 32 bits and byte reverse */
/*------------------------------------------------------------------------------- */
	.globl	in32r
in32r:
	lwbrx	r3,r0,r3
	blr
#endif /* !MINIMAL_SPL */

/*------------------------------------------------------------------------------*/

/*
 * void write_tlb(mas0, mas1, mas2, mas3, mas7)
 */
	.globl	write_tlb
write_tlb:
	mtspr	MAS0,r3
	mtspr	MAS1,r4
	mtspr	MAS2,r5
	mtspr	MAS3,r6
#ifdef CONFIG_ENABLE_36BIT_PHYS
	mtspr	MAS7,r7
#endif
	li	r3,0
#ifdef CONFIG_SYS_BOOK3E_HV
	mtspr	MAS8,r3
#endif
	isync
	tlbwe
	msync
	isync
	blr

/*
 * void relocate_code (addr_sp, gd, addr_moni)
 *
 * This "function" does not return, instead it continues in RAM
 * after relocating the monitor code.
 *
 * r3 = dest
 * r4 = src
 * r5 = length in bytes
 * r6 = cachelinesize
 */
	.globl	relocate_code
relocate_code:
	mr	r1,r3		/* Set new stack pointer		*/
	mr	r9,r4		/* Save copy of Init Data pointer	*/
	mr	r10,r5		/* Save copy of Destination Address	*/

	GET_GOT
	mr	r3,r5				/* Destination Address	*/
	lis	r4,CONFIG_SYS_MONITOR_BASE@h	/* Source      Address	*/
	ori	r4,r4,CONFIG_SYS_MONITOR_BASE@l
	lwz	r5,GOT(__init_end)
	sub	r5,r5,r4
	li	r6,CONFIG_SYS_CACHELINE_SIZE	/* Cache Line Size	*/

	/*
	 * Fix GOT pointer:
	 *
	 * New GOT-PTR = (old GOT-PTR - CONFIG_SYS_MONITOR_BASE) + Destination Address
	 *
	 * Offset:
	 */
	sub	r15,r10,r4

	/* First our own GOT */
	add	r12,r12,r15
	/* then the one used by the C code */
	add	r30,r30,r15

	/*
	 * Now relocate code
	 */

	cmplw	cr1,r3,r4
	addi	r0,r5,3
	srwi.	r0,r0,2
	beq	cr1,4f		/* In place copy is not necessary	*/
	beq	7f		/* Protect against 0 count		*/
	mtctr	r0
	bge	cr1,2f

	la	r8,-4(r4)
	la	r7,-4(r3)
1:	lwzu	r0,4(r8)
	stwu	r0,4(r7)
	bdnz	1b
	b	4f

2:	slwi	r0,r0,2
	add	r8,r4,r0
	add	r7,r3,r0
3:	lwzu	r0,-4(r8)
	stwu	r0,-4(r7)
	bdnz	3b

/*
 * Now flush the cache: note that we must start from a cache aligned
 * address. Otherwise we might miss one cache line.
 */
4:	cmpwi	r6,0
	add	r5,r3,r5
	beq	7f		/* Always flush prefetch queue in any case */
	subi	r0,r6,1
	andc	r3,r3,r0
	mr	r4,r3
5:	dcbst	0,r4
	add	r4,r4,r6
	cmplw	r4,r5
	blt	5b
	sync			/* Wait for all dcbst to complete on bus */
	mr	r4,r3
6:	icbi	0,r4
	add	r4,r4,r6
	cmplw	r4,r5
	blt	6b
7:	sync			/* Wait for all icbi to complete on bus	*/
	isync

/*
 * We are done. Do not return, instead branch to second part of board
 * initialization, now running from RAM.
 */

	addi	r0,r10,in_ram - _start + _START_OFFSET

	/*
	 * As IVPR is going to point to a RAM address,
	 * make sure IVOR15 holds a valid opcode to support the debugger
	 */
	mtspr	IVOR15,r0

	/*
	 * Re-point the IVPR at RAM
	 */
	mtspr	IVPR,r10

	mtlr	r0
	blr				/* NEVER RETURNS! */
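/*
 * in_ram finishes the relocation: it adjusts the GOT2 entries and the fixup
 * table by the relocation offset, clears BSS, and finally calls
 * board_init_r() with the global data pointer and the new base address.
 */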
	.globl	in_ram
in_ram:

	/*
	 * Relocation function, r12 points to got2+0x8000
	 *
	 * Adjust got2 pointers, no need to check for 0, this code
	 * already puts a few entries in the table.
	 */
	li	r0,__got2_entries@sectoff@l
	la	r3,GOT(_GOT2_TABLE_)
	lwz	r11,GOT(_GOT2_TABLE_)
	mtctr	r0
	sub	r11,r3,r11
	addi	r3,r3,-4
1:	lwzu	r0,4(r3)
	cmpwi	r0,0
	beq-	2f
	add	r0,r0,r11
	stw	r0,0(r3)
2:	bdnz	1b

	/*
	 * Now adjust the fixups and the pointers to the fixups
	 * in case we need to move ourselves again.
	 */
	li	r0,__fixup_entries@sectoff@l
	lwz	r3,GOT(_FIXUP_TABLE_)
	cmpwi	r0,0
	mtctr	r0
	addi	r3,r3,-4
	beq	4f
3:	lwzu	r4,4(r3)
	lwzux	r0,r4,r11
	cmpwi	r0,0
	add	r0,r0,r11
	stw	r4,0(r3)
	beq-	5f
	stw	r0,0(r4)
5:	bdnz	3b
4:
clear_bss:
	/*
	 * Now clear BSS segment
	 */
	lwz	r3,GOT(__bss_start)
	lwz	r4,GOT(__bss_end)

	cmplw	0,r3,r4
	beq	6f

	li	r0,0
5:
	stw	r0,0(r3)
	addi	r3,r3,4
	cmplw	0,r3,r4
	blt	5b
6:

	mr	r3,r9		/* Init Data pointer		*/
	mr	r4,r10		/* Destination Address		*/
	bl	board_init_r

#ifndef MINIMAL_SPL
	/*
	 * Copy exception vector code to low memory
	 *
	 * r3: dest_addr
	 * r7: source address, r8: end address, r9: target address
	 */
	.globl	trap_init
trap_init:
	mflr	r4			/* save link register		*/
	GET_GOT
	lwz	r7,GOT(_start_of_vectors)
	lwz	r8,GOT(_end_of_vectors)

	li	r9,0x100		/* reset vector always at 0x100 */

	cmplw	0,r7,r8
	bgelr				/* return if r7>=r8 - just in case */
1:
	lwz	r0,0(r7)
	stw	r0,0(r9)
	addi	r7,r7,4
	addi	r9,r9,4
	cmplw	0,r7,r8
	bne	1b

	/*
	 * relocate `hdlr' and `int_return' entries
	 */
	li	r7,.L_CriticalInput - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_MachineCheck - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_DataStorage - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_InstStorage - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_ExtInterrupt - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_Alignment - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_ProgramCheck - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_FPUnavailable - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_Decrementer - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_IntervalTimer - _start + _START_OFFSET
	li	r8,_end_of_vectors - _start + _START_OFFSET
2:
	bl	trap_reloc
	addi	r7,r7,0x100		/* next exception vector	*/
	cmplw	0,r7,r8
	blt	2b

	/* Update IVORs as per relocated vector table address */
	li	r7,0x0100
	mtspr	IVOR0,r7	/* 0: Critical input */
	li	r7,0x0200
	mtspr	IVOR1,r7	/* 1: Machine check */
	li	r7,0x0300
	mtspr	IVOR2,r7	/* 2: Data storage */
	li	r7,0x0400
	mtspr	IVOR3,r7	/* 3: Instruction storage */
	li	r7,0x0500
	mtspr	IVOR4,r7	/* 4: External interrupt */
	li	r7,0x0600
	mtspr	IVOR5,r7	/* 5: Alignment */
	li	r7,0x0700
	mtspr	IVOR6,r7	/* 6: Program check */
	li	r7,0x0800
	mtspr	IVOR7,r7	/* 7: floating point unavailable */
	li	r7,0x0900
	mtspr	IVOR8,r7	/* 8: System call */
	/* 9: Auxiliary processor unavailable (unsupported) */
	li	r7,0x0a00
	mtspr	IVOR10,r7	/* 10: Decrementer */
	li	r7,0x0b00
	mtspr	IVOR11,r7	/* 11: Interval timer */
	li	r7,0x0c00
	mtspr	IVOR12,r7	/* 12: Watchdog timer */
	li	r7,0x0d00
	mtspr	IVOR13,r7	/* 13: Data TLB error */
	li	r7,0x0e00
	mtspr	IVOR14,r7	/* 14: Instruction TLB error */
	li	r7,0x0f00
	mtspr	IVOR15,r7	/* 15: Debug */

	lis	r7,0x0
	mtspr	IVPR,r7

	mtlr	r4			/* restore link register	*/
	blr

.globl unlock_ram_in_cache
unlock_ram_in_cache:
	/* invalidate the INIT_RAM section */
	lis	r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@h
	ori	r3,r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@l
	mfspr	r4,L1CFG0
	andi.	r4,r4,0x1ff
	slwi	r4,r4,(10 - 1 - L1_CACHE_SHIFT)
	mtctr	r4
1:	dcbi	r0,r3
	dcblc	r0,r3
	addi	r3,r3,CONFIG_SYS_CACHELINE_SIZE
	bdnz	1b
	sync

	/* Invalidate the TLB entries for the cache */
	lis	r3,CONFIG_SYS_INIT_RAM_ADDR@h
	ori	r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l
	tlbivax	0,r3
	addi	r3,r3,0x1000
	tlbivax	0,r3
	addi	r3,r3,0x1000
	tlbivax	0,r3
	addi	r3,r3,0x1000
	tlbivax	0,r3
	isync
	blr

.globl flush_dcache
flush_dcache:
	mfspr	r3,SPRN_L1CFG0

	rlwinm	r5,r3,9,3	/* Extract cache block size */
	twlgti	r5,1		/* Only 32 and 64 byte cache blocks
				 * are currently defined.
				 */
	li	r4,32
	subfic	r6,r5,2		/* r6 = log2(1KiB / cache block size) -
				 *      log2(number of ways)
				 */
	slw	r5,r4,r5	/* r5 = cache block size */

	rlwinm	r7,r3,0,0xff	/* Extract number of KiB in the cache */
	mulli	r7,r7,13	/* An 8-way cache will require 13
				 * loads per set.
				 */
	slw	r7,r7,r6

	/* save off HID0 and set DCFA */
	mfspr	r8,SPRN_HID0
	ori	r9,r8,HID0_DCFA@l
	mtspr	SPRN_HID0,r9
	isync

	lis	r4,0
	mtctr	r7

1:	lwz	r3,0(r4)	/* Load... */
	add	r4,r4,r5
	bdnz	1b

	msync
	lis	r4,0
	mtctr	r7

1:	dcbf	0,r4	/* ...and flush. */
	add	r4,r4,r5
	bdnz	1b

	/* restore HID0 */
	mtspr	SPRN_HID0,r8
	isync

	blr
#endif /* !MINIMAL_SPL */