/*
 * Copyright 2004, 2007-2012 Freescale Semiconductor, Inc.
 * Copyright (C) 2003 Motorola, Inc.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

/* U-Boot Startup Code for Motorola 85xx PowerPC based Embedded Boards
 *
 * The processor starts at 0xfffffffc and the code is first executed in the
 * last 4K page (0xfffff000-0xffffffff) in flash/rom.
 *
 */

#include <asm-offsets.h>
#include <config.h>
#include <mpc85xx.h>
#include <version.h>

#include <ppc_asm.tmpl>
#include <ppc_defs.h>

#include <asm/cache.h>
#include <asm/mmu.h>

#undef	MSR_KERNEL
#define MSR_KERNEL ( MSR_ME )	/* Machine Check */

#define LAW_EN		0x80000000

#if defined(CONFIG_NAND_SPL) || \
	(defined(CONFIG_SPL_BUILD) && defined(CONFIG_SPL_INIT_MINIMAL))
#define MINIMAL_SPL
#endif

#if !defined(CONFIG_SPL) && !defined(CONFIG_SYS_RAMBOOT) && \
	!defined(CONFIG_SECURE_BOOT) && !defined(CONFIG_SRIO_PCIE_BOOT_SLAVE)
#define NOR_BOOT
#endif

/*
 * Set up GOT: Global Offset Table
 *
 * Use r12 to access the GOT
 */
	START_GOT
	GOT_ENTRY(_GOT2_TABLE_)
	GOT_ENTRY(_FIXUP_TABLE_)

#ifndef MINIMAL_SPL
	GOT_ENTRY(_start)
	GOT_ENTRY(_start_of_vectors)
	GOT_ENTRY(_end_of_vectors)
	GOT_ENTRY(transfer_to_handler)
#endif

	GOT_ENTRY(__init_end)
	GOT_ENTRY(__bss_end)
	GOT_ENTRY(__bss_start)
	END_GOT

/*
 * e500 Startup -- after reset only the last 4KB of the effective
 * address space is mapped in the MMU L2 TLB1 Entry0. The .bootpg
 * section is located at THIS LAST page and basically does three
 * things: clear some registers, set up exception tables and
 * add more TLB entries for 'larger spaces' (e.g. the boot rom) to
 * continue the boot procedure.
 *
 * Once the boot rom is mapped by TLB entries we can proceed
 * with normal startup.
 *
 */

	.section .bootpg,"ax"
	.globl _start_e500

_start_e500:
/* Enable debug exception */
	li	r1,MSR_DE
	mtmsr	r1

	/*
	 * If we got an ePAPR device tree pointer passed in as r3, we need that
	 * later in cpu_init_early_f(). Save it to a safe register before we
	 * clobber it so that we can fetch it from there later.
	 */
	mr	r24, r3

#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
	mfspr	r3,SPRN_SVR
	rlwinm	r3,r3,0,0xff
	li	r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV
	cmpw	r3,r4
	beq	1f

#ifdef CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
	li	r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
	cmpw	r3,r4
	beq	1f
#endif

	/* Not a supported revision affected by erratum */
	li	r27,0
	b	2f

1:	li	r27,1	/* Remember for later that we have the erratum */
	/* Erratum says set bits 55:60 to 001001 */
	msync
	isync
	mfspr	r3,SPRN_HDBCR0
	li	r4,0x48
	rlwimi	r3,r4,0,0x1f8
	mtspr	SPRN_HDBCR0,r3
	isync
2:
#endif
#ifdef CONFIG_SYS_FSL_ERRATUM_A005125
	msync
	isync
	mfspr	r3, SPRN_HDBCR0
	oris	r3, r3, 0x0080
	mtspr	SPRN_HDBCR0, r3
#endif

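	/*
	 * Register conventions used by this boot path (summary for readers,
	 * gathered from the code below): r24 holds the ePAPR device tree
	 * pointer, r27 records whether erratum A004510 applies, r14 later
	 * holds the ESEL of the TLB1 entry we are executing from, r15 saves
	 * that entry's original I/G attributes, and r8/r9 are reserved for
	 * the new/old CCSR virtual addresses while CCSR is being relocated.
	 */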
#if defined(CONFIG_SECURE_BOOT) && defined(CONFIG_E500MC) && \
	!defined(CONFIG_E6500)
	/* ISBC uses L2 as stack.
	 * Disable L2 cache here so that u-boot can enable it later
	 * as part of its normal flow
	 */

	/* Check if L2 is enabled */
	mfspr	r3, SPRN_L2CSR0
	lis	r2, L2CSR0_L2E@h
	ori	r2, r2, L2CSR0_L2E@l
	and.	r4, r3, r2
	beq	l2_disabled

	mfspr	r3, SPRN_L2CSR0
	/* Flush L2 cache */
	lis	r2,(L2CSR0_L2FL)@h
	ori	r2, r2, (L2CSR0_L2FL)@l
	or	r3, r2, r3
	sync
	isync
	mtspr	SPRN_L2CSR0,r3
	isync
1:
	mfspr	r3, SPRN_L2CSR0
	and.	r1, r3, r2
	bne	1b

	mfspr	r3, SPRN_L2CSR0
	lis	r2, L2CSR0_L2E@h
	ori	r2, r2, L2CSR0_L2E@l
	andc	r4, r3, r2
	sync
	isync
	mtspr	SPRN_L2CSR0,r4
	isync

l2_disabled:
#endif

/* clear registers/arrays not reset by hardware */

	/* L1 */
	li	r0,2
	mtspr	L1CSR0,r0	/* invalidate d-cache */
	mtspr	L1CSR1,r0	/* invalidate i-cache */

	mfspr	r1,DBSR
	mtspr	DBSR,r1		/* Clear all valid bits */


	.macro	create_tlb1_entry esel ts tsize epn wimg rpn perm phy_high scratch
	lis	\scratch, FSL_BOOKE_MAS0(1, \esel, 0)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@l
	mtspr	MAS0, \scratch
	lis	\scratch, FSL_BOOKE_MAS1(1, 1, 0, \ts, \tsize)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS1(1, 1, 0, \ts, \tsize)@l
	mtspr	MAS1, \scratch
	lis	\scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
	mtspr	MAS2, \scratch
	lis	\scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@l
	mtspr	MAS3, \scratch
	lis	\scratch, \phy_high@h
	ori	\scratch, \scratch, \phy_high@l
	mtspr	MAS7, \scratch
	isync
	msync
	tlbwe
	isync
	.endm

	.macro	create_tlb0_entry esel ts tsize epn wimg rpn perm phy_high scratch
	lis	\scratch, FSL_BOOKE_MAS0(0, \esel, 0)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@l
	mtspr	MAS0, \scratch
	lis	\scratch, FSL_BOOKE_MAS1(1, 0, 0, \ts, \tsize)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS1(1, 0, 0, \ts, \tsize)@l
	mtspr	MAS1, \scratch
	lis	\scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
	mtspr	MAS2, \scratch
	lis	\scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@l
	mtspr	MAS3, \scratch
	lis	\scratch, \phy_high@h
	ori	\scratch, \scratch, \phy_high@l
	mtspr	MAS7, \scratch
	isync
	msync
	tlbwe
	isync
	.endm

	.macro	delete_tlb1_entry esel scratch
	lis	\scratch, FSL_BOOKE_MAS0(1, \esel, 0)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@l
	mtspr	MAS0, \scratch
	li	\scratch, 0
	mtspr	MAS1, \scratch
	isync
	msync
	tlbwe
	isync
	.endm

	.macro	delete_tlb0_entry esel epn wimg scratch
	lis	\scratch, FSL_BOOKE_MAS0(0, \esel, 0)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@l
	mtspr	MAS0, \scratch
	li	\scratch, 0
	mtspr	MAS1, \scratch
	lis	\scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
	mtspr	MAS2, \scratch
	isync
	msync
	tlbwe
	isync
	.endm

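	/*
	 * Illustrative use of the macros above (not assembled here, values
	 * are hypothetical): mapping a 1M region at 0xf0000000 via TLB1
	 * entry 2 would be written as
	 *
	 *	create_tlb1_entry 2, \
	 *		0, BOOKE_PAGESZ_1M, \
	 *		0xf0000000, MAS2_I|MAS2_G, \
	 *		0xf0000000, MAS3_SX|MAS3_SW|MAS3_SR, \
	 *		0, r6
	 *
	 * i.e. MAS0 selects TLB1 and the ESEL, MAS1 marks the entry valid
	 * (and, for create_tlb1_entry, IPROT-protected) with the given
	 * TS/TSIZE, MAS2/MAS3 carry the EPN/RPN plus WIMG and permission
	 * bits, and MAS7 holds the upper physical address bits used for
	 * 36-bit physical addressing.
	 */
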
/* Interrupt vectors do not fit in minimal SPL. */
#if !defined(MINIMAL_SPL)
	/* Setup interrupt vectors */
	lis	r1,CONFIG_SYS_MONITOR_BASE@h
	mtspr	IVPR,r1

	lis	r3,(CONFIG_SYS_MONITOR_BASE & 0xffff)@h
	ori	r3,r3,(CONFIG_SYS_MONITOR_BASE & 0xffff)@l

	addi	r4,r3,CriticalInput - _start + _START_OFFSET
	mtspr	IVOR0,r4	/* 0: Critical input */
	addi	r4,r3,MachineCheck - _start + _START_OFFSET
	mtspr	IVOR1,r4	/* 1: Machine check */
	addi	r4,r3,DataStorage - _start + _START_OFFSET
	mtspr	IVOR2,r4	/* 2: Data storage */
	addi	r4,r3,InstStorage - _start + _START_OFFSET
	mtspr	IVOR3,r4	/* 3: Instruction storage */
	addi	r4,r3,ExtInterrupt - _start + _START_OFFSET
	mtspr	IVOR4,r4	/* 4: External interrupt */
	addi	r4,r3,Alignment - _start + _START_OFFSET
	mtspr	IVOR5,r4	/* 5: Alignment */
	addi	r4,r3,ProgramCheck - _start + _START_OFFSET
	mtspr	IVOR6,r4	/* 6: Program check */
	addi	r4,r3,FPUnavailable - _start + _START_OFFSET
	mtspr	IVOR7,r4	/* 7: floating point unavailable */
	addi	r4,r3,SystemCall - _start + _START_OFFSET
	mtspr	IVOR8,r4	/* 8: System call */
	/* 9: Auxiliary processor unavailable(unsupported) */
	addi	r4,r3,Decrementer - _start + _START_OFFSET
	mtspr	IVOR10,r4	/* 10: Decrementer */
	addi	r4,r3,IntervalTimer - _start + _START_OFFSET
	mtspr	IVOR11,r4	/* 11: Interval timer */
	addi	r4,r3,WatchdogTimer - _start + _START_OFFSET
	mtspr	IVOR12,r4	/* 12: Watchdog timer */
	addi	r4,r3,DataTLBError - _start + _START_OFFSET
	mtspr	IVOR13,r4	/* 13: Data TLB error */
	addi	r4,r3,InstructionTLBError - _start + _START_OFFSET
	mtspr	IVOR14,r4	/* 14: Instruction TLB error */
	addi	r4,r3,DebugBreakpoint - _start + _START_OFFSET
	mtspr	IVOR15,r4	/* 15: Debug */
#endif

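	/*
	 * Note (informational): IVPR supplies the upper half of the vector
	 * base address (loaded above from CONFIG_SYS_MONITOR_BASE@h), while
	 * each IVORn only holds a small offset built here from the low half
	 * of CONFIG_SYS_MONITOR_BASE plus the handler's offset from _start.
	 * The handlers therefore have to live within the 64K region that
	 * IVPR points at.
	 */
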
	/* Clear and set up some registers. */
	li	r0,0x0000
	lis	r1,0xffff
	mtspr	DEC,r0			/* prevent dec exceptions */
	mttbl	r0			/* prevent fit & wdt exceptions */
	mttbu	r0
	mtspr	TSR,r1			/* clear all timer exception status */
	mtspr	TCR,r0			/* disable all */
	mtspr	ESR,r0			/* clear exception syndrome register */
	mtspr	MCSR,r0			/* machine check syndrome register */
	mtxer	r0			/* clear integer exception register */

#ifdef CONFIG_SYS_BOOK3E_HV
	mtspr	MAS8,r0			/* make sure MAS8 is clear */
#endif

	/* Enable Time Base and Select Time Base Clock */
	lis	r0,HID0_EMCP@h		/* Enable machine check */
#if defined(CONFIG_ENABLE_36BIT_PHYS)
	ori	r0,r0,HID0_ENMAS7@l	/* Enable MAS7 */
#endif
#ifndef CONFIG_E500MC
	ori	r0,r0,HID0_TBEN@l	/* Enable Timebase */
#endif
	mtspr	HID0,r0

#ifndef CONFIG_E500MC
	li	r0,(HID1_ASTME|HID1_ABE)@l	/* Addr streaming & broadcast */
	mfspr	r3,PVR
	andi.	r3,r3, 0xff
	cmpwi	r3,0x50@l	/* if we are rev 5.0 or greater set MBDD */
	blt	1f
	/* Set MBDD bit also */
	ori	r0, r0, HID1_MBDD@l
1:
	mtspr	HID1,r0
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_CPU_A003999
	mfspr	r3,SPRN_HDBCR1
	oris	r3,r3,0x0100
	mtspr	SPRN_HDBCR1,r3
#endif

	/* Enable Branch Prediction */
#if defined(CONFIG_BTB)
	lis	r0,BUCSR_ENABLE@h
	ori	r0,r0,BUCSR_ENABLE@l
	mtspr	SPRN_BUCSR,r0
#endif

#if defined(CONFIG_SYS_INIT_DBCR)
	lis	r1,0xffff
	ori	r1,r1,0xffff
	mtspr	DBSR,r1			/* Clear all status bits */
	lis	r0,CONFIG_SYS_INIT_DBCR@h	/* DBCR0[IDM] must be set */
	ori	r0,r0,CONFIG_SYS_INIT_DBCR@l
	mtspr	DBCR0,r0
#endif

#ifdef CONFIG_MPC8569
#define CONFIG_SYS_LBC_ADDR (CONFIG_SYS_CCSRBAR_DEFAULT + 0x5000)
#define CONFIG_SYS_LBCR_ADDR (CONFIG_SYS_LBC_ADDR + 0xd0)

	/* MPC8569 Rev.0 silicon needs to set bit 13 of LBCR to allow elBC to
	 * use address space which is more than 12 bits, and it must be done in
	 * the 4K boot page. So we set this bit here.
	 */

	/* create a temp mapping TLB0[0] for LBCR */
	create_tlb0_entry 0, \
		0, BOOKE_PAGESZ_4K, \
		CONFIG_SYS_LBC_ADDR, MAS2_I|MAS2_G, \
		CONFIG_SYS_LBC_ADDR, MAS3_SW|MAS3_SR, \
		0, r6

	/* Set LBCR register */
	lis	r4,CONFIG_SYS_LBCR_ADDR@h
	ori	r4,r4,CONFIG_SYS_LBCR_ADDR@l

	lis	r5,CONFIG_SYS_LBC_LBCR@h
	ori	r5,r5,CONFIG_SYS_LBC_LBCR@l
	stw	r5,0(r4)
	isync

	/* invalidate this temp TLB */
	lis	r4,CONFIG_SYS_LBC_ADDR@h
	ori	r4,r4,CONFIG_SYS_LBC_ADDR@l
	tlbivax	0,r4
	isync

#endif /* CONFIG_MPC8569 */

/*
 * Search for the TLB that covers the code we're executing, and shrink it
 * so that it covers only this 4K page.  That will ensure that any other
 * TLB we create won't interfere with it.  We assume that the TLB exists,
 * which is why we don't check the Valid bit of MAS1.  We also assume
 * it is in TLB1.
 *
 * This is necessary, for example, when booting from the on-chip ROM,
 * which (oddly) creates a single 4GB TLB that covers CCSR and DDR.
 */
	bl	nexti		/* Find our address */
nexti:	mflr	r1		/* R1 = our PC */
	li	r2, 0
	mtspr	MAS6, r2	/* Assume the current PID and AS are 0 */
	isync
	msync
	tlbsx	0, r1		/* This must succeed */

	mfspr	r14, MAS0	/* Save ESEL for later */
	rlwinm	r14, r14, 16, 0xfff

	/* Set the size of the TLB to 4KB */
	mfspr	r3, MAS1
	li	r2, 0xF80
	andc	r3, r3, r2	/* Clear the TSIZE bits */
	ori	r3, r3, MAS1_TSIZE(BOOKE_PAGESZ_4K)@l
	oris	r3, r3, MAS1_IPROT@h
	mtspr	MAS1, r3

	/*
	 * Set the base address of the TLB to our PC.  We assume that
	 * virtual == physical.  We also assume that MAS2_EPN == MAS3_RPN.
	 */
	lis	r3, MAS2_EPN@h
	ori	r3, r3, MAS2_EPN@l	/* R3 = MAS2_EPN */

	and	r1, r1, r3	/* Our PC, rounded down to the nearest page */

	mfspr	r2, MAS2
	andc	r2, r2, r3
	or	r2, r2, r1
#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
	cmpwi	r27,0
	beq	1f
	andi.	r15, r2, MAS2_I|MAS2_G	/* save the old I/G for later */
	rlwinm	r2, r2, 0, ~MAS2_I
	ori	r2, r2, MAS2_G
1:
#endif
	mtspr	MAS2, r2	/* Set the EPN to our PC base address */

	mfspr	r2, MAS3
	andc	r2, r2, r3
	or	r2, r2, r1
	mtspr	MAS3, r2	/* Set the RPN to our PC base address */

	isync
	msync
	tlbwe

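	/*
	 * Worked example of the sequence above (illustrative numbers only):
	 * if the tlbsx hit while we were executing at 0xfffff124, masking
	 * the PC with MAS2_EPN rounds it down to 0xfffff000, so the
	 * rewritten (IPROT'ed) entry now translates exactly the 4K page
	 * 0xfffff000-0xffffffff, with EPN == RPN as assumed above.
	 */
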
/*
 * Clear out any other TLB entries that may exist, to avoid conflicts.
 * Our TLB entry is in r14.
 */
	li	r0, TLBIVAX_ALL | TLBIVAX_TLB0
	tlbivax	0, r0
	tlbsync

	mfspr	r4, SPRN_TLB1CFG
	rlwinm	r4, r4, 0, TLBnCFG_NENTRY_MASK

	li	r3, 0
	mtspr	MAS1, r3
1:	cmpw	r3, r14
	rlwinm	r5, r3, 16, MAS0_ESEL_MSK
	addi	r3, r3, 1
	beq	2f		/* skip the entry we're executing from */

	oris	r5, r5, MAS0_TLBSEL(1)@h
	mtspr	MAS0, r5

	isync
	tlbwe
	isync
	msync

2:	cmpw	r3, r4
	blt	1b

#if defined(CONFIG_SYS_PPC_E500_DEBUG_TLB) && !defined(MINIMAL_SPL) && \
	!defined(CONFIG_SECURE_BOOT)
/*
 * TLB entry for debugging in AS1
 * Create a temporary TLB entry in AS0 to handle debug exceptions.
 * MSR is cleared on a debug exception, i.e. the address space changes
 * to 0, so a TLB entry in AS0 is required to handle debug exceptions
 * generated in AS1.
 */

#ifdef NOR_BOOT
/*
 * TLB entry is created for IVPR + IVOR15 to map on valid OP code address
 * because flash's virtual address maps to 0xff800000 - 0xffffffff,
 * and this window is outside of the 4K boot window.
 */
	create_tlb1_entry CONFIG_SYS_PPC_E500_DEBUG_TLB, \
		0, BOOKE_PAGESZ_4M, \
		CONFIG_SYS_MONITOR_BASE & 0xffc00000, MAS2_I|MAS2_G, \
		0xffc00000, MAS3_SX|MAS3_SW|MAS3_SR, \
		0, r6

#else
/*
 * TLB entry is created for IVPR + IVOR15 to map on valid OP code address
 * because "nexti" will resize TLB to 4K
 */
	create_tlb1_entry CONFIG_SYS_PPC_E500_DEBUG_TLB, \
		0, BOOKE_PAGESZ_256K, \
		CONFIG_SYS_MONITOR_BASE & 0xfffc0000, MAS2_I, \
		CONFIG_SYS_MONITOR_BASE & 0xfffc0000, MAS3_SX|MAS3_SW|MAS3_SR, \
		0, r6
#endif
#endif

/*
 * Relocate CCSR, if necessary.  We relocate CCSR if (obviously) the default
 * location is not where we want it.  This typically happens on a 36-bit
 * system, where we want to move CCSR to near the top of 36-bit address space.
 *
 * To move CCSR, we create two temporary TLBs, one for the old location, and
 * another for the new location.  On CoreNet systems, we also need to create
 * a special, temporary LAW.
 *
 * As a general rule, TLB0 is used for short-term TLBs, and TLB1 is used for
 * long-term TLBs, so we use TLB0 here.
 */
#if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS)

#if !defined(CONFIG_SYS_CCSRBAR_PHYS_HIGH) || !defined(CONFIG_SYS_CCSRBAR_PHYS_LOW)
#error "CONFIG_SYS_CCSRBAR_PHYS_HIGH and CONFIG_SYS_CCSRBAR_PHYS_LOW must be defined."
#endif

create_ccsr_new_tlb:
	/*
	 * Create a TLB for the new location of CCSR.  Register R8 is reserved
	 * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR).
	 */
	lis	r8, CONFIG_SYS_CCSRBAR@h
	ori	r8, r8, CONFIG_SYS_CCSRBAR@l
	lis	r9, (CONFIG_SYS_CCSRBAR + 0x1000)@h
	ori	r9, r9, (CONFIG_SYS_CCSRBAR + 0x1000)@l
	create_tlb0_entry 0, \
		0, BOOKE_PAGESZ_4K, \
		CONFIG_SYS_CCSRBAR, MAS2_I|MAS2_G, \
		CONFIG_SYS_CCSRBAR_PHYS_LOW, MAS3_SW|MAS3_SR, \
		CONFIG_SYS_CCSRBAR_PHYS_HIGH, r3
	/*
	 * Create a TLB for the current location of CCSR.  Register R9 is reserved
	 * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR + 0x1000).
	 */
create_ccsr_old_tlb:
	create_tlb0_entry 1, \
		0, BOOKE_PAGESZ_4K, \
		CONFIG_SYS_CCSRBAR + 0x1000, MAS2_I|MAS2_G, \
		CONFIG_SYS_CCSRBAR_DEFAULT, MAS3_SW|MAS3_SR, \
		0, r3	/* The default CCSR address is always a 32-bit number */

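	/*
	 * Illustration only (example values, not required by this code): on
	 * a typical 36-bit setup with CONFIG_SYS_CCSRBAR_PHYS_HIGH = 0xf and
	 * CONFIG_SYS_CCSRBAR_PHYS_LOW = 0xfe000000, the two temporary 4K
	 * TLB0 entries created above map
	 *	CONFIG_SYS_CCSRBAR          -> 0xf_fe000000 (new location)
	 *	CONFIG_SYS_CCSRBAR + 0x1000 -> CONFIG_SYS_CCSRBAR_DEFAULT (old)
	 * so both the old and the new register windows stay reachable while
	 * CCSR is moved.
	 */
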
	/*
	 * We have a TLB for what we think is the current (old) CCSR.  Let's
	 * verify that, otherwise we won't be able to move it.
	 * CONFIG_SYS_CCSRBAR_DEFAULT is always a 32-bit number, so we only
	 * need to compare the lower 32 bits of CCSRBAR on CoreNet systems.
	 */
verify_old_ccsr:
	lis	r0, CONFIG_SYS_CCSRBAR_DEFAULT@h
	ori	r0, r0, CONFIG_SYS_CCSRBAR_DEFAULT@l
#ifdef CONFIG_FSL_CORENET
	lwz	r1, 4(r9)		/* CCSRBARL */
#else
	lwz	r1, 0(r9)		/* CCSRBAR, shifted right by 12 */
	slwi	r1, r1, 12
#endif

	cmpl	0, r0, r1

	/*
	 * If the value we read from CCSRBARL is not what we expect, then
	 * enter an infinite loop.  This will at least allow a debugger to
	 * halt execution and examine TLBs, etc.  There's no point in going
	 * on.
	 */
infinite_debug_loop:
	bne	infinite_debug_loop

#ifdef CONFIG_FSL_CORENET

#define CCSR_LAWBARH0	(CONFIG_SYS_CCSRBAR + 0x1000)
#define LAW_SIZE_4K	0xb
#define CCSRBAR_LAWAR	(LAW_EN | (0x1e << 20) | LAW_SIZE_4K)
#define CCSRAR_C	0x80000000	/* Commit */

create_temp_law:
	/*
	 * On CoreNet systems, we create the temporary LAW using a special LAW
	 * target ID of 0x1e.  LAWBARH is at offset 0xc00 in CCSR.
	 */
	lis	r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
	ori	r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
	lis	r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h
	ori	r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l
	lis	r2, CCSRBAR_LAWAR@h
	ori	r2, r2, CCSRBAR_LAWAR@l

	stw	r0, 0xc00(r9)	/* LAWBARH0 */
	stw	r1, 0xc04(r9)	/* LAWBARL0 */
	sync
	stw	r2, 0xc08(r9)	/* LAWAR0 */

	/*
	 * Read back from LAWAR to ensure the update is complete.  e500mc
	 * cores also require an isync.
	 */
	lwz	r0, 0xc08(r9)	/* LAWAR0 */
	isync

	/*
	 * Read the current CCSRBARH and CCSRBARL using load word instructions.
	 * Follow this with an isync instruction. This forces any outstanding
	 * accesses to configuration space to completion.
	 */
read_old_ccsrbar:
	lwz	r0, 0(r9)	/* CCSRBARH */
	lwz	r0, 4(r9)	/* CCSRBARL */
	isync

	/*
	 * Write the new values for CCSRBARH and CCSRBARL to their old
	 * locations.  CCSRBARH has a shadow register: when CCSRBARH is
	 * written, its value is loaded into the shadow register.  When
	 * CCSRBARL is then written, the CCSRBARH shadow register contents
	 * along with the CCSRBARL value are loaded into the CCSRBARH and
	 * CCSRBARL registers, respectively.  Follow this with a sync
	 * instruction.
	 */
write_new_ccsrbar:
	lis	r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
	ori	r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
	lis	r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h
	ori	r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l
	lis	r2, CCSRAR_C@h
	ori	r2, r2, CCSRAR_C@l

	stw	r0, 0(r9)	/* Write to CCSRBARH */
	sync			/* Make sure we write to CCSRBARH first */
	stw	r1, 4(r9)	/* Write to CCSRBARL */
	sync

	/*
	 * Write a 1 to the commit bit (C) of CCSRAR at the old location.
	 * Follow this with a sync instruction.
	 */
	stw	r2, 8(r9)
	sync

	/* Delete the temporary LAW */
delete_temp_law:
	li	r1, 0
	stw	r1, 0xc08(r8)
	sync
	stw	r1, 0xc00(r8)
	stw	r1, 0xc04(r8)
	sync

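	/*
	 * For reference: the temporary LAW deleted above was programmed with
	 * CCSRBAR_LAWAR = LAW_EN | (0x1e << 20) | LAW_SIZE_4K, which works
	 * out to 0x81e0000b, i.e. enable bit set, LAW target ID 0x1e, and a
	 * 4K window size.  It is no longer needed once CCSR responds at its
	 * new physical address.
	 */
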
#else /* #ifdef CONFIG_FSL_CORENET */

write_new_ccsrbar:
	/*
	 * Read the current value of CCSRBAR using a load word instruction
	 * followed by an isync. This forces all accesses to configuration
	 * space to complete.
	 */
	sync
	lwz	r0, 0(r9)
	isync

/* CONFIG_SYS_CCSRBAR_PHYS right shifted by 12 */
#define CCSRBAR_PHYS_RS12 ((CONFIG_SYS_CCSRBAR_PHYS_HIGH << 20) | \
			   (CONFIG_SYS_CCSRBAR_PHYS_LOW >> 12))

	/* Write the new value to CCSRBAR. */
	lis	r0, CCSRBAR_PHYS_RS12@h
	ori	r0, r0, CCSRBAR_PHYS_RS12@l
	stw	r0, 0(r9)
	sync

	/*
	 * The manual says to perform a load of an address that does not
	 * access configuration space or the on-chip SRAM using an existing TLB,
	 * but that doesn't appear to be necessary.  We will do the isync,
	 * though.
	 */
	isync

	/*
	 * Read the contents of CCSRBAR from its new location, followed by
	 * another isync.
	 */
	lwz	r0, 0(r8)
	isync

#endif /* #ifdef CONFIG_FSL_CORENET */

	/* Delete the temporary TLBs */
delete_temp_tlbs:
	delete_tlb0_entry 0, CONFIG_SYS_CCSRBAR, MAS2_I|MAS2_G, r3
	delete_tlb0_entry 1, CONFIG_SYS_CCSRBAR + 0x1000, MAS2_I|MAS2_G, r3

#endif /* #if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS) */

#if defined(CONFIG_SYS_FSL_QORIQ_CHASSIS2) && defined(CONFIG_E6500)
create_ccsr_l2_tlb:
	/*
	 * Create a TLB for the MMR location of CCSR
	 * to access L2CSR0 register
	 */
	create_tlb0_entry 0, \
		0, BOOKE_PAGESZ_4K, \
		CONFIG_SYS_CCSRBAR + 0xC20000, MAS2_I|MAS2_G, \
		CONFIG_SYS_CCSRBAR_PHYS_LOW + 0xC20000, MAS3_SW|MAS3_SR, \
		CONFIG_SYS_CCSRBAR_PHYS_HIGH, r3

enable_l2_cluster_l2:
	/* enable L2 cache */
	lis	r3, (CONFIG_SYS_CCSRBAR + 0xC20000)@h
	ori	r3, r3, (CONFIG_SYS_CCSRBAR + 0xC20000)@l
	li	r4, 33	/* stash id */
	stw	r4, 4(r3)
	lis	r4, (L2CSR0_L2FI|L2CSR0_L2LFC)@h
	ori	r4, r4, (L2CSR0_L2FI|L2CSR0_L2LFC)@l
	sync
	stw	r4, 0(r3)	/* invalidate L2 */
1:	sync
	lwz	r0, 0(r3)
	twi	0, r0, 0
	isync
	and.	r1, r0, r4
	bne	1b
	lis	r4, (L2CSR0_L2E|L2CSR0_L2PE)@h
	ori	r4, r4, (L2CSR0_L2REP_MODE)@l
	sync
	stw	r4, 0(r3)	/* enable L2 */
delete_ccsr_l2_tlb:
	delete_tlb0_entry 0, CONFIG_SYS_CCSRBAR + 0xC20000, MAS2_I|MAS2_G, r3
#endif

	/*
	 * Enable the L1. On e6500, this has to be done
	 * after the L2 is up.
	 */

#ifdef CONFIG_SYS_CACHE_STASHING
	/* set stash id to (coreID) * 2 + 32 + L1 CT (0) */
	li	r2,(32 + 0)
	mtspr	L1CSR2,r2
#endif

	/* Enable/invalidate the I-Cache */
	lis	r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
	ori	r2,r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
	mtspr	SPRN_L1CSR1,r2
1:
	mfspr	r3,SPRN_L1CSR1
	and.	r1,r3,r2
	bne	1b

	lis	r3,(L1CSR1_CPE|L1CSR1_ICE)@h
	ori	r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
	mtspr	SPRN_L1CSR1,r3
	isync
2:
	mfspr	r3,SPRN_L1CSR1
	andi.	r1,r3,L1CSR1_ICE@l
	beq	2b

	/* Enable/invalidate the D-Cache */
	lis	r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@h
	ori	r2,r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@l
	mtspr	SPRN_L1CSR0,r2
1:
	mfspr	r3,SPRN_L1CSR0
	and.	r1,r3,r2
	bne	1b

	lis	r3,(L1CSR0_CPE|L1CSR0_DCE)@h
	ori	r3,r3,(L1CSR0_CPE|L1CSR0_DCE)@l
	mtspr	SPRN_L1CSR0,r3
	isync
2:
	mfspr	r3,SPRN_L1CSR0
	andi.	r1,r3,L1CSR0_DCE@l
	beq	2b
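
	/*
	 * Note (informational): both caches above are brought up with the
	 * same pattern: write the flash-invalidate/lock-clear bits and poll
	 * until the hardware clears them, then set the parity and enable
	 * bits and poll until the enable bit reads back as set.
	 */
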
#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
#define DCSR_LAWBARH0	(CONFIG_SYS_CCSRBAR + 0x1000)
#define LAW_SIZE_1M	0x13
#define DCSRBAR_LAWAR	(LAW_EN | (0x1d << 20) | LAW_SIZE_1M)

	cmpwi	r27,0
	beq	9f

	/*
	 * Create a TLB entry for CCSR
	 *
	 * We're executing out of TLB1 entry in r14, and that's the only
	 * TLB entry that exists.  To allocate some TLB entries for our
	 * own use, flip a bit high enough that we won't flip it again
	 * via incrementing.
	 */

	xori	r8, r14, 32
	lis	r0, MAS0_TLBSEL(1)@h
	rlwimi	r0, r8, 16, MAS0_ESEL_MSK
	lis	r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@h
	ori	r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@l
	lis	r7, CONFIG_SYS_CCSRBAR@h
	ori	r7, r7, CONFIG_SYS_CCSRBAR@l
	ori	r2, r7, MAS2_I|MAS2_G
	lis	r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@h
	ori	r3, r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@l
	lis	r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
	ori	r4, r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
	mtspr	MAS0, r0
	mtspr	MAS1, r1
	mtspr	MAS2, r2
	mtspr	MAS3, r3
	mtspr	MAS7, r4
	isync
	tlbwe
	isync
	msync

	/* Map DCSR temporarily to physical address zero */
	li	r0, 0
	lis	r3, DCSRBAR_LAWAR@h
	ori	r3, r3, DCSRBAR_LAWAR@l

	stw	r0, 0xc00(r7)	/* LAWBARH0 */
	stw	r0, 0xc04(r7)	/* LAWBARL0 */
	sync
	stw	r3, 0xc08(r7)	/* LAWAR0 */

	/* Read back from LAWAR to ensure the update is complete. */
	lwz	r3, 0xc08(r7)	/* LAWAR0 */
	isync

	/* Create a TLB entry for DCSR at zero */

	addi	r9, r8, 1
	lis	r0, MAS0_TLBSEL(1)@h
	rlwimi	r0, r9, 16, MAS0_ESEL_MSK
	lis	r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@h
	ori	r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@l
	li	r6, 0	/* DCSR effective address */
	ori	r2, r6, MAS2_I|MAS2_G
	li	r3, MAS3_SW|MAS3_SR
	li	r4, 0
	mtspr	MAS0, r0
	mtspr	MAS1, r1
	mtspr	MAS2, r2
	mtspr	MAS3, r3
	mtspr	MAS7, r4
	isync
	tlbwe
	isync
	msync

	/* enable the timebase */
#define CTBENR	0xe2084
	li	r3, 1
	addis	r4, r7, CTBENR@ha
	stw	r3, CTBENR@l(r4)
	lwz	r3, CTBENR@l(r4)
	twi	0,r3,0
	isync

	.macro	erratum_set_ccsr offset value
	addis	r3, r7, \offset@ha
	lis	r4, \value@h
	addi	r3, r3, \offset@l
	ori	r4, r4, \value@l
	bl	erratum_set_value
	.endm

	.macro	erratum_set_dcsr offset value
	addis	r3, r6, \offset@ha
	lis	r4, \value@h
	addi	r3, r3, \offset@l
	ori	r4, r4, \value@l
	bl	erratum_set_value
	.endm

	erratum_set_dcsr 0xb0e08 0xe0201800
	erratum_set_dcsr 0xb0e18 0xe0201800
	erratum_set_dcsr 0xb0e38 0xe0400000
	erratum_set_dcsr 0xb0008 0x00900000
	erratum_set_dcsr 0xb0e40 0xe00a0000
	erratum_set_ccsr 0x18600 CONFIG_SYS_FSL_CORENET_SNOOPVEC_COREONLY
#ifdef CONFIG_RAMBOOT_PBL
	erratum_set_ccsr 0x10f00 0x495e5000
#else
	erratum_set_ccsr 0x10f00 0x415e5000
#endif
	erratum_set_ccsr 0x11f00 0x415e5000

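	/*
	 * The DCSR/CCSR offsets and values above are the register settings
	 * called for by the A004510 workaround; each one is written through
	 * erratum_set_value (below) so that the store is issued from locked
	 * I-cache lines with no instruction fetches outside the locked area.
	 */
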
	/* Make temp mapping uncacheable again, if it was initially */
	bl	2f
2:	mflr	r3
	tlbsx	0, r3
	mfspr	r4, MAS2
	rlwimi	r4, r15, 0, MAS2_I
	rlwimi	r4, r15, 0, MAS2_G
	mtspr	MAS2, r4
	isync
	tlbwe
	isync
	msync

	/* Clear the cache */
	lis	r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
	ori	r3,r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
	sync
	isync
	mtspr	SPRN_L1CSR1,r3
	isync
2:	sync
	mfspr	r4,SPRN_L1CSR1
	and.	r4,r4,r3
	bne	2b

	lis	r3,(L1CSR1_CPE|L1CSR1_ICE)@h
	ori	r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
	sync
	isync
	mtspr	SPRN_L1CSR1,r3
	isync
2:	sync
	mfspr	r4,SPRN_L1CSR1
	and.	r4,r4,r3
	beq	2b

	/* Remove temporary mappings */
	lis	r0, MAS0_TLBSEL(1)@h
	rlwimi	r0, r9, 16, MAS0_ESEL_MSK
	li	r3, 0
	mtspr	MAS0, r0
	mtspr	MAS1, r3
	isync
	tlbwe
	isync
	msync

	li	r3, 0
	stw	r3, 0xc08(r7)	/* LAWAR0 */
	lwz	r3, 0xc08(r7)
	isync

	lis	r0, MAS0_TLBSEL(1)@h
	rlwimi	r0, r8, 16, MAS0_ESEL_MSK
	li	r3, 0
	mtspr	MAS0, r0
	mtspr	MAS1, r3
	isync
	tlbwe
	isync
	msync

	b	9f

	/* r3 = addr, r4 = value, clobbers r5, r11, r12 */
erratum_set_value:
	/* Lock two cache lines into I-Cache */
	sync
	mfspr	r11, SPRN_L1CSR1
	rlwinm	r11, r11, 0, ~L1CSR1_ICUL
	sync
	isync
	mtspr	SPRN_L1CSR1, r11
	isync

	mflr	r12
	bl	5f
5:	mflr	r5
	addi	r5, r5, 2f - 5b
	icbtls	0, 0, r5
	addi	r5, r5, 64

	sync
	mfspr	r11, SPRN_L1CSR1
3:	andi.	r11, r11, L1CSR1_ICUL
	bne	3b

	icbtls	0, 0, r5
	addi	r5, r5, 64

	sync
	mfspr	r11, SPRN_L1CSR1
3:	andi.	r11, r11, L1CSR1_ICUL
	bne	3b

	b	2f
	.align	6
	/* Inside a locked cacheline, wait a while, write, then wait a while */
2:	sync

	mfspr	r5, SPRN_TBRL
	addis	r11, r5, 0x10000@h /* wait 65536 timebase ticks */
4:	mfspr	r5, SPRN_TBRL
	subf.	r5, r5, r11
	bgt	4b

	stw	r4, 0(r3)

	mfspr	r5, SPRN_TBRL
	addis	r11, r5, 0x10000@h /* wait 65536 timebase ticks */
4:	mfspr	r5, SPRN_TBRL
	subf.	r5, r5, r11
	bgt	4b

	sync

	/*
	 * Fill out the rest of this cache line and the next with nops,
	 * to ensure that nothing outside the locked area will be
	 * fetched due to a branch.
	 */
	.rept 19
	nop
	.endr

	sync
	mfspr	r11, SPRN_L1CSR1
	rlwinm	r11, r11, 0, ~L1CSR1_ICUL
	sync
	isync
	mtspr	SPRN_L1CSR1, r11
	isync

	mtlr	r12
	blr

9:
#endif

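	/*
	 * The mappings created below are all in address space 1 (TS=1).
	 * This lets us set up the boot-window and initial-RAM-stack views
	 * without disturbing the AS=0 translation we are currently
	 * executing from; the rfi to switch_as further down sets MSR[IS/DS]
	 * to 1 so both new views take effect together.
	 */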
create_init_ram_area:
	lis	r6,FSL_BOOKE_MAS0(1, 15, 0)@h
	ori	r6,r6,FSL_BOOKE_MAS0(1, 15, 0)@l

#ifdef NOR_BOOT
	/* create a temp mapping in AS=1 to the 4M boot window */
	create_tlb1_entry 15, \
		1, BOOKE_PAGESZ_4M, \
		CONFIG_SYS_MONITOR_BASE & 0xffc00000, MAS2_I|MAS2_G, \
		0xffc00000, MAS3_SX|MAS3_SW|MAS3_SR, \
		0, r6

#elif !defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_SECURE_BOOT)
	/* create a temp mapping in AS = 1 for Flash mapping
	 * created by PBL for ISBC code
	 */
	create_tlb1_entry 15, \
		1, BOOKE_PAGESZ_1M, \
		CONFIG_SYS_MONITOR_BASE & 0xfff00000, MAS2_I|MAS2_G, \
		CONFIG_SYS_PBI_FLASH_WINDOW & 0xfff00000, MAS3_SX|MAS3_SW|MAS3_SR, \
		0, r6
#else
	/*
	 * create a temp mapping in AS=1 to the 1M CONFIG_SYS_MONITOR_BASE space;
	 * the main image has been relocated to CONFIG_SYS_MONITOR_BASE on the
	 * second stage.
	 */
	create_tlb1_entry 15, \
		1, BOOKE_PAGESZ_1M, \
		CONFIG_SYS_MONITOR_BASE & 0xfff00000, MAS2_I|MAS2_G, \
		CONFIG_SYS_MONITOR_BASE & 0xfff00000, MAS3_SX|MAS3_SW|MAS3_SR, \
		0, r6
#endif

	/* create a temp mapping in AS=1 to the stack */
#if defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW) && \
    defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH)
	create_tlb1_entry 14, \
		1, BOOKE_PAGESZ_16K, \
		CONFIG_SYS_INIT_RAM_ADDR, 0, \
		CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW, MAS3_SX|MAS3_SW|MAS3_SR, \
		CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH, r6

#else
	create_tlb1_entry 14, \
		1, BOOKE_PAGESZ_16K, \
		CONFIG_SYS_INIT_RAM_ADDR, 0, \
		CONFIG_SYS_INIT_RAM_ADDR, MAS3_SX|MAS3_SW|MAS3_SR, \
		0, r6
#endif

	lis	r6,MSR_IS|MSR_DS|MSR_DE@h
	ori	r6,r6,MSR_IS|MSR_DS|MSR_DE@l
	lis	r7,switch_as@h
	ori	r7,r7,switch_as@l

	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r6
	rfi

switch_as:
/* L1 DCache is used for initial RAM */

	/* Allocate Initial RAM in data cache.
	 */
	lis	r3,CONFIG_SYS_INIT_RAM_ADDR@h
	ori	r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l
	mfspr	r2, L1CFG0
	andi.	r2, r2, 0x1ff
	/* cache size * 1024 / (2 * L1 line size) */
	slwi	r2, r2, (10 - 1 - L1_CACHE_SHIFT)
	mtctr	r2
	li	r0,0
1:
	dcbz	r0,r3
	dcbtls	0,r0,r3
	addi	r3,r3,CONFIG_SYS_CACHELINE_SIZE
	bdnz	1b

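	/*
	 * Worked example of the loop count above (illustrative numbers):
	 * L1CFG0[CSIZE] reports the cache size in KiB, so for a 32 KiB
	 * D-cache with 64-byte lines (L1_CACHE_SHIFT = 6) the shift gives
	 * 32 << 3 = 256 iterations, i.e. 256 * 64 = 16 KiB of lines zeroed
	 * and locked as initial RAM.
	 */
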
	/* Jump out of the last 4K page and continue to 'normal' start */
#if defined(CONFIG_SYS_RAMBOOT) || defined(CONFIG_SPL)
	/* We assume that we're already running at the address we're linked at */
	b	_start_cont
#else
	/* Calculate absolute address in FLASH and jump there		*/
	/*--------------------------------------------------------------*/
	lis	r3,CONFIG_SYS_MONITOR_BASE@h
	ori	r3,r3,CONFIG_SYS_MONITOR_BASE@l
	addi	r3,r3,_start_cont - _start + _START_OFFSET
	mtlr	r3
	blr
#endif

	.text
	.globl	_start
_start:
	.long	0x27051956		/* U-BOOT Magic Number */
	.globl	version_string
version_string:
	.ascii U_BOOT_VERSION_STRING, "\0"

	.align	4
	.globl	_start_cont
_start_cont:
	/* Setup the stack in initial RAM, could be L2-as-SRAM or L1 dcache */
	lis	r3,(CONFIG_SYS_INIT_RAM_ADDR)@h
	ori	r3,r3,((CONFIG_SYS_INIT_SP_OFFSET-16)&~0xf)@l /* Align to 16 */
	li	r0,0
	stw	r0,0(r3)	/* Terminate Back Chain */
	stw	r0,+4(r3)	/* NULL return address. */
	mr	r1,r3		/* Transfer to SP(r1) */

	GET_GOT

	/* Pass our potential ePAPR device tree pointer to cpu_init_early_f */
	mr	r3, r24

	bl	cpu_init_early_f

	/* switch back to AS = 0 */
	lis	r3,(MSR_CE|MSR_ME|MSR_DE)@h
	ori	r3,r3,(MSR_CE|MSR_ME|MSR_DE)@l
	mtmsr	r3
	isync

	bl	cpu_init_f
	bl	board_init_f
	isync

	/* NOTREACHED - board_init_f() does not return */

#ifndef MINIMAL_SPL
	. = EXC_OFF_SYS_RESET
	.globl	_start_of_vectors
_start_of_vectors:

/* Critical input. */
	CRIT_EXCEPTION(0x0100, CriticalInput, CritcalInputException)

/* Machine check */
	MCK_EXCEPTION(0x200, MachineCheck, MachineCheckException)

/* Data Storage exception. */
	STD_EXCEPTION(0x0300, DataStorage, UnknownException)

/* Instruction Storage exception. */
	STD_EXCEPTION(0x0400, InstStorage, UnknownException)

/* External Interrupt exception. */
	STD_EXCEPTION(0x0500, ExtInterrupt, ExtIntException)

/* Alignment exception. */
	. = 0x0600
Alignment:
	EXCEPTION_PROLOG(SRR0, SRR1)
	mfspr	r4,DAR
	stw	r4,_DAR(r21)
	mfspr	r5,DSISR
	stw	r5,_DSISR(r21)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_TEMPLATE(Alignment, AlignmentException, MSR_KERNEL, COPY_EE)

/* Program check exception */
	. = 0x0700
ProgramCheck:
	EXCEPTION_PROLOG(SRR0, SRR1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_TEMPLATE(ProgramCheck, ProgramCheckException,
		MSR_KERNEL, COPY_EE)

	/* No FPU on MPC85xx.  This exception is not supposed to happen.
	*/
	STD_EXCEPTION(0x0800, FPUnavailable, UnknownException)

	. = 0x0900
/*
 * r0 - SYSCALL number
 * r3-... arguments
 */
SystemCall:
	addis	r11,r0,0	/* get functions table addr */
	ori	r11,r11,0	/* Note: this code is patched in trap_init */
	addis	r12,r0,0	/* get number of functions */
	ori	r12,r12,0

	cmplw	0,r0,r12
	bge	1f

	rlwinm	r0,r0,2,0,31	/* fn_addr = fn_tbl[r0] */
	add	r11,r11,r0
	lwz	r11,0(r11)

	li	r20,0xd00-4	/* Get stack pointer */
	lwz	r12,0(r20)
	subi	r12,r12,12	/* Adjust stack pointer */
	li	r0,0xc00+_end_back-SystemCall
	cmplw	0,r0,r12	/* Check stack overflow */
	bgt	1f
	stw	r12,0(r20)

	mflr	r0
	stw	r0,0(r12)
	mfspr	r0,SRR0
	stw	r0,4(r12)
	mfspr	r0,SRR1
	stw	r0,8(r12)

	li	r12,0xc00+_back-SystemCall
	mtlr	r12
	mtspr	SRR0,r11

1:	SYNC
	rfi
_back:

	mfmsr	r11			/* Disable interrupts */
	li	r12,0
	ori	r12,r12,MSR_EE
	andc	r11,r11,r12
	SYNC				/* Some chip revs need this... */
	mtmsr	r11
	SYNC

	li	r12,0xd00-4		/* restore regs */
	lwz	r12,0(r12)

	lwz	r11,0(r12)
	mtlr	r11
	lwz	r11,4(r12)
	mtspr	SRR0,r11
	lwz	r11,8(r12)
	mtspr	SRR1,r11

	addi	r12,r12,12		/* Adjust stack pointer */
	li	r20,0xd00-4
	stw	r12,0(r20)

	SYNC
	rfi
_end_back:

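	/*
	 * Note on the syscall dispatcher above: the two addis/ori pairs at
	 * SystemCall are placeholders that trap_init patches with the
	 * function table address and entry count, and the word at 0xd00-4
	 * in low memory holds the pointer to a small private stack used to
	 * save LR/SRR0/SRR1 across the dispatched call.
	 */
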
	STD_EXCEPTION(0x0a00, Decrementer, timer_interrupt)
	STD_EXCEPTION(0x0b00, IntervalTimer, UnknownException)
	STD_EXCEPTION(0x0c00, WatchdogTimer, UnknownException)

	STD_EXCEPTION(0x0d00, DataTLBError, UnknownException)
	STD_EXCEPTION(0x0e00, InstructionTLBError, UnknownException)

	CRIT_EXCEPTION(0x0f00, DebugBreakpoint, DebugException )

	.globl	_end_of_vectors
_end_of_vectors:


	. = . + (0x100 - ( . & 0xff ))	/* align for debug */

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception.
 * Register r21 is a pointer into the trap frame; r1 has the new stack
 * pointer.
 */
	.globl	transfer_to_handler
transfer_to_handler:
	stw	r22,_NIP(r21)
	lis	r22,MSR_POW@h
	andc	r23,r23,r22
	stw	r23,_MSR(r21)
	SAVE_GPR(7, r21)
	SAVE_4GPRS(8, r21)
	SAVE_8GPRS(12, r21)
	SAVE_8GPRS(24, r21)

	mflr	r23
	andi.	r24,r23,0x3f00		/* get vector offset */
	stw	r24,TRAP(r21)
	li	r22,0
	stw	r22,RESULT(r21)
	mtspr	SPRG2,r22		/* r1 is now kernel sp */

	lwz	r24,0(r23)		/* virtual address of handler */
	lwz	r23,4(r23)		/* where to go when done */
	mtspr	SRR0,r24
	mtspr	SRR1,r20
	mtlr	r23
	SYNC
	rfi				/* jump to handler, enable MMU */

int_return:
	mfmsr	r28		/* Disable interrupts */
	li	r4,0
	ori	r4,r4,MSR_EE
	andc	r28,r28,r4
	SYNC			/* Some chip revs need this... */
	mtmsr	r28
	SYNC
	lwz	r2,_CTR(r1)
	lwz	r0,_LINK(r1)
	mtctr	r2
	mtlr	r0
	lwz	r2,_XER(r1)
	lwz	r0,_CCR(r1)
	mtspr	XER,r2
	mtcrf	0xFF,r0
	REST_10GPRS(3, r1)
	REST_10GPRS(13, r1)
	REST_8GPRS(23, r1)
	REST_GPR(31, r1)
	lwz	r2,_NIP(r1)	/* Restore environment */
	lwz	r0,_MSR(r1)
	mtspr	SRR0,r2
	mtspr	SRR1,r0
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	SYNC
	rfi

crit_return:
	mfmsr	r28		/* Disable interrupts */
	li	r4,0
	ori	r4,r4,MSR_EE
	andc	r28,r28,r4
	SYNC			/* Some chip revs need this... */
	mtmsr	r28
	SYNC
	lwz	r2,_CTR(r1)
	lwz	r0,_LINK(r1)
	mtctr	r2
	mtlr	r0
	lwz	r2,_XER(r1)
	lwz	r0,_CCR(r1)
	mtspr	XER,r2
	mtcrf	0xFF,r0
	REST_10GPRS(3, r1)
	REST_10GPRS(13, r1)
	REST_8GPRS(23, r1)
	REST_GPR(31, r1)
	lwz	r2,_NIP(r1)	/* Restore environment */
	lwz	r0,_MSR(r1)
	mtspr	SPRN_CSRR0,r2
	mtspr	SPRN_CSRR1,r0
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	SYNC
	rfci

mck_return:
	mfmsr	r28		/* Disable interrupts */
	li	r4,0
	ori	r4,r4,MSR_EE
	andc	r28,r28,r4
	SYNC			/* Some chip revs need this... */
	mtmsr	r28
	SYNC
	lwz	r2,_CTR(r1)
	lwz	r0,_LINK(r1)
	mtctr	r2
	mtlr	r0
	lwz	r2,_XER(r1)
	lwz	r0,_CCR(r1)
	mtspr	XER,r2
	mtcrf	0xFF,r0
	REST_10GPRS(3, r1)
	REST_10GPRS(13, r1)
	REST_8GPRS(23, r1)
	REST_GPR(31, r1)
	lwz	r2,_NIP(r1)	/* Restore environment */
	lwz	r0,_MSR(r1)
	mtspr	SPRN_MCSRR0,r2
	mtspr	SPRN_MCSRR1,r0
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	SYNC
	rfmci

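/*
 * The three return paths above (int_return, crit_return, mck_return)
 * restore the same exception frame; they differ only in which
 * save/restore register pair they reload (SRR0/1, CSRR0/1 or MCSRR0/1)
 * and in the matching return instruction (rfi, rfci or rfmci).
 */
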
/* Cache functions.
*/
.globl flush_icache
flush_icache:
.globl invalidate_icache
invalidate_icache:
	mfspr	r0,L1CSR1
	ori	r0,r0,L1CSR1_ICFI
	msync
	isync
	mtspr	L1CSR1,r0
	isync
	blr				/* entire I cache */

.globl invalidate_dcache
invalidate_dcache:
	mfspr	r0,L1CSR0
	ori	r0,r0,L1CSR0_DCFI
	msync
	isync
	mtspr	L1CSR0,r0
	isync
	blr

	.globl	icache_enable
icache_enable:
	mflr	r8
	bl	invalidate_icache
	mtlr	r8
	isync
	mfspr	r4,L1CSR1
	ori	r4,r4,0x0001
	oris	r4,r4,0x0001
	mtspr	L1CSR1,r4
	isync
	blr

	.globl	icache_disable
icache_disable:
	mfspr	r0,L1CSR1
	lis	r3,0
	ori	r3,r3,L1CSR1_ICE
	andc	r0,r0,r3
	mtspr	L1CSR1,r0
	isync
	blr

	.globl	icache_status
icache_status:
	mfspr	r3,L1CSR1
	andi.	r3,r3,L1CSR1_ICE
	blr

	.globl	dcache_enable
dcache_enable:
	mflr	r8
	bl	invalidate_dcache
	mtlr	r8
	isync
	mfspr	r0,L1CSR0
	ori	r0,r0,0x0001
	oris	r0,r0,0x0001
	msync
	isync
	mtspr	L1CSR0,r0
	isync
	blr

	.globl	dcache_disable
dcache_disable:
	mfspr	r3,L1CSR0
	lis	r4,0
	ori	r4,r4,L1CSR0_DCE
	andc	r3,r3,r4
	mtspr	L1CSR0,r3
	isync
	blr

	.globl	dcache_status
dcache_status:
	mfspr	r3,L1CSR0
	andi.	r3,r3,L1CSR0_DCE
	blr

	.globl get_pir
get_pir:
	mfspr	r3,PIR
	blr

	.globl get_pvr
get_pvr:
	mfspr	r3,PVR
	blr

	.globl get_svr
get_svr:
	mfspr	r3,SVR
	blr

	.globl wr_tcr
wr_tcr:
	mtspr	TCR,r3
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 in8 */
/* Description:	 Input 8 bits */
/*------------------------------------------------------------------------------- */
	.globl	in8
in8:
	lbz	r3,0x0000(r3)
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 out8 */
/* Description:	 Output 8 bits */
/*------------------------------------------------------------------------------- */
	.globl	out8
out8:
	stb	r4,0x0000(r3)
	sync
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 out16 */
/* Description:	 Output 16 bits */
/*------------------------------------------------------------------------------- */
	.globl	out16
out16:
	sth	r4,0x0000(r3)
	sync
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 out16r */
/* Description:	 Byte reverse and output 16 bits */
/*------------------------------------------------------------------------------- */
	.globl	out16r
out16r:
	sthbrx	r4,r0,r3
	sync
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 out32 */
/* Description:	 Output 32 bits */
/*------------------------------------------------------------------------------- */
	.globl	out32
out32:
	stw	r4,0x0000(r3)
	sync
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 out32r */
/* Description:	 Byte reverse and output 32 bits */
/*------------------------------------------------------------------------------- */
	.globl	out32r
out32r:
	stwbrx	r4,r0,r3
	sync
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 in16 */
/* Description:	 Input 16 bits */
/*------------------------------------------------------------------------------- */
	.globl	in16
in16:
	lhz	r3,0x0000(r3)
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 in16r */
/* Description:	 Input 16 bits and byte reverse */
/*------------------------------------------------------------------------------- */
	.globl	in16r
in16r:
	lhbrx	r3,r0,r3
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 in32 */
/* Description:	 Input 32 bits */
/*------------------------------------------------------------------------------- */
	.globl	in32
in32:
	lwz	r3,0x0000(r3)
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 in32r */
/* Description:	 Input 32 bits and byte reverse */
/*------------------------------------------------------------------------------- */
	.globl	in32r
in32r:
	lwbrx	r3,r0,r3
	blr
#endif /* !MINIMAL_SPL */

/*------------------------------------------------------------------------------*/

/*
 * void write_tlb(mas0, mas1, mas2, mas3, mas7)
 */
	.globl	write_tlb
write_tlb:
	mtspr	MAS0,r3
	mtspr	MAS1,r4
	mtspr	MAS2,r5
	mtspr	MAS3,r6
#ifdef CONFIG_ENABLE_36BIT_PHYS
	mtspr	MAS7,r7
#endif
	li	r3,0
#ifdef CONFIG_SYS_BOOK3E_HV
	mtspr	MAS8,r3
#endif
	isync
	tlbwe
	msync
	isync
	blr

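/*
 * Note (informational): write_tlb() is called from C, so its five
 * arguments arrive in r3..r7 per the PowerPC calling convention and are
 * copied straight into MAS0..MAS3 and, with 36-bit physical addressing
 * enabled, MAS7; MAS8 is additionally cleared when the embedded
 * hypervisor category is configured (CONFIG_SYS_BOOK3E_HV).
 */
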
/*
 * void relocate_code (addr_sp, gd, addr_moni)
 *
 * This "function" does not return, instead it continues in RAM
 * after relocating the monitor code.
 *
 * r3 = dest
 * r4 = src
 * r5 = length in bytes
 * r6 = cachelinesize
 */
	.globl	relocate_code
relocate_code:
	mr	r1,r3		/* Set new stack pointer		*/
	mr	r9,r4		/* Save copy of Init Data pointer	*/
	mr	r10,r5		/* Save copy of Destination Address	*/

	GET_GOT
#ifndef CONFIG_SPL_SKIP_RELOCATE
	mr	r3,r5				/* Destination Address	*/
	lis	r4,CONFIG_SYS_MONITOR_BASE@h	/* Source      Address	*/
	ori	r4,r4,CONFIG_SYS_MONITOR_BASE@l
	lwz	r5,GOT(__init_end)
	sub	r5,r5,r4
	li	r6,CONFIG_SYS_CACHELINE_SIZE	/* Cache Line Size	*/

	/*
	 * Fix GOT pointer:
	 *
	 * New GOT-PTR = (old GOT-PTR - CONFIG_SYS_MONITOR_BASE) + Destination Address
	 *
	 * Offset:
	 */
	sub	r15,r10,r4

	/* First our own GOT */
	add	r12,r12,r15
	/* then the one used by the C code */
	add	r30,r30,r15

	/*
	 * Now relocate code
	 */

	cmplw	cr1,r3,r4
	addi	r0,r5,3
	srwi.	r0,r0,2
	beq	cr1,4f		/* In place copy is not necessary	*/
	beq	7f		/* Protect against 0 count		*/
	mtctr	r0
	bge	cr1,2f

	la	r8,-4(r4)
	la	r7,-4(r3)
1:	lwzu	r0,4(r8)
	stwu	r0,4(r7)
	bdnz	1b
	b	4f

2:	slwi	r0,r0,2
	add	r8,r4,r0
	add	r7,r3,r0
3:	lwzu	r0,-4(r8)
	stwu	r0,-4(r7)
	bdnz	3b

/*
 * Now flush the cache: note that we must start from a cache aligned
 * address. Otherwise we might miss one cache line.
 */
4:	cmpwi	r6,0
	add	r5,r3,r5
	beq	7f		/* Always flush prefetch queue in any case */
	subi	r0,r6,1
	andc	r3,r3,r0
	mr	r4,r3
5:	dcbst	0,r4
	add	r4,r4,r6
	cmplw	r4,r5
	blt	5b
	sync			/* Wait for all dcbst to complete on bus */
	mr	r4,r3
6:	icbi	0,r4
	add	r4,r4,r6
	cmplw	r4,r5
	blt	6b
7:	sync			/* Wait for all icbi to complete on bus	*/
	isync

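/*
 * The two flush loops above serve different purposes: the dcbst loop
 * pushes the freshly copied image out of the data cache to memory, and
 * the icbi loop invalidates any stale instruction-cache lines for that
 * range, so the branch below fetches the relocated code rather than old
 * contents.
 */
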
/*
 * We are done. Do not return, instead branch to second part of board
 * initialization, now running from RAM.
 */

	addi	r0,r10,in_ram - _start + _START_OFFSET

	/*
	 * As IVPR is going to point to a RAM address, make sure IVOR15
	 * contains a valid opcode to support the debugger.
	 */
	mtspr	IVOR15,r0

	/*
	 * Re-point the IVPR at RAM
	 */
	mtspr	IVPR,r10

	mtlr	r0
	blr				/* NEVER RETURNS! */
#endif
	.globl	in_ram
in_ram:

	/*
	 * Relocation Function, r12 points to got2+0x8000
	 *
	 * Adjust got2 pointers, no need to check for 0, this code
	 * already puts a few entries in the table.
	 */
	li	r0,__got2_entries@sectoff@l
	la	r3,GOT(_GOT2_TABLE_)
	lwz	r11,GOT(_GOT2_TABLE_)
	mtctr	r0
	sub	r11,r3,r11
	addi	r3,r3,-4
1:	lwzu	r0,4(r3)
	cmpwi	r0,0
	beq-	2f
	add	r0,r0,r11
	stw	r0,0(r3)
2:	bdnz	1b

	/*
	 * Now adjust the fixups and the pointers to the fixups
	 * in case we need to move ourselves again.
	 */
	li	r0,__fixup_entries@sectoff@l
	lwz	r3,GOT(_FIXUP_TABLE_)
	cmpwi	r0,0
	mtctr	r0
	addi	r3,r3,-4
	beq	4f
3:	lwzu	r4,4(r3)
	lwzux	r0,r4,r11
	cmpwi	r0,0
	add	r0,r0,r11
	stw	r4,0(r3)
	beq-	5f
	stw	r0,0(r4)
5:	bdnz	3b
4:
clear_bss:
	/*
	 * Now clear BSS segment
	 */
	lwz	r3,GOT(__bss_start)
	lwz	r4,GOT(__bss_end)

	cmplw	0,r3,r4
	beq	6f

	li	r0,0
5:
	stw	r0,0(r3)
	addi	r3,r3,4
	cmplw	0,r3,r4
	blt	5b
6:

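	/*
	 * At this point the relocated image is self-consistent: the GOT2
	 * entries and fixup targets have been rebased by the relocation
	 * offset in r11 and BSS has been zeroed, so it is safe to re-enter
	 * C code.
	 */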
	mr	r3,r9		/* Init Data pointer		*/
	mr	r4,r10		/* Destination Address		*/
	bl	board_init_r

#ifndef MINIMAL_SPL
	/*
	 * Copy exception vector code to low memory
	 *
	 * r3: dest_addr
	 * r7: source address, r8: end address, r9: target address
	 */
	.globl	trap_init
trap_init:
	mflr	r4		/* save link register		*/
	GET_GOT
	lwz	r7,GOT(_start_of_vectors)
	lwz	r8,GOT(_end_of_vectors)

	li	r9,0x100	/* reset vector always at 0x100 */

	cmplw	0,r7,r8
	bgelr			/* return if r7>=r8 - just in case */
1:
	lwz	r0,0(r7)
	stw	r0,0(r9)
	addi	r7,r7,4
	addi	r9,r9,4
	cmplw	0,r7,r8
	bne	1b

	/*
	 * relocate `hdlr' and `int_return' entries
	 */
	li	r7,.L_CriticalInput - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_MachineCheck - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_DataStorage - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_InstStorage - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_ExtInterrupt - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_Alignment - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_ProgramCheck - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_FPUnavailable - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_Decrementer - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_IntervalTimer - _start + _START_OFFSET
	li	r8,_end_of_vectors - _start + _START_OFFSET
2:
	bl	trap_reloc
	addi	r7,r7,0x100	/* next exception vector	*/
	cmplw	0,r7,r8
	blt	2b

	/* Update IVORs as per relocated vector table address */
	li	r7,0x0100
	mtspr	IVOR0,r7	/* 0: Critical input */
	li	r7,0x0200
	mtspr	IVOR1,r7	/* 1: Machine check */
	li	r7,0x0300
	mtspr	IVOR2,r7	/* 2: Data storage */
	li	r7,0x0400
	mtspr	IVOR3,r7	/* 3: Instruction storage */
	li	r7,0x0500
	mtspr	IVOR4,r7	/* 4: External interrupt */
	li	r7,0x0600
	mtspr	IVOR5,r7	/* 5: Alignment */
	li	r7,0x0700
	mtspr	IVOR6,r7	/* 6: Program check */
	li	r7,0x0800
	mtspr	IVOR7,r7	/* 7: floating point unavailable */
	li	r7,0x0900
	mtspr	IVOR8,r7	/* 8: System call */
	/* 9: Auxiliary processor unavailable(unsupported) */
	li	r7,0x0a00
	mtspr	IVOR10,r7	/* 10: Decrementer */
	li	r7,0x0b00
	mtspr	IVOR11,r7	/* 11: Interval timer */
	li	r7,0x0c00
	mtspr	IVOR12,r7	/* 12: Watchdog timer */
	li	r7,0x0d00
	mtspr	IVOR13,r7	/* 13: Data TLB error */
	li	r7,0x0e00
	mtspr	IVOR14,r7	/* 14: Instruction TLB error */
	li	r7,0x0f00
	mtspr	IVOR15,r7	/* 15: Debug */

	lis	r7,0x0
	mtspr	IVPR,r7

	mtlr	r4		/* restore link register	*/
	blr

.globl unlock_ram_in_cache
unlock_ram_in_cache:
	/* invalidate the INIT_RAM section */
	lis	r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@h
	ori	r3,r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@l
	mfspr	r4,L1CFG0
	andi.	r4,r4,0x1ff
	slwi	r4,r4,(10 - 1 - L1_CACHE_SHIFT)
	mtctr	r4
1:	dcbi	r0,r3
	dcblc	r0,r3
	addi	r3,r3,CONFIG_SYS_CACHELINE_SIZE
	bdnz	1b
	sync

	/* Invalidate the TLB entries for the cache */
	lis	r3,CONFIG_SYS_INIT_RAM_ADDR@h
	ori	r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l
	tlbivax	0,r3
	addi	r3,r3,0x1000
	tlbivax	0,r3
	addi	r3,r3,0x1000
	tlbivax	0,r3
	addi	r3,r3,0x1000
	tlbivax	0,r3
	isync
	blr

.globl flush_dcache
flush_dcache:
	mfspr	r3,SPRN_L1CFG0

	rlwinm	r5,r3,9,3	/* Extract cache block size */
	twlgti	r5,1		/* Only 32 and 64 byte cache blocks
				 * are currently defined.
				 */
	li	r4,32
	subfic	r6,r5,2		/* r6 = log2(1KiB / cache block size) -
				 *      log2(number of ways)
				 */
	slw	r5,r4,r5	/* r5 = cache block size */

	rlwinm	r7,r3,0,0xff	/* Extract number of KiB in the cache */
	mulli	r7,r7,13	/* An 8-way cache will require 13
				 * loads per set.
				 */
	slw	r7,r7,r6

	/* save off HID0 and set DCFA */
	mfspr	r8,SPRN_HID0
	ori	r9,r8,HID0_DCFA@l
	mtspr	SPRN_HID0,r9
	isync

	lis	r4,0
	mtctr	r7

1:	lwz	r3,0(r4)	/* Load... */
	add	r4,r4,r5
	bdnz	1b

	msync
	lis	r4,0
	mtctr	r7

1:	dcbf	0,r4		/* ...and flush. */
	add	r4,r4,r5
	bdnz	1b

	/* restore HID0 */
	mtspr	SPRN_HID0,r8
	isync

	blr
#endif /* !MINIMAL_SPL */