/*
 * Copyright 2004, 2007-2012 Freescale Semiconductor, Inc.
 * Copyright (C) 2003  Motorola,Inc.
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

/* U-Boot Startup Code for Motorola 85xx PowerPC based Embedded Boards
 *
 * The processor starts at 0xfffffffc and the code is first executed in the
 * last 4K page (0xfffff000-0xffffffff) in flash/rom.
28 * 29 */ 30 31#include <asm-offsets.h> 32#include <config.h> 33#include <mpc85xx.h> 34#include <version.h> 35 36#define _LINUX_CONFIG_H 1 /* avoid reading Linux autoconf.h file */ 37 38#include <ppc_asm.tmpl> 39#include <ppc_defs.h> 40 41#include <asm/cache.h> 42#include <asm/mmu.h> 43 44#undef MSR_KERNEL 45#define MSR_KERNEL ( MSR_ME ) /* Machine Check */ 46 47/* 48 * Set up GOT: Global Offset Table 49 * 50 * Use r12 to access the GOT 51 */ 52 START_GOT 53 GOT_ENTRY(_GOT2_TABLE_) 54 GOT_ENTRY(_FIXUP_TABLE_) 55 56#ifndef CONFIG_NAND_SPL 57 GOT_ENTRY(_start) 58 GOT_ENTRY(_start_of_vectors) 59 GOT_ENTRY(_end_of_vectors) 60 GOT_ENTRY(transfer_to_handler) 61#endif 62 63 GOT_ENTRY(__init_end) 64 GOT_ENTRY(__bss_end__) 65 GOT_ENTRY(__bss_start) 66 END_GOT 67 68/* 69 * e500 Startup -- after reset only the last 4KB of the effective 70 * address space is mapped in the MMU L2 TLB1 Entry0. The .bootpg 71 * section is located at THIS LAST page and basically does three 72 * things: clear some registers, set up exception tables and 73 * add more TLB entries for 'larger spaces'(e.g. the boot rom) to 74 * continue the boot procedure. 75 76 * Once the boot rom is mapped by TLB entries we can proceed 77 * with normal startup. 
78 * 79 */ 80 81 .section .bootpg,"ax" 82 .globl _start_e500 83 84_start_e500: 85/* Enable debug exception */ 86 li r1,MSR_DE 87 mtmsr r1 88 89#ifdef CONFIG_SYS_FSL_ERRATUM_A004510 90 mfspr r3,SPRN_SVR 91 rlwinm r3,r3,0,0xff 92 li r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV 93 cmpw r3,r4 94 beq 1f 95 96#ifdef CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2 97 li r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2 98 cmpw r3,r4 99 beq 1f 100#endif 101 102 /* Not a supported revision affected by erratum */ 103 li r27,0 104 b 2f 105 1061: li r27,1 /* Remember for later that we have the erratum */ 107 /* Erratum says set bits 55:60 to 001001 */ 108 msync 109 isync 110 mfspr r3,976 111 li r4,0x48 112 rlwimi r3,r4,0,0x1f8 113 mtspr 976,r3 114 isync 1152: 116#endif 117 118#if defined(CONFIG_SECURE_BOOT) && defined(CONFIG_E500MC) 119 /* ISBC uses L2 as stack. 120 * Disable L2 cache here so that u-boot can enable it later 121 * as part of it's normal flow 122 */ 123 124 /* Check if L2 is enabled */ 125 mfspr r3, SPRN_L2CSR0 126 lis r2, L2CSR0_L2E@h 127 ori r2, r2, L2CSR0_L2E@l 128 and. r4, r3, r2 129 beq l2_disabled 130 131 mfspr r3, SPRN_L2CSR0 132 /* Flush L2 cache */ 133 lis r2,(L2CSR0_L2FL)@h 134 ori r2, r2, (L2CSR0_L2FL)@l 135 or r3, r2, r3 136 sync 137 isync 138 mtspr SPRN_L2CSR0,r3 139 isync 1401: 141 mfspr r3, SPRN_L2CSR0 142 and. 
r1, r3, r2 143 bne 1b 144 145 mfspr r3, SPRN_L2CSR0 146 lis r2, L2CSR0_L2E@h 147 ori r2, r2, L2CSR0_L2E@l 148 andc r4, r3, r2 149 sync 150 isync 151 mtspr SPRN_L2CSR0,r4 152 isync 153 154l2_disabled: 155#endif 156 157/* clear registers/arrays not reset by hardware */ 158 159 /* L1 */ 160 li r0,2 161 mtspr L1CSR0,r0 /* invalidate d-cache */ 162 mtspr L1CSR1,r0 /* invalidate i-cache */ 163 164 mfspr r1,DBSR 165 mtspr DBSR,r1 /* Clear all valid bits */ 166 167 /* 168 * Enable L1 Caches early 169 * 170 */ 171 172#ifdef CONFIG_SYS_CACHE_STASHING 173 /* set stash id to (coreID) * 2 + 32 + L1 CT (0) */ 174 li r2,(32 + 0) 175 mtspr L1CSR2,r2 176#endif 177 178 /* Enable/invalidate the I-Cache */ 179 lis r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@h 180 ori r2,r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@l 181 mtspr SPRN_L1CSR1,r2 1821: 183 mfspr r3,SPRN_L1CSR1 184 and. r1,r3,r2 185 bne 1b 186 187 lis r3,(L1CSR1_CPE|L1CSR1_ICE)@h 188 ori r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l 189 mtspr SPRN_L1CSR1,r3 190 isync 1912: 192 mfspr r3,SPRN_L1CSR1 193 andi. r1,r3,L1CSR1_ICE@l 194 beq 2b 195 196 /* Enable/invalidate the D-Cache */ 197 lis r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@h 198 ori r2,r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@l 199 mtspr SPRN_L1CSR0,r2 2001: 201 mfspr r3,SPRN_L1CSR0 202 and. r1,r3,r2 203 bne 1b 204 205 lis r3,(L1CSR0_CPE|L1CSR0_DCE)@h 206 ori r3,r3,(L1CSR0_CPE|L1CSR0_DCE)@l 207 mtspr SPRN_L1CSR0,r3 208 isync 2092: 210 mfspr r3,SPRN_L1CSR0 211 andi. 
r1,r3,L1CSR0_DCE@l 212 beq 2b 213 214 .macro create_tlb1_entry esel ts tsize epn wimg rpn perm phy_high scratch 215 lis \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@h 216 ori \scratch, \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@l 217 mtspr MAS0, \scratch 218 lis \scratch, FSL_BOOKE_MAS1(1, 1, 0, \ts, \tsize)@h 219 ori \scratch, \scratch, FSL_BOOKE_MAS1(1, 1, 0, \ts, \tsize)@l 220 mtspr MAS1, \scratch 221 lis \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h 222 ori \scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l 223 mtspr MAS2, \scratch 224 lis \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@h 225 ori \scratch, \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@l 226 mtspr MAS3, \scratch 227 lis \scratch, \phy_high@h 228 ori \scratch, \scratch, \phy_high@l 229 mtspr MAS7, \scratch 230 isync 231 msync 232 tlbwe 233 isync 234 .endm 235 236 .macro create_tlb0_entry esel ts tsize epn wimg rpn perm phy_high scratch 237 lis \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@h 238 ori \scratch, \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@l 239 mtspr MAS0, \scratch 240 lis \scratch, FSL_BOOKE_MAS1(1, 0, 0, \ts, \tsize)@h 241 ori \scratch, \scratch, FSL_BOOKE_MAS1(1, 0, 0, \ts, \tsize)@l 242 mtspr MAS1, \scratch 243 lis \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h 244 ori \scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l 245 mtspr MAS2, \scratch 246 lis \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@h 247 ori \scratch, \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@l 248 mtspr MAS3, \scratch 249 lis \scratch, \phy_high@h 250 ori \scratch, \scratch, \phy_high@l 251 mtspr MAS7, \scratch 252 isync 253 msync 254 tlbwe 255 isync 256 .endm 257 258 .macro delete_tlb1_entry esel scratch 259 lis \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@h 260 ori \scratch, \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@l 261 mtspr MAS0, \scratch 262 li \scratch, 0 263 mtspr MAS1, \scratch 264 isync 265 msync 266 tlbwe 267 isync 268 .endm 269 270 .macro delete_tlb0_entry esel epn wimg scratch 271 lis \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@h 272 ori \scratch, \scratch, FSL_BOOKE_MAS0(0, 
\esel, 0)@l 273 mtspr MAS0, \scratch 274 li \scratch, 0 275 mtspr MAS1, \scratch 276 lis \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h 277 ori \scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l 278 mtspr MAS2, \scratch 279 isync 280 msync 281 tlbwe 282 isync 283 .endm 284 285#if defined(CONFIG_SYS_PPC_E500_DEBUG_TLB) && !defined(CONFIG_NAND_SPL) 286/* 287 * TLB entry for debuggging in AS1 288 * Create temporary TLB entry in AS0 to handle debug exception 289 * As on debug exception MSR is cleared i.e. Address space is changed 290 * to 0. A TLB entry (in AS0) is required to handle debug exception generated 291 * in AS1. 292 */ 293 294#if !defined(CONFIG_SYS_RAMBOOT) && !defined(CONFIG_SECURE_BOOT) 295/* 296 * TLB entry is created for IVPR + IVOR15 to map on valid OP code address 297 * bacause flash's virtual address maps to 0xff800000 - 0xffffffff. 298 * and this window is outside of 4K boot window. 299 */ 300 create_tlb1_entry CONFIG_SYS_PPC_E500_DEBUG_TLB, \ 301 0, BOOKE_PAGESZ_4M, \ 302 CONFIG_SYS_MONITOR_BASE & 0xffc00000, MAS2_I|MAS2_G, \ 303 0xffc00000, MAS3_SX|MAS3_SW|MAS3_SR, \ 304 0, r6 305 306#elif !defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_SECURE_BOOT) 307 create_tlb1_entry CONFIG_SYS_PPC_E500_DEBUG_TLB, \ 308 0, BOOKE_PAGESZ_1M, \ 309 CONFIG_SYS_MONITOR_BASE, MAS2_I|MAS2_G, \ 310 CONFIG_SYS_PBI_FLASH_WINDOW, MAS3_SX|MAS3_SW|MAS3_SR, \ 311 0, r6 312#else 313/* 314 * TLB entry is created for IVPR + IVOR15 to map on valid OP code address 315 * because "nexti" will resize TLB to 4K 316 */ 317 create_tlb1_entry CONFIG_SYS_PPC_E500_DEBUG_TLB, \ 318 0, BOOKE_PAGESZ_256K, \ 319 CONFIG_SYS_MONITOR_BASE, MAS2_I, \ 320 CONFIG_SYS_MONITOR_BASE, MAS3_SX|MAS3_SW|MAS3_SR, \ 321 0, r6 322#endif 323#endif 324 325/* 326 * Ne need to setup interrupt vector for NAND SPL 327 * because NAND SPL never compiles it. 
328 */ 329#if !defined(CONFIG_NAND_SPL) 330 /* Setup interrupt vectors */ 331 lis r1,CONFIG_SYS_MONITOR_BASE@h 332 mtspr IVPR,r1 333 334 lis r3,(CONFIG_SYS_MONITOR_BASE & 0xffff)@h 335 ori r3,r3,(CONFIG_SYS_MONITOR_BASE & 0xffff)@l 336 337 addi r4,r3,CriticalInput - _start + _START_OFFSET 338 mtspr IVOR0,r4 /* 0: Critical input */ 339 addi r4,r3,MachineCheck - _start + _START_OFFSET 340 mtspr IVOR1,r4 /* 1: Machine check */ 341 addi r4,r3,DataStorage - _start + _START_OFFSET 342 mtspr IVOR2,r4 /* 2: Data storage */ 343 addi r4,r3,InstStorage - _start + _START_OFFSET 344 mtspr IVOR3,r4 /* 3: Instruction storage */ 345 addi r4,r3,ExtInterrupt - _start + _START_OFFSET 346 mtspr IVOR4,r4 /* 4: External interrupt */ 347 addi r4,r3,Alignment - _start + _START_OFFSET 348 mtspr IVOR5,r4 /* 5: Alignment */ 349 addi r4,r3,ProgramCheck - _start + _START_OFFSET 350 mtspr IVOR6,r4 /* 6: Program check */ 351 addi r4,r3,FPUnavailable - _start + _START_OFFSET 352 mtspr IVOR7,r4 /* 7: floating point unavailable */ 353 addi r4,r3,SystemCall - _start + _START_OFFSET 354 mtspr IVOR8,r4 /* 8: System call */ 355 /* 9: Auxiliary processor unavailable(unsupported) */ 356 addi r4,r3,Decrementer - _start + _START_OFFSET 357 mtspr IVOR10,r4 /* 10: Decrementer */ 358 addi r4,r3,IntervalTimer - _start + _START_OFFSET 359 mtspr IVOR11,r4 /* 11: Interval timer */ 360 addi r4,r3,WatchdogTimer - _start + _START_OFFSET 361 mtspr IVOR12,r4 /* 12: Watchdog timer */ 362 addi r4,r3,DataTLBError - _start + _START_OFFSET 363 mtspr IVOR13,r4 /* 13: Data TLB error */ 364 addi r4,r3,InstructionTLBError - _start + _START_OFFSET 365 mtspr IVOR14,r4 /* 14: Instruction TLB error */ 366 addi r4,r3,DebugBreakpoint - _start + _START_OFFSET 367 mtspr IVOR15,r4 /* 15: Debug */ 368#endif 369 370 /* Clear and set up some registers. 
*/ 371 li r0,0x0000 372 lis r1,0xffff 373 mtspr DEC,r0 /* prevent dec exceptions */ 374 mttbl r0 /* prevent fit & wdt exceptions */ 375 mttbu r0 376 mtspr TSR,r1 /* clear all timer exception status */ 377 mtspr TCR,r0 /* disable all */ 378 mtspr ESR,r0 /* clear exception syndrome register */ 379 mtspr MCSR,r0 /* machine check syndrome register */ 380 mtxer r0 /* clear integer exception register */ 381 382#ifdef CONFIG_SYS_BOOK3E_HV 383 mtspr MAS8,r0 /* make sure MAS8 is clear */ 384#endif 385 386 /* Enable Time Base and Select Time Base Clock */ 387 lis r0,HID0_EMCP@h /* Enable machine check */ 388#if defined(CONFIG_ENABLE_36BIT_PHYS) 389 ori r0,r0,HID0_ENMAS7@l /* Enable MAS7 */ 390#endif 391#ifndef CONFIG_E500MC 392 ori r0,r0,HID0_TBEN@l /* Enable Timebase */ 393#endif 394 mtspr HID0,r0 395 396#ifndef CONFIG_E500MC 397 li r0,(HID1_ASTME|HID1_ABE)@l /* Addr streaming & broadcast */ 398 mfspr r3,PVR 399 andi. r3,r3, 0xff 400 cmpwi r3,0x50@l /* if we are rev 5.0 or greater set MBDD */ 401 blt 1f 402 /* Set MBDD bit also */ 403 ori r0, r0, HID1_MBDD@l 4041: 405 mtspr HID1,r0 406#endif 407 408#ifdef CONFIG_SYS_FSL_ERRATUM_CPU_A003999 409 mfspr r3,977 410 oris r3,r3,0x0100 411 mtspr 977,r3 412#endif 413 414 /* Enable Branch Prediction */ 415#if defined(CONFIG_BTB) 416 lis r0,BUCSR_ENABLE@h 417 ori r0,r0,BUCSR_ENABLE@l 418 mtspr SPRN_BUCSR,r0 419#endif 420 421#if defined(CONFIG_SYS_INIT_DBCR) 422 lis r1,0xffff 423 ori r1,r1,0xffff 424 mtspr DBSR,r1 /* Clear all status bits */ 425 lis r0,CONFIG_SYS_INIT_DBCR@h /* DBCR0[IDM] must be set */ 426 ori r0,r0,CONFIG_SYS_INIT_DBCR@l 427 mtspr DBCR0,r0 428#endif 429 430#ifdef CONFIG_MPC8569 431#define CONFIG_SYS_LBC_ADDR (CONFIG_SYS_CCSRBAR_DEFAULT + 0x5000) 432#define CONFIG_SYS_LBCR_ADDR (CONFIG_SYS_LBC_ADDR + 0xd0) 433 434 /* MPC8569 Rev.0 silcon needs to set bit 13 of LBCR to allow elBC to 435 * use address space which is more than 12bits, and it must be done in 436 * the 4K boot page. So we set this bit here. 
437 */ 438 439 /* create a temp mapping TLB0[0] for LBCR */ 440 create_tlb0_entry 0, \ 441 0, BOOKE_PAGESZ_4K, \ 442 CONFIG_SYS_LBC_ADDR, MAS2_I|MAS2_G, \ 443 CONFIG_SYS_LBC_ADDR, MAS3_SW|MAS3_SR, \ 444 0, r6 445 446 /* Set LBCR register */ 447 lis r4,CONFIG_SYS_LBCR_ADDR@h 448 ori r4,r4,CONFIG_SYS_LBCR_ADDR@l 449 450 lis r5,CONFIG_SYS_LBC_LBCR@h 451 ori r5,r5,CONFIG_SYS_LBC_LBCR@l 452 stw r5,0(r4) 453 isync 454 455 /* invalidate this temp TLB */ 456 lis r4,CONFIG_SYS_LBC_ADDR@h 457 ori r4,r4,CONFIG_SYS_LBC_ADDR@l 458 tlbivax 0,r4 459 isync 460 461#endif /* CONFIG_MPC8569 */ 462 463/* 464 * Search for the TLB that covers the code we're executing, and shrink it 465 * so that it covers only this 4K page. That will ensure that any other 466 * TLB we create won't interfere with it. We assume that the TLB exists, 467 * which is why we don't check the Valid bit of MAS1. We also assume 468 * it is in TLB1. 469 * 470 * This is necessary, for example, when booting from the on-chip ROM, 471 * which (oddly) creates a single 4GB TLB that covers CCSR and DDR. 472 */ 473 bl nexti /* Find our address */ 474nexti: mflr r1 /* R1 = our PC */ 475 li r2, 0 476 mtspr MAS6, r2 /* Assume the current PID and AS are 0 */ 477 isync 478 msync 479 tlbsx 0, r1 /* This must succeed */ 480 481 mfspr r14, MAS0 /* Save ESEL for later */ 482 rlwinm r14, r14, 16, 0xfff 483 484 /* Set the size of the TLB to 4KB */ 485 mfspr r3, MAS1 486 li r2, 0xF00 487 andc r3, r3, r2 /* Clear the TSIZE bits */ 488 ori r3, r3, MAS1_TSIZE(BOOKE_PAGESZ_4K)@l 489 oris r3, r3, MAS1_IPROT@h 490 mtspr MAS1, r3 491 492 /* 493 * Set the base address of the TLB to our PC. We assume that 494 * virtual == physical. We also assume that MAS2_EPN == MAS3_RPN. 
495 */ 496 lis r3, MAS2_EPN@h 497 ori r3, r3, MAS2_EPN@l /* R3 = MAS2_EPN */ 498 499 and r1, r1, r3 /* Our PC, rounded down to the nearest page */ 500 501 mfspr r2, MAS2 502 andc r2, r2, r3 503 or r2, r2, r1 504#ifdef CONFIG_SYS_FSL_ERRATUM_A004510 505 cmpwi r27,0 506 beq 1f 507 andi. r15, r2, MAS2_I|MAS2_G /* save the old I/G for later */ 508 rlwinm r2, r2, 0, ~MAS2_I 509 ori r2, r2, MAS2_G 5101: 511#endif 512 mtspr MAS2, r2 /* Set the EPN to our PC base address */ 513 514 mfspr r2, MAS3 515 andc r2, r2, r3 516 or r2, r2, r1 517 mtspr MAS3, r2 /* Set the RPN to our PC base address */ 518 519 isync 520 msync 521 tlbwe 522 523/* 524 * Clear out any other TLB entries that may exist, to avoid conflicts. 525 * Our TLB entry is in r14. 526 */ 527 li r0, TLBIVAX_ALL | TLBIVAX_TLB0 528 tlbivax 0, r0 529 tlbsync 530 531 mfspr r4, SPRN_TLB1CFG 532 rlwinm r4, r4, 0, TLBnCFG_NENTRY_MASK 533 534 li r3, 0 535 mtspr MAS1, r3 5361: cmpw r3, r14 537#if defined(CONFIG_SYS_PPC_E500_DEBUG_TLB) && !defined(CONFIG_NAND_SPL) 538 cmpwi cr1, r3, CONFIG_SYS_PPC_E500_DEBUG_TLB 539 cror cr0*4+eq, cr0*4+eq, cr1*4+eq 540#endif 541 rlwinm r5, r3, 16, MAS0_ESEL_MSK 542 addi r3, r3, 1 543 beq 2f /* skip the entry we're executing from */ 544 545 oris r5, r5, MAS0_TLBSEL(1)@h 546 mtspr MAS0, r5 547 548 isync 549 tlbwe 550 isync 551 msync 552 5532: cmpw r3, r4 554 blt 1b 555 556/* 557 * Relocate CCSR, if necessary. We relocate CCSR if (obviously) the default 558 * location is not where we want it. This typically happens on a 36-bit 559 * system, where we want to move CCSR to near the top of 36-bit address space. 560 * 561 * To move CCSR, we create two temporary TLBs, one for the old location, and 562 * another for the new location. On CoreNet systems, we also need to create 563 * a special, temporary LAW. 564 * 565 * As a general rule, TLB0 is used for short-term TLBs, and TLB1 is used for 566 * long-term TLBs, so we use TLB0 here. 
567 */ 568#if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS) 569 570#if !defined(CONFIG_SYS_CCSRBAR_PHYS_HIGH) || !defined(CONFIG_SYS_CCSRBAR_PHYS_LOW) 571#error "CONFIG_SYS_CCSRBAR_PHYS_HIGH and CONFIG_SYS_CCSRBAR_PHYS_LOW) must be defined." 572#endif 573 574create_ccsr_new_tlb: 575 /* 576 * Create a TLB for the new location of CCSR. Register R8 is reserved 577 * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR). 578 */ 579 lis r8, CONFIG_SYS_CCSRBAR@h 580 ori r8, r8, CONFIG_SYS_CCSRBAR@l 581 lis r9, (CONFIG_SYS_CCSRBAR + 0x1000)@h 582 ori r9, r9, (CONFIG_SYS_CCSRBAR + 0x1000)@l 583 create_tlb0_entry 0, \ 584 0, BOOKE_PAGESZ_4K, \ 585 CONFIG_SYS_CCSRBAR, MAS2_I|MAS2_G, \ 586 CONFIG_SYS_CCSRBAR_PHYS_LOW, MAS3_SW|MAS3_SR, \ 587 CONFIG_SYS_CCSRBAR_PHYS_HIGH, r3 588 /* 589 * Create a TLB for the current location of CCSR. Register R9 is reserved 590 * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR + 0x1000). 591 */ 592create_ccsr_old_tlb: 593 create_tlb0_entry 1, \ 594 0, BOOKE_PAGESZ_4K, \ 595 CONFIG_SYS_CCSRBAR + 0x1000, MAS2_I|MAS2_G, \ 596 CONFIG_SYS_CCSRBAR_DEFAULT, MAS3_SW|MAS3_SR, \ 597 0, r3 /* The default CCSR address is always a 32-bit number */ 598 599 600 /* 601 * We have a TLB for what we think is the current (old) CCSR. Let's 602 * verify that, otherwise we won't be able to move it. 603 * CONFIG_SYS_CCSRBAR_DEFAULT is always a 32-bit number, so we only 604 * need to compare the lower 32 bits of CCSRBAR on CoreNet systems. 605 */ 606verify_old_ccsr: 607 lis r0, CONFIG_SYS_CCSRBAR_DEFAULT@h 608 ori r0, r0, CONFIG_SYS_CCSRBAR_DEFAULT@l 609#ifdef CONFIG_FSL_CORENET 610 lwz r1, 4(r9) /* CCSRBARL */ 611#else 612 lwz r1, 0(r9) /* CCSRBAR, shifted right by 12 */ 613 slwi r1, r1, 12 614#endif 615 616 cmpl 0, r0, r1 617 618 /* 619 * If the value we read from CCSRBARL is not what we expect, then 620 * enter an infinite loop. This will at least allow a debugger to 621 * halt execution and examine TLBs, etc. There's no point in going 622 * on. 
623 */ 624infinite_debug_loop: 625 bne infinite_debug_loop 626 627#ifdef CONFIG_FSL_CORENET 628 629#define CCSR_LAWBARH0 (CONFIG_SYS_CCSRBAR + 0x1000) 630#define LAW_EN 0x80000000 631#define LAW_SIZE_4K 0xb 632#define CCSRBAR_LAWAR (LAW_EN | (0x1e << 20) | LAW_SIZE_4K) 633#define CCSRAR_C 0x80000000 /* Commit */ 634 635create_temp_law: 636 /* 637 * On CoreNet systems, we create the temporary LAW using a special LAW 638 * target ID of 0x1e. LAWBARH is at offset 0xc00 in CCSR. 639 */ 640 lis r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h 641 ori r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l 642 lis r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h 643 ori r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l 644 lis r2, CCSRBAR_LAWAR@h 645 ori r2, r2, CCSRBAR_LAWAR@l 646 647 stw r0, 0xc00(r9) /* LAWBARH0 */ 648 stw r1, 0xc04(r9) /* LAWBARL0 */ 649 sync 650 stw r2, 0xc08(r9) /* LAWAR0 */ 651 652 /* 653 * Read back from LAWAR to ensure the update is complete. e500mc 654 * cores also require an isync. 655 */ 656 lwz r0, 0xc08(r9) /* LAWAR0 */ 657 isync 658 659 /* 660 * Read the current CCSRBARH and CCSRBARL using load word instructions. 661 * Follow this with an isync instruction. This forces any outstanding 662 * accesses to configuration space to completion. 663 */ 664read_old_ccsrbar: 665 lwz r0, 0(r9) /* CCSRBARH */ 666 lwz r0, 4(r9) /* CCSRBARL */ 667 isync 668 669 /* 670 * Write the new values for CCSRBARH and CCSRBARL to their old 671 * locations. The CCSRBARH has a shadow register. When the CCSRBARH 672 * has a new value written it loads a CCSRBARH shadow register. When 673 * the CCSRBARL is written, the CCSRBARH shadow register contents 674 * along with the CCSRBARL value are loaded into the CCSRBARH and 675 * CCSRBARL registers, respectively. Follow this with a sync 676 * instruction. 
677 */ 678write_new_ccsrbar: 679 lis r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h 680 ori r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l 681 lis r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h 682 ori r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l 683 lis r2, CCSRAR_C@h 684 ori r2, r2, CCSRAR_C@l 685 686 stw r0, 0(r9) /* Write to CCSRBARH */ 687 sync /* Make sure we write to CCSRBARH first */ 688 stw r1, 4(r9) /* Write to CCSRBARL */ 689 sync 690 691 /* 692 * Write a 1 to the commit bit (C) of CCSRAR at the old location. 693 * Follow this with a sync instruction. 694 */ 695 stw r2, 8(r9) 696 sync 697 698 /* Delete the temporary LAW */ 699delete_temp_law: 700 li r1, 0 701 stw r1, 0xc08(r8) 702 sync 703 stw r1, 0xc00(r8) 704 stw r1, 0xc04(r8) 705 sync 706 707#else /* #ifdef CONFIG_FSL_CORENET */ 708 709write_new_ccsrbar: 710 /* 711 * Read the current value of CCSRBAR using a load word instruction 712 * followed by an isync. This forces all accesses to configuration 713 * space to complete. 714 */ 715 sync 716 lwz r0, 0(r9) 717 isync 718 719/* CONFIG_SYS_CCSRBAR_PHYS right shifted by 12 */ 720#define CCSRBAR_PHYS_RS12 ((CONFIG_SYS_CCSRBAR_PHYS_HIGH << 20) | \ 721 (CONFIG_SYS_CCSRBAR_PHYS_LOW >> 12)) 722 723 /* Write the new value to CCSRBAR. */ 724 lis r0, CCSRBAR_PHYS_RS12@h 725 ori r0, r0, CCSRBAR_PHYS_RS12@l 726 stw r0, 0(r9) 727 sync 728 729 /* 730 * The manual says to perform a load of an address that does not 731 * access configuration space or the on-chip SRAM using an existing TLB, 732 * but that doesn't appear to be necessary. We will do the isync, 733 * though. 734 */ 735 isync 736 737 /* 738 * Read the contents of CCSRBAR from its new location, followed by 739 * another isync. 
740 */ 741 lwz r0, 0(r8) 742 isync 743 744#endif /* #ifdef CONFIG_FSL_CORENET */ 745 746 /* Delete the temporary TLBs */ 747delete_temp_tlbs: 748 delete_tlb0_entry 0, CONFIG_SYS_CCSRBAR, MAS2_I|MAS2_G, r3 749 delete_tlb0_entry 1, CONFIG_SYS_CCSRBAR + 0x1000, MAS2_I|MAS2_G, r3 750 751#endif /* #if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS) */ 752 753#ifdef CONFIG_SYS_FSL_QORIQ_CHASSIS2 754create_ccsr_l2_tlb: 755 /* 756 * Create a TLB for the MMR location of CCSR 757 * to access L2CSR0 register 758 */ 759 create_tlb0_entry 0, \ 760 0, BOOKE_PAGESZ_4K, \ 761 CONFIG_SYS_CCSRBAR + 0xC20000, MAS2_I|MAS2_G, \ 762 CONFIG_SYS_CCSRBAR_PHYS_LOW + 0xC20000, MAS3_SW|MAS3_SR, \ 763 CONFIG_SYS_CCSRBAR_PHYS_HIGH, r3 764 765enable_l2_cluster_l2: 766 /* enable L2 cache */ 767 lis r3, (CONFIG_SYS_CCSRBAR + 0xC20000)@h 768 ori r3, r3, (CONFIG_SYS_CCSRBAR + 0xC20000)@l 769 li r4, 33 /* stash id */ 770 stw r4, 4(r3) 771 lis r4, (L2CSR0_L2FI|L2CSR0_L2LFC)@h 772 ori r4, r4, (L2CSR0_L2FI|L2CSR0_L2LFC)@l 773 sync 774 stw r4, 0(r3) /* invalidate L2 */ 7751: sync 776 lwz r0, 0(r3) 777 twi 0, r0, 0 778 isync 779 and. r1, r0, r4 780 bne 1b 781 lis r4, L2CSR0_L2E@h 782 sync 783 stw r4, 0(r3) /* eanble L2 */ 784delete_ccsr_l2_tlb: 785 delete_tlb0_entry 0, CONFIG_SYS_CCSRBAR + 0xC20000, MAS2_I|MAS2_G, r3 786#endif 787 788#ifdef CONFIG_SYS_FSL_ERRATUM_A004510 789#define DCSR_LAWBARH0 (CONFIG_SYS_CCSRBAR + 0x1000) 790#define LAW_SIZE_1M 0x13 791#define DCSRBAR_LAWAR (LAW_EN | (0x1d << 20) | LAW_SIZE_1M) 792 793 cmpwi r27,0 794 beq 9f 795 796 /* 797 * Create a TLB entry for CCSR 798 * 799 * We're executing out of TLB1 entry in r14, and that's the only 800 * TLB entry that exists. To allocate some TLB entries for our 801 * own use, flip a bit high enough that we won't flip it again 802 * via incrementing. 
803 */ 804 805 xori r8, r14, 32 806 lis r0, MAS0_TLBSEL(1)@h 807 rlwimi r0, r8, 16, MAS0_ESEL_MSK 808 lis r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@h 809 ori r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@l 810 lis r7, CONFIG_SYS_CCSRBAR@h 811 ori r7, r7, CONFIG_SYS_CCSRBAR@l 812 ori r2, r7, MAS2_I|MAS2_G 813 lis r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@h 814 ori r3, r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@l 815 lis r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h 816 ori r4, r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l 817 mtspr MAS0, r0 818 mtspr MAS1, r1 819 mtspr MAS2, r2 820 mtspr MAS3, r3 821 mtspr MAS7, r4 822 isync 823 tlbwe 824 isync 825 msync 826 827 /* Map DCSR temporarily to physical address zero */ 828 li r0, 0 829 lis r3, DCSRBAR_LAWAR@h 830 ori r3, r3, DCSRBAR_LAWAR@l 831 832 stw r0, 0xc00(r7) /* LAWBARH0 */ 833 stw r0, 0xc04(r7) /* LAWBARL0 */ 834 sync 835 stw r3, 0xc08(r7) /* LAWAR0 */ 836 837 /* Read back from LAWAR to ensure the update is complete. 
*/ 838 lwz r3, 0xc08(r7) /* LAWAR0 */ 839 isync 840 841 /* Create a TLB entry for DCSR at zero */ 842 843 addi r9, r8, 1 844 lis r0, MAS0_TLBSEL(1)@h 845 rlwimi r0, r9, 16, MAS0_ESEL_MSK 846 lis r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@h 847 ori r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@l 848 li r6, 0 /* DCSR effective address */ 849 ori r2, r6, MAS2_I|MAS2_G 850 li r3, MAS3_SW|MAS3_SR 851 li r4, 0 852 mtspr MAS0, r0 853 mtspr MAS1, r1 854 mtspr MAS2, r2 855 mtspr MAS3, r3 856 mtspr MAS7, r4 857 isync 858 tlbwe 859 isync 860 msync 861 862 /* enable the timebase */ 863#define CTBENR 0xe2084 864 li r3, 1 865 addis r4, r7, CTBENR@ha 866 stw r3, CTBENR@l(r4) 867 lwz r3, CTBENR@l(r4) 868 twi 0,r3,0 869 isync 870 871 .macro erratum_set_ccsr offset value 872 addis r3, r7, \offset@ha 873 lis r4, \value@h 874 addi r3, r3, \offset@l 875 ori r4, r4, \value@l 876 bl erratum_set_value 877 .endm 878 879 .macro erratum_set_dcsr offset value 880 addis r3, r6, \offset@ha 881 lis r4, \value@h 882 addi r3, r3, \offset@l 883 ori r4, r4, \value@l 884 bl erratum_set_value 885 .endm 886 887 erratum_set_dcsr 0xb0e08 0xe0201800 888 erratum_set_dcsr 0xb0e18 0xe0201800 889 erratum_set_dcsr 0xb0e38 0xe0400000 890 erratum_set_dcsr 0xb0008 0x00900000 891 erratum_set_dcsr 0xb0e40 0xe00a0000 892 erratum_set_ccsr 0x18600 CONFIG_SYS_FSL_CORENET_SNOOPVEC_COREONLY 893 erratum_set_ccsr 0x10f00 0x415e5000 894 erratum_set_ccsr 0x11f00 0x415e5000 895 896 /* Make temp mapping uncacheable again, if it was initially */ 897 bl 2f 8982: mflr r3 899 tlbsx 0, r3 900 mfspr r4, MAS2 901 rlwimi r4, r15, 0, MAS2_I 902 rlwimi r4, r15, 0, MAS2_G 903 mtspr MAS2, r4 904 isync 905 tlbwe 906 isync 907 msync 908 909 /* Clear the cache */ 910 lis r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@h 911 ori r3,r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@l 912 sync 913 isync 914 mtspr SPRN_L1CSR1,r3 915 isync 9162: sync 917 mfspr r4,SPRN_L1CSR1 918 and. 
r4,r4,r3 919 bne 2b 920 921 lis r3,(L1CSR1_CPE|L1CSR1_ICE)@h 922 ori r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l 923 sync 924 isync 925 mtspr SPRN_L1CSR1,r3 926 isync 9272: sync 928 mfspr r4,SPRN_L1CSR1 929 and. r4,r4,r3 930 beq 2b 931 932 /* Remove temporary mappings */ 933 lis r0, MAS0_TLBSEL(1)@h 934 rlwimi r0, r9, 16, MAS0_ESEL_MSK 935 li r3, 0 936 mtspr MAS0, r0 937 mtspr MAS1, r3 938 isync 939 tlbwe 940 isync 941 msync 942 943 li r3, 0 944 stw r3, 0xc08(r7) /* LAWAR0 */ 945 lwz r3, 0xc08(r7) 946 isync 947 948 lis r0, MAS0_TLBSEL(1)@h 949 rlwimi r0, r8, 16, MAS0_ESEL_MSK 950 li r3, 0 951 mtspr MAS0, r0 952 mtspr MAS1, r3 953 isync 954 tlbwe 955 isync 956 msync 957 958 b 9f 959 960 /* r3 = addr, r4 = value, clobbers r5, r11, r12 */ 961erratum_set_value: 962 /* Lock two cache lines into I-Cache */ 963 sync 964 mfspr r11, SPRN_L1CSR1 965 rlwinm r11, r11, 0, ~L1CSR1_ICUL 966 sync 967 isync 968 mtspr SPRN_L1CSR1, r11 969 isync 970 971 mflr r12 972 bl 5f 9735: mflr r5 974 addi r5, r5, 2f - 5b 975 icbtls 0, 0, r5 976 addi r5, r5, 64 977 978 sync 979 mfspr r11, SPRN_L1CSR1 9803: andi. r11, r11, L1CSR1_ICUL 981 bne 3b 982 983 icbtls 0, 0, r5 984 addi r5, r5, 64 985 986 sync 987 mfspr r11, SPRN_L1CSR1 9883: andi. r11, r11, L1CSR1_ICUL 989 bne 3b 990 991 b 2f 992 .align 6 993 /* Inside a locked cacheline, wait a while, write, then wait a while */ 9942: sync 995 996 mfspr r5, SPRN_TBRL 997 addis r11, r5, 0x10000@h /* wait 65536 timebase ticks */ 9984: mfspr r5, SPRN_TBRL 999 subf. r5, r5, r11 1000 bgt 4b 1001 1002 stw r4, 0(r3) 1003 1004 mfspr r5, SPRN_TBRL 1005 addis r11, r5, 0x10000@h /* wait 65536 timebase ticks */ 10064: mfspr r5, SPRN_TBRL 1007 subf. r5, r5, r11 1008 bgt 4b 1009 1010 sync 1011 1012 /* 1013 * Fill out the rest of this cache line and the next with nops, 1014 * to ensure that nothing outside the locked area will be 1015 * fetched due to a branch. 
1016 */ 1017 .rept 19 1018 nop 1019 .endr 1020 1021 sync 1022 mfspr r11, SPRN_L1CSR1 1023 rlwinm r11, r11, 0, ~L1CSR1_ICUL 1024 sync 1025 isync 1026 mtspr SPRN_L1CSR1, r11 1027 isync 1028 1029 mtlr r12 1030 blr 1031 10329: 1033#endif 1034 1035create_init_ram_area: 1036 lis r6,FSL_BOOKE_MAS0(1, 15, 0)@h 1037 ori r6,r6,FSL_BOOKE_MAS0(1, 15, 0)@l 1038 1039#if !defined(CONFIG_SYS_RAMBOOT) && !defined(CONFIG_SECURE_BOOT) 1040 /* create a temp mapping in AS=1 to the 4M boot window */ 1041 create_tlb1_entry 15, \ 1042 1, BOOKE_PAGESZ_4M, \ 1043 CONFIG_SYS_MONITOR_BASE & 0xffc00000, MAS2_I|MAS2_G, \ 1044 0xffc00000, MAS3_SX|MAS3_SW|MAS3_SR, \ 1045 0, r6 1046 1047#elif !defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_SECURE_BOOT) 1048 /* create a temp mapping in AS = 1 for Flash mapping 1049 * created by PBL for ISBC code 1050 */ 1051 create_tlb1_entry 15, \ 1052 1, BOOKE_PAGESZ_1M, \ 1053 CONFIG_SYS_MONITOR_BASE, MAS2_I|MAS2_G, \ 1054 CONFIG_SYS_PBI_FLASH_WINDOW, MAS3_SX|MAS3_SW|MAS3_SR, \ 1055 0, r6 1056#else 1057 /* 1058 * create a temp mapping in AS=1 to the 1M CONFIG_SYS_MONITOR_BASE space, the main 1059 * image has been relocated to CONFIG_SYS_MONITOR_BASE on the second stage. 
1060 */ 1061 create_tlb1_entry 15, \ 1062 1, BOOKE_PAGESZ_1M, \ 1063 CONFIG_SYS_MONITOR_BASE, MAS2_I|MAS2_G, \ 1064 CONFIG_SYS_MONITOR_BASE, MAS3_SX|MAS3_SW|MAS3_SR, \ 1065 0, r6 1066#endif 1067 1068 /* create a temp mapping in AS=1 to the stack */ 1069#if defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW) && \ 1070 defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH) 1071 create_tlb1_entry 14, \ 1072 1, BOOKE_PAGESZ_16K, \ 1073 CONFIG_SYS_INIT_RAM_ADDR, 0, \ 1074 CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW, MAS3_SX|MAS3_SW|MAS3_SR, \ 1075 CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH, r6 1076 1077#else 1078 create_tlb1_entry 14, \ 1079 1, BOOKE_PAGESZ_16K, \ 1080 CONFIG_SYS_INIT_RAM_ADDR, 0, \ 1081 CONFIG_SYS_INIT_RAM_ADDR, MAS3_SX|MAS3_SW|MAS3_SR, \ 1082 0, r6 1083#endif 1084 1085 lis r6,MSR_IS|MSR_DS|MSR_DE@h 1086 ori r6,r6,MSR_IS|MSR_DS|MSR_DE@l 1087 lis r7,switch_as@h 1088 ori r7,r7,switch_as@l 1089 1090 mtspr SPRN_SRR0,r7 1091 mtspr SPRN_SRR1,r6 1092 rfi 1093 1094switch_as: 1095/* L1 DCache is used for initial RAM */ 1096 1097 /* Allocate Initial RAM in data cache. 1098 */ 1099 lis r3,CONFIG_SYS_INIT_RAM_ADDR@h 1100 ori r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l 1101 mfspr r2, L1CFG0 1102 andi. 
r2, r2, 0x1ff 1103 /* cache size * 1024 / (2 * L1 line size) */ 1104 slwi r2, r2, (10 - 1 - L1_CACHE_SHIFT) 1105 mtctr r2 1106 li r0,0 11071: 1108 dcbz r0,r3 1109 dcbtls 0,r0,r3 1110 addi r3,r3,CONFIG_SYS_CACHELINE_SIZE 1111 bdnz 1b 1112 1113 /* Jump out the last 4K page and continue to 'normal' start */ 1114#ifdef CONFIG_SYS_RAMBOOT 1115 b _start_cont 1116#else 1117 /* Calculate absolute address in FLASH and jump there */ 1118 /*--------------------------------------------------------------*/ 1119 lis r3,CONFIG_SYS_MONITOR_BASE@h 1120 ori r3,r3,CONFIG_SYS_MONITOR_BASE@l 1121 addi r3,r3,_start_cont - _start + _START_OFFSET 1122 mtlr r3 1123 blr 1124#endif 1125 1126 .text 1127 .globl _start 1128_start: 1129 .long 0x27051956 /* U-BOOT Magic Number */ 1130 .globl version_string 1131version_string: 1132 .ascii U_BOOT_VERSION_STRING, "\0" 1133 1134 .align 4 1135 .globl _start_cont 1136_start_cont: 1137 /* Setup the stack in initial RAM,could be L2-as-SRAM or L1 dcache*/ 1138 lis r3,(CONFIG_SYS_INIT_RAM_ADDR)@h 1139 ori r3,r3,((CONFIG_SYS_INIT_SP_OFFSET-16)&~0xf)@l /* Align to 16 */ 1140 li r0,0 1141 stw r0,0(r3) /* Terminate Back Chain */ 1142 stw r0,+4(r3) /* NULL return address. */ 1143 mr r1,r3 /* Transfer to SP(r1) */ 1144 1145 GET_GOT 1146 bl cpu_init_early_f 1147 1148 /* switch back to AS = 0 */ 1149 lis r3,(MSR_CE|MSR_ME|MSR_DE)@h 1150 ori r3,r3,(MSR_CE|MSR_ME|MSR_DE)@l 1151 mtmsr r3 1152 isync 1153 1154 bl cpu_init_f 1155 bl board_init_f 1156 isync 1157 1158 /* NOTREACHED - board_init_f() does not return */ 1159 1160#ifndef CONFIG_NAND_SPL 1161 . = EXC_OFF_SYS_RESET 1162 .globl _start_of_vectors 1163_start_of_vectors: 1164 1165/* Critical input. */ 1166 CRIT_EXCEPTION(0x0100, CriticalInput, CritcalInputException) 1167 1168/* Machine check */ 1169 MCK_EXCEPTION(0x200, MachineCheck, MachineCheckException) 1170 1171/* Data Storage exception. */ 1172 STD_EXCEPTION(0x0300, DataStorage, UnknownException) 1173 1174/* Instruction Storage exception. 
 */
	STD_EXCEPTION(0x0400, InstStorage, UnknownException)

/* External Interrupt exception. */
	STD_EXCEPTION(0x0500, ExtInterrupt, ExtIntException)

/* Alignment exception. */
	/* Saves DAR/DSISR into the trap frame before dispatching. */
	. = 0x0600
Alignment:
	EXCEPTION_PROLOG(SRR0, SRR1)
	mfspr	r4,DAR
	stw	r4,_DAR(r21)
	mfspr	r5,DSISR
	stw	r5,_DSISR(r21)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_TEMPLATE(Alignment, AlignmentException, MSR_KERNEL, COPY_EE)

/* Program check exception */
	. = 0x0700
ProgramCheck:
	EXCEPTION_PROLOG(SRR0, SRR1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_TEMPLATE(ProgramCheck, ProgramCheckException,
		MSR_KERNEL, COPY_EE)

	/* No FPU on MPC85xx. This exception is not supposed to happen. */
	STD_EXCEPTION(0x0800, FPUnavailable, UnknownException)

	. = 0x0900
/*
 * System call handler.
 * r0 - SYSCALL number
 * r3-... arguments
 * The addis/ori immediate pairs below are 0 placeholders that
 * trap_init patches at runtime with the function table address/size.
 */
SystemCall:
	addis	r11,r0,0	/* get functions table addr */
	ori	r11,r11,0	/* Note: this code is patched in trap_init */
	addis	r12,r0,0	/* get number of functions */
	ori	r12,r12,0

	cmplw	0,r0,r12	/* syscall number out of range? -> just rfi */
	bge	1f

	rlwinm	r0,r0,2,0,31	/* fn_addr = fn_tbl[r0] */
	add	r11,r11,r0
	lwz	r11,0(r11)

	/*
	 * Private downward-growing syscall stack; its pointer lives at
	 * 0xd00-4. Each frame is 12 bytes: saved LR, SRR0, SRR1.
	 */
	li	r20,0xd00-4	/* Get stack pointer */
	lwz	r12,0(r20)
	subi	r12,r12,12	/* Adjust stack pointer */
	li	r0,0xc00+_end_back-SystemCall
	cmplw	0,r0,r12	/* Check stack overflow */
	bgt	1f
	stw	r12,0(r20)

	mflr	r0
	stw	r0,0(r12)
	mfspr	r0,SRR0
	stw	r0,4(r12)
	mfspr	r0,SRR1
	stw	r0,8(r12)

	/* Return into _back (at its copied-to-low-memory address) */
	li	r12,0xc00+_back-SystemCall
	mtlr	r12
	mtspr	SRR0,r11

1:	SYNC
	rfi			/* jump to the syscall function */
_back:
	/*
	 * Post-syscall path: mask MSR[EE], then pop LR/SRR0/SRR1 from
	 * the private stack and rfi back to the original caller.
	 */
	mfmsr	r11			/* Disable interrupts */
	li	r12,0
	ori	r12,r12,MSR_EE
	andc	r11,r11,r12
	SYNC				/* Some chip revs need this...
 */
	mtmsr	r11
	SYNC

	li	r12,0xd00-4		/* restore regs */
	lwz	r12,0(r12)		/* r12 = private syscall stack ptr */

	lwz	r11,0(r12)		/* saved LR */
	mtlr	r11
	lwz	r11,4(r12)		/* saved SRR0 */
	mtspr	SRR0,r11
	lwz	r11,8(r12)		/* saved SRR1 */
	mtspr	SRR1,r11

	addi	r12,r12,12		/* Adjust stack pointer (pop frame) */
	li	r20,0xd00-4
	stw	r12,0(r20)

	SYNC
	rfi
_end_back:

	STD_EXCEPTION(0x0a00, Decrementer, timer_interrupt)
	STD_EXCEPTION(0x0b00, IntervalTimer, UnknownException)
	STD_EXCEPTION(0x0c00, WatchdogTimer, UnknownException)

	STD_EXCEPTION(0x0d00, DataTLBError, UnknownException)
	STD_EXCEPTION(0x0e00, InstructionTLBError, UnknownException)

	CRIT_EXCEPTION(0x0f00, DebugBreakpoint, DebugException )

	.globl	_end_of_vectors
_end_of_vectors:


	. = . + (0x100 - ( . & 0xff ))	/* align for debug */

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception.
 * Register r21 is pointer into trap frame, r1 has new stack pointer.
 */
	.globl	transfer_to_handler
transfer_to_handler:
	stw	r22,_NIP(r21)		/* save interrupted PC */
	lis	r22,MSR_POW@h
	andc	r23,r23,r22		/* clear POW from saved MSR */
	stw	r23,_MSR(r21)
	SAVE_GPR(7, r21)
	SAVE_4GPRS(8, r21)
	SAVE_8GPRS(12, r21)
	SAVE_8GPRS(24, r21)

	mflr	r23
	andi.	r24,r23,0x3f00	/* get vector offset */
	stw	r24,TRAP(r21)
	li	r22,0
	stw	r22,RESULT(r21)
	mtspr	SPRG2,r22	/* r1 is now kernel sp */

	/* LR points at a two-word table: handler address, return address */
	lwz	r24,0(r23)	/* virtual address of handler */
	lwz	r23,4(r23)	/* where to go when done */
	mtspr	SRR0,r24
	mtspr	SRR1,r20	/* r20: MSR for the handler - presumably set
				 * by EXCEPTION_PROLOG/EXC_XFER; confirm */
	mtlr	r23
	SYNC
	rfi	/* jump to handler, enable MMU */

int_return:
	/* Common return path for standard exceptions (rfi). */
	mfmsr	r28		/* Disable interrupts */
	li	r4,0
	ori	r4,r4,MSR_EE
	andc	r28,r28,r4
	SYNC			/* Some chip revs need this...
 */
	mtmsr	r28
	SYNC
	lwz	r2,_CTR(r1)
	lwz	r0,_LINK(r1)
	mtctr	r2
	mtlr	r0
	lwz	r2,_XER(r1)
	lwz	r0,_CCR(r1)
	mtspr	XER,r2
	mtcrf	0xFF,r0
	REST_10GPRS(3, r1)
	REST_10GPRS(13, r1)
	REST_8GPRS(23, r1)
	REST_GPR(31, r1)
	lwz	r2,_NIP(r1)	/* Restore environment */
	lwz	r0,_MSR(r1)
	mtspr	SRR0,r2
	mtspr	SRR1,r0
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)	/* restore SP last - frame no longer needed */
	SYNC
	rfi

/*
 * Return path for critical exceptions: identical frame restore, but
 * resumes via CSRR0/CSRR1 and rfci instead of SRR0/SRR1 and rfi.
 */
crit_return:
	mfmsr	r28		/* Disable interrupts */
	li	r4,0
	ori	r4,r4,MSR_EE
	andc	r28,r28,r4
	SYNC			/* Some chip revs need this... */
	mtmsr	r28
	SYNC
	lwz	r2,_CTR(r1)
	lwz	r0,_LINK(r1)
	mtctr	r2
	mtlr	r0
	lwz	r2,_XER(r1)
	lwz	r0,_CCR(r1)
	mtspr	XER,r2
	mtcrf	0xFF,r0
	REST_10GPRS(3, r1)
	REST_10GPRS(13, r1)
	REST_8GPRS(23, r1)
	REST_GPR(31, r1)
	lwz	r2,_NIP(r1)	/* Restore environment */
	lwz	r0,_MSR(r1)
	mtspr	SPRN_CSRR0,r2
	mtspr	SPRN_CSRR1,r0
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	SYNC
	rfci

/*
 * Return path for machine-check exceptions: resumes via
 * MCSRR0/MCSRR1 and rfmci.
 */
mck_return:
	mfmsr	r28		/* Disable interrupts */
	li	r4,0
	ori	r4,r4,MSR_EE
	andc	r28,r28,r4
	SYNC			/* Some chip revs need this... */
	mtmsr	r28
	SYNC
	lwz	r2,_CTR(r1)
	lwz	r0,_LINK(r1)
	mtctr	r2
	mtlr	r0
	lwz	r2,_XER(r1)
	lwz	r0,_CCR(r1)
	mtspr	XER,r2
	mtcrf	0xFF,r0
	REST_10GPRS(3, r1)
	REST_10GPRS(13, r1)
	REST_8GPRS(23, r1)
	REST_GPR(31, r1)
	lwz	r2,_NIP(r1)	/* Restore environment */
	lwz	r0,_MSR(r1)
	mtspr	SPRN_MCSRR0,r2
	mtspr	SPRN_MCSRR1,r0
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	SYNC
	rfmci

/* Cache functions.
*/
/* Invalidate the entire L1 I-cache via L1CSR1[ICFI]. flush == invalidate
 * here (I-cache is never dirty). Clobbers r0. */
.globl flush_icache
flush_icache:
.globl invalidate_icache
invalidate_icache:
	mfspr	r0,L1CSR1
	ori	r0,r0,L1CSR1_ICFI
	msync
	isync
	mtspr	L1CSR1,r0
	isync
	blr				/* entire I cache */

/* Invalidate the entire L1 D-cache via L1CSR0[DCFI]. Clobbers r0. */
.globl invalidate_dcache
invalidate_dcache:
	mfspr	r0,L1CSR0
	ori	r0,r0,L1CSR0_DCFI
	msync
	isync
	mtspr	L1CSR0,r0
	isync
	blr

/* Invalidate, then enable the L1 I-cache. Clobbers r0, r4, r8. */
	.globl	icache_enable
icache_enable:
	mflr	r8
	bl	invalidate_icache
	mtlr	r8
	isync
	mfspr	r4,L1CSR1
	ori	r4,r4,0x0001	/* enable bit in the low halfword (ICE) */
	oris	r4,r4,0x0001	/* plus bit 0x00010000 - TODO confirm meaning */
	mtspr	L1CSR1,r4
	isync
	blr

/* Clear L1CSR1[ICE] to disable the I-cache. Clobbers r0, r3. */
	.globl	icache_disable
icache_disable:
	mfspr	r0,L1CSR1
	lis	r3,0
	ori	r3,r3,L1CSR1_ICE
	andc	r0,r0,r3
	mtspr	L1CSR1,r0
	isync
	blr

/* Return (in r3) the L1CSR1[ICE] bit: nonzero if I-cache enabled. */
	.globl	icache_status
icache_status:
	mfspr	r3,L1CSR1
	andi.	r3,r3,L1CSR1_ICE
	blr

/* Invalidate, then enable the L1 D-cache. Clobbers r0, r8. */
	.globl	dcache_enable
dcache_enable:
	mflr	r8
	bl	invalidate_dcache
	mtlr	r8
	isync
	mfspr	r0,L1CSR0
	ori	r0,r0,0x0001	/* enable bit in the low halfword (DCE) */
	oris	r0,r0,0x0001	/* plus bit 0x00010000 - TODO confirm meaning */
	msync
	isync
	mtspr	L1CSR0,r0
	isync
	blr

/* Clear L1CSR0[DCE] to disable the D-cache. Clobbers r3, r4. */
	.globl	dcache_disable
dcache_disable:
	mfspr	r3,L1CSR0
	lis	r4,0
	ori	r4,r4,L1CSR0_DCE
	andc	r3,r3,r4
	mtspr	L1CSR0,r3
	isync
	blr

/* Return (in r3) the L1CSR0[DCE] bit: nonzero if D-cache enabled. */
	.globl	dcache_status
dcache_status:
	mfspr	r3,L1CSR0
	andi.	r3,r3,L1CSR0_DCE
	blr

/* Return the Processor ID Register in r3. */
	.globl get_pir
get_pir:
	mfspr	r3,PIR
	blr

/* Return the Processor Version Register in r3. */
	.globl get_pvr
get_pvr:
	mfspr	r3,PVR
	blr

/* Return the System Version Register in r3. */
	.globl get_svr
get_svr:
	mfspr	r3,SVR
	blr

/* Write r3 to the Timer Control Register. */
	.globl wr_tcr
wr_tcr:
	mtspr	TCR,r3
	blr

/*------------------------------------------------------------------------------- */
/* Function: in8 */
/* Description: Input 8 bits */
/* In: r3 = address; Out: r3 = byte read */
/*------------------------------------------------------------------------------- */
	.globl	in8
in8:
	lbz	r3,0x0000(r3)
	blr

/*------------------------------------------------------------------------------- */
/* Function: out8 */
/* Description: Output 8 bits */
/* In: r3 = address, r4 = value; sync orders the store on the bus */
/*------------------------------------------------------------------------------- */
	.globl	out8
out8:
	stb	r4,0x0000(r3)
	sync
	blr

/*------------------------------------------------------------------------------- */
/* Function: out16 */
/* Description: Output 16 bits */
/*------------------------------------------------------------------------------- */
	.globl	out16
out16:
	sth	r4,0x0000(r3)
	sync
	blr

/*------------------------------------------------------------------------------- */
/* Function: out16r */
/* Description: Byte reverse and output 16 bits */
/*------------------------------------------------------------------------------- */
	.globl	out16r
out16r:
	sthbrx	r4,r0,r3
	sync
	blr

/*------------------------------------------------------------------------------- */
/* Function: out32 */
/* Description: Output 32 bits */
/*------------------------------------------------------------------------------- */
	.globl	out32
out32:
	stw	r4,0x0000(r3)
	sync
	blr

/*------------------------------------------------------------------------------- */
/* Function: out32r */
/*
Description: Byte reverse and output 32 bits */ 1562/*------------------------------------------------------------------------------- */ 1563 .globl out32r 1564out32r: 1565 stwbrx r4,r0,r3 1566 sync 1567 blr 1568 1569/*------------------------------------------------------------------------------- */ 1570/* Function: in16 */ 1571/* Description: Input 16 bits */ 1572/*------------------------------------------------------------------------------- */ 1573 .globl in16 1574in16: 1575 lhz r3,0x0000(r3) 1576 blr 1577 1578/*------------------------------------------------------------------------------- */ 1579/* Function: in16r */ 1580/* Description: Input 16 bits and byte reverse */ 1581/*------------------------------------------------------------------------------- */ 1582 .globl in16r 1583in16r: 1584 lhbrx r3,r0,r3 1585 blr 1586 1587/*------------------------------------------------------------------------------- */ 1588/* Function: in32 */ 1589/* Description: Input 32 bits */ 1590/*------------------------------------------------------------------------------- */ 1591 .globl in32 1592in32: 1593 lwz 3,0x0000(3) 1594 blr 1595 1596/*------------------------------------------------------------------------------- */ 1597/* Function: in32r */ 1598/* Description: Input 32 bits and byte reverse */ 1599/*------------------------------------------------------------------------------- */ 1600 .globl in32r 1601in32r: 1602 lwbrx r3,r0,r3 1603 blr 1604#endif /* !CONFIG_NAND_SPL */ 1605 1606/*------------------------------------------------------------------------------*/ 1607 1608/* 1609 * void write_tlb(mas0, mas1, mas2, mas3, mas7) 1610 */ 1611 .globl write_tlb 1612write_tlb: 1613 mtspr MAS0,r3 1614 mtspr MAS1,r4 1615 mtspr MAS2,r5 1616 mtspr MAS3,r6 1617#ifdef CONFIG_ENABLE_36BIT_PHYS 1618 mtspr MAS7,r7 1619#endif 1620 li r3,0 1621#ifdef CONFIG_SYS_BOOK3E_HV 1622 mtspr MAS8,r3 1623#endif 1624 isync 1625 tlbwe 1626 msync 1627 isync 1628 blr 1629 1630/* 1631 * void relocate_code 
(addr_sp, gd, addr_moni)
 *
 * This "function" does not return, instead it continues in RAM
 * after relocating the monitor code.
 *
 * NOTE: on entry r3 = new stack, r4 = gd, r5 = destination; the
 * r3..r6 roles below describe the copy loop after the setup code.
 *
 * r3 = dest
 * r4 = src
 * r5 = length in bytes
 * r6 = cachelinesize
 */
	.globl	relocate_code
relocate_code:
	mr	r1,r3		/* Set new stack pointer */
	mr	r9,r4		/* Save copy of Init Data pointer */
	mr	r10,r5		/* Save copy of Destination Address */

	GET_GOT
	mr	r3,r5				/* Destination Address */
	lis	r4,CONFIG_SYS_MONITOR_BASE@h	/* Source Address */
	ori	r4,r4,CONFIG_SYS_MONITOR_BASE@l
	lwz	r5,GOT(__init_end)
	sub	r5,r5,r4		/* length = __init_end - base */
	li	r6,CONFIG_SYS_CACHELINE_SIZE	/* Cache Line Size */

	/*
	 * Fix GOT pointer:
	 *
	 * New GOT-PTR = (old GOT-PTR - CONFIG_SYS_MONITOR_BASE) + Destination Address
	 *
	 * Offset:
	 */
	sub	r15,r10,r4

	/* First our own GOT */
	add	r12,r12,r15
	/* then the one used by the C code */
	add	r30,r30,r15

	/*
	 * Now relocate code
	 */
	cmplw	cr1,r3,r4	/* cr1: dest vs src decides copy direction */
	addi	r0,r5,3
	srwi.	r0,r0,2		/* word count, rounded up */
	beq	cr1,4f		/* In place copy is not necessary */
	beq	7f		/* Protect against 0 count */
	mtctr	r0
	bge	cr1,2f		/* dest > src: copy backwards to avoid overlap */

	/* Forward copy (dest < src), word at a time */
	la	r8,-4(r4)
	la	r7,-4(r3)
1:	lwzu	r0,4(r8)
	stwu	r0,4(r7)
	bdnz	1b
	b	4f

	/* Backward copy (dest > src), starting past the end */
2:	slwi	r0,r0,2
	add	r8,r4,r0
	add	r7,r3,r0
3:	lwzu	r0,-4(r8)
	stwu	r0,-4(r7)
	bdnz	3b

/*
 * Now flush the cache: note that we must start from a cache aligned
 * address. Otherwise we might miss one cache line.
 */
4:	cmpwi	r6,0		/* cache line size 0? skip flush */
	add	r5,r3,r5	/* r5 = end address */
	beq	7f		/* Always flush prefetch queue in any case */
	subi	r0,r6,1
	andc	r3,r3,r0	/* align start down to a cache line */
	mr	r4,r3
5:	dcbst	0,r4		/* push copied code out of the D-cache... */
	add	r4,r4,r6
	cmplw	r4,r5
	blt	5b
	sync			/* Wait for all dcbst to complete on bus */
	mr	r4,r3
6:	icbi	0,r4		/* ...and invalidate stale I-cache lines */
	add	r4,r4,r6
	cmplw	r4,r5
	blt	6b
7:	sync			/* Wait for all icbi to complete on bus */
	isync

/*
 * We are done. Do not return, instead branch to second part of board
 * initialization, now running from RAM.
 */
	addi	r0,r10,in_ram - _start + _START_OFFSET

	/*
	 * As IVPR is going to point RAM address,
	 * Make sure IVOR15 has valid opcode to support debugger
	 */
	mtspr	IVOR15,r0

	/*
	 * Re-point the IVPR at RAM
	 */
	mtspr	IVPR,r10

	mtlr	r0
	blr				/* NEVER RETURNS! */
	.globl	in_ram
in_ram:

	/*
	 * Relocation Function, r12 point to got2+0x8000
	 *
	 * Adjust got2 pointers, no need to check for 0, this code
	 * already puts a few entries in the table.
	 */
	li	r0,__got2_entries@sectoff@l
	la	r3,GOT(_GOT2_TABLE_)
	lwz	r11,GOT(_GOT2_TABLE_)
	mtctr	r0
	sub	r11,r3,r11		/* r11 = relocation offset */
	addi	r3,r3,-4
1:	lwzu	r0,4(r3)
	cmpwi	r0,0			/* NULL entries stay NULL */
	beq-	2f
	add	r0,r0,r11
	stw	r0,0(r3)
2:	bdnz	1b

	/*
	 * Now adjust the fixups and the pointers to the fixups
	 * in case we need to move ourselves again.
1763 */ 1764 li r0,__fixup_entries@sectoff@l 1765 lwz r3,GOT(_FIXUP_TABLE_) 1766 cmpwi r0,0 1767 mtctr r0 1768 addi r3,r3,-4 1769 beq 4f 17703: lwzu r4,4(r3) 1771 lwzux r0,r4,r11 1772 cmpwi r0,0 1773 add r0,r0,r11 1774 stw r4,0(r3) 1775 beq- 5f 1776 stw r0,0(r4) 17775: bdnz 3b 17784: 1779clear_bss: 1780 /* 1781 * Now clear BSS segment 1782 */ 1783 lwz r3,GOT(__bss_start) 1784 lwz r4,GOT(__bss_end__) 1785 1786 cmplw 0,r3,r4 1787 beq 6f 1788 1789 li r0,0 17905: 1791 stw r0,0(r3) 1792 addi r3,r3,4 1793 cmplw 0,r3,r4 1794 bne 5b 17956: 1796 1797 mr r3,r9 /* Init Data pointer */ 1798 mr r4,r10 /* Destination Address */ 1799 bl board_init_r 1800 1801#ifndef CONFIG_NAND_SPL 1802 /* 1803 * Copy exception vector code to low memory 1804 * 1805 * r3: dest_addr 1806 * r7: source address, r8: end address, r9: target address 1807 */ 1808 .globl trap_init 1809trap_init: 1810 mflr r4 /* save link register */ 1811 GET_GOT 1812 lwz r7,GOT(_start_of_vectors) 1813 lwz r8,GOT(_end_of_vectors) 1814 1815 li r9,0x100 /* reset vector always at 0x100 */ 1816 1817 cmplw 0,r7,r8 1818 bgelr /* return if r7>=r8 - just in case */ 18191: 1820 lwz r0,0(r7) 1821 stw r0,0(r9) 1822 addi r7,r7,4 1823 addi r9,r9,4 1824 cmplw 0,r7,r8 1825 bne 1b 1826 1827 /* 1828 * relocate `hdlr' and `int_return' entries 1829 */ 1830 li r7,.L_CriticalInput - _start + _START_OFFSET 1831 bl trap_reloc 1832 li r7,.L_MachineCheck - _start + _START_OFFSET 1833 bl trap_reloc 1834 li r7,.L_DataStorage - _start + _START_OFFSET 1835 bl trap_reloc 1836 li r7,.L_InstStorage - _start + _START_OFFSET 1837 bl trap_reloc 1838 li r7,.L_ExtInterrupt - _start + _START_OFFSET 1839 bl trap_reloc 1840 li r7,.L_Alignment - _start + _START_OFFSET 1841 bl trap_reloc 1842 li r7,.L_ProgramCheck - _start + _START_OFFSET 1843 bl trap_reloc 1844 li r7,.L_FPUnavailable - _start + _START_OFFSET 1845 bl trap_reloc 1846 li r7,.L_Decrementer - _start + _START_OFFSET 1847 bl trap_reloc 1848 li r7,.L_IntervalTimer - _start + _START_OFFSET 1849 li 
r8,_end_of_vectors - _start + _START_OFFSET 18502: 1851 bl trap_reloc 1852 addi r7,r7,0x100 /* next exception vector */ 1853 cmplw 0,r7,r8 1854 blt 2b 1855 1856 /* Update IVORs as per relocated vector table address */ 1857 li r7,0x0100 1858 mtspr IVOR0,r7 /* 0: Critical input */ 1859 li r7,0x0200 1860 mtspr IVOR1,r7 /* 1: Machine check */ 1861 li r7,0x0300 1862 mtspr IVOR2,r7 /* 2: Data storage */ 1863 li r7,0x0400 1864 mtspr IVOR3,r7 /* 3: Instruction storage */ 1865 li r7,0x0500 1866 mtspr IVOR4,r7 /* 4: External interrupt */ 1867 li r7,0x0600 1868 mtspr IVOR5,r7 /* 5: Alignment */ 1869 li r7,0x0700 1870 mtspr IVOR6,r7 /* 6: Program check */ 1871 li r7,0x0800 1872 mtspr IVOR7,r7 /* 7: floating point unavailable */ 1873 li r7,0x0900 1874 mtspr IVOR8,r7 /* 8: System call */ 1875 /* 9: Auxiliary processor unavailable(unsupported) */ 1876 li r7,0x0a00 1877 mtspr IVOR10,r7 /* 10: Decrementer */ 1878 li r7,0x0b00 1879 mtspr IVOR11,r7 /* 11: Interval timer */ 1880 li r7,0x0c00 1881 mtspr IVOR12,r7 /* 12: Watchdog timer */ 1882 li r7,0x0d00 1883 mtspr IVOR13,r7 /* 13: Data TLB error */ 1884 li r7,0x0e00 1885 mtspr IVOR14,r7 /* 14: Instruction TLB error */ 1886 li r7,0x0f00 1887 mtspr IVOR15,r7 /* 15: Debug */ 1888 1889 lis r7,0x0 1890 mtspr IVPR,r7 1891 1892 mtlr r4 /* restore link register */ 1893 blr 1894 1895.globl unlock_ram_in_cache 1896unlock_ram_in_cache: 1897 /* invalidate the INIT_RAM section */ 1898 lis r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@h 1899 ori r3,r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@l 1900 mfspr r4,L1CFG0 1901 andi. 
r4,r4,0x1ff 1902 slwi r4,r4,(10 - 1 - L1_CACHE_SHIFT) 1903 mtctr r4 19041: dcbi r0,r3 1905 addi r3,r3,CONFIG_SYS_CACHELINE_SIZE 1906 bdnz 1b 1907 sync 1908 1909 /* Invalidate the TLB entries for the cache */ 1910 lis r3,CONFIG_SYS_INIT_RAM_ADDR@h 1911 ori r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l 1912 tlbivax 0,r3 1913 addi r3,r3,0x1000 1914 tlbivax 0,r3 1915 addi r3,r3,0x1000 1916 tlbivax 0,r3 1917 addi r3,r3,0x1000 1918 tlbivax 0,r3 1919 isync 1920 blr 1921 1922.globl flush_dcache 1923flush_dcache: 1924 mfspr r3,SPRN_L1CFG0 1925 1926 rlwinm r5,r3,9,3 /* Extract cache block size */ 1927 twlgti r5,1 /* Only 32 and 64 byte cache blocks 1928 * are currently defined. 1929 */ 1930 li r4,32 1931 subfic r6,r5,2 /* r6 = log2(1KiB / cache block size) - 1932 * log2(number of ways) 1933 */ 1934 slw r5,r4,r5 /* r5 = cache block size */ 1935 1936 rlwinm r7,r3,0,0xff /* Extract number of KiB in the cache */ 1937 mulli r7,r7,13 /* An 8-way cache will require 13 1938 * loads per set. 1939 */ 1940 slw r7,r7,r6 1941 1942 /* save off HID0 and set DCFA */ 1943 mfspr r8,SPRN_HID0 1944 ori r9,r8,HID0_DCFA@l 1945 mtspr SPRN_HID0,r9 1946 isync 1947 1948 lis r4,0 1949 mtctr r7 1950 19511: lwz r3,0(r4) /* Load... */ 1952 add r4,r4,r5 1953 bdnz 1b 1954 1955 msync 1956 lis r4,0 1957 mtctr r7 1958 19591: dcbf 0,r4 /* ...and flush. */ 1960 add r4,r4,r5 1961 bdnz 1b 1962 1963 /* restore HID0 */ 1964 mtspr SPRN_HID0,r8 1965 isync 1966 1967 blr 1968 1969.globl setup_ivors 1970setup_ivors: 1971 1972#include "fixed_ivor.S" 1973 blr 1974#endif /* !CONFIG_NAND_SPL */ 1975