/*
 * Copyright 2004, 2007-2012 Freescale Semiconductor, Inc.
 * Copyright (C) 2003 Motorola,Inc.
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

/* U-Boot Startup Code for Motorola 85xx PowerPC based Embedded Boards
 *
 * The processor starts at 0xfffffffc and the code is first executed in the
 * last 4K page(0xfffff000-0xffffffff) in flash/rom.
 *
 */

#include <asm-offsets.h>
#include <config.h>
#include <mpc85xx.h>
#include <version.h>

#define _LINUX_CONFIG_H 1	/* avoid reading Linux autoconf.h file	*/

#include <ppc_asm.tmpl>
#include <ppc_defs.h>

#include <asm/cache.h>
#include <asm/mmu.h>

/* During startup run with machine checks enabled only; all other MSR
 * facilities (EE, caches via MSR, etc.) stay off until explicitly set up. */
#undef	MSR_KERNEL
#define MSR_KERNEL ( MSR_ME )	/* Machine Check */

/* MINIMAL_SPL: a NAND SPL or a minimal-init SPL build; such builds omit the
 * exception vectors and the extra GOT entries below. */
#if defined(CONFIG_NAND_SPL) || \
	(defined(CONFIG_SPL_BUILD) && defined(CONFIG_SPL_INIT_MINIMAL))
#define MINIMAL_SPL
#endif

/* NOR_BOOT: executing directly out of NOR flash (no SPL, no RAM boot,
 * no secure boot). Selects the flash-window TLB mappings further below. */
#if !defined(CONFIG_SPL) && !defined(CONFIG_SYS_RAMBOOT) && !defined(CONFIG_SECURE_BOOT)
#define NOR_BOOT
#endif

/*
 * Set up GOT: Global Offset Table
 *
 * Use r12 to access the GOT
 */
	START_GOT
	GOT_ENTRY(_GOT2_TABLE_)
	GOT_ENTRY(_FIXUP_TABLE_)

#ifndef MINIMAL_SPL
	GOT_ENTRY(_start)
	GOT_ENTRY(_start_of_vectors)
	GOT_ENTRY(_end_of_vectors)
	GOT_ENTRY(transfer_to_handler)
#endif

	GOT_ENTRY(__init_end)
	GOT_ENTRY(__bss_end)
	GOT_ENTRY(__bss_start)
	END_GOT

/*
 * e500 Startup -- after reset only the last 4KB of the effective
 * address space is mapped in the MMU L2 TLB1 Entry0. The .bootpg
 * section is located at THIS LAST page and basically does three
 * things: clear some registers, set up exception tables and
 * add more TLB entries for 'larger spaces'(e.g. the boot rom) to
 * continue the boot procedure.

 * Once the boot rom is mapped by TLB entries we can proceed
 * with normal startup.
 *
 */

	.section .bootpg,"ax"
	.globl _start_e500

_start_e500:
/* Enable debug exception */
	li	r1,MSR_DE
	mtmsr	r1

#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
	/* Detect whether this silicon revision is affected by erratum
	 * A-004510 by comparing the low byte of SVR against the affected
	 * revision(s); remember the result in r27 for the later workaround
	 * code. */
	mfspr	r3,SPRN_SVR
	rlwinm	r3,r3,0,0xff
	li	r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV
	cmpw	r3,r4
	beq	1f

#ifdef CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
	li	r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
	cmpw	r3,r4
	beq	1f
#endif

	/* Not a supported revision affected by erratum */
	li	r27,0
	b	2f

1:	li	r27,1	/* Remember for later that we have the erratum */
	/* Erratum says set bits 55:60 to 001001 */
	msync
	isync
	mfspr	r3,976
	li	r4,0x48
	rlwimi	r3,r4,0,0x1f8
	mtspr	976,r3
	isync
2:
#endif

#if defined(CONFIG_SECURE_BOOT) && defined(CONFIG_E500MC)
	/* ISBC uses L2 as stack.
	 * Disable L2 cache here so that u-boot can enable it later
	 * as part of its normal flow
	 */

	/* Check if L2 is enabled */
	mfspr	r3, SPRN_L2CSR0
	lis	r2, L2CSR0_L2E@h
	ori	r2, r2, L2CSR0_L2E@l
	and.	r4, r3, r2
	beq	l2_disabled

	mfspr	r3, SPRN_L2CSR0
	/* Flush L2 cache */
	lis	r2,(L2CSR0_L2FL)@h
	ori	r2, r2, (L2CSR0_L2FL)@l
	or	r3, r2, r3
	sync
	isync
	mtspr	SPRN_L2CSR0,r3
	isync
1:	/* Spin until the flush bit self-clears (flush complete) */
	mfspr	r3, SPRN_L2CSR0
	and.	r1, r3, r2
	bne	1b

	/* Flush done: now clear the enable bit to disable L2 */
	mfspr	r3, SPRN_L2CSR0
	lis	r2, L2CSR0_L2E@h
	ori	r2, r2, L2CSR0_L2E@l
	andc	r4, r3, r2
	sync
	isync
	mtspr	SPRN_L2CSR0,r4
	isync

l2_disabled:
#endif

/* clear registers/arrays not reset by hardware */

	/* L1 */
	li	r0,2
	mtspr	L1CSR0,r0	/* invalidate d-cache */
	mtspr	L1CSR1,r0	/* invalidate i-cache */

	mfspr	r1,DBSR
	mtspr	DBSR,r1		/* Clear all valid bits */

	/*
	 *	Enable L1 Caches early
	 *
	 */

#ifdef CONFIG_SYS_CACHE_STASHING
	/* set stash id to (coreID) * 2 + 32 + L1 CT (0) */
	li	r2,(32 + 0)
	mtspr	L1CSR2,r2
#endif

	/* Enable/invalidate the I-Cache */
	lis	r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
	ori	r2,r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
	mtspr	SPRN_L1CSR1,r2
1:	/* Wait for the invalidate/lock-flash bits to self-clear */
	mfspr	r3,SPRN_L1CSR1
	and.	r1,r3,r2
	bne	1b

	lis	r3,(L1CSR1_CPE|L1CSR1_ICE)@h
	ori	r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
	mtspr	SPRN_L1CSR1,r3
	isync
2:	/* Wait until the I-cache reports enabled */
	mfspr	r3,SPRN_L1CSR1
	andi.	r1,r3,L1CSR1_ICE@l
	beq	2b

	/* Enable/invalidate the D-Cache */
	lis	r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@h
	ori	r2,r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@l
	mtspr	SPRN_L1CSR0,r2
1:	/* Wait for the invalidate/lock-flash bits to self-clear */
	mfspr	r3,SPRN_L1CSR0
	and.	r1,r3,r2
	bne	1b

	lis	r3,(L1CSR0_CPE|L1CSR0_DCE)@h
	ori	r3,r3,(L1CSR0_CPE|L1CSR0_DCE)@l
	mtspr	SPRN_L1CSR0,r3
	isync
2:	/* Wait until the D-cache reports enabled */
	mfspr	r3,SPRN_L1CSR0
	andi.	r1,r3,L1CSR0_DCE@l
	beq	2b

	/*
	 * Write one TLB1 (variable-size) entry: program MAS0-MAS3 and MAS7
	 * from the macro arguments, then tlbwe.  \scratch is clobbered.
	 * MAS1 is built with V=1, IPROT=1 (protected from invalidate-all).
	 */
	.macro	create_tlb1_entry esel ts tsize epn wimg rpn perm phy_high scratch
	lis	\scratch, FSL_BOOKE_MAS0(1, \esel, 0)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@l
	mtspr	MAS0, \scratch
	lis	\scratch, FSL_BOOKE_MAS1(1, 1, 0, \ts, \tsize)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS1(1, 1, 0, \ts, \tsize)@l
	mtspr	MAS1, \scratch
	lis	\scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
	mtspr	MAS2, \scratch
	lis	\scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@l
	mtspr	MAS3, \scratch
	lis	\scratch, \phy_high@h
	ori	\scratch, \scratch, \phy_high@l
	mtspr	MAS7, \scratch
	isync
	msync
	tlbwe
	isync
	.endm

	/*
	 * Write one TLB0 (fixed 4K set-associative) entry.  Same shape as
	 * create_tlb1_entry but MAS1 is built with IPROT=0 (TLB0 entries
	 * cannot be protected).  \scratch is clobbered.
	 */
	.macro	create_tlb0_entry esel ts tsize epn wimg rpn perm phy_high scratch
	lis	\scratch, FSL_BOOKE_MAS0(0, \esel, 0)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@l
	mtspr	MAS0, \scratch
	lis	\scratch, FSL_BOOKE_MAS1(1, 0, 0, \ts, \tsize)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS1(1, 0, 0, \ts, \tsize)@l
	mtspr	MAS1, \scratch
	lis	\scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
	mtspr	MAS2, \scratch
	lis	\scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@l
	mtspr	MAS3, \scratch
	lis	\scratch, \phy_high@h
	ori	\scratch, \scratch, \phy_high@l
	mtspr	MAS7, \scratch
	isync
	msync
	tlbwe
	isync
	.endm

	/* Invalidate one TLB1 entry by writing MAS1 = 0 (V bit clear). */
	.macro	delete_tlb1_entry esel scratch
	lis	\scratch, FSL_BOOKE_MAS0(1, \esel, 0)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@l
	mtspr	MAS0, \scratch
	li	\scratch, 0
	mtspr	MAS1, \scratch
	isync
	msync
	tlbwe
	isync
	.endm

	/* Invalidate one TLB0 entry; the EPN in MAS2 selects the set. */
	.macro	delete_tlb0_entry esel epn wimg scratch
	lis	\scratch, FSL_BOOKE_MAS0(0, \esel, 0)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@l
	mtspr	MAS0, \scratch
	li	\scratch, 0
	mtspr	MAS1, \scratch
	lis	\scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
	ori	\scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
	mtspr	MAS2, \scratch
	isync
	msync
	tlbwe
	isync
	.endm

/* Interrupt vectors do not fit in minimal SPL. */
#if !defined(MINIMAL_SPL)
	/* Setup interrupt vectors */
	lis	r1,CONFIG_SYS_MONITOR_BASE@h
	mtspr	IVPR,r1

	lis	r3,(CONFIG_SYS_MONITOR_BASE & 0xffff)@h
	ori	r3,r3,(CONFIG_SYS_MONITOR_BASE & 0xffff)@l

	addi	r4,r3,CriticalInput - _start + _START_OFFSET
	mtspr	IVOR0,r4	/* 0: Critical input */
	addi	r4,r3,MachineCheck - _start + _START_OFFSET
	mtspr	IVOR1,r4	/* 1: Machine check */
	addi	r4,r3,DataStorage - _start + _START_OFFSET
	mtspr	IVOR2,r4	/* 2: Data storage */
	addi	r4,r3,InstStorage - _start + _START_OFFSET
	mtspr	IVOR3,r4	/* 3: Instruction storage */
	addi	r4,r3,ExtInterrupt - _start + _START_OFFSET
	mtspr	IVOR4,r4	/* 4: External interrupt */
	addi	r4,r3,Alignment - _start + _START_OFFSET
	mtspr	IVOR5,r4	/* 5: Alignment */
	addi	r4,r3,ProgramCheck - _start + _START_OFFSET
	mtspr	IVOR6,r4	/* 6: Program check */
	addi	r4,r3,FPUnavailable - _start + _START_OFFSET
	mtspr	IVOR7,r4	/* 7: floating point unavailable */
	addi	r4,r3,SystemCall - _start + _START_OFFSET
	mtspr	IVOR8,r4	/* 8: System call */
	/* 9: Auxiliary processor unavailable(unsupported) */
	addi	r4,r3,Decrementer - _start + _START_OFFSET
	mtspr	IVOR10,r4	/* 10: Decrementer */
	addi	r4,r3,IntervalTimer - _start + _START_OFFSET
	mtspr	IVOR11,r4	/* 11: Interval timer */
	addi	r4,r3,WatchdogTimer - _start + _START_OFFSET
	mtspr	IVOR12,r4	/* 12: Watchdog timer */
	addi	r4,r3,DataTLBError - _start + _START_OFFSET
	mtspr	IVOR13,r4	/* 13: Data TLB error */
	addi	r4,r3,InstructionTLBError - _start + _START_OFFSET
	mtspr	IVOR14,r4	/* 14: Instruction TLB error */
	addi	r4,r3,DebugBreakpoint - _start + _START_OFFSET
	mtspr	IVOR15,r4	/* 15: Debug */
#endif

	/* Clear and set up some registers. */
	li	r0,0x0000
	lis	r1,0xffff
	mtspr	DEC,r0			/* prevent dec exceptions */
	mttbl	r0			/* prevent fit & wdt exceptions */
	mttbu	r0
	mtspr	TSR,r1			/* clear all timer exception status */
	mtspr	TCR,r0			/* disable all */
	mtspr	ESR,r0			/* clear exception syndrome register */
	mtspr	MCSR,r0			/* machine check syndrome register */
	mtxer	r0			/* clear integer exception register */

#ifdef CONFIG_SYS_BOOK3E_HV
	mtspr	MAS8,r0			/* make sure MAS8 is clear */
#endif

	/* Enable Time Base and Select Time Base Clock */
	lis	r0,HID0_EMCP@h		/* Enable machine check */
#if defined(CONFIG_ENABLE_36BIT_PHYS)
	ori	r0,r0,HID0_ENMAS7@l	/* Enable MAS7 */
#endif
#ifndef CONFIG_E500MC
	ori	r0,r0,HID0_TBEN@l	/* Enable Timebase */
#endif
	mtspr	HID0,r0

#ifndef CONFIG_E500MC
	li	r0,(HID1_ASTME|HID1_ABE)@l	/* Addr streaming & broadcast */
	mfspr	r3,PVR
	andi.	r3,r3, 0xff
	cmpwi	r3,0x50@l	/* if we are rev 5.0 or greater set MBDD */
	blt	1f
	/* Set MBDD bit also */
	ori	r0, r0, HID1_MBDD@l
1:
	mtspr	HID1,r0
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_CPU_A003999
	/* Erratum workaround: set a chicken bit in SPR 977 */
	mfspr	r3,977
	oris	r3,r3,0x0100
	mtspr	977,r3
#endif

	/* Enable Branch Prediction */
#if defined(CONFIG_BTB)
	lis	r0,BUCSR_ENABLE@h
	ori	r0,r0,BUCSR_ENABLE@l
	mtspr	SPRN_BUCSR,r0
#endif

#if defined(CONFIG_SYS_INIT_DBCR)
	lis	r1,0xffff
	ori	r1,r1,0xffff
	mtspr	DBSR,r1			/* Clear all status bits */
	lis	r0,CONFIG_SYS_INIT_DBCR@h	/* DBCR0[IDM] must be set */
	ori	r0,r0,CONFIG_SYS_INIT_DBCR@l
	mtspr	DBCR0,r0
#endif

#ifdef CONFIG_MPC8569
#define CONFIG_SYS_LBC_ADDR (CONFIG_SYS_CCSRBAR_DEFAULT + 0x5000)
#define CONFIG_SYS_LBCR_ADDR (CONFIG_SYS_LBC_ADDR + 0xd0)

	/* MPC8569 Rev.0 silicon needs to set bit 13 of LBCR to allow elBC to
	 * use address space which is more than 12bits, and it must be done in
	 * the 4K boot page. So we set this bit here.
	 */

	/* create a temp mapping TLB0[0] for LBCR */
	create_tlb0_entry 0, \
		0, BOOKE_PAGESZ_4K, \
		CONFIG_SYS_LBC_ADDR, MAS2_I|MAS2_G, \
		CONFIG_SYS_LBC_ADDR, MAS3_SW|MAS3_SR, \
		0, r6

	/* Set LBCR register */
	lis	r4,CONFIG_SYS_LBCR_ADDR@h
	ori	r4,r4,CONFIG_SYS_LBCR_ADDR@l

	lis	r5,CONFIG_SYS_LBC_LBCR@h
	ori	r5,r5,CONFIG_SYS_LBC_LBCR@l
	stw	r5,0(r4)
	isync

	/* invalidate this temp TLB */
	lis	r4,CONFIG_SYS_LBC_ADDR@h
	ori	r4,r4,CONFIG_SYS_LBC_ADDR@l
	tlbivax	0,r4
	isync

#endif /* CONFIG_MPC8569 */

/*
 * Search for the TLB that covers the code we're executing, and shrink it
 * so that it covers only this 4K page.  That will ensure that any other
 * TLB we create won't interfere with it.  We assume that the TLB exists,
 * which is why we don't check the Valid bit of MAS1.  We also assume
 * it is in TLB1.
435 * 436 * This is necessary, for example, when booting from the on-chip ROM, 437 * which (oddly) creates a single 4GB TLB that covers CCSR and DDR. 438 */ 439 bl nexti /* Find our address */ 440nexti: mflr r1 /* R1 = our PC */ 441 li r2, 0 442 mtspr MAS6, r2 /* Assume the current PID and AS are 0 */ 443 isync 444 msync 445 tlbsx 0, r1 /* This must succeed */ 446 447 mfspr r14, MAS0 /* Save ESEL for later */ 448 rlwinm r14, r14, 16, 0xfff 449 450 /* Set the size of the TLB to 4KB */ 451 mfspr r3, MAS1 452 li r2, 0xF80 453 andc r3, r3, r2 /* Clear the TSIZE bits */ 454 ori r3, r3, MAS1_TSIZE(BOOKE_PAGESZ_4K)@l 455 oris r3, r3, MAS1_IPROT@h 456 mtspr MAS1, r3 457 458 /* 459 * Set the base address of the TLB to our PC. We assume that 460 * virtual == physical. We also assume that MAS2_EPN == MAS3_RPN. 461 */ 462 lis r3, MAS2_EPN@h 463 ori r3, r3, MAS2_EPN@l /* R3 = MAS2_EPN */ 464 465 and r1, r1, r3 /* Our PC, rounded down to the nearest page */ 466 467 mfspr r2, MAS2 468 andc r2, r2, r3 469 or r2, r2, r1 470#ifdef CONFIG_SYS_FSL_ERRATUM_A004510 471 cmpwi r27,0 472 beq 1f 473 andi. r15, r2, MAS2_I|MAS2_G /* save the old I/G for later */ 474 rlwinm r2, r2, 0, ~MAS2_I 475 ori r2, r2, MAS2_G 4761: 477#endif 478 mtspr MAS2, r2 /* Set the EPN to our PC base address */ 479 480 mfspr r2, MAS3 481 andc r2, r2, r3 482 or r2, r2, r1 483 mtspr MAS3, r2 /* Set the RPN to our PC base address */ 484 485 isync 486 msync 487 tlbwe 488 489/* 490 * Clear out any other TLB entries that may exist, to avoid conflicts. 491 * Our TLB entry is in r14. 
492 */ 493 li r0, TLBIVAX_ALL | TLBIVAX_TLB0 494 tlbivax 0, r0 495 tlbsync 496 497 mfspr r4, SPRN_TLB1CFG 498 rlwinm r4, r4, 0, TLBnCFG_NENTRY_MASK 499 500 li r3, 0 501 mtspr MAS1, r3 5021: cmpw r3, r14 503 rlwinm r5, r3, 16, MAS0_ESEL_MSK 504 addi r3, r3, 1 505 beq 2f /* skip the entry we're executing from */ 506 507 oris r5, r5, MAS0_TLBSEL(1)@h 508 mtspr MAS0, r5 509 510 isync 511 tlbwe 512 isync 513 msync 514 5152: cmpw r3, r4 516 blt 1b 517 518#if defined(CONFIG_SYS_PPC_E500_DEBUG_TLB) && !defined(MINIMAL_SPL) 519/* 520 * TLB entry for debuggging in AS1 521 * Create temporary TLB entry in AS0 to handle debug exception 522 * As on debug exception MSR is cleared i.e. Address space is changed 523 * to 0. A TLB entry (in AS0) is required to handle debug exception generated 524 * in AS1. 525 */ 526 527#ifdef NOR_BOOT 528/* 529 * TLB entry is created for IVPR + IVOR15 to map on valid OP code address 530 * bacause flash's virtual address maps to 0xff800000 - 0xffffffff. 531 * and this window is outside of 4K boot window. 532 */ 533 create_tlb1_entry CONFIG_SYS_PPC_E500_DEBUG_TLB, \ 534 0, BOOKE_PAGESZ_4M, \ 535 CONFIG_SYS_MONITOR_BASE & 0xffc00000, MAS2_I|MAS2_G, \ 536 0xffc00000, MAS3_SX|MAS3_SW|MAS3_SR, \ 537 0, r6 538 539#elif !defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_SECURE_BOOT) 540 create_tlb1_entry CONFIG_SYS_PPC_E500_DEBUG_TLB, \ 541 0, BOOKE_PAGESZ_1M, \ 542 CONFIG_SYS_MONITOR_BASE, MAS2_I|MAS2_G, \ 543 CONFIG_SYS_PBI_FLASH_WINDOW, MAS3_SX|MAS3_SW|MAS3_SR, \ 544 0, r6 545#else 546/* 547 * TLB entry is created for IVPR + IVOR15 to map on valid OP code address 548 * because "nexti" will resize TLB to 4K 549 */ 550 create_tlb1_entry CONFIG_SYS_PPC_E500_DEBUG_TLB, \ 551 0, BOOKE_PAGESZ_256K, \ 552 CONFIG_SYS_MONITOR_BASE & 0xfffc0000, MAS2_I, \ 553 CONFIG_SYS_MONITOR_BASE & 0xfffc0000, MAS3_SX|MAS3_SW|MAS3_SR, \ 554 0, r6 555#endif 556#endif 557 558/* 559 * Relocate CCSR, if necessary. 
We relocate CCSR if (obviously) the default 560 * location is not where we want it. This typically happens on a 36-bit 561 * system, where we want to move CCSR to near the top of 36-bit address space. 562 * 563 * To move CCSR, we create two temporary TLBs, one for the old location, and 564 * another for the new location. On CoreNet systems, we also need to create 565 * a special, temporary LAW. 566 * 567 * As a general rule, TLB0 is used for short-term TLBs, and TLB1 is used for 568 * long-term TLBs, so we use TLB0 here. 569 */ 570#if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS) 571 572#if !defined(CONFIG_SYS_CCSRBAR_PHYS_HIGH) || !defined(CONFIG_SYS_CCSRBAR_PHYS_LOW) 573#error "CONFIG_SYS_CCSRBAR_PHYS_HIGH and CONFIG_SYS_CCSRBAR_PHYS_LOW) must be defined." 574#endif 575 576create_ccsr_new_tlb: 577 /* 578 * Create a TLB for the new location of CCSR. Register R8 is reserved 579 * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR). 580 */ 581 lis r8, CONFIG_SYS_CCSRBAR@h 582 ori r8, r8, CONFIG_SYS_CCSRBAR@l 583 lis r9, (CONFIG_SYS_CCSRBAR + 0x1000)@h 584 ori r9, r9, (CONFIG_SYS_CCSRBAR + 0x1000)@l 585 create_tlb0_entry 0, \ 586 0, BOOKE_PAGESZ_4K, \ 587 CONFIG_SYS_CCSRBAR, MAS2_I|MAS2_G, \ 588 CONFIG_SYS_CCSRBAR_PHYS_LOW, MAS3_SW|MAS3_SR, \ 589 CONFIG_SYS_CCSRBAR_PHYS_HIGH, r3 590 /* 591 * Create a TLB for the current location of CCSR. Register R9 is reserved 592 * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR + 0x1000). 593 */ 594create_ccsr_old_tlb: 595 create_tlb0_entry 1, \ 596 0, BOOKE_PAGESZ_4K, \ 597 CONFIG_SYS_CCSRBAR + 0x1000, MAS2_I|MAS2_G, \ 598 CONFIG_SYS_CCSRBAR_DEFAULT, MAS3_SW|MAS3_SR, \ 599 0, r3 /* The default CCSR address is always a 32-bit number */ 600 601 602 /* 603 * We have a TLB for what we think is the current (old) CCSR. Let's 604 * verify that, otherwise we won't be able to move it. 
605 * CONFIG_SYS_CCSRBAR_DEFAULT is always a 32-bit number, so we only 606 * need to compare the lower 32 bits of CCSRBAR on CoreNet systems. 607 */ 608verify_old_ccsr: 609 lis r0, CONFIG_SYS_CCSRBAR_DEFAULT@h 610 ori r0, r0, CONFIG_SYS_CCSRBAR_DEFAULT@l 611#ifdef CONFIG_FSL_CORENET 612 lwz r1, 4(r9) /* CCSRBARL */ 613#else 614 lwz r1, 0(r9) /* CCSRBAR, shifted right by 12 */ 615 slwi r1, r1, 12 616#endif 617 618 cmpl 0, r0, r1 619 620 /* 621 * If the value we read from CCSRBARL is not what we expect, then 622 * enter an infinite loop. This will at least allow a debugger to 623 * halt execution and examine TLBs, etc. There's no point in going 624 * on. 625 */ 626infinite_debug_loop: 627 bne infinite_debug_loop 628 629#ifdef CONFIG_FSL_CORENET 630 631#define CCSR_LAWBARH0 (CONFIG_SYS_CCSRBAR + 0x1000) 632#define LAW_EN 0x80000000 633#define LAW_SIZE_4K 0xb 634#define CCSRBAR_LAWAR (LAW_EN | (0x1e << 20) | LAW_SIZE_4K) 635#define CCSRAR_C 0x80000000 /* Commit */ 636 637create_temp_law: 638 /* 639 * On CoreNet systems, we create the temporary LAW using a special LAW 640 * target ID of 0x1e. LAWBARH is at offset 0xc00 in CCSR. 641 */ 642 lis r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h 643 ori r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l 644 lis r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h 645 ori r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l 646 lis r2, CCSRBAR_LAWAR@h 647 ori r2, r2, CCSRBAR_LAWAR@l 648 649 stw r0, 0xc00(r9) /* LAWBARH0 */ 650 stw r1, 0xc04(r9) /* LAWBARL0 */ 651 sync 652 stw r2, 0xc08(r9) /* LAWAR0 */ 653 654 /* 655 * Read back from LAWAR to ensure the update is complete. e500mc 656 * cores also require an isync. 657 */ 658 lwz r0, 0xc08(r9) /* LAWAR0 */ 659 isync 660 661 /* 662 * Read the current CCSRBARH and CCSRBARL using load word instructions. 663 * Follow this with an isync instruction. This forces any outstanding 664 * accesses to configuration space to completion. 
665 */ 666read_old_ccsrbar: 667 lwz r0, 0(r9) /* CCSRBARH */ 668 lwz r0, 4(r9) /* CCSRBARL */ 669 isync 670 671 /* 672 * Write the new values for CCSRBARH and CCSRBARL to their old 673 * locations. The CCSRBARH has a shadow register. When the CCSRBARH 674 * has a new value written it loads a CCSRBARH shadow register. When 675 * the CCSRBARL is written, the CCSRBARH shadow register contents 676 * along with the CCSRBARL value are loaded into the CCSRBARH and 677 * CCSRBARL registers, respectively. Follow this with a sync 678 * instruction. 679 */ 680write_new_ccsrbar: 681 lis r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h 682 ori r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l 683 lis r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h 684 ori r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l 685 lis r2, CCSRAR_C@h 686 ori r2, r2, CCSRAR_C@l 687 688 stw r0, 0(r9) /* Write to CCSRBARH */ 689 sync /* Make sure we write to CCSRBARH first */ 690 stw r1, 4(r9) /* Write to CCSRBARL */ 691 sync 692 693 /* 694 * Write a 1 to the commit bit (C) of CCSRAR at the old location. 695 * Follow this with a sync instruction. 696 */ 697 stw r2, 8(r9) 698 sync 699 700 /* Delete the temporary LAW */ 701delete_temp_law: 702 li r1, 0 703 stw r1, 0xc08(r8) 704 sync 705 stw r1, 0xc00(r8) 706 stw r1, 0xc04(r8) 707 sync 708 709#else /* #ifdef CONFIG_FSL_CORENET */ 710 711write_new_ccsrbar: 712 /* 713 * Read the current value of CCSRBAR using a load word instruction 714 * followed by an isync. This forces all accesses to configuration 715 * space to complete. 716 */ 717 sync 718 lwz r0, 0(r9) 719 isync 720 721/* CONFIG_SYS_CCSRBAR_PHYS right shifted by 12 */ 722#define CCSRBAR_PHYS_RS12 ((CONFIG_SYS_CCSRBAR_PHYS_HIGH << 20) | \ 723 (CONFIG_SYS_CCSRBAR_PHYS_LOW >> 12)) 724 725 /* Write the new value to CCSRBAR. 
	 */
	lis	r0, CCSRBAR_PHYS_RS12@h
	ori	r0, r0, CCSRBAR_PHYS_RS12@l
	stw	r0, 0(r9)
	sync

	/*
	 * The manual says to perform a load of an address that does not
	 * access configuration space or the on-chip SRAM using an existing TLB,
	 * but that doesn't appear to be necessary.  We will do the isync,
	 * though.
	 */
	isync

	/*
	 * Read the contents of CCSRBAR from its new location, followed by
	 * another isync.
	 */
	lwz	r0, 0(r8)
	isync

#endif /* #ifdef CONFIG_FSL_CORENET */

	/* Delete the temporary TLBs */
delete_temp_tlbs:
	delete_tlb0_entry 0, CONFIG_SYS_CCSRBAR, MAS2_I|MAS2_G, r3
	delete_tlb0_entry 1, CONFIG_SYS_CCSRBAR + 0x1000, MAS2_I|MAS2_G, r3

#endif /* #if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS) */

#ifdef CONFIG_SYS_FSL_QORIQ_CHASSIS2
create_ccsr_l2_tlb:
	/*
	 * Create a TLB for the MMR location of CCSR
	 * to access L2CSR0 register
	 */
	create_tlb0_entry 0, \
		0, BOOKE_PAGESZ_4K, \
		CONFIG_SYS_CCSRBAR + 0xC20000, MAS2_I|MAS2_G, \
		CONFIG_SYS_CCSRBAR_PHYS_LOW + 0xC20000, MAS3_SW|MAS3_SR, \
		CONFIG_SYS_CCSRBAR_PHYS_HIGH, r3

enable_l2_cluster_l2:
	/* enable L2 cache */
	lis	r3, (CONFIG_SYS_CCSRBAR + 0xC20000)@h
	ori	r3, r3, (CONFIG_SYS_CCSRBAR + 0xC20000)@l
	li	r4, 33	/* stash id */
	stw	r4, 4(r3)
	lis	r4, (L2CSR0_L2FI|L2CSR0_L2LFC)@h
	ori	r4, r4, (L2CSR0_L2FI|L2CSR0_L2LFC)@l
	sync
	stw	r4, 0(r3)	/* invalidate L2 */
1:	sync
	lwz	r0, 0(r3)
	twi	0, r0, 0	/* forces the lwz to complete before isync */
	isync
	and.	r1, r0, r4	/* spin until invalidate/flash-clear bits drop */
	bne	1b
	lis	r4, L2CSR0_L2E@h
	sync
	stw	r4, 0(r3)	/* enable L2 */
delete_ccsr_l2_tlb:
	delete_tlb0_entry 0, CONFIG_SYS_CCSRBAR + 0xC20000, MAS2_I|MAS2_G, r3
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
#define DCSR_LAWBARH0	(CONFIG_SYS_CCSRBAR + 0x1000)
#define LAW_SIZE_1M	0x13
#define DCSRBAR_LAWAR	(LAW_EN | (0x1d << 20) | LAW_SIZE_1M)

	/* Skip the whole workaround unless r27 flagged an affected rev */
	cmpwi	r27,0
	beq	9f

	/*
	 * Create a TLB entry for CCSR
	 *
	 * We're executing out of TLB1 entry in r14, and that's the only
	 * TLB entry that exists.  To allocate some TLB entries for our
	 * own use, flip a bit high enough that we won't flip it again
	 * via incrementing.
	 */

	xori	r8, r14, 32	/* r8 = a free TLB1 ESEL, != r14 */
	lis	r0, MAS0_TLBSEL(1)@h
	rlwimi	r0, r8, 16, MAS0_ESEL_MSK
	lis	r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@h
	ori	r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@l
	lis	r7, CONFIG_SYS_CCSRBAR@h	/* r7 = CCSR virtual base */
	ori	r7, r7, CONFIG_SYS_CCSRBAR@l
	ori	r2, r7, MAS2_I|MAS2_G
	lis	r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@h
	ori	r3, r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@l
	lis	r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
	ori	r4, r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
	mtspr	MAS0, r0
	mtspr	MAS1, r1
	mtspr	MAS2, r2
	mtspr	MAS3, r3
	mtspr	MAS7, r4
	isync
	tlbwe
	isync
	msync

	/* Map DCSR temporarily to physical address zero */
	li	r0, 0
	lis	r3, DCSRBAR_LAWAR@h
	ori	r3, r3, DCSRBAR_LAWAR@l

	stw	r0, 0xc00(r7)	/* LAWBARH0 */
	stw	r0, 0xc04(r7)	/* LAWBARL0 */
	sync
	stw	r3, 0xc08(r7)	/* LAWAR0 */

	/* Read back from LAWAR to ensure the update is complete.
	 */
	lwz	r3, 0xc08(r7)	/* LAWAR0 */
	isync

	/* Create a TLB entry for DCSR at zero */

	addi	r9, r8, 1	/* second free ESEL, adjacent to r8 */
	lis	r0, MAS0_TLBSEL(1)@h
	rlwimi	r0, r9, 16, MAS0_ESEL_MSK
	lis	r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@h
	ori	r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@l
	li	r6, 0	/* DCSR effective address */
	ori	r2, r6, MAS2_I|MAS2_G
	li	r3, MAS3_SW|MAS3_SR
	li	r4, 0
	mtspr	MAS0, r0
	mtspr	MAS1, r1
	mtspr	MAS2, r2
	mtspr	MAS3, r3
	mtspr	MAS7, r4
	isync
	tlbwe
	isync
	msync

	/* enable the timebase */
#define CTBENR	0xe2084
	li	r3, 1
	addis	r4, r7, CTBENR@ha
	stw	r3, CTBENR@l(r4)
	lwz	r3, CTBENR@l(r4)
	twi	0,r3,0		/* forces the read-back to complete */
	isync

	/* Write \value to CCSR register at \offset (r7 = CCSR base).
	 * Clobbers r3/r4 plus whatever erratum_set_value clobbers. */
	.macro	erratum_set_ccsr offset value
	addis	r3, r7, \offset@ha
	lis	r4, \value@h
	addi	r3, r3, \offset@l
	ori	r4, r4, \value@l
	bl	erratum_set_value
	.endm

	/* Write \value to DCSR register at \offset (r6 = DCSR base, 0). */
	.macro	erratum_set_dcsr offset value
	addis	r3, r6, \offset@ha
	lis	r4, \value@h
	addi	r3, r3, \offset@l
	ori	r4, r4, \value@l
	bl	erratum_set_value
	.endm

	erratum_set_dcsr 0xb0e08 0xe0201800
	erratum_set_dcsr 0xb0e18 0xe0201800
	erratum_set_dcsr 0xb0e38 0xe0400000
	erratum_set_dcsr 0xb0008 0x00900000
	erratum_set_dcsr 0xb0e40 0xe00a0000
	erratum_set_ccsr 0x18600 CONFIG_SYS_FSL_CORENET_SNOOPVEC_COREONLY
	erratum_set_ccsr 0x10f00 0x415e5000
	erratum_set_ccsr 0x11f00 0x415e5000

	/* Make temp mapping uncacheable again, if it was initially
	 * (r15 holds the I/G bits saved before the workaround). */
	bl	2f
2:	mflr	r3
	tlbsx	0, r3
	mfspr	r4, MAS2
	rlwimi	r4, r15, 0, MAS2_I
	rlwimi	r4, r15, 0, MAS2_G
	mtspr	MAS2, r4
	isync
	tlbwe
	isync
	msync

	/* Clear the cache */
	lis	r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
	ori	r3,r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
	sync
	isync
	mtspr	SPRN_L1CSR1,r3
	isync
2:	sync
	mfspr	r4,SPRN_L1CSR1
	and.	r4,r4,r3	/* wait for invalidate bits to self-clear */
	bne	2b

	lis	r3,(L1CSR1_CPE|L1CSR1_ICE)@h
	ori	r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
	sync
	isync
	mtspr	SPRN_L1CSR1,r3
	isync
2:	sync
	mfspr	r4,SPRN_L1CSR1
	and.	r4,r4,r3	/* wait until the I-cache reports enabled */
	beq	2b

	/* Remove temporary mappings */
	lis	r0, MAS0_TLBSEL(1)@h
	rlwimi	r0, r9, 16, MAS0_ESEL_MSK
	li	r3, 0
	mtspr	MAS0, r0
	mtspr	MAS1, r3	/* invalidate the DCSR TLB entry */
	isync
	tlbwe
	isync
	msync

	li	r3, 0
	stw	r3, 0xc08(r7)	/* LAWAR0 */
	lwz	r3, 0xc08(r7)
	isync

	lis	r0, MAS0_TLBSEL(1)@h
	rlwimi	r0, r8, 16, MAS0_ESEL_MSK
	li	r3, 0
	mtspr	MAS0, r0
	mtspr	MAS1, r3	/* invalidate the CCSR TLB entry */
	isync
	tlbwe
	isync
	msync

	b	9f

	/* r3 = addr, r4 = value, clobbers r5, r11, r12 */
erratum_set_value:
	/* Lock two cache lines into I-Cache */
	sync
	mfspr	r11, SPRN_L1CSR1
	rlwinm	r11, r11, 0, ~L1CSR1_ICUL	/* clear the unable-to-lock bit */
	sync
	isync
	mtspr	SPRN_L1CSR1, r11
	isync

	mflr	r12		/* save LR; the bl below clobbers it */
	bl	5f
5:	mflr	r5
	addi	r5, r5, 2f - 5b	/* r5 = address of the locked region below */
	icbtls	0, 0, r5	/* touch-and-lock first cache line */
	addi	r5, r5, 64

	sync
	mfspr	r11, SPRN_L1CSR1
3:	andi.	r11, r11, L1CSR1_ICUL	/* wait until the lock succeeded */
	bne	3b

	icbtls	0, 0, r5	/* touch-and-lock second cache line */
	addi	r5, r5, 64

	sync
	mfspr	r11, SPRN_L1CSR1
3:	andi.	r11, r11, L1CSR1_ICUL
	bne	3b

	b	2f
	.align	6	/* 2f starts on a 64-byte cache-line boundary */
	/* Inside a locked cacheline, wait a while, write, then wait a while */
2:	sync

	mfspr	r5, SPRN_TBRL
	addis	r11, r5, 0x10000@h /* wait 65536 timebase ticks */
4:	mfspr	r5, SPRN_TBRL
	subf.	r5, r5, r11
	bgt	4b

	stw	r4, 0(r3)

	mfspr	r5, SPRN_TBRL
	addis	r11, r5, 0x10000@h /* wait 65536 timebase ticks */
4:	mfspr	r5, SPRN_TBRL
	subf.	r5, r5, r11
	bgt	4b

	sync

	/*
	 * Fill out the rest of this cache line and the next with nops,
	 * to ensure that nothing outside the locked area will be
	 * fetched due to a branch.
1018 */ 1019 .rept 19 1020 nop 1021 .endr 1022 1023 sync 1024 mfspr r11, SPRN_L1CSR1 1025 rlwinm r11, r11, 0, ~L1CSR1_ICUL 1026 sync 1027 isync 1028 mtspr SPRN_L1CSR1, r11 1029 isync 1030 1031 mtlr r12 1032 blr 1033 10349: 1035#endif 1036 1037create_init_ram_area: 1038 lis r6,FSL_BOOKE_MAS0(1, 15, 0)@h 1039 ori r6,r6,FSL_BOOKE_MAS0(1, 15, 0)@l 1040 1041#ifdef NOR_BOOT 1042 /* create a temp mapping in AS=1 to the 4M boot window */ 1043 create_tlb1_entry 15, \ 1044 1, BOOKE_PAGESZ_4M, \ 1045 CONFIG_SYS_MONITOR_BASE & 0xffc00000, MAS2_I|MAS2_G, \ 1046 0xffc00000, MAS3_SX|MAS3_SW|MAS3_SR, \ 1047 0, r6 1048 1049#elif !defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_SECURE_BOOT) 1050 /* create a temp mapping in AS = 1 for Flash mapping 1051 * created by PBL for ISBC code 1052 */ 1053 create_tlb1_entry 15, \ 1054 1, BOOKE_PAGESZ_1M, \ 1055 CONFIG_SYS_MONITOR_BASE & 0xfff00000, MAS2_I|MAS2_G, \ 1056 CONFIG_SYS_PBI_FLASH_WINDOW & 0xfff00000, MAS3_SX|MAS3_SW|MAS3_SR, \ 1057 0, r6 1058#else 1059 /* 1060 * create a temp mapping in AS=1 to the 1M CONFIG_SYS_MONITOR_BASE space, the main 1061 * image has been relocated to CONFIG_SYS_MONITOR_BASE on the second stage. 
	 */
	create_tlb1_entry 15, \
		1, BOOKE_PAGESZ_1M, \
		CONFIG_SYS_MONITOR_BASE & 0xfff00000, MAS2_I|MAS2_G, \
		CONFIG_SYS_MONITOR_BASE & 0xfff00000, MAS3_SX|MAS3_SW|MAS3_SR, \
		0, r6
#endif

	/* create a temp mapping in AS=1 to the stack */
#if defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW) && \
    defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH)
	create_tlb1_entry 14, \
		1, BOOKE_PAGESZ_16K, \
		CONFIG_SYS_INIT_RAM_ADDR, 0, \
		CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW, MAS3_SX|MAS3_SW|MAS3_SR, \
		CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH, r6

#else
	create_tlb1_entry 14, \
		1, BOOKE_PAGESZ_16K, \
		CONFIG_SYS_INIT_RAM_ADDR, 0, \
		CONFIG_SYS_INIT_RAM_ADDR, MAS3_SX|MAS3_SW|MAS3_SR, \
		0, r6
#endif

	/* Switch to address space 1 (MSR[IS|DS]) via rfi to switch_as */
	lis	r6,MSR_IS|MSR_DS|MSR_DE@h
	ori	r6,r6,MSR_IS|MSR_DS|MSR_DE@l
	lis	r7,switch_as@h
	ori	r7,r7,switch_as@l

	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r6
	rfi

switch_as:
/* L1 DCache is used for initial RAM */

	/* Allocate Initial RAM in data cache.
	 */
	lis	r3,CONFIG_SYS_INIT_RAM_ADDR@h
	ori	r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l
	mfspr	r2, L1CFG0
	andi.	r2, r2, 0x1ff	/* r2 = L1 D-cache size in KB */
	/* cache size * 1024 / (2 * L1 line size) */
	slwi	r2, r2, (10 - 1 - L1_CACHE_SHIFT)
	mtctr	r2
	li	r0,0
1:	/* Zero and lock each line so the init-RAM area never misses */
	dcbz	r0,r3
	dcbtls	0,r0,r3
	addi	r3,r3,CONFIG_SYS_CACHELINE_SIZE
	bdnz	1b

	/* Jump out the last 4K page and continue to 'normal' start */
#if defined(CONFIG_SYS_RAMBOOT) || defined(CONFIG_SPL)
	/* We assume that we're already running at the address we're linked at */
	b	_start_cont
#else
	/* Calculate absolute address in FLASH and jump there		*/
	/*--------------------------------------------------------------*/
	lis	r3,CONFIG_SYS_MONITOR_BASE@h
	ori	r3,r3,CONFIG_SYS_MONITOR_BASE@l
	addi	r3,r3,_start_cont - _start + _START_OFFSET
	mtlr	r3
	blr
#endif

	.text
	.globl	_start
_start:
	.long	0x27051956		/* U-BOOT Magic Number */
	.globl	version_string
version_string:
	.ascii U_BOOT_VERSION_STRING, "\0"

	.align	4
	.globl	_start_cont
_start_cont:
	/* Setup the stack in initial RAM,could be L2-as-SRAM or L1 dcache*/
	lis	r3,(CONFIG_SYS_INIT_RAM_ADDR)@h
	ori	r3,r3,((CONFIG_SYS_INIT_SP_OFFSET-16)&~0xf)@l /* Align to 16 */
	li	r0,0
	stw	r0,0(r3)	/* Terminate Back Chain */
	stw	r0,+4(r3)	/* NULL return address. */
	mr	r1,r3		/* Transfer to SP(r1) */

	GET_GOT
	bl	cpu_init_early_f

	/* switch back to AS = 0 */
	lis	r3,(MSR_CE|MSR_ME|MSR_DE)@h
	ori	r3,r3,(MSR_CE|MSR_ME|MSR_DE)@l
	mtmsr	r3
	isync

	bl	cpu_init_f
	bl	board_init_f
	isync

	/* NOTREACHED - board_init_f() does not return */

#ifndef MINIMAL_SPL
	. = EXC_OFF_SYS_RESET
	.globl	_start_of_vectors
_start_of_vectors:

/* Critical input. */
	/* NOTE(review): 'CritcalInputException' spelling matches the external
	 * C handler symbol -- do not "correct" it here without renaming the
	 * handler too. */
	CRIT_EXCEPTION(0x0100, CriticalInput, CritcalInputException)

/* Machine check */
	MCK_EXCEPTION(0x200, MachineCheck, MachineCheckException)

/* Data Storage exception.
 */
	STD_EXCEPTION(0x0300, DataStorage, UnknownException)

/* Instruction Storage exception. */
	STD_EXCEPTION(0x0400, InstStorage, UnknownException)

/* External Interrupt exception. */
	STD_EXCEPTION(0x0500, ExtInterrupt, ExtIntException)

/* Alignment exception. */
	. = 0x0600
Alignment:
	EXCEPTION_PROLOG(SRR0, SRR1)
	mfspr	r4,DAR			/* faulting data address */
	stw	r4,_DAR(r21)
	mfspr	r5,DSISR		/* fault cause bits */
	stw	r5,_DSISR(r21)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_TEMPLATE(Alignment, AlignmentException, MSR_KERNEL, COPY_EE)

/* Program check exception */
	. = 0x0700
ProgramCheck:
	EXCEPTION_PROLOG(SRR0, SRR1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_TEMPLATE(ProgramCheck, ProgramCheckException,
		MSR_KERNEL, COPY_EE)

	/* No FPU on MPC85xx.  This exception is not supposed to happen.
	*/
	STD_EXCEPTION(0x0800, FPUnavailable, UnknownException)

	. = 0x0900
/*
 * System call vector.  The addis/ori immediates below are placeholders:
 * trap_init patches them with the function table address and entry count.
 *
 * r0 - SYSCALL number
 * r3-... arguments
 */
SystemCall:
	addis	r11,r0,0	/* get functions table addr */
	ori	r11,r11,0	/* Note: this code is patched in trap_init */
	addis	r12,r0,0	/* get number of functions */
	ori	r12,r12,0

	cmplw	0,r0,r12	/* syscall number out of range? */
	bge	1f

	rlwinm	r0,r0,2,0,31	/* fn_addr = fn_tbl[r0] */
	add	r11,r11,r0
	lwz	r11,0(r11)

	li	r20,0xd00-4	/* Get stack pointer */
	lwz	r12,0(r20)
	subi	r12,r12,12	/* Adjust stack pointer */
	li	r0,0xc00+_end_back-SystemCall
	cmplw	0,r0,r12	/* Check stack overflow */
	bgt	1f
	stw	r12,0(r20)

	/* Save LR, SRR0, SRR1 on the private syscall stack frame */
	mflr	r0
	stw	r0,0(r12)
	mfspr	r0,SRR0
	stw	r0,4(r12)
	mfspr	r0,SRR1
	stw	r0,8(r12)

	li	r12,0xc00+_back-SystemCall
	mtlr	r12		/* handler returns to _back below */
	mtspr	SRR0,r11

1:	SYNC
	rfi			/* jump to the handler (or straight to _back) */
_back:

	mfmsr	r11			/* Disable interrupts */
	li	r12,0
	ori	r12,r12,MSR_EE
	andc	r11,r11,r12
	SYNC				/* Some chip revs need this... */
	mtmsr	r11
	SYNC

	li	r12,0xd00-4		/* restore regs */
	lwz	r12,0(r12)

	lwz	r11,0(r12)
	mtlr	r11
	lwz	r11,4(r12)
	mtspr	SRR0,r11
	lwz	r11,8(r12)
	mtspr	SRR1,r11

	addi	r12,r12,12		/* Adjust stack pointer */
	li	r20,0xd00-4
	stw	r12,0(r20)

	SYNC
	rfi
_end_back:

	STD_EXCEPTION(0x0a00, Decrementer, timer_interrupt)
	STD_EXCEPTION(0x0b00, IntervalTimer, UnknownException)
	STD_EXCEPTION(0x0c00, WatchdogTimer, UnknownException)

	STD_EXCEPTION(0x0d00, DataTLBError, UnknownException)
	STD_EXCEPTION(0x0e00, InstructionTLBError, UnknownException)

	CRIT_EXCEPTION(0x0f00, DebugBreakpoint, DebugException )

	.globl	_end_of_vectors
_end_of_vectors:


	. = . + (0x100 - ( . & 0xff ))	/* align for debug */

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception.
 * Register r21 is pointer into trap frame, r1 has new stack pointer.
 * On entry r22/r23 hold the saved PC/MSR and LR points at the
 * per-vector handler descriptor (set up by the EXCEPTION macros).
 */
	.globl	transfer_to_handler
transfer_to_handler:
	stw	r22,_NIP(r21)
	lis	r22,MSR_POW@h
	andc	r23,r23,r22		/* clear POW in the saved MSR */
	stw	r23,_MSR(r21)
	SAVE_GPR(7, r21)
	SAVE_4GPRS(8, r21)
	SAVE_8GPRS(12, r21)
	SAVE_8GPRS(24, r21)

	mflr	r23
	andi.	r24,r23,0x3f00		/* get vector offset */
	stw	r24,TRAP(r21)
	li	r22,0
	stw	r22,RESULT(r21)
	mtspr	SPRG2,r22		/* r1 is now kernel sp */

	lwz	r24,0(r23)		/* virtual address of handler */
	lwz	r23,4(r23)		/* where to go when done */
	mtspr	SRR0,r24
	mtspr	SRR1,r20		/* r20 carries the MSR to resume with */
	mtlr	r23
	SYNC
	rfi				/* jump to handler, enable MMU */

/*
 * Return path for standard (SRR0/SRR1) exceptions: disable EE, restore
 * all state from the trap frame at r1, then rfi back.
 */
int_return:
	mfmsr	r28		/* Disable interrupts */
	li	r4,0
	ori	r4,r4,MSR_EE
	andc	r28,r28,r4
	SYNC			/* Some chip revs need this... */
	mtmsr	r28
	SYNC
	lwz	r2,_CTR(r1)
	lwz	r0,_LINK(r1)
	mtctr	r2
	mtlr	r0
	lwz	r2,_XER(r1)
	lwz	r0,_CCR(r1)
	mtspr	XER,r2
	mtcrf	0xFF,r0
	REST_10GPRS(3, r1)
	REST_10GPRS(13, r1)
	REST_8GPRS(23, r1)
	REST_GPR(31, r1)
	lwz	r2,_NIP(r1)	/* Restore environment */
	lwz	r0,_MSR(r1)
	mtspr	SRR0,r2
	mtspr	SRR1,r0
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	SYNC
	rfi

/*
 * Return path for critical exceptions: identical to int_return except
 * state is restored through CSRR0/CSRR1 and the return is rfci.
 */
crit_return:
	mfmsr	r28		/* Disable interrupts */
	li	r4,0
	ori	r4,r4,MSR_EE
	andc	r28,r28,r4
	SYNC			/* Some chip revs need this... */
	mtmsr	r28
	SYNC
	lwz	r2,_CTR(r1)
	lwz	r0,_LINK(r1)
	mtctr	r2
	mtlr	r0
	lwz	r2,_XER(r1)
	lwz	r0,_CCR(r1)
	mtspr	XER,r2
	mtcrf	0xFF,r0
	REST_10GPRS(3, r1)
	REST_10GPRS(13, r1)
	REST_8GPRS(23, r1)
	REST_GPR(31, r1)
	lwz	r2,_NIP(r1)	/* Restore environment */
	lwz	r0,_MSR(r1)
	mtspr	SPRN_CSRR0,r2
	mtspr	SPRN_CSRR1,r0
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	SYNC
	rfci

/*
 * Return path for machine-check exceptions: restores through
 * MCSRR0/MCSRR1 and returns with rfmci.
 */
mck_return:
	mfmsr	r28		/* Disable interrupts */
	li	r4,0
	ori	r4,r4,MSR_EE
	andc	r28,r28,r4
	SYNC			/* Some chip revs need this... */
	mtmsr	r28
	SYNC
	lwz	r2,_CTR(r1)
	lwz	r0,_LINK(r1)
	mtctr	r2
	mtlr	r0
	lwz	r2,_XER(r1)
	lwz	r0,_CCR(r1)
	mtspr	XER,r2
	mtcrf	0xFF,r0
	REST_10GPRS(3, r1)
	REST_10GPRS(13, r1)
	REST_8GPRS(23, r1)
	REST_GPR(31, r1)
	lwz	r2,_NIP(r1)	/* Restore environment */
	lwz	r0,_MSR(r1)
	mtspr	SPRN_MCSRR0,r2
	mtspr	SPRN_MCSRR1,r0
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	SYNC
	rfmci

/* Cache functions.
*/
/* Flush/invalidate the entire L1 instruction cache via L1CSR1[ICFI].
 * flush == invalidate for the I-cache, hence the shared body.
 */
.globl flush_icache
flush_icache:
.globl invalidate_icache
invalidate_icache:
	mfspr	r0,L1CSR1
	ori	r0,r0,L1CSR1_ICFI
	msync
	isync
	mtspr	L1CSR1,r0
	isync
	blr				/* entire I cache */

/* Invalidate the entire L1 data cache via L1CSR0[DCFI]. */
.globl invalidate_dcache
invalidate_dcache:
	mfspr	r0,L1CSR0
	ori	r0,r0,L1CSR0_DCFI
	msync
	isync
	mtspr	L1CSR0,r0
	isync
	blr

	/* Invalidate, then enable the L1 I-cache. */
	.globl	icache_enable
icache_enable:
	mflr	r8			/* preserve LR around the bl below */
	bl	invalidate_icache
	mtlr	r8
	isync
	mfspr	r4,L1CSR1
	ori	r4,r4,0x0001		/* set ICE (enable) */
	oris	r4,r4,0x0001		/* presumably a parity/error enable
					 * bit - TODO confirm against the
					 * core manual */
	mtspr	L1CSR1,r4
	isync
	blr

	/* Disable the L1 I-cache (clear L1CSR1[ICE]). */
	.globl	icache_disable
icache_disable:
	mfspr	r0,L1CSR1
	lis	r3,0
	ori	r3,r3,L1CSR1_ICE
	andc	r0,r0,r3
	mtspr	L1CSR1,r0
	isync
	blr

	/* Return non-zero in r3 iff the L1 I-cache is enabled. */
	.globl	icache_status
icache_status:
	mfspr	r3,L1CSR1
	andi.	r3,r3,L1CSR1_ICE
	blr

	/* Invalidate, then enable the L1 D-cache. */
	.globl	dcache_enable
dcache_enable:
	mflr	r8			/* preserve LR around the bl below */
	bl	invalidate_dcache
	mtlr	r8
	isync
	mfspr	r0,L1CSR0
	ori	r0,r0,0x0001		/* set DCE (enable) */
	oris	r0,r0,0x0001		/* presumably a parity/error enable
					 * bit - TODO confirm against the
					 * core manual */
	msync
	isync
	mtspr	L1CSR0,r0
	isync
	blr

	/* Disable the L1 D-cache (clear L1CSR0[DCE]). */
	.globl	dcache_disable
dcache_disable:
	mfspr	r3,L1CSR0
	lis	r4,0
	ori	r4,r4,L1CSR0_DCE
	andc	r3,r3,r4
	mtspr	L1CSR0,r3
	isync
	blr

	/* Return non-zero in r3 iff the L1 D-cache is enabled. */
	.globl	dcache_status
dcache_status:
	mfspr	r3,L1CSR0
	andi.	r3,r3,L1CSR0_DCE
	blr

	/* Return the Processor ID Register in r3. */
	.globl get_pir
get_pir:
	mfspr	r3,PIR
	blr

	/* Return the Processor Version Register in r3. */
	.globl get_pvr
get_pvr:
	mfspr	r3,PVR
	blr

	/* Return the System Version Register in r3. */
	.globl get_svr
get_svr:
	mfspr	r3,SVR
	blr

	/* Write r3 to the Timer Control Register. */
	.globl wr_tcr
wr_tcr:
	mtspr	TCR,r3
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 in8 */
/* Description:	 Input 8 bits */
/*------------------------------------------------------------------------------- */
	.globl	in8
in8:
	lbz	r3,0x0000(r3)
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 out8 */
/* Description:	 Output 8 bits */
/*------------------------------------------------------------------------------- */
	.globl	out8
out8:
	stb	r4,0x0000(r3)
	sync				/* ensure the store reaches the device */
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 out16 */
/* Description:	 Output 16 bits */
/*------------------------------------------------------------------------------- */
	.globl	out16
out16:
	sth	r4,0x0000(r3)
	sync
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 out16r */
/* Description:	 Byte reverse and output 16 bits */
/*------------------------------------------------------------------------------- */
	.globl	out16r
out16r:
	sthbrx	r4,r0,r3		/* r0 as index operand reads as 0 here */
	sync
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 out32 */
/* Description:	 Output 32 bits */
/*------------------------------------------------------------------------------- */
	.globl	out32
out32:
	stw	r4,0x0000(r3)
	sync
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 out32r */
/*
Description: Byte reverse and output 32 bits */ 1565/*------------------------------------------------------------------------------- */ 1566 .globl out32r 1567out32r: 1568 stwbrx r4,r0,r3 1569 sync 1570 blr 1571 1572/*------------------------------------------------------------------------------- */ 1573/* Function: in16 */ 1574/* Description: Input 16 bits */ 1575/*------------------------------------------------------------------------------- */ 1576 .globl in16 1577in16: 1578 lhz r3,0x0000(r3) 1579 blr 1580 1581/*------------------------------------------------------------------------------- */ 1582/* Function: in16r */ 1583/* Description: Input 16 bits and byte reverse */ 1584/*------------------------------------------------------------------------------- */ 1585 .globl in16r 1586in16r: 1587 lhbrx r3,r0,r3 1588 blr 1589 1590/*------------------------------------------------------------------------------- */ 1591/* Function: in32 */ 1592/* Description: Input 32 bits */ 1593/*------------------------------------------------------------------------------- */ 1594 .globl in32 1595in32: 1596 lwz 3,0x0000(3) 1597 blr 1598 1599/*------------------------------------------------------------------------------- */ 1600/* Function: in32r */ 1601/* Description: Input 32 bits and byte reverse */ 1602/*------------------------------------------------------------------------------- */ 1603 .globl in32r 1604in32r: 1605 lwbrx r3,r0,r3 1606 blr 1607#endif /* !MINIMAL_SPL */ 1608 1609/*------------------------------------------------------------------------------*/ 1610 1611/* 1612 * void write_tlb(mas0, mas1, mas2, mas3, mas7) 1613 */ 1614 .globl write_tlb 1615write_tlb: 1616 mtspr MAS0,r3 1617 mtspr MAS1,r4 1618 mtspr MAS2,r5 1619 mtspr MAS3,r6 1620#ifdef CONFIG_ENABLE_36BIT_PHYS 1621 mtspr MAS7,r7 1622#endif 1623 li r3,0 1624#ifdef CONFIG_SYS_BOOK3E_HV 1625 mtspr MAS8,r3 1626#endif 1627 isync 1628 tlbwe 1629 msync 1630 isync 1631 blr 1632 1633/* 1634 * void relocate_code 
(addr_sp, gd, addr_moni) 1635 * 1636 * This "function" does not return, instead it continues in RAM 1637 * after relocating the monitor code. 1638 * 1639 * r3 = dest 1640 * r4 = src 1641 * r5 = length in bytes 1642 * r6 = cachelinesize 1643 */ 1644 .globl relocate_code 1645relocate_code: 1646 mr r1,r3 /* Set new stack pointer */ 1647 mr r9,r4 /* Save copy of Init Data pointer */ 1648 mr r10,r5 /* Save copy of Destination Address */ 1649 1650 GET_GOT 1651 mr r3,r5 /* Destination Address */ 1652 lis r4,CONFIG_SYS_MONITOR_BASE@h /* Source Address */ 1653 ori r4,r4,CONFIG_SYS_MONITOR_BASE@l 1654 lwz r5,GOT(__init_end) 1655 sub r5,r5,r4 1656 li r6,CONFIG_SYS_CACHELINE_SIZE /* Cache Line Size */ 1657 1658 /* 1659 * Fix GOT pointer: 1660 * 1661 * New GOT-PTR = (old GOT-PTR - CONFIG_SYS_MONITOR_BASE) + Destination Address 1662 * 1663 * Offset: 1664 */ 1665 sub r15,r10,r4 1666 1667 /* First our own GOT */ 1668 add r12,r12,r15 1669 /* the the one used by the C code */ 1670 add r30,r30,r15 1671 1672 /* 1673 * Now relocate code 1674 */ 1675 1676 cmplw cr1,r3,r4 1677 addi r0,r5,3 1678 srwi. r0,r0,2 1679 beq cr1,4f /* In place copy is not necessary */ 1680 beq 7f /* Protect against 0 count */ 1681 mtctr r0 1682 bge cr1,2f 1683 1684 la r8,-4(r4) 1685 la r7,-4(r3) 16861: lwzu r0,4(r8) 1687 stwu r0,4(r7) 1688 bdnz 1b 1689 b 4f 1690 16912: slwi r0,r0,2 1692 add r8,r4,r0 1693 add r7,r3,r0 16943: lwzu r0,-4(r8) 1695 stwu r0,-4(r7) 1696 bdnz 3b 1697 1698/* 1699 * Now flush the cache: note that we must start from a cache aligned 1700 * address. Otherwise we might miss one cache line. 
1701 */ 17024: cmpwi r6,0 1703 add r5,r3,r5 1704 beq 7f /* Always flush prefetch queue in any case */ 1705 subi r0,r6,1 1706 andc r3,r3,r0 1707 mr r4,r3 17085: dcbst 0,r4 1709 add r4,r4,r6 1710 cmplw r4,r5 1711 blt 5b 1712 sync /* Wait for all dcbst to complete on bus */ 1713 mr r4,r3 17146: icbi 0,r4 1715 add r4,r4,r6 1716 cmplw r4,r5 1717 blt 6b 17187: sync /* Wait for all icbi to complete on bus */ 1719 isync 1720 1721/* 1722 * We are done. Do not return, instead branch to second part of board 1723 * initialization, now running from RAM. 1724 */ 1725 1726 addi r0,r10,in_ram - _start + _START_OFFSET 1727 1728 /* 1729 * As IVPR is going to point RAM address, 1730 * Make sure IVOR15 has valid opcode to support debugger 1731 */ 1732 mtspr IVOR15,r0 1733 1734 /* 1735 * Re-point the IVPR at RAM 1736 */ 1737 mtspr IVPR,r10 1738 1739 mtlr r0 1740 blr /* NEVER RETURNS! */ 1741 .globl in_ram 1742in_ram: 1743 1744 /* 1745 * Relocation Function, r12 point to got2+0x8000 1746 * 1747 * Adjust got2 pointers, no need to check for 0, this code 1748 * already puts a few entries in the table. 1749 */ 1750 li r0,__got2_entries@sectoff@l 1751 la r3,GOT(_GOT2_TABLE_) 1752 lwz r11,GOT(_GOT2_TABLE_) 1753 mtctr r0 1754 sub r11,r3,r11 1755 addi r3,r3,-4 17561: lwzu r0,4(r3) 1757 cmpwi r0,0 1758 beq- 2f 1759 add r0,r0,r11 1760 stw r0,0(r3) 17612: bdnz 1b 1762 1763 /* 1764 * Now adjust the fixups and the pointers to the fixups 1765 * in case we need to move ourselves again. 
1766 */ 1767 li r0,__fixup_entries@sectoff@l 1768 lwz r3,GOT(_FIXUP_TABLE_) 1769 cmpwi r0,0 1770 mtctr r0 1771 addi r3,r3,-4 1772 beq 4f 17733: lwzu r4,4(r3) 1774 lwzux r0,r4,r11 1775 cmpwi r0,0 1776 add r0,r0,r11 1777 stw r4,0(r3) 1778 beq- 5f 1779 stw r0,0(r4) 17805: bdnz 3b 17814: 1782clear_bss: 1783 /* 1784 * Now clear BSS segment 1785 */ 1786 lwz r3,GOT(__bss_start) 1787 lwz r4,GOT(__bss_end) 1788 1789 cmplw 0,r3,r4 1790 beq 6f 1791 1792 li r0,0 17935: 1794 stw r0,0(r3) 1795 addi r3,r3,4 1796 cmplw 0,r3,r4 1797 bne 5b 17986: 1799 1800 mr r3,r9 /* Init Data pointer */ 1801 mr r4,r10 /* Destination Address */ 1802 bl board_init_r 1803 1804#ifndef MINIMAL_SPL 1805 /* 1806 * Copy exception vector code to low memory 1807 * 1808 * r3: dest_addr 1809 * r7: source address, r8: end address, r9: target address 1810 */ 1811 .globl trap_init 1812trap_init: 1813 mflr r4 /* save link register */ 1814 GET_GOT 1815 lwz r7,GOT(_start_of_vectors) 1816 lwz r8,GOT(_end_of_vectors) 1817 1818 li r9,0x100 /* reset vector always at 0x100 */ 1819 1820 cmplw 0,r7,r8 1821 bgelr /* return if r7>=r8 - just in case */ 18221: 1823 lwz r0,0(r7) 1824 stw r0,0(r9) 1825 addi r7,r7,4 1826 addi r9,r9,4 1827 cmplw 0,r7,r8 1828 bne 1b 1829 1830 /* 1831 * relocate `hdlr' and `int_return' entries 1832 */ 1833 li r7,.L_CriticalInput - _start + _START_OFFSET 1834 bl trap_reloc 1835 li r7,.L_MachineCheck - _start + _START_OFFSET 1836 bl trap_reloc 1837 li r7,.L_DataStorage - _start + _START_OFFSET 1838 bl trap_reloc 1839 li r7,.L_InstStorage - _start + _START_OFFSET 1840 bl trap_reloc 1841 li r7,.L_ExtInterrupt - _start + _START_OFFSET 1842 bl trap_reloc 1843 li r7,.L_Alignment - _start + _START_OFFSET 1844 bl trap_reloc 1845 li r7,.L_ProgramCheck - _start + _START_OFFSET 1846 bl trap_reloc 1847 li r7,.L_FPUnavailable - _start + _START_OFFSET 1848 bl trap_reloc 1849 li r7,.L_Decrementer - _start + _START_OFFSET 1850 bl trap_reloc 1851 li r7,.L_IntervalTimer - _start + _START_OFFSET 1852 li 
r8,_end_of_vectors - _start + _START_OFFSET 18532: 1854 bl trap_reloc 1855 addi r7,r7,0x100 /* next exception vector */ 1856 cmplw 0,r7,r8 1857 blt 2b 1858 1859 /* Update IVORs as per relocated vector table address */ 1860 li r7,0x0100 1861 mtspr IVOR0,r7 /* 0: Critical input */ 1862 li r7,0x0200 1863 mtspr IVOR1,r7 /* 1: Machine check */ 1864 li r7,0x0300 1865 mtspr IVOR2,r7 /* 2: Data storage */ 1866 li r7,0x0400 1867 mtspr IVOR3,r7 /* 3: Instruction storage */ 1868 li r7,0x0500 1869 mtspr IVOR4,r7 /* 4: External interrupt */ 1870 li r7,0x0600 1871 mtspr IVOR5,r7 /* 5: Alignment */ 1872 li r7,0x0700 1873 mtspr IVOR6,r7 /* 6: Program check */ 1874 li r7,0x0800 1875 mtspr IVOR7,r7 /* 7: floating point unavailable */ 1876 li r7,0x0900 1877 mtspr IVOR8,r7 /* 8: System call */ 1878 /* 9: Auxiliary processor unavailable(unsupported) */ 1879 li r7,0x0a00 1880 mtspr IVOR10,r7 /* 10: Decrementer */ 1881 li r7,0x0b00 1882 mtspr IVOR11,r7 /* 11: Interval timer */ 1883 li r7,0x0c00 1884 mtspr IVOR12,r7 /* 12: Watchdog timer */ 1885 li r7,0x0d00 1886 mtspr IVOR13,r7 /* 13: Data TLB error */ 1887 li r7,0x0e00 1888 mtspr IVOR14,r7 /* 14: Instruction TLB error */ 1889 li r7,0x0f00 1890 mtspr IVOR15,r7 /* 15: Debug */ 1891 1892 lis r7,0x0 1893 mtspr IVPR,r7 1894 1895 mtlr r4 /* restore link register */ 1896 blr 1897 1898.globl unlock_ram_in_cache 1899unlock_ram_in_cache: 1900 /* invalidate the INIT_RAM section */ 1901 lis r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@h 1902 ori r3,r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@l 1903 mfspr r4,L1CFG0 1904 andi. 
r4,r4,0x1ff 1905 slwi r4,r4,(10 - 1 - L1_CACHE_SHIFT) 1906 mtctr r4 19071: dcbi r0,r3 1908 addi r3,r3,CONFIG_SYS_CACHELINE_SIZE 1909 bdnz 1b 1910 sync 1911 1912 /* Invalidate the TLB entries for the cache */ 1913 lis r3,CONFIG_SYS_INIT_RAM_ADDR@h 1914 ori r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l 1915 tlbivax 0,r3 1916 addi r3,r3,0x1000 1917 tlbivax 0,r3 1918 addi r3,r3,0x1000 1919 tlbivax 0,r3 1920 addi r3,r3,0x1000 1921 tlbivax 0,r3 1922 isync 1923 blr 1924 1925.globl flush_dcache 1926flush_dcache: 1927 mfspr r3,SPRN_L1CFG0 1928 1929 rlwinm r5,r3,9,3 /* Extract cache block size */ 1930 twlgti r5,1 /* Only 32 and 64 byte cache blocks 1931 * are currently defined. 1932 */ 1933 li r4,32 1934 subfic r6,r5,2 /* r6 = log2(1KiB / cache block size) - 1935 * log2(number of ways) 1936 */ 1937 slw r5,r4,r5 /* r5 = cache block size */ 1938 1939 rlwinm r7,r3,0,0xff /* Extract number of KiB in the cache */ 1940 mulli r7,r7,13 /* An 8-way cache will require 13 1941 * loads per set. 1942 */ 1943 slw r7,r7,r6 1944 1945 /* save off HID0 and set DCFA */ 1946 mfspr r8,SPRN_HID0 1947 ori r9,r8,HID0_DCFA@l 1948 mtspr SPRN_HID0,r9 1949 isync 1950 1951 lis r4,0 1952 mtctr r7 1953 19541: lwz r3,0(r4) /* Load... */ 1955 add r4,r4,r5 1956 bdnz 1b 1957 1958 msync 1959 lis r4,0 1960 mtctr r7 1961 19621: dcbf 0,r4 /* ...and flush. */ 1963 add r4,r4,r5 1964 bdnz 1b 1965 1966 /* restore HID0 */ 1967 mtspr SPRN_HID0,r8 1968 isync 1969 1970 blr 1971 1972.globl setup_ivors 1973setup_ivors: 1974 1975#include "fixed_ivor.S" 1976 blr 1977#endif /* !MINIMAL_SPL */ 1978