/*
 * Copyright 2004, 2007-2012 Freescale Semiconductor, Inc.
 * Copyright (C) 2003 Motorola, Inc.
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

/* U-Boot Startup Code for Motorola 85xx PowerPC based Embedded Boards
 *
 * The processor starts at 0xfffffffc and the code is first executed in the
 * last 4K page (0xfffff000-0xffffffff) in flash/rom.
 *
 */

#include <asm-offsets.h>
#include <config.h>
#include <mpc85xx.h>
#include <version.h>

#define _LINUX_CONFIG_H 1	/* avoid reading Linux autoconf.h file */

#include <ppc_asm.tmpl>
#include <ppc_defs.h>

#include <asm/cache.h>
#include <asm/mmu.h>

#undef	MSR_KERNEL
#define MSR_KERNEL ( MSR_ME )	/* Machine Check */

/*
 * Set up GOT: Global Offset Table
 *
 * Use r12 to access the GOT
 */
	START_GOT
	GOT_ENTRY(_GOT2_TABLE_)
	GOT_ENTRY(_FIXUP_TABLE_)

#ifndef CONFIG_NAND_SPL
	GOT_ENTRY(_start)
	GOT_ENTRY(_start_of_vectors)
	GOT_ENTRY(_end_of_vectors)
	GOT_ENTRY(transfer_to_handler)
#endif

	GOT_ENTRY(__init_end)
	GOT_ENTRY(__bss_end__)
	GOT_ENTRY(__bss_start)
	END_GOT

/*
 * e500 Startup -- after reset only the last 4KB of the effective
 * address space is mapped in the MMU L2 TLB1 Entry0. The .bootpg
 * section is located at THIS LAST page and basically does three
 * things: clear some registers, set up exception tables and
 * add more TLB entries for 'larger spaces' (e.g. the boot rom) to
 * continue the boot procedure.
 *
 * Once the boot rom is mapped by TLB entries we can proceed
 * with normal startup.
 *
 */

	.section .bootpg,"ax"
	.globl _start_e500

_start_e500:
/* Enable debug exception */
	li	r1,MSR_DE
	mtmsr	r1

#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
	mfspr	r3,SPRN_SVR
	rlwinm	r3,r3,0,0xff
	li	r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV
	cmpw	r3,r4
	beq	1f

#ifdef CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
	li	r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
	cmpw	r3,r4
	beq	1f
#endif

	/* Not a supported revision affected by erratum */
	li	r27,0
	b	2f

1:	li	r27,1	/* Remember for later that we have the erratum */
	/* Erratum says set bits 55:60 to 001001 */
	msync
	isync
	mfspr	r3,976
	li	r4,0x48
	rlwimi	r3,r4,0,0x1f8
	mtspr	976,r3
	isync
2:
#endif

#if defined(CONFIG_SECURE_BOOT) && defined(CONFIG_E500MC)
	/* ISBC uses L2 as stack.
	 * Disable L2 cache here so that u-boot can enable it later
	 * as part of its normal flow
	 */

	/* Check if L2 is enabled */
	mfspr	r3, SPRN_L2CSR0
	lis	r2, L2CSR0_L2E@h
	ori	r2, r2, L2CSR0_L2E@l
	and.	r4, r3, r2
	beq	l2_disabled

	mfspr	r3, SPRN_L2CSR0
	/* Flush L2 cache */
	lis	r2,(L2CSR0_L2FL)@h
	ori	r2, r2, (L2CSR0_L2FL)@l
	or	r3, r2, r3
	sync
	isync
	mtspr	SPRN_L2CSR0,r3
	isync
1:
	mfspr	r3, SPRN_L2CSR0
	and.	r1, r3, r2
	bne	1b

	mfspr	r3, SPRN_L2CSR0
	lis	r2, L2CSR0_L2E@h
	ori	r2, r2, L2CSR0_L2E@l
	andc	r4, r3, r2
	sync
	isync
	mtspr	SPRN_L2CSR0,r4
	isync

l2_disabled:
#endif

/* clear registers/arrays not reset by hardware */

	/* L1 */
	li	r0,2
	mtspr	L1CSR0,r0	/* invalidate d-cache */
	mtspr	L1CSR1,r0	/* invalidate i-cache */

	mfspr	r1,DBSR
	mtspr	DBSR,r1		/* Clear all valid bits */

	/*
	 * Enable L1 Caches early
	 *
	 */

#if defined(CONFIG_E500MC) && defined(CONFIG_SYS_CACHE_STASHING)
	/* set stash id to (coreID) * 2 + 32 + L1 CT (0) */
	li	r2,(32 + 0)
	mtspr	L1CSR2,r2
#endif

	/* Enable/invalidate the I-Cache */
	lis	r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
	ori	r2,r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
	mtspr	SPRN_L1CSR1,r2
1:
	mfspr	r3,SPRN_L1CSR1
	and.	r1,r3,r2
	bne	1b

	lis	r3,(L1CSR1_CPE|L1CSR1_ICE)@h
	ori	r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
	mtspr	SPRN_L1CSR1,r3
	isync
2:
	mfspr	r3,SPRN_L1CSR1
	andi.	r1,r3,L1CSR1_ICE@l
	beq	2b

	/* Enable/invalidate the D-Cache */
	lis	r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@h
	ori	r2,r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@l
	mtspr	SPRN_L1CSR0,r2
1:
	mfspr	r3,SPRN_L1CSR0
	and.	r1,r3,r2
	bne	1b

	lis	r3,(L1CSR0_CPE|L1CSR0_DCE)@h
	ori	r3,r3,(L1CSR0_CPE|L1CSR0_DCE)@l
	mtspr	SPRN_L1CSR0,r3
	isync
2:
	mfspr	r3,SPRN_L1CSR0
	andi.	r1,r3,L1CSR0_DCE@l
	beq	2b

#if defined(CONFIG_SYS_PPC_E500_DEBUG_TLB) && !defined(CONFIG_NAND_SPL)
/*
 * TLB entry for debugging in AS1
 * Create a temporary TLB entry in AS0 to handle debug exceptions.
 * Since MSR is cleared on a debug exception (i.e. the address space
 * changes to 0), a TLB entry in AS0 is required to handle debug
 * exceptions generated in AS1.
 */

	lis	r6,FSL_BOOKE_MAS0(1,
			CONFIG_SYS_PPC_E500_DEBUG_TLB, 0)@h
	ori	r6,r6,FSL_BOOKE_MAS0(1,
			CONFIG_SYS_PPC_E500_DEBUG_TLB, 0)@l

#if !defined(CONFIG_SYS_RAMBOOT) && !defined(CONFIG_SECURE_BOOT)
/*
 * The TLB entry is created for IVPR + IVOR15 to map onto a valid opcode
 * address, because the flash's virtual address maps to 0xff800000 - 0xffffffff,
 * and this window is outside of the 4K boot window.
 */
	lis	r7,FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_4M)@h
	ori	r7,r7,FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_4M)@l

	lis	r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE & 0xffc00000,
			(MAS2_I|MAS2_G))@h
	ori	r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE & 0xffc00000,
			(MAS2_I|MAS2_G))@l

	/* The 85xx has the default boot window 0xff800000 - 0xffffffff */
	lis	r9,FSL_BOOKE_MAS3(0xffc00000, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@h
	ori	r9,r9,FSL_BOOKE_MAS3(0xffc00000, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@l
#elif !defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_SECURE_BOOT)
	lis	r7,FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@h
	ori	r7,r7,FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@l

	lis	r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE,(MAS2_I|MAS2_G))@h
	ori	r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE,(MAS2_I|MAS2_G))@l

	lis	r9,FSL_BOOKE_MAS3(CONFIG_SYS_PBI_FLASH_WINDOW, 0,
			(MAS3_SX|MAS3_SW|MAS3_SR))@h
	ori	r9,r9,FSL_BOOKE_MAS3(CONFIG_SYS_PBI_FLASH_WINDOW, 0,
			(MAS3_SX|MAS3_SW|MAS3_SR))@l
#else
/*
 * The TLB entry is created for IVPR + IVOR15 to map onto a valid opcode
 * address, because "nexti" will resize the TLB to 4K.
 */
	lis	r7,FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_256K)@h
	ori	r7,r7,FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_256K)@l

	lis	r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE, (MAS2_I))@h
	ori	r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE,
			(MAS2_I))@l
	lis	r9,FSL_BOOKE_MAS3(CONFIG_SYS_MONITOR_BASE, 0,
			(MAS3_SX|MAS3_SW|MAS3_SR))@h
	ori	r9,r9,FSL_BOOKE_MAS3(CONFIG_SYS_MONITOR_BASE, 0,
			(MAS3_SX|MAS3_SW|MAS3_SR))@l
#endif
	mtspr	MAS0,r6
	mtspr	MAS1,r7
	mtspr	MAS2,r8
	mtspr	MAS3,r9
	tlbwe
	isync
#endif

/*
 * No need to set up interrupt vectors for the NAND SPL,
 * because the NAND SPL never compiles this code.
 */
#if !defined(CONFIG_NAND_SPL)
	/* Setup interrupt vectors */
	lis	r1,CONFIG_SYS_MONITOR_BASE@h
	mtspr	IVPR,r1

	lis	r3,(CONFIG_SYS_MONITOR_BASE & 0xffff)@h
	ori	r3,r3,(CONFIG_SYS_MONITOR_BASE & 0xffff)@l

	addi	r4,r3,CriticalInput - _start + _START_OFFSET
	mtspr	IVOR0,r4	/* 0: Critical input */
	addi	r4,r3,MachineCheck - _start + _START_OFFSET
	mtspr	IVOR1,r4	/* 1: Machine check */
	addi	r4,r3,DataStorage - _start + _START_OFFSET
	mtspr	IVOR2,r4	/* 2: Data storage */
	addi	r4,r3,InstStorage - _start + _START_OFFSET
	mtspr	IVOR3,r4	/* 3: Instruction storage */
	addi	r4,r3,ExtInterrupt - _start + _START_OFFSET
	mtspr	IVOR4,r4	/* 4: External interrupt */
	addi	r4,r3,Alignment - _start + _START_OFFSET
	mtspr	IVOR5,r4	/* 5: Alignment */
	addi	r4,r3,ProgramCheck - _start + _START_OFFSET
	mtspr	IVOR6,r4	/* 6: Program check */
	addi	r4,r3,FPUnavailable - _start + _START_OFFSET
	mtspr	IVOR7,r4	/* 7: floating point unavailable */
	addi	r4,r3,SystemCall - _start + _START_OFFSET
	mtspr	IVOR8,r4	/* 8: System call */
	/* 9: Auxiliary processor unavailable(unsupported) */
	addi	r4,r3,Decrementer - _start + _START_OFFSET
	mtspr	IVOR10,r4	/* 10: Decrementer */
	addi	r4,r3,IntervalTimer - _start + _START_OFFSET
	mtspr	IVOR11,r4	/* 11: Interval timer */
	addi	r4,r3,WatchdogTimer - _start + _START_OFFSET
	mtspr	IVOR12,r4	/* 12: Watchdog timer */
	addi	r4,r3,DataTLBError - _start + _START_OFFSET
	mtspr	IVOR13,r4	/* 13: Data TLB error */
	addi	r4,r3,InstructionTLBError - _start + _START_OFFSET
	mtspr	IVOR14,r4	/* 14: Instruction TLB error */
	addi	r4,r3,DebugBreakpoint - _start + _START_OFFSET
	mtspr	IVOR15,r4	/* 15: Debug */
#endif

	/* Clear and set up some registers. */
	li	r0,0x0000
	lis	r1,0xffff
	mtspr	DEC,r0		/* prevent dec exceptions */
	mttbl	r0		/* prevent fit & wdt exceptions */
	mttbu	r0
	mtspr	TSR,r1		/* clear all timer exception status */
	mtspr	TCR,r0		/* disable all */
	mtspr	ESR,r0		/* clear exception syndrome register */
	mtspr	MCSR,r0		/* machine check syndrome register */
	mtxer	r0		/* clear integer exception register */

#ifdef CONFIG_SYS_BOOK3E_HV
	mtspr	MAS8,r0		/* make sure MAS8 is clear */
#endif

	/* Enable Time Base and Select Time Base Clock */
	lis	r0,HID0_EMCP@h		/* Enable machine check */
#if defined(CONFIG_ENABLE_36BIT_PHYS)
	ori	r0,r0,HID0_ENMAS7@l	/* Enable MAS7 */
#endif
#ifndef CONFIG_E500MC
	ori	r0,r0,HID0_TBEN@l	/* Enable Timebase */
#endif
	mtspr	HID0,r0

#ifndef CONFIG_E500MC
	li	r0,(HID1_ASTME|HID1_ABE)@l	/* Addr streaming & broadcast */
	mfspr	r3,PVR
	andi.	r3,r3, 0xff
	cmpwi	r3,0x50@l	/* if we are rev 5.0 or greater set MBDD */
	blt	1f
	/* Set MBDD bit also */
	ori	r0, r0, HID1_MBDD@l
1:
	mtspr	HID1,r0
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_CPU_A003999
	mfspr	r3,977
	oris	r3,r3,0x0100
	mtspr	977,r3
#endif

	/* Enable Branch Prediction */
#if defined(CONFIG_BTB)
	lis	r0,BUCSR_ENABLE@h
	ori	r0,r0,BUCSR_ENABLE@l
	mtspr	SPRN_BUCSR,r0
#endif

#if defined(CONFIG_SYS_INIT_DBCR)
	lis	r1,0xffff
	ori	r1,r1,0xffff
	mtspr	DBSR,r1			/* Clear all status bits */
	lis	r0,CONFIG_SYS_INIT_DBCR@h	/* DBCR0[IDM] must be set */
	ori	r0,r0,CONFIG_SYS_INIT_DBCR@l
	mtspr	DBCR0,r0
#endif

#ifdef CONFIG_MPC8569
#define CONFIG_SYS_LBC_ADDR (CONFIG_SYS_CCSRBAR_DEFAULT + 0x5000)
#define CONFIG_SYS_LBCR_ADDR (CONFIG_SYS_LBC_ADDR + 0xd0)

	/* MPC8569 Rev.0 silicon needs to set bit 13 of LBCR to allow eLBC to
	 * use an address space wider than 12 bits, and it must be done in
	 * the 4K boot page. So we set this bit here.
	 */

	/* create a temp mapping TLB0[0] for LBCR */
	lis	r6,FSL_BOOKE_MAS0(0, 0, 0)@h
	ori	r6,r6,FSL_BOOKE_MAS0(0, 0, 0)@l

	lis	r7,FSL_BOOKE_MAS1(1, 0, 0, 0, BOOKE_PAGESZ_4K)@h
	ori	r7,r7,FSL_BOOKE_MAS1(1, 0, 0, 0, BOOKE_PAGESZ_4K)@l

	lis	r8,FSL_BOOKE_MAS2(CONFIG_SYS_LBC_ADDR, MAS2_I|MAS2_G)@h
	ori	r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_LBC_ADDR, MAS2_I|MAS2_G)@l

	lis	r9,FSL_BOOKE_MAS3(CONFIG_SYS_LBC_ADDR, 0,
			(MAS3_SX|MAS3_SW|MAS3_SR))@h
	ori	r9,r9,FSL_BOOKE_MAS3(CONFIG_SYS_LBC_ADDR, 0,
			(MAS3_SX|MAS3_SW|MAS3_SR))@l

	mtspr	MAS0,r6
	mtspr	MAS1,r7
	mtspr	MAS2,r8
	mtspr	MAS3,r9
	isync
	msync
	tlbwe

	/* Set LBCR register */
	lis	r4,CONFIG_SYS_LBCR_ADDR@h
	ori	r4,r4,CONFIG_SYS_LBCR_ADDR@l

	lis	r5,CONFIG_SYS_LBC_LBCR@h
	ori	r5,r5,CONFIG_SYS_LBC_LBCR@l
	stw	r5,0(r4)
	isync

	/* invalidate this temp TLB */
	lis	r4,CONFIG_SYS_LBC_ADDR@h
	ori	r4,r4,CONFIG_SYS_LBC_ADDR@l
	tlbivax	0,r4
	isync

#endif /* CONFIG_MPC8569 */

/*
 * Search for the TLB that covers the code we're executing, and shrink it
 * so that it covers only this 4K page.  That will ensure that any other
 * TLB we create won't interfere with it.  We assume that the TLB exists,
 * which is why we don't check the Valid bit of MAS1.  We also assume
 * it is in TLB1.
 *
 * This is necessary, for example, when booting from the on-chip ROM,
 * which (oddly) creates a single 4GB TLB that covers CCSR and DDR.
 */
	bl	nexti		/* Find our address */
nexti:	mflr	r1		/* R1 = our PC */
	li	r2, 0
	mtspr	MAS6, r2	/* Assume the current PID and AS are 0 */
	isync
	msync
	tlbsx	0, r1		/* This must succeed */

	mfspr	r14, MAS0	/* Save ESEL for later */
	rlwinm	r14, r14, 16, 0xfff

	/* Set the size of the TLB to 4KB */
	mfspr	r3, MAS1
	li	r2, 0xF00
	andc	r3, r3, r2	/* Clear the TSIZE bits */
	ori	r3, r3, MAS1_TSIZE(BOOKE_PAGESZ_4K)@l
	oris	r3, r3, MAS1_IPROT@h
	mtspr	MAS1, r3

	/*
	 * Set the base address of the TLB to our PC.  We assume that
	 * virtual == physical.  We also assume that MAS2_EPN == MAS3_RPN.
	 */
	lis	r3, MAS2_EPN@h
	ori	r3, r3, MAS2_EPN@l	/* R3 = MAS2_EPN */

	and	r1, r1, r3	/* Our PC, rounded down to the nearest page */

	mfspr	r2, MAS2
	andc	r2, r2, r3
	or	r2, r2, r1
#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
	cmpwi	r27,0
	beq	1f
	andi.	r15, r2, MAS2_I|MAS2_G	/* save the old I/G for later */
	rlwinm	r2, r2, 0, ~MAS2_I
	ori	r2, r2, MAS2_G
1:
#endif
	mtspr	MAS2, r2	/* Set the EPN to our PC base address */

	mfspr	r2, MAS3
	andc	r2, r2, r3
	or	r2, r2, r1
	mtspr	MAS3, r2	/* Set the RPN to our PC base address */

	isync
	msync
	tlbwe

/*
 * Clear out any other TLB entries that may exist, to avoid conflicts.
 * Our TLB entry is in r14.
 */
	li	r0, TLBIVAX_ALL | TLBIVAX_TLB0
	tlbivax	0, r0
	tlbsync

	mfspr	r4, SPRN_TLB1CFG
	rlwinm	r4, r4, 0, TLBnCFG_NENTRY_MASK

	li	r3, 0
	mtspr	MAS1, r3
1:	cmpw	r3, r14
#if defined(CONFIG_SYS_PPC_E500_DEBUG_TLB) && !defined(CONFIG_NAND_SPL)
	cmpwi	cr1, r3, CONFIG_SYS_PPC_E500_DEBUG_TLB
	cror	cr0*4+eq, cr0*4+eq, cr1*4+eq
#endif
	rlwinm	r5, r3, 16, MAS0_ESEL_MSK
	addi	r3, r3, 1
	beq	2f		/* skip the entry we're executing from */

	oris	r5, r5, MAS0_TLBSEL(1)@h
	mtspr	MAS0, r5

	isync
	tlbwe
	isync
	msync

2:	cmpw	r3, r4
	blt	1b

/*
 * Relocate CCSR, if necessary.  We relocate CCSR if (obviously) the default
 * location is not where we want it.  This typically happens on a 36-bit
 * system, where we want to move CCSR to near the top of 36-bit address space.
 *
 * To move CCSR, we create two temporary TLBs, one for the old location, and
 * another for the new location.  On CoreNet systems, we also need to create
 * a special, temporary LAW.
 *
 * As a general rule, TLB0 is used for short-term TLBs, and TLB1 is used for
 * long-term TLBs, so we use TLB0 here.
 */
#if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS)

#if !defined(CONFIG_SYS_CCSRBAR_PHYS_HIGH) || !defined(CONFIG_SYS_CCSRBAR_PHYS_LOW)
#error "CONFIG_SYS_CCSRBAR_PHYS_HIGH and CONFIG_SYS_CCSRBAR_PHYS_LOW must be defined."
#endif

create_ccsr_new_tlb:
	/*
	 * Create a TLB for the new location of CCSR.  Register R8 is reserved
	 * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR).
	 */
	lis	r8, CONFIG_SYS_CCSRBAR@h
	ori	r8, r8, CONFIG_SYS_CCSRBAR@l
	lis	r9, (CONFIG_SYS_CCSRBAR + 0x1000)@h
	ori	r9, r9, (CONFIG_SYS_CCSRBAR + 0x1000)@l
	lis	r0, FSL_BOOKE_MAS0(0, 0, 0)@h
	ori	r0, r0, FSL_BOOKE_MAS0(0, 0, 0)@l
	lis	r1, FSL_BOOKE_MAS1(1, 0, 0, 0, BOOKE_PAGESZ_4K)@h
	ori	r1, r1, FSL_BOOKE_MAS1(1, 0, 0, 0, BOOKE_PAGESZ_4K)@l
	lis	r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR, (MAS2_I|MAS2_G))@h
	ori	r2, r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR, (MAS2_I|MAS2_G))@l
	lis	r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@h
	ori	r3, r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@l
#ifdef CONFIG_ENABLE_36BIT_PHYS
	lis	r7, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
	ori	r7, r7, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
	mtspr	MAS7, r7
#endif
	mtspr	MAS0, r0
	mtspr	MAS1, r1
	mtspr	MAS2, r2
	mtspr	MAS3, r3
	isync
	msync
	tlbwe

	/*
	 * Create a TLB for the current location of CCSR.  Register R9 is reserved
	 * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR + 0x1000).
	 */
create_ccsr_old_tlb:
	lis	r0, FSL_BOOKE_MAS0(0, 1, 0)@h
	ori	r0, r0, FSL_BOOKE_MAS0(0, 1, 0)@l
	lis	r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR + 0x1000, (MAS2_I|MAS2_G))@h
	ori	r2, r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR + 0x1000, (MAS2_I|MAS2_G))@l
	lis	r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_DEFAULT, 0, (MAS3_SW|MAS3_SR))@h
	ori	r3, r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_DEFAULT, 0, (MAS3_SW|MAS3_SR))@l
#ifdef CONFIG_ENABLE_36BIT_PHYS
	li	r7, 0	/* The default CCSR address is always a 32-bit number */
	mtspr	MAS7, r7
#endif
	mtspr	MAS0, r0
	/* MAS1 is the same as above */
	mtspr	MAS2, r2
	mtspr	MAS3, r3
	isync
	msync
	tlbwe

	/*
	 * We have a TLB for what we think is the current (old) CCSR.  Let's
	 * verify that, otherwise we won't be able to move it.
	 * CONFIG_SYS_CCSRBAR_DEFAULT is always a 32-bit number, so we only
	 * need to compare the lower 32 bits of CCSRBAR on CoreNet systems.
	 */
verify_old_ccsr:
	lis	r0, CONFIG_SYS_CCSRBAR_DEFAULT@h
	ori	r0, r0, CONFIG_SYS_CCSRBAR_DEFAULT@l
#ifdef CONFIG_FSL_CORENET
	lwz	r1, 4(r9)		/* CCSRBARL */
#else
	lwz	r1, 0(r9)		/* CCSRBAR, shifted right by 12 */
	slwi	r1, r1, 12
#endif

	cmpl	0, r0, r1

	/*
	 * If the value we read from CCSRBARL is not what we expect, then
	 * enter an infinite loop.  This will at least allow a debugger to
	 * halt execution and examine TLBs, etc.  There's no point in going
	 * on.
	 */
infinite_debug_loop:
	bne	infinite_debug_loop

#ifdef CONFIG_FSL_CORENET

#define CCSR_LAWBARH0	(CONFIG_SYS_CCSRBAR + 0x1000)
#define LAW_EN		0x80000000
#define LAW_SIZE_4K	0xb
#define CCSRBAR_LAWAR	(LAW_EN | (0x1e << 20) | LAW_SIZE_4K)
#define CCSRAR_C	0x80000000	/* Commit */

create_temp_law:
	/*
	 * On CoreNet systems, we create the temporary LAW using a special LAW
	 * target ID of 0x1e.  LAWBARH is at offset 0xc00 in CCSR.
	 */
	lis	r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
	ori	r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
	lis	r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h
	ori	r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l
	lis	r2, CCSRBAR_LAWAR@h
	ori	r2, r2, CCSRBAR_LAWAR@l

	stw	r0, 0xc00(r9)	/* LAWBARH0 */
	stw	r1, 0xc04(r9)	/* LAWBARL0 */
	sync
	stw	r2, 0xc08(r9)	/* LAWAR0 */

	/*
	 * Read back from LAWAR to ensure the update is complete.  e500mc
	 * cores also require an isync.
	 */
	lwz	r0, 0xc08(r9)	/* LAWAR0 */
	isync

	/*
	 * Read the current CCSRBARH and CCSRBARL using load word instructions.
	 * Follow this with an isync instruction. This forces any outstanding
	 * accesses to configuration space to completion.
	 */
read_old_ccsrbar:
	lwz	r0, 0(r9)	/* CCSRBARH */
	lwz	r0, 4(r9)	/* CCSRBARL */
	isync

	/*
	 * Write the new values for CCSRBARH and CCSRBARL to their old
	 * locations.  The CCSRBARH has a shadow register. When the CCSRBARH
	 * has a new value written it loads a CCSRBARH shadow register. When
	 * the CCSRBARL is written, the CCSRBARH shadow register contents
	 * along with the CCSRBARL value are loaded into the CCSRBARH and
	 * CCSRBARL registers, respectively.  Follow this with a sync
	 * instruction.
	 */
write_new_ccsrbar:
	lis	r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
	ori	r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
	lis	r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h
	ori	r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l
	lis	r2, CCSRAR_C@h
	ori	r2, r2, CCSRAR_C@l

	stw	r0, 0(r9)	/* Write to CCSRBARH */
	sync			/* Make sure we write to CCSRBARH first */
	stw	r1, 4(r9)	/* Write to CCSRBARL */
	sync

	/*
	 * Write a 1 to the commit bit (C) of CCSRAR at the old location.
	 * Follow this with a sync instruction.
	 */
	stw	r2, 8(r9)
	sync

	/* Delete the temporary LAW */
delete_temp_law:
	li	r1, 0
	stw	r1, 0xc08(r8)
	sync
	stw	r1, 0xc00(r8)
	stw	r1, 0xc04(r8)
	sync

#else /* #ifdef CONFIG_FSL_CORENET */

write_new_ccsrbar:
	/*
	 * Read the current value of CCSRBAR using a load word instruction
	 * followed by an isync.  This forces all accesses to configuration
	 * space to complete.
	 */
	sync
	lwz	r0, 0(r9)
	isync

/* CONFIG_SYS_CCSRBAR_PHYS right shifted by 12 */
#define CCSRBAR_PHYS_RS12 ((CONFIG_SYS_CCSRBAR_PHYS_HIGH << 20) | \
			   (CONFIG_SYS_CCSRBAR_PHYS_LOW >> 12))
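
/*
 * As a worked illustration of the macro above (using purely illustrative
 * values, not a requirement of any board): with
 * CONFIG_SYS_CCSRBAR_PHYS_HIGH = 0xf and CONFIG_SYS_CCSRBAR_PHYS_LOW =
 * 0xfe000000, i.e. a 36-bit physical address of 0xf_fe000000,
 * CCSRBAR_PHYS_RS12 evaluates to (0xf << 20) | (0xfe000000 >> 12)
 * = 0x00ffe000, which is exactly that physical address shifted right by 12.
 */
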
	/* Write the new value to CCSRBAR. */
	lis	r0, CCSRBAR_PHYS_RS12@h
	ori	r0, r0, CCSRBAR_PHYS_RS12@l
	stw	r0, 0(r9)
	sync

	/*
	 * The manual says to perform a load of an address that does not
	 * access configuration space or the on-chip SRAM using an existing TLB,
	 * but that doesn't appear to be necessary.  We will do the isync,
	 * though.
	 */
	isync

	/*
	 * Read the contents of CCSRBAR from its new location, followed by
	 * another isync.
	 */
	lwz	r0, 0(r8)
	isync

#endif /* #ifdef CONFIG_FSL_CORENET */

	/* Delete the temporary TLBs */
delete_temp_tlbs:
	lis	r0, FSL_BOOKE_MAS0(0, 0, 0)@h
	ori	r0, r0, FSL_BOOKE_MAS0(0, 0, 0)@l
	li	r1, 0
	lis	r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR, (MAS2_I|MAS2_G))@h
	ori	r2, r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR, (MAS2_I|MAS2_G))@l
	mtspr	MAS0, r0
	mtspr	MAS1, r1
	mtspr	MAS2, r2
	isync
	msync
	tlbwe

	lis	r0, FSL_BOOKE_MAS0(0, 1, 0)@h
	ori	r0, r0, FSL_BOOKE_MAS0(0, 1, 0)@l
	lis	r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR + 0x1000, (MAS2_I|MAS2_G))@h
	ori	r2, r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR + 0x1000, (MAS2_I|MAS2_G))@l
	mtspr	MAS0, r0
	mtspr	MAS2, r2
	isync
	msync
	tlbwe
#endif /* #if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS) */

#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
#define DCSR_LAWBARH0	(CONFIG_SYS_CCSRBAR + 0x1000)
#define LAW_SIZE_1M	0x13
#define DCSRBAR_LAWAR	(LAW_EN | (0x1d << 20) | LAW_SIZE_1M)

	cmpwi	r27,0
	beq	9f

	/*
	 * Create a TLB entry for CCSR
	 *
	 * We're executing out of TLB1 entry in r14, and that's the only
	 * TLB entry that exists.  To allocate some TLB entries for our
	 * own use, flip a bit high enough that we won't flip it again
	 * via incrementing.
	 */

	xori	r8, r14, 32
	lis	r0, MAS0_TLBSEL(1)@h
	rlwimi	r0, r8, 16, MAS0_ESEL_MSK
	lis	r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@h
	ori	r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@l
	lis	r7, CONFIG_SYS_CCSRBAR@h
	ori	r7, r7, CONFIG_SYS_CCSRBAR@l
	ori	r2, r7, MAS2_I|MAS2_G
	lis	r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@h
	ori	r3, r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@l
	lis	r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
	ori	r4, r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
	mtspr	MAS0, r0
	mtspr	MAS1, r1
	mtspr	MAS2, r2
	mtspr	MAS3, r3
	mtspr	MAS7, r4
	isync
	tlbwe
	isync
	msync

	/* Map DCSR temporarily to physical address zero */
	li	r0, 0
	lis	r3, DCSRBAR_LAWAR@h
	ori	r3, r3, DCSRBAR_LAWAR@l

	stw	r0, 0xc00(r7)	/* LAWBARH0 */
	stw	r0, 0xc04(r7)	/* LAWBARL0 */
	sync
	stw	r3, 0xc08(r7)	/* LAWAR0 */

	/* Read back from LAWAR to ensure the update is complete. */
	lwz	r3, 0xc08(r7)	/* LAWAR0 */
	isync

	/* Create a TLB entry for DCSR at zero */

	addi	r9, r8, 1
	lis	r0, MAS0_TLBSEL(1)@h
	rlwimi	r0, r9, 16, MAS0_ESEL_MSK
	lis	r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@h
	ori	r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@l
	li	r6, 0	/* DCSR effective address */
	ori	r2, r6, MAS2_I|MAS2_G
	li	r3, MAS3_SW|MAS3_SR
	li	r4, 0
	mtspr	MAS0, r0
	mtspr	MAS1, r1
	mtspr	MAS2, r2
	mtspr	MAS3, r3
	mtspr	MAS7, r4
	isync
	tlbwe
	isync
	msync

	/* enable the timebase */
#define CTBENR	0xe2084
	li	r3, 1
	addis	r4, r7, CTBENR@ha
	stw	r3, CTBENR@l(r4)
	lwz	r3, CTBENR@l(r4)
	twi	0,r3,0
	isync

	.macro	erratum_set_ccsr offset value
	addis	r3, r7, \offset@ha
	lis	r4, \value@h
	addi	r3, r3, \offset@l
	ori	r4, r4, \value@l
	bl	erratum_set_value
	.endm

	.macro	erratum_set_dcsr offset value
	addis	r3, r6, \offset@ha
	lis	r4, \value@h
	addi	r3, r3, \offset@l
	ori	r4, r4, \value@l
	bl	erratum_set_value
	.endm

	erratum_set_dcsr 0xb0e08 0xe0201800
	erratum_set_dcsr 0xb0e18 0xe0201800
	erratum_set_dcsr 0xb0e38 0xe0400000
	erratum_set_dcsr 0xb0008 0x00900000
	erratum_set_dcsr 0xb0e40 0xe00a0000
	erratum_set_ccsr 0x18600 CONFIG_SYS_FSL_CORENET_SNOOPVEC_COREONLY
	erratum_set_ccsr 0x10f00 0x415e5000
	erratum_set_ccsr 0x11f00 0x415e5000

	/* Make temp mapping uncacheable again, if it was initially */
	bl	2f
2:	mflr	r3
	tlbsx	0, r3
	mfspr	r4, MAS2
	rlwimi	r4, r15, 0, MAS2_I
	rlwimi	r4, r15, 0, MAS2_G
	mtspr	MAS2, r4
	isync
	tlbwe
	isync
	msync

	/* Clear the cache */
	lis	r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
	ori	r3,r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
	sync
	isync
	mtspr	SPRN_L1CSR1,r3
	isync
2:	sync
	mfspr	r4,SPRN_L1CSR1
	and.	r4,r4,r3
	bne	2b

	lis	r3,(L1CSR1_CPE|L1CSR1_ICE)@h
	ori	r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
	sync
	isync
	mtspr	SPRN_L1CSR1,r3
	isync
2:	sync
	mfspr	r4,SPRN_L1CSR1
	and.	r4,r4,r3
	beq	2b

	/* Remove temporary mappings */
	lis	r0, MAS0_TLBSEL(1)@h
	rlwimi	r0, r9, 16, MAS0_ESEL_MSK
	li	r3, 0
	mtspr	MAS0, r0
	mtspr	MAS1, r3
	isync
	tlbwe
	isync
	msync

	li	r3, 0
	stw	r3, 0xc08(r7)	/* LAWAR0 */
	lwz	r3, 0xc08(r7)
	isync

	lis	r0, MAS0_TLBSEL(1)@h
	rlwimi	r0, r8, 16, MAS0_ESEL_MSK
	li	r3, 0
	mtspr	MAS0, r0
	mtspr	MAS1, r3
	isync
	tlbwe
	isync
	msync

	b	9f

	/* r3 = addr, r4 = value, clobbers r5, r11, r12 */
erratum_set_value:
	/* Lock two cache lines into I-Cache */
	sync
	mfspr	r11, SPRN_L1CSR1
	rlwinm	r11, r11, 0, ~L1CSR1_ICUL
	sync
	isync
	mtspr	SPRN_L1CSR1, r11
	isync

	mflr	r12
	bl	5f
5:	mflr	r5
	addi	r5, r5, 2f - 5b
	icbtls	0, 0, r5
	addi	r5, r5, 64

	sync
	mfspr	r11, SPRN_L1CSR1
3:	andi.	r11, r11, L1CSR1_ICUL
	bne	3b

	icbtls	0, 0, r5
	addi	r5, r5, 64

	sync
	mfspr	r11, SPRN_L1CSR1
3:	andi.	r11, r11, L1CSR1_ICUL
	bne	3b

	b	2f
	.align	6
	/* Inside a locked cacheline, wait a while, write, then wait a while */
2:	sync

	mfspr	r5, SPRN_TBRL
	addis	r11, r5, 0x10000@h	/* wait 65536 timebase ticks */
4:	mfspr	r5, SPRN_TBRL
	subf.	r5, r5, r11
	bgt	4b

	stw	r4, 0(r3)

	mfspr	r5, SPRN_TBRL
	addis	r11, r5, 0x10000@h	/* wait 65536 timebase ticks */
4:	mfspr	r5, SPRN_TBRL
	subf.	r5, r5, r11
	bgt	4b

	sync

	/*
	 * Fill out the rest of this cache line and the next with nops,
	 * to ensure that nothing outside the locked area will be
	 * fetched due to a branch.
	 */
	.rept 19
	nop
	.endr

	sync
	mfspr	r11, SPRN_L1CSR1
	rlwinm	r11, r11, 0, ~L1CSR1_ICUL
	sync
	isync
	mtspr	SPRN_L1CSR1, r11
	isync

	mtlr	r12
	blr

9:
#endif

create_init_ram_area:
	lis	r6,FSL_BOOKE_MAS0(1, 15, 0)@h
	ori	r6,r6,FSL_BOOKE_MAS0(1, 15, 0)@l

#if !defined(CONFIG_SYS_RAMBOOT) && !defined(CONFIG_SECURE_BOOT)
	/* create a temp mapping in AS=1 to the 4M boot window */
	lis	r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_4M)@h
	ori	r7,r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_4M)@l

	lis	r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE & 0xffc00000, (MAS2_I|MAS2_G))@h
	ori	r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE & 0xffc00000, (MAS2_I|MAS2_G))@l

	/* The 85xx has the default boot window 0xff800000 - 0xffffffff */
	lis	r9,FSL_BOOKE_MAS3(0xffc00000, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@h
	ori	r9,r9,FSL_BOOKE_MAS3(0xffc00000, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@l
#elif !defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_SECURE_BOOT)
	/* create a temp mapping in AS = 1 for the Flash mapping
	 * created by the PBL for ISBC code
	 */
	lis	r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_1M)@h
	ori	r7,r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_1M)@l

	lis	r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE, (MAS2_I|MAS2_G))@h
	ori	r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE, (MAS2_I|MAS2_G))@l

	lis	r9,FSL_BOOKE_MAS3(CONFIG_SYS_PBI_FLASH_WINDOW, 0,
			(MAS3_SX|MAS3_SW|MAS3_SR))@h
	ori	r9,r9,FSL_BOOKE_MAS3(CONFIG_SYS_PBI_FLASH_WINDOW, 0,
			(MAS3_SX|MAS3_SW|MAS3_SR))@l
#else
	/*
	 * create a temp mapping in AS=1 to the 1M CONFIG_SYS_MONITOR_BASE space;
	 * the main image has been relocated to CONFIG_SYS_MONITOR_BASE in the
	 * second stage.
	 */
	lis	r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_1M)@h
	ori	r7,r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_1M)@l

	lis	r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE, (MAS2_I|MAS2_G))@h
	ori	r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE, (MAS2_I|MAS2_G))@l

	lis	r9,FSL_BOOKE_MAS3(CONFIG_SYS_MONITOR_BASE, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@h
	ori	r9,r9,FSL_BOOKE_MAS3(CONFIG_SYS_MONITOR_BASE, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@l
#endif

	mtspr	MAS0,r6
	mtspr	MAS1,r7
	mtspr	MAS2,r8
	mtspr	MAS3,r9
	isync
	msync
	tlbwe

	/* create a temp mapping in AS=1 to the stack */
	lis	r6,FSL_BOOKE_MAS0(1, 14, 0)@h
	ori	r6,r6,FSL_BOOKE_MAS0(1, 14, 0)@l

	lis	r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_16K)@h
	ori	r7,r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_16K)@l

	lis	r8,FSL_BOOKE_MAS2(CONFIG_SYS_INIT_RAM_ADDR, 0)@h
	ori	r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_INIT_RAM_ADDR, 0)@l

#if defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW) && \
    defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH)
	lis	r9,FSL_BOOKE_MAS3(CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW, 0,
			(MAS3_SX|MAS3_SW|MAS3_SR))@h
	ori	r9,r9,FSL_BOOKE_MAS3(CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW, 0,
			(MAS3_SX|MAS3_SW|MAS3_SR))@l
	li	r10,CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH
	mtspr	MAS7,r10
#else
	lis	r9,FSL_BOOKE_MAS3(CONFIG_SYS_INIT_RAM_ADDR, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@h
	ori	r9,r9,FSL_BOOKE_MAS3(CONFIG_SYS_INIT_RAM_ADDR, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@l
#endif

	mtspr	MAS0,r6
	mtspr	MAS1,r7
	mtspr	MAS2,r8
	mtspr	MAS3,r9
	isync
	msync
	tlbwe

	lis	r6,MSR_IS|MSR_DS|MSR_DE@h
	ori	r6,r6,MSR_IS|MSR_DS|MSR_DE@l
	lis	r7,switch_as@h
	ori	r7,r7,switch_as@l

	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r6
	rfi

switch_as:
/* L1 DCache is used for initial RAM */

	/* Allocate Initial RAM in data cache.
	 */
	lis	r3,CONFIG_SYS_INIT_RAM_ADDR@h
	ori	r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l
	mfspr	r2, L1CFG0
	andi.	r2, r2, 0x1ff
	/* cache size * 1024 / (2 * L1 line size) */
	slwi	r2, r2, (10 - 1 - L1_CACHE_SHIFT)
	mtctr	r2
	li	r0,0
1:
	dcbz	r0,r3
	dcbtls	0,r0,r3
	addi	r3,r3,CONFIG_SYS_CACHELINE_SIZE
	bdnz	1b

	/* Jump out of the last 4K page and continue to 'normal' start */
#ifdef CONFIG_SYS_RAMBOOT
	b	_start_cont
#else
	/* Calculate absolute address in FLASH and jump there */
	/*--------------------------------------------------------------*/
	lis	r3,CONFIG_SYS_MONITOR_BASE@h
	ori	r3,r3,CONFIG_SYS_MONITOR_BASE@l
	addi	r3,r3,_start_cont - _start + _START_OFFSET
	mtlr	r3
	blr
#endif

	.text
	.globl	_start
_start:
	.long	0x27051956	/* U-BOOT Magic Number */
	.globl	version_string
version_string:
	.ascii U_BOOT_VERSION_STRING, "\0"

	.align	4
	.globl	_start_cont
_start_cont:
	/* Set up the stack in initial RAM; could be L2-as-SRAM or L1 dcache */
	lis	r3,(CONFIG_SYS_INIT_RAM_ADDR)@h
	ori	r3,r3,((CONFIG_SYS_INIT_SP_OFFSET-16)&~0xf)@l	/* Align to 16 */
	li	r0,0
	stw	r0,0(r3)	/* Terminate Back Chain */
	stw	r0,+4(r3)	/* NULL return address. */
	mr	r1,r3		/* Transfer to SP(r1) */

	GET_GOT
	bl	cpu_init_early_f

	/* switch back to AS = 0 */
	lis	r3,(MSR_CE|MSR_ME|MSR_DE)@h
	ori	r3,r3,(MSR_CE|MSR_ME|MSR_DE)@l
	mtmsr	r3
	isync

	bl	cpu_init_f
	bl	board_init_f
	isync

	/* NOTREACHED - board_init_f() does not return */

#ifndef CONFIG_NAND_SPL
	. = EXC_OFF_SYS_RESET
	.globl	_start_of_vectors
_start_of_vectors:

/* Critical input. */
	CRIT_EXCEPTION(0x0100, CriticalInput, CritcalInputException)

/* Machine check */
	MCK_EXCEPTION(0x200, MachineCheck, MachineCheckException)

/* Data Storage exception. */
	STD_EXCEPTION(0x0300, DataStorage, UnknownException)

/* Instruction Storage exception. */
	STD_EXCEPTION(0x0400, InstStorage, UnknownException)

/* External Interrupt exception. */
	STD_EXCEPTION(0x0500, ExtInterrupt, ExtIntException)

/* Alignment exception. */
	. = 0x0600
Alignment:
	EXCEPTION_PROLOG(SRR0, SRR1)
	mfspr	r4,DAR
	stw	r4,_DAR(r21)
	mfspr	r5,DSISR
	stw	r5,_DSISR(r21)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_TEMPLATE(Alignment, AlignmentException, MSR_KERNEL, COPY_EE)

/* Program check exception */
	. = 0x0700
ProgramCheck:
	EXCEPTION_PROLOG(SRR0, SRR1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_TEMPLATE(ProgramCheck, ProgramCheckException,
		MSR_KERNEL, COPY_EE)

	/* No FPU on MPC85xx.  This exception is not supposed to happen.
	*/
	STD_EXCEPTION(0x0800, FPUnavailable, UnknownException)

	. = 0x0900
/*
 * r0 - SYSCALL number
 * r3-... arguments
 */
SystemCall:
	addis	r11,r0,0	/* get functions table addr */
	ori	r11,r11,0	/* Note: this code is patched in trap_init */
	addis	r12,r0,0	/* get number of functions */
	ori	r12,r12,0

	cmplw	0,r0,r12
	bge	1f

	rlwinm	r0,r0,2,0,31	/* fn_addr = fn_tbl[r0] */
	add	r11,r11,r0
	lwz	r11,0(r11)

	li	r20,0xd00-4	/* Get stack pointer */
	lwz	r12,0(r20)
	subi	r12,r12,12	/* Adjust stack pointer */
	li	r0,0xc00+_end_back-SystemCall
	cmplw	0,r0,r12	/* Check stack overflow */
	bgt	1f
	stw	r12,0(r20)

	mflr	r0
	stw	r0,0(r12)
	mfspr	r0,SRR0
	stw	r0,4(r12)
	mfspr	r0,SRR1
	stw	r0,8(r12)

	li	r12,0xc00+_back-SystemCall
	mtlr	r12
	mtspr	SRR0,r11

1:	SYNC
	rfi
_back:

	mfmsr	r11		/* Disable interrupts */
	li	r12,0
	ori	r12,r12,MSR_EE
	andc	r11,r11,r12
	SYNC			/* Some chip revs need this... */
	mtmsr	r11
	SYNC

	li	r12,0xd00-4	/* restore regs */
	lwz	r12,0(r12)

	lwz	r11,0(r12)
	mtlr	r11
	lwz	r11,4(r12)
	mtspr	SRR0,r11
	lwz	r11,8(r12)
	mtspr	SRR1,r11

	addi	r12,r12,12	/* Adjust stack pointer */
	li	r20,0xd00-4
	stw	r12,0(r20)

	SYNC
	rfi
_end_back:

	STD_EXCEPTION(0x0a00, Decrementer, timer_interrupt)
	STD_EXCEPTION(0x0b00, IntervalTimer, UnknownException)
	STD_EXCEPTION(0x0c00, WatchdogTimer, UnknownException)

	STD_EXCEPTION(0x0d00, DataTLBError, UnknownException)
	STD_EXCEPTION(0x0e00, InstructionTLBError, UnknownException)

	CRIT_EXCEPTION(0x0f00, DebugBreakpoint, DebugException )

	.globl	_end_of_vectors
_end_of_vectors:


	. = . + (0x100 - ( . & 0xff ))	/* align for debug */
/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception.
 * Register r21 is pointer into trap frame, r1 has new stack pointer.
 */
	.globl	transfer_to_handler
transfer_to_handler:
	stw	r22,_NIP(r21)
	lis	r22,MSR_POW@h
	andc	r23,r23,r22
	stw	r23,_MSR(r21)
	SAVE_GPR(7, r21)
	SAVE_4GPRS(8, r21)
	SAVE_8GPRS(12, r21)
	SAVE_8GPRS(24, r21)

	mflr	r23
	andi.	r24,r23,0x3f00	/* get vector offset */
	stw	r24,TRAP(r21)
	li	r22,0
	stw	r22,RESULT(r21)
	mtspr	SPRG2,r22	/* r1 is now kernel sp */

	lwz	r24,0(r23)	/* virtual address of handler */
	lwz	r23,4(r23)	/* where to go when done */
	mtspr	SRR0,r24
	mtspr	SRR1,r20
	mtlr	r23
	SYNC
	rfi			/* jump to handler, enable MMU */

int_return:
	mfmsr	r28		/* Disable interrupts */
	li	r4,0
	ori	r4,r4,MSR_EE
	andc	r28,r28,r4
	SYNC			/* Some chip revs need this... */
	mtmsr	r28
	SYNC
	lwz	r2,_CTR(r1)
	lwz	r0,_LINK(r1)
	mtctr	r2
	mtlr	r0
	lwz	r2,_XER(r1)
	lwz	r0,_CCR(r1)
	mtspr	XER,r2
	mtcrf	0xFF,r0
	REST_10GPRS(3, r1)
	REST_10GPRS(13, r1)
	REST_8GPRS(23, r1)
	REST_GPR(31, r1)
	lwz	r2,_NIP(r1)	/* Restore environment */
	lwz	r0,_MSR(r1)
	mtspr	SRR0,r2
	mtspr	SRR1,r0
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	SYNC
	rfi

crit_return:
	mfmsr	r28		/* Disable interrupts */
	li	r4,0
	ori	r4,r4,MSR_EE
	andc	r28,r28,r4
	SYNC			/* Some chip revs need this... */
	mtmsr	r28
	SYNC
	lwz	r2,_CTR(r1)
	lwz	r0,_LINK(r1)
	mtctr	r2
	mtlr	r0
	lwz	r2,_XER(r1)
	lwz	r0,_CCR(r1)
	mtspr	XER,r2
	mtcrf	0xFF,r0
	REST_10GPRS(3, r1)
	REST_10GPRS(13, r1)
	REST_8GPRS(23, r1)
	REST_GPR(31, r1)
	lwz	r2,_NIP(r1)	/* Restore environment */
	lwz	r0,_MSR(r1)
	mtspr	SPRN_CSRR0,r2
	mtspr	SPRN_CSRR1,r0
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	SYNC
	rfci

mck_return:
	mfmsr	r28		/* Disable interrupts */
	li	r4,0
	ori	r4,r4,MSR_EE
	andc	r28,r28,r4
	SYNC			/* Some chip revs need this... */
	mtmsr	r28
	SYNC
	lwz	r2,_CTR(r1)
	lwz	r0,_LINK(r1)
	mtctr	r2
	mtlr	r0
	lwz	r2,_XER(r1)
	lwz	r0,_CCR(r1)
	mtspr	XER,r2
	mtcrf	0xFF,r0
	REST_10GPRS(3, r1)
	REST_10GPRS(13, r1)
	REST_8GPRS(23, r1)
	REST_GPR(31, r1)
	lwz	r2,_NIP(r1)	/* Restore environment */
	lwz	r0,_MSR(r1)
	mtspr	SPRN_MCSRR0,r2
	mtspr	SPRN_MCSRR1,r0
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	SYNC
	rfmci
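
/*
 * The three return paths above are identical except for the save/restore
 * register pair and return instruction they use: int_return restores
 * SRR0/SRR1 and exits with rfi, crit_return restores CSRR0/CSRR1 and exits
 * with rfci, and mck_return restores MCSRR0/MCSRR1 and exits with rfmci.
 */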

/* Cache functions.
*/
.globl flush_icache
flush_icache:
.globl invalidate_icache
invalidate_icache:
	mfspr	r0,L1CSR1
	ori	r0,r0,L1CSR1_ICFI
	msync
	isync
	mtspr	L1CSR1,r0
	isync
	blr			/* entire I cache */

.globl invalidate_dcache
invalidate_dcache:
	mfspr	r0,L1CSR0
	ori	r0,r0,L1CSR0_DCFI
	msync
	isync
	mtspr	L1CSR0,r0
	isync
	blr

	.globl	icache_enable
icache_enable:
	mflr	r8
	bl	invalidate_icache
	mtlr	r8
	isync
	mfspr	r4,L1CSR1
	ori	r4,r4,0x0001
	oris	r4,r4,0x0001
	mtspr	L1CSR1,r4
	isync
	blr

	.globl	icache_disable
icache_disable:
	mfspr	r0,L1CSR1
	lis	r3,0
	ori	r3,r3,L1CSR1_ICE
	andc	r0,r0,r3
	mtspr	L1CSR1,r0
	isync
	blr

	.globl	icache_status
icache_status:
	mfspr	r3,L1CSR1
	andi.	r3,r3,L1CSR1_ICE
	blr

	.globl	dcache_enable
dcache_enable:
	mflr	r8
	bl	invalidate_dcache
	mtlr	r8
	isync
	mfspr	r0,L1CSR0
	ori	r0,r0,0x0001
	oris	r0,r0,0x0001
	msync
	isync
	mtspr	L1CSR0,r0
	isync
	blr

	.globl	dcache_disable
dcache_disable:
	mfspr	r3,L1CSR0
	lis	r4,0
	ori	r4,r4,L1CSR0_DCE
	andc	r3,r3,r4
	mtspr	L1CSR0,r3
	isync
	blr

	.globl	dcache_status
dcache_status:
	mfspr	r3,L1CSR0
	andi.	r3,r3,L1CSR0_DCE
	blr

	.globl	get_pir
get_pir:
	mfspr	r3,PIR
	blr

	.globl	get_pvr
get_pvr:
	mfspr	r3,PVR
	blr

	.globl	get_svr
get_svr:
	mfspr	r3,SVR
	blr

	.globl	wr_tcr
wr_tcr:
	mtspr	TCR,r3
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 in8 */
/* Description:	 Input 8 bits */
/*------------------------------------------------------------------------------- */
	.globl	in8
in8:
	lbz	r3,0x0000(r3)
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 out8 */
/* Description:	 Output 8 bits */
/*------------------------------------------------------------------------------- */
	.globl	out8
out8:
	stb	r4,0x0000(r3)
	sync
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 out16 */
/* Description:	 Output 16 bits */
/*------------------------------------------------------------------------------- */
	.globl	out16
out16:
	sth	r4,0x0000(r3)
	sync
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 out16r */
/* Description:	 Byte reverse and output 16 bits */
/*------------------------------------------------------------------------------- */
	.globl	out16r
out16r:
	sthbrx	r4,r0,r3
	sync
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 out32 */
/* Description:	 Output 32 bits */
/*------------------------------------------------------------------------------- */
	.globl	out32
out32:
	stw	r4,0x0000(r3)
	sync
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 out32r */
/* Description:	 Byte reverse and output 32 bits */
/*------------------------------------------------------------------------------- */
	.globl	out32r
out32r:
	stwbrx	r4,r0,r3
	sync
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 in16 */
/* Description:	 Input 16 bits */
/*------------------------------------------------------------------------------- */
	.globl	in16
in16:
	lhz	r3,0x0000(r3)
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 in16r */
/* Description:	 Input 16 bits and byte reverse */
/*------------------------------------------------------------------------------- */
	.globl	in16r
in16r:
	lhbrx	r3,r0,r3
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 in32 */
/* Description:	 Input 32 bits */
/*------------------------------------------------------------------------------- */
	.globl	in32
in32:
	lwz	3,0x0000(3)
	blr

/*------------------------------------------------------------------------------- */
/* Function:	 in32r */
/* Description:	 Input 32 bits and byte reverse */
/*------------------------------------------------------------------------------- */
	.globl	in32r
in32r:
	lwbrx	r3,r0,r3
	blr
#endif /* !CONFIG_NAND_SPL */

/*------------------------------------------------------------------------------*/

/*
 * void write_tlb(mas0, mas1, mas2, mas3, mas7)
 */
	.globl	write_tlb
write_tlb:
	mtspr	MAS0,r3
	mtspr	MAS1,r4
	mtspr	MAS2,r5
	mtspr	MAS3,r6
#ifdef CONFIG_ENABLE_36BIT_PHYS
	mtspr	MAS7,r7
#endif
	li	r3,0
#ifdef CONFIG_SYS_BOOK3E_HV
	mtspr	MAS8,r3
#endif
	isync
	tlbwe
	msync
	isync
	blr

/*
 * void relocate_code (addr_sp, gd, addr_moni)
 *
 * This "function" does not return, instead it continues in RAM
 * after relocating the monitor code.
 *
 * r3 = dest
 * r4 = src
 * r5 = length in bytes
 * r6 = cachelinesize
 */
	.globl	relocate_code
relocate_code:
	mr	r1,r3		/* Set new stack pointer		*/
	mr	r9,r4		/* Save copy of Init Data pointer	*/
	mr	r10,r5		/* Save copy of Destination Address	*/

	GET_GOT
	mr	r3,r5				/* Destination Address	*/
	lis	r4,CONFIG_SYS_MONITOR_BASE@h	/* Source      Address	*/
	ori	r4,r4,CONFIG_SYS_MONITOR_BASE@l
	lwz	r5,GOT(__init_end)
	sub	r5,r5,r4
	li	r6,CONFIG_SYS_CACHELINE_SIZE	/* Cache Line Size	*/

	/*
	 * Fix GOT pointer:
	 *
	 * New GOT-PTR = (old GOT-PTR - CONFIG_SYS_MONITOR_BASE) + Destination Address
	 *
	 * Offset:
	 */
	sub	r15,r10,r4

	/* First our own GOT */
	add	r12,r12,r15
	/* then the one used by the C code */
	add	r30,r30,r15

	/*
	 * Now relocate code
	 */

	cmplw	cr1,r3,r4
	addi	r0,r5,3
	srwi.	r0,r0,2
	beq	cr1,4f		/* In place copy is not necessary	*/
	beq	7f		/* Protect against 0 count		*/
	mtctr	r0
	bge	cr1,2f

	la	r8,-4(r4)
	la	r7,-4(r3)
1:	lwzu	r0,4(r8)
	stwu	r0,4(r7)
	bdnz	1b
	b	4f

2:	slwi	r0,r0,2
	add	r8,r4,r0
	add	r7,r3,r0
3:	lwzu	r0,-4(r8)
	stwu	r0,-4(r7)
	bdnz	3b

/*
 * Now flush the cache: note that we must start from a cache aligned
 * address. Otherwise we might miss one cache line.
 */
4:	cmpwi	r6,0
	add	r5,r3,r5
	beq	7f		/* Always flush prefetch queue in any case */
	subi	r0,r6,1
	andc	r3,r3,r0
	mr	r4,r3
5:	dcbst	0,r4
	add	r4,r4,r6
	cmplw	r4,r5
	blt	5b
	sync			/* Wait for all dcbst to complete on bus */
	mr	r4,r3
6:	icbi	0,r4
	add	r4,r4,r6
	cmplw	r4,r5
	blt	6b
7:	sync			/* Wait for all icbi to complete on bus */
	isync

/*
 * We are done. Do not return, instead branch to second part of board
 * initialization, now running from RAM.
 */

	addi	r0,r10,in_ram - _start + _START_OFFSET

	/*
	 * As IVPR is going to point to a RAM address,
	 * make sure IVOR15 holds a valid opcode to support the debugger
	 */
	mtspr	IVOR15,r0

	/*
	 * Re-point the IVPR at RAM
	 */
	mtspr	IVPR,r10

	mtlr	r0
	blr			/* NEVER RETURNS! */
	.globl	in_ram
in_ram:

	/*
	 * Relocation Function, r12 points to got2+0x8000
	 *
	 * Adjust got2 pointers, no need to check for 0, this code
	 * already puts a few entries in the table.
	 */
	li	r0,__got2_entries@sectoff@l
	la	r3,GOT(_GOT2_TABLE_)
	lwz	r11,GOT(_GOT2_TABLE_)
	mtctr	r0
	sub	r11,r3,r11
	addi	r3,r3,-4
1:	lwzu	r0,4(r3)
	cmpwi	r0,0
	beq-	2f
	add	r0,r0,r11
	stw	r0,0(r3)
2:	bdnz	1b

	/*
	 * Now adjust the fixups and the pointers to the fixups
	 * in case we need to move ourselves again.
	 */
	li	r0,__fixup_entries@sectoff@l
	lwz	r3,GOT(_FIXUP_TABLE_)
	cmpwi	r0,0
	mtctr	r0
	addi	r3,r3,-4
	beq	4f
3:	lwzu	r4,4(r3)
	lwzux	r0,r4,r11
	cmpwi	r0,0
	add	r0,r0,r11
	stw	r4,0(r3)
	beq-	5f
	stw	r0,0(r4)
5:	bdnz	3b
4:
clear_bss:
	/*
	 * Now clear BSS segment
	 */
	lwz	r3,GOT(__bss_start)
	lwz	r4,GOT(__bss_end__)

	cmplw	0,r3,r4
	beq	6f

	li	r0,0
5:
	stw	r0,0(r3)
	addi	r3,r3,4
	cmplw	0,r3,r4
	bne	5b
6:

	mr	r3,r9		/* Init Data pointer		*/
	mr	r4,r10		/* Destination Address		*/
	bl	board_init_r

#ifndef CONFIG_NAND_SPL
	/*
	 * Copy exception vector code to low memory
	 *
	 * r3: dest_addr
	 * r7: source address, r8: end address, r9: target address
	 */
	.globl	trap_init
trap_init:
	mflr	r4		/* save link register		*/
	GET_GOT
	lwz	r7,GOT(_start_of_vectors)
	lwz	r8,GOT(_end_of_vectors)

	li	r9,0x100	/* reset vector always at 0x100 */

	cmplw	0,r7,r8
	bgelr			/* return if r7>=r8 - just in case */
1:
	lwz	r0,0(r7)
	stw	r0,0(r9)
	addi	r7,r7,4
	addi	r9,r9,4
	cmplw	0,r7,r8
	bne	1b

	/*
	 * relocate `hdlr' and `int_return' entries
	 */
	li	r7,.L_CriticalInput - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_MachineCheck - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_DataStorage - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_InstStorage - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_ExtInterrupt - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_Alignment - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_ProgramCheck - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_FPUnavailable - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_Decrementer - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_IntervalTimer - _start + _START_OFFSET
	li	r8,_end_of_vectors - _start + _START_OFFSET
2:
	bl	trap_reloc
	addi	r7,r7,0x100	/* next exception vector	*/
	cmplw	0,r7,r8
	blt	2b

	/* Update IVORs as per relocated vector table address */
	li	r7,0x0100
	mtspr	IVOR0,r7	/* 0: Critical input */
	li	r7,0x0200
	mtspr	IVOR1,r7	/* 1: Machine check */
	li	r7,0x0300
	mtspr	IVOR2,r7	/* 2: Data storage */
	li	r7,0x0400
	mtspr	IVOR3,r7	/* 3: Instruction storage */
	li	r7,0x0500
	mtspr	IVOR4,r7	/* 4: External interrupt */
	li	r7,0x0600
	mtspr	IVOR5,r7	/* 5: Alignment */
	li	r7,0x0700
	mtspr	IVOR6,r7	/* 6: Program check */
	li	r7,0x0800
	mtspr	IVOR7,r7	/* 7: floating point unavailable */
	li	r7,0x0900
	mtspr	IVOR8,r7	/* 8: System call */
	/* 9: Auxiliary processor unavailable(unsupported) */
	li	r7,0x0a00
	mtspr	IVOR10,r7	/* 10: Decrementer */
	li	r7,0x0b00
	mtspr	IVOR11,r7	/* 11: Interval timer */
	li	r7,0x0c00
	mtspr	IVOR12,r7	/* 12: Watchdog timer */
	li	r7,0x0d00
	mtspr	IVOR13,r7	/* 13: Data TLB error */
	li	r7,0x0e00
	mtspr	IVOR14,r7	/* 14: Instruction TLB error */
	li	r7,0x0f00
	mtspr	IVOR15,r7	/* 15: Debug */

	lis	r7,0x0
	mtspr	IVPR,r7

	mtlr	r4		/* restore link register	*/
	blr

.globl unlock_ram_in_cache
unlock_ram_in_cache:
	/* invalidate the INIT_RAM section */
	lis	r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@h
	ori	r3,r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@l
	mfspr	r4,L1CFG0
	andi.	r4,r4,0x1ff
	slwi	r4,r4,(10 - 1 - L1_CACHE_SHIFT)
	mtctr	r4
1:	dcbi	r0,r3
	addi	r3,r3,CONFIG_SYS_CACHELINE_SIZE
	bdnz	1b
	sync

	/* Invalidate the TLB entries for the cache */
	lis	r3,CONFIG_SYS_INIT_RAM_ADDR@h
	ori	r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l
	tlbivax	0,r3
	addi	r3,r3,0x1000
	tlbivax	0,r3
	addi	r3,r3,0x1000
	tlbivax	0,r3
	addi	r3,r3,0x1000
	tlbivax	0,r3
	isync
	blr
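
/*
 * The loop sizing in flush_dcache below, worked through for a hypothetical
 * 32 KiB, 8-way L1 with 64-byte cache blocks (illustrative numbers only):
 * r5 = block size = 64, r6 = log2(1024/64) - log2(8) = 1, and
 * r7 = 32 * 13 << 1 = 832 load (and later flush) iterations, i.e. about
 * 13/8 of the number of cache blocks, enough to displace every set.
 */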
.globl flush_dcache
flush_dcache:
	mfspr	r3,SPRN_L1CFG0

	rlwinm	r5,r3,9,3	/* Extract cache block size */
	twlgti	r5,1		/* Only 32 and 64 byte cache blocks
				 * are currently defined.
				 */
	li	r4,32
	subfic	r6,r5,2		/* r6 = log2(1KiB / cache block size) -
				 *      log2(number of ways)
				 */
	slw	r5,r4,r5	/* r5 = cache block size */

	rlwinm	r7,r3,0,0xff	/* Extract number of KiB in the cache */
	mulli	r7,r7,13	/* An 8-way cache will require 13
				 * loads per set.
				 */
	slw	r7,r7,r6

	/* save off HID0 and set DCFA */
	mfspr	r8,SPRN_HID0
	ori	r9,r8,HID0_DCFA@l
	mtspr	SPRN_HID0,r9
	isync

	lis	r4,0
	mtctr	r7

1:	lwz	r3,0(r4)	/* Load... */
	add	r4,r4,r5
	bdnz	1b

	msync
	lis	r4,0
	mtctr	r7

1:	dcbf	0,r4		/* ...and flush. */
	add	r4,r4,r5
	bdnz	1b

	/* restore HID0 */
	mtspr	SPRN_HID0,r8
	isync

	blr

.globl setup_ivors
setup_ivors:

#include "fixed_ivor.S"
	blr
#endif /* !CONFIG_NAND_SPL */