/*
 * Kernel execution entry point code.
 *
 *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 *      Initial PowerPC version.
 *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
 *      Rewritten for PReP
 *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *      Low-level exception handlers, MMU support, and rewrite.
 *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
 *      PowerPC 8xx modifications.
 *    Copyright (c) 1998-1999 TiVo, Inc.
 *      PowerPC 403GCX modifications.
 *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *      PowerPC 403GCX/405GP modifications.
 *    Copyright 2000 MontaVista Software Inc.
 *	PPC405 modifications
 *      PowerPC 403GCX/405GP modifications.
 *	Author: MontaVista Software, Inc.
 *		frank_rowand@mvista.com or source@mvista.com
 *		debbie_chu@mvista.com
 *    Copyright 2002-2005 MontaVista Software, Inc.
 *      PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/init.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/synch.h>
#include "head_booke.h"


/* As with the other PowerPC ports, it is expected that when code
 * execution begins here, the following registers contain valid, yet
 * optional, information:
 *
 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
 *   r4 - Starting address of the init RAM disk
 *   r5 - Ending address of the init RAM disk
 *   r6 - Start of kernel command line string (e.g. "mem=128")
 *   r7 - End of kernel command line string
 *
 */
	__HEAD
_ENTRY(_stext);
_ENTRY(_start);
	/*
	 * Reserve a word at a fixed location to store the address
	 * of abatron_pteptrs
	 */
	nop
/*
 * Save parameters we are passed
 */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7
	li	r24,0		/* CPU number */

	bl	init_cpu_state

	/*
	 * This is where the main kernel code starts.
	 */

	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to current thread */
	addi	r4,r2,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4

	/* stack */
	lis	r1,init_thread_union@h
	ori	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)

	bl	early_init
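/*
 * A rough C picture of the current/stack setup above (illustrative
 * sketch only, not generated code):
 *
 *	r2 = &init_task;				// "current"
 *	SPRN_SPRG_THREAD = &init_task.thread;
 *	r1 = (unsigned long)&init_thread_union
 *		+ THREAD_SIZE - STACK_FRAME_OVERHEAD;	// initial stack
 *	*(unsigned long *)r1 = 0;			// terminate back-chain
 */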
/*
 * Decide what sort of machine this is and initialize the MMU.
 */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27
	bl	machine_init
	bl	MMU_init

	/* Setup PTE pointers for the Abatron bdiGDB */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	lis	r4, KERNELBASE@h
	ori	r4, r4, KERNELBASE@l
	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
	stw	r6, 0(r5)

	/* Clear the Machine Check Syndrome Register */
	li	r0,0
	mtspr	SPRN_MCSR,r0

	/* Let's move on */
	lis	r4,start_kernel@h
	ori	r4,r4,start_kernel@l
	lis	r3,MSR_KERNEL@h
	ori	r3,r3,MSR_KERNEL@l
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi			/* change context and jump to start_kernel */

/*
 * Interrupt vector entry code
 *
 * The Book E MMUs are always on so we don't need to handle
 * interrupts in real mode as with previous PPC processors. In
 * this case we handle interrupts in the kernel virtual address
 * space.
 *
 * Interrupt vectors are dynamically placed relative to the
 * interrupt prefix as determined by the address of interrupt_base.
 * The interrupt vector offsets are programmed using the labels
 * for each interrupt vector entry.
 *
 * Interrupt vectors must be aligned on a 16 byte boundary.
 * We align on a 32 byte cache line boundary for good measure.
 */

interrupt_base:
	/* Critical Input Interrupt */
	CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)

	/* Machine Check Interrupt */
	CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
	MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception)

	/* Data Storage Interrupt */
	DATA_STORAGE_EXCEPTION

	/* Instruction Storage Interrupt */
	INSTRUCTION_STORAGE_EXCEPTION

	/* External Input Interrupt */
	EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)

	/* Alignment Interrupt */
	ALIGNMENT_EXCEPTION

	/* Program Interrupt */
	PROGRAM_EXCEPTION

	/* Floating Point Unavailable Interrupt */
#ifdef CONFIG_PPC_FPU
	FP_UNAVAILABLE_EXCEPTION
#else
	EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
#endif
	/* System Call Interrupt */
	START_EXCEPTION(SystemCall)
	NORMAL_EXCEPTION_PROLOG
	EXC_XFER_EE_LITE(0x0c00, DoSyscall)

	/* Auxiliary Processor Unavailable Interrupt */
	EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)

	/* Decrementer Interrupt */
	DECREMENTER_EXCEPTION

	/* Fixed Interval Timer Interrupt */
	/* TODO: Add FIT support */
	EXCEPTION(0x1010, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)

	/* Watchdog Timer Interrupt */
	/* TODO: Add watchdog support */
#ifdef CONFIG_BOOKE_WDT
	CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException)
#else
	CRITICAL_EXCEPTION(0x1020, WatchdogTimer, unknown_exception)
#endif
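/*
 * Both 44x TLB miss handlers below do the same software page table
 * walk. In rough C (illustrative sketch only; the helper names are
 * made up here, the real field layouts live in asm/pte-44x.h):
 *
 *	pgd = (ea >= PAGE_OFFSET) ? swapper_pg_dir	   // kernel fault
 *				  : current->thread.pgdir; // user fault
 *	pmd = pgd[pgd_index(ea)];
 *	if (!pmd)
 *		goto bail;	// no pte table: take the full fault path
 *	pte = pte_table(pmd)[pte_index(ea)];	// one PTE = 2 words here
 *	if (required_mask & ~pte_lo)
 *		goto bail;	// insufficient permissions
 *	// else fall through to finish_tlb_load_44x
 */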
	/* Data TLB Error Interrupt */
	START_EXCEPTION(DataTLBError44x)
	mtspr	SPRN_SPRG_WSCRATCH0, r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1, r11
	mtspr	SPRN_SPRG_WSCRATCH2, r12
	mtspr	SPRN_SPRG_WSCRATCH3, r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4, r11
	mfspr	r10, SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	r10, r11
	blt+	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MMUCR
	rlwinm	r12,r12,0,0,23		/* Clear TID */

	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)

	/* Load PID into MMUCR TID */
	mfspr	r12,SPRN_MMUCR
	mfspr	r13,SPRN_PID		/* Get PID */
	rlwimi	r12,r13,0,24,31		/* Set TID */

4:
	mtspr	SPRN_MMUCR,r12

	/* Mask of required permission bits. Note that while we
	 * do copy ESR:ST to _PAGE_RW position as trying to write
	 * to an RO page is pretty common, we don't do it with
	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
	 * event so I'd rather take the overhead when it happens
	 * than add an instruction here. We should measure whether
	 * the whole thing is worth it in the first place
	 * as we could avoid loading SPRN_ESR completely in the first
	 * place...
	 *
	 * TODO: Is it worth doing that mfspr & rlwimi in the first
	 * place or can we save a couple of instructions here ?
	 */
	mfspr	r12,SPRN_ESR
	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
	rlwimi	r13,r12,10,30,30

	/* Load the PTE */
	/* Compute pgdir/pmd offset */
	rlwinm	r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
	beq	2f			/* Bail if no table */

	/* Compute pte address */
	rlwimi	r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
	lwz	r11, 0(r12)		/* Get high word of pte entry */
	lwz	r12, 4(r12)		/* Get low word of pte entry */

	lis	r10,tlb_44x_index@ha

	andc.	r13,r13,r12		/* Check permission */

	/* Load the next available TLB index */
	lwz	r13,tlb_44x_index@l(r10)

	bne	2f			/* Bail if permission mismatch */

	/* Increment, rollover, and store TLB index */
	addi	r13,r13,1

	/* Compare with watermark (instruction gets patched) */
	.globl tlb_44x_patch_hwater_D
tlb_44x_patch_hwater_D:
	cmpwi	0,r13,1			/* reserve entries */
	ble	5f
	li	r13,0
5:
	/* Store the next available TLB index */
	stw	r13,tlb_44x_index@l(r10)

	/* Re-load the faulting address */
	mfspr	r10,SPRN_DEAR

	/* Jump to common tlb load */
	b	finish_tlb_load_44x

2:
	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	DataStorage
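/*
 * The round-robin victim selection above, in rough C (sketch only;
 * the cmpwi immediate at tlb_44x_patch_hwater_D is rewritten at boot
 * with the real watermark so the pinned entries at the top of the
 * TLB are never recycled):
 *
 *	if (++tlb_44x_index > tlb_44x_hwater)	// patched compare
 *		tlb_44x_index = 0;
 */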
	/* Instruction TLB Error Interrupt */
	/*
	 * Nearly the same as above, except we get our
	 * information from different registers and bailout
	 * to a different point.
	 */
	START_EXCEPTION(InstructionTLBError44x)
	mtspr	SPRN_SPRG_WSCRATCH0, r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1, r11
	mtspr	SPRN_SPRG_WSCRATCH2, r12
	mtspr	SPRN_SPRG_WSCRATCH3, r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4, r11
	mfspr	r10, SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	r10, r11
	blt+	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MMUCR
	rlwinm	r12,r12,0,0,23		/* Clear TID */

	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)

	/* Load PID into MMUCR TID */
	mfspr	r12,SPRN_MMUCR
	mfspr	r13,SPRN_PID		/* Get PID */
	rlwimi	r12,r13,0,24,31		/* Set TID */

4:
	mtspr	SPRN_MMUCR,r12

	/* Make up the required permissions */
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC

	/* Compute pgdir/pmd offset */
	rlwinm	r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
	beq	2f			/* Bail if no table */

	/* Compute pte address */
	rlwimi	r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
	lwz	r11, 0(r12)		/* Get high word of pte entry */
	lwz	r12, 4(r12)		/* Get low word of pte entry */

	lis	r10,tlb_44x_index@ha

	andc.	r13,r13,r12		/* Check permission */

	/* Load the next available TLB index */
	lwz	r13,tlb_44x_index@l(r10)

	bne	2f			/* Bail if permission mismatch */

	/* Increment, rollover, and store TLB index */
	addi	r13,r13,1

	/* Compare with watermark (instruction gets patched) */
	.globl tlb_44x_patch_hwater_I
tlb_44x_patch_hwater_I:
	cmpwi	0,r13,1			/* reserve entries */
	ble	5f
	li	r13,0
5:
	/* Store the next available TLB index */
	stw	r13,tlb_44x_index@l(r10)

	/* Re-load the faulting address */
	mfspr	r10,SPRN_SRR0

	/* Jump to common TLB load point */
	b	finish_tlb_load_44x

2:
	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	InstructionStorage

/*
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *	r10 - EA of fault
 *	r11 - PTE high word value
 *	r12 - PTE low word value
 *	r13 - TLB index
 *	MMUCR - loaded with proper value when we get here
 *	Upon exit, we reload everything and RFI.
 */
finish_tlb_load_44x:
	/* Combine RPN & ERPN and write WS 1 (XLAT) */
	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
	tlbwe	r11,r13,PPC44x_TLB_XLAT

	/*
	 * Create WS 0 (PAGEID). This is the faulting address (EPN),
	 * page size, and valid flag.
	 */
	li	r11,PPC44x_TLB_VALID | PPC44x_TLBE_SIZE
	/* Insert valid and page size */
	rlwimi	r10,r11,0,PPC44x_PTE_ADD_MASK_BIT,31
	tlbwe	r10,r13,PPC44x_TLB_PAGEID	/* Write PAGEID */

	/* And WS 2 */
	li	r10,0xf85			/* Mask to apply from PTE */
	rlwimi	r10,r12,29,30,30		/* DIRTY -> SW position */
	and	r11,r12,r10			/* Mask PTE bits to keep */
	andi.	r10,r12,_PAGE_USER		/* User page ? */
	beq	1f				/* nope, leave U bits empty */
	rlwimi	r11,r11,3,26,28			/* yes, copy S bits to U */
1:	tlbwe	r11,r13,PPC44x_TLB_ATTRIB	/* Write ATTRIB */
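/*
 * The WS 2 (ATTRIB) construction above, in rough C (bit names from
 * asm/mmu-44x.h and asm/pte-44x.h; illustrative sketch only):
 *
 *	mask = 0xf85;				// W|I|M|G|E|SX|SR
 *	if (pte_lo & _PAGE_DIRTY)
 *		mask |= PPC44x_TLB_SW;		// same bit as _PAGE_RW
 *	attr = pte_lo & mask;			// SW only if RW and dirty
 *	if (pte_lo & _PAGE_USER)
 *		attr |= (attr & 0x7) << 3;	// copy SX/SW/SR to UX/UW/UR
 */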
	/* Done...restore registers and get out of here.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	rfi				/* Force context change */

/* TLB error interrupts for 476
 */
#ifdef CONFIG_PPC_47x
	START_EXCEPTION(DataTLBError47x)
	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1,r11
	mtspr	SPRN_SPRG_WSCRATCH2,r12
	mtspr	SPRN_SPRG_WSCRATCH3,r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4,r11
	mfspr	r10,SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11,PAGE_OFFSET@h
	cmplw	cr0,r10,r11
	blt+	3f
	lis	r11,swapper_pg_dir@h
	ori	r11,r11,swapper_pg_dir@l
	li	r12,0			/* MMUCR = 0 */
	b	4f

	/* Get the PGD for the current thread and setup MMUCR */
3:	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)
	mfspr	r12,SPRN_PID		/* Get PID */
4:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */

	/* Mask of required permission bits. Note that while we
	 * do copy ESR:ST to _PAGE_RW position as trying to write
	 * to an RO page is pretty common, we don't do it with
	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
	 * event so I'd rather take the overhead when it happens
	 * than add an instruction here. We should measure whether
	 * the whole thing is worth it in the first place
	 * as we could avoid loading SPRN_ESR completely in the first
	 * place...
	 *
	 * TODO: Is it worth doing that mfspr & rlwimi in the first
	 * place or can we save a couple of instructions here ?
	 */
	mfspr	r12,SPRN_ESR
	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
	rlwimi	r13,r12,10,30,30

	/* Load the PTE */
	/* Compute pgdir/pmd offset */
	rlwinm	r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
	lwzx	r11,r12,r11		/* Get pgd/pmd entry */

	/* Word 0 is EPN,V,TS,DSIZ */
	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size */
	li	r12,0
	tlbwe	r10,r12,0

	/* XXX can we do better ? Need to make sure tlbwe has established
	 * latch V bit in MMUCR0 before the PTE is loaded further down */
#ifdef CONFIG_SMP
	isync
#endif

	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
	/* Compute pte address */
	rlwimi	r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
	beq	2f			/* Bail if no table */
	lwz	r11,0(r12)		/* Get high word of pte entry */

	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
	 * bottom of r12 to create a data dependency... We can also use r10
	 * as destination nowadays
	 */
#ifdef CONFIG_SMP
	lwsync
#endif
	lwz	r12,4(r12)		/* Get low word of pte entry */

	andc.	r13,r13,r12		/* Check permission */

	/* Jump to common tlb load */
	beq	finish_tlb_load_47x

2:	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11,SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13,SPRN_SPRG_RSCRATCH3
	mfspr	r12,SPRN_SPRG_RSCRATCH2
	mfspr	r11,SPRN_SPRG_RSCRATCH1
	mfspr	r10,SPRN_SPRG_RSCRATCH0
	b	DataStorage
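/*
 * Word 0 as built by both 47x miss handlers (above and below), in
 * rough C (illustrative sketch only):
 *
 *	word0 = (ea & PAGE_MASK) | PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE;
 *
 * i.e. the low PAGE_SHIFT bits of the faulting address are replaced
 * by the valid bit and page-size field before the early tlbwe.
 */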
	/* Instruction TLB Error Interrupt */
	/*
	 * Nearly the same as above, except we get our
	 * information from different registers and bailout
	 * to a different point.
	 */
	START_EXCEPTION(InstructionTLBError47x)
	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1,r11
	mtspr	SPRN_SPRG_WSCRATCH2,r12
	mtspr	SPRN_SPRG_WSCRATCH3,r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4,r11
	mfspr	r10,SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11,PAGE_OFFSET@h
	cmplw	cr0,r10,r11
	blt+	3f
	lis	r11,swapper_pg_dir@h
	ori	r11,r11,swapper_pg_dir@l
	li	r12,0			/* MMUCR = 0 */
	b	4f

	/* Get the PGD for the current thread and setup MMUCR */
3:	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)
	mfspr	r12,SPRN_PID		/* Get PID */
4:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */

	/* Make up the required permissions */
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC

	/* Load PTE */
	/* Compute pgdir/pmd offset */
	rlwinm	r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
	lwzx	r11,r12,r11		/* Get pgd/pmd entry */

	/* Word 0 is EPN,V,TS,DSIZ */
	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size */
	li	r12,0
	tlbwe	r10,r12,0

	/* XXX can we do better ? Need to make sure tlbwe has established
	 * latch V bit in MMUCR0 before the PTE is loaded further down */
#ifdef CONFIG_SMP
	isync
#endif

	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
	/* Compute pte address */
	rlwimi	r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
	beq	2f			/* Bail if no table */

	lwz	r11,0(r12)		/* Get high word of pte entry */
	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
	 * bottom of r12 to create a data dependency... We can also use r10
	 * as destination nowadays
	 */
#ifdef CONFIG_SMP
	lwsync
#endif
	lwz	r12,4(r12)		/* Get low word of pte entry */

	andc.	r13,r13,r12		/* Check permission */

	/* Jump to common TLB load point */
	beq	finish_tlb_load_47x

2:	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	InstructionStorage
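/*
 * Unlike on 44x, word 0 of the TLB entry was already written by the
 * miss handlers above (ahead of the PTE load, to establish the
 * entry), so the common finish code below only has to supply the
 * translation (word 1) and attribute (word 2) words.
 */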
/*
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *	r10 - free to use
 *	r11 - PTE high word value
 *	r12 - PTE low word value
 *	r13 - free to use
 *	MMUCR - loaded with proper value when we get here
 *	Upon exit, we reload everything and RFI.
 */
finish_tlb_load_47x:
	/* Combine RPN & ERPN and write WS 1 */
	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
	tlbwe	r11,r13,1

	/* And make up word 2 */
	li	r10,0xf85			/* Mask to apply from PTE */
	rlwimi	r10,r12,29,30,30		/* DIRTY -> SW position */
	and	r11,r12,r10			/* Mask PTE bits to keep */
	andi.	r10,r12,_PAGE_USER		/* User page ? */
	beq	1f				/* nope, leave U bits empty */
	rlwimi	r11,r11,3,26,28			/* yes, copy S bits to U */
1:	tlbwe	r11,r13,2

	/* Done...restore registers and get out of here.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	rfi

#endif /* CONFIG_PPC_47x */

	/* Debug Interrupt */
	/*
	 * This statement needs to exist at the end of the IVPR
	 * definition just in case you end up taking a debug
	 * exception within another exception.
	 */
	DEBUG_CRIT_EXCEPTION

/*
 * Global functions
 */

/*
 * Adjust the machine check IVOR on 440A cores
 */
_GLOBAL(__fixup_440A_mcheck)
	li	r3,MachineCheckA@l
	mtspr	SPRN_IVOR1,r3
	sync
	blr

/*
 * extern void giveup_altivec(struct task_struct *prev)
 *
 * The 44x core does not have an AltiVec unit.
 */
_GLOBAL(giveup_altivec)
	blr

/*
 * extern void giveup_fpu(struct task_struct *prev)
 *
 * The 44x core does not have an FPU.
 */
#ifndef CONFIG_PPC_FPU
_GLOBAL(giveup_fpu)
	blr
#endif

_GLOBAL(set_context)

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is the second parameter.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r4, 0x4(r5)
#endif
	mtspr	SPRN_PID,r3
	isync			/* Force context change */
	blr

/*
 * Init CPU state. This is called at boot time or for secondary CPUs
 * to set up initial TLB entries, set up IVORs, etc...
 *
 */
_GLOBAL(init_cpu_state)
	mflr	r22
#ifdef CONFIG_PPC_47x
	/* We use the PVR to differentiate 44x cores from 476 */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmplwi	cr0,r3,PVR_476@h
	beq	head_start_47x
	cmplwi	cr0,r3,PVR_476_ISS@h
	beq	head_start_47x
#endif /* CONFIG_PPC_47x */

/*
 * In case the firmware didn't do it, we apply some workarounds
 * that are good for all 440 core variants here
 */
	mfspr	r3,SPRN_CCR0
	rlwinm	r3,r3,0,0,27	/* disable icache prefetch */
	isync
	mtspr	SPRN_CCR0,r3
	isync
	sync

/*
 * Set up the initial MMU state for 44x
 *
 * We are still executing code at the virtual address
 * mappings set by the firmware for the base of RAM.
 *
 * We first invalidate all TLB entries but the one
 * we are running from. We then load the KERNELBASE
 * mappings so we can begin to use kernel addresses
 * natively and so the interrupt vector locations are
 * permanently pinned (necessary since Book E
 * implementations always have translation enabled).
 *
 * TODO: Use the known TLB entry we are running from to
 *	 determine which physical region we are located
 *	 in. This can be used to determine where in RAM
 *	 (on a shared CPU system) or PCI memory space
 *	 (on a DRAMless system) we are located.
 *	 For now, we assume a perfect world which means
 *	 we are located at the base of DRAM (physical 0).
 */
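/*
 * The MMUCR setup below, in rough C (sketch only): make the tlbsx
 * search happen in the address space we are currently executing in,
 * under our current PID:
 *
 *	mmucr = PID | ((MSR & MSR_IS) ? PPC44x_MMUCR_STS : 0);
 */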
/*
 * Search TLB for entry that we are currently using.
 * Invalidate all entries but the one we are using.
 */
	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
	mfspr	r3,SPRN_PID		/* Get PID */
	mfmsr	r4			/* Get MSR */
	andi.	r4,r4,MSR_IS@l		/* TS=1? */
	beq	wmmucr			/* If not, leave STS=0 */
	oris	r3,r3,PPC44x_MMUCR_STS@h /* Set STS=1 */
wmmucr:	mtspr	SPRN_MMUCR,r3		/* Put MMUCR */
	sync

	bl	invstr			/* Find our address */
invstr:	mflr	r5			/* Make it accessible */
	tlbsx	r23,0,r5		/* Find entry we are in */
	li	r4,0			/* Start at TLB entry 0 */
	li	r3,0			/* Set PAGEID inval value */
1:	cmpw	r23,r4			/* Is this our entry? */
	beq	skpinv			/* If so, skip the inval */
	tlbwe	r3,r4,PPC44x_TLB_PAGEID	/* If not, inval the entry */
skpinv:	addi	r4,r4,1			/* Increment */
	cmpwi	r4,64			/* Are we done? */
	bne	1b			/* If not, repeat */
	isync				/* If so, context change */

/*
 * Configure and load pinned entry into TLB slot 63.
 */

	lis	r3,PAGE_OFFSET@h
	ori	r3,r3,PAGE_OFFSET@l

	/* Kernel is at the base of RAM */
	li	r4, 0			/* Load the kernel physical address */

	/* Load the kernel PID = 0 */
	li	r0,0
	mtspr	SPRN_PID,r0
	sync

	/* Initialize MMUCR */
	li	r5,0
	mtspr	SPRN_MMUCR,r5
	sync

	/* pageid fields */
	clrrwi	r3,r3,10		/* Mask off the effective page number */
	ori	r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M

	/* xlat fields */
	clrrwi	r4,r4,10		/* Mask off the real page number */
					/* ERPN is 0 for first 4GB page */

	/* attrib fields */
	/* Added guarded bit to protect against speculative loads/stores */
	li	r5,0
	ori	r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)

	li	r0,63			/* TLB slot 63 */

	tlbwe	r3,r0,PPC44x_TLB_PAGEID	/* Load the pageid fields */
	tlbwe	r4,r0,PPC44x_TLB_XLAT	/* Load the translation fields */
	tlbwe	r5,r0,PPC44x_TLB_ATTRIB	/* Load the attrib/access fields */

	/* Force context change */
	mfmsr	r0
	mtspr	SPRN_SRR1, r0
	lis	r0,3f@h
	ori	r0,r0,3f@l
	mtspr	SPRN_SRR0,r0
	sync
	rfi

	/* If necessary, invalidate original entry we used */
3:	cmpwi	r23,63
	beq	4f
	li	r6,0
	tlbwe	r6,r23,PPC44x_TLB_PAGEID
	isync

4:
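/*
 * Worked example of the pinned entry just loaded, assuming the usual
 * PAGE_OFFSET of 0xc0000000 (sketch only):
 *
 *	PAGEID: 0xc0000000 | V | 256M	// EPN c0000000, valid, 256MB
 *	XLAT:   0x00000000		// RPN 0, ERPN 0: base of RAM
 *	ATTRIB: SW | SR | SX | G	// kernel rwx, guarded
 *
 * The rfi above then resumes at 3: with the same MSR, but fetching
 * through this new mapping.
 */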
#ifdef CONFIG_PPC_EARLY_DEBUG_44x
	/* Add UART mapping for early debug.
	 */

	/* pageid fields */
	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
	ori	r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K

	/* xlat fields */
	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH

	/* attrib fields */
	li	r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G)
	li	r0,62			/* TLB slot 62 */

	tlbwe	r3,r0,PPC44x_TLB_PAGEID
	tlbwe	r4,r0,PPC44x_TLB_XLAT
	tlbwe	r5,r0,PPC44x_TLB_ATTRIB

	/* Force context change */
	isync
#endif /* CONFIG_PPC_EARLY_DEBUG_44x */

	/* Establish the interrupt vector offsets */
	SET_IVOR(0, CriticalInput);
	SET_IVOR(1, MachineCheck);
	SET_IVOR(2, DataStorage);
	SET_IVOR(3, InstructionStorage);
	SET_IVOR(4, ExternalInput);
	SET_IVOR(5, Alignment);
	SET_IVOR(6, Program);
	SET_IVOR(7, FloatingPointUnavailable);
	SET_IVOR(8, SystemCall);
	SET_IVOR(9, AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError44x);
	SET_IVOR(14, InstructionTLBError44x);
	SET_IVOR(15, DebugCrit);

	b	head_start_common


#ifdef CONFIG_PPC_47x

#ifdef CONFIG_SMP

/* Entry point for secondary 47x processors */
_GLOBAL(start_secondary_47x)
	mr	r24,r3		/* CPU number */

	bl	init_cpu_state

	/* Now we need to bolt the rest of kernel memory which
	 * is done in C code. We must be careful because our task
	 * struct or our stack can (and will probably) be out
	 * of reach of the initial 256M TLB entry, so we use a
	 * small temporary stack in .bss for that. This works
	 * because only one CPU at a time can be in this code
	 */
	lis	r1,temp_boot_stack@h
	ori	r1,r1,temp_boot_stack@l
	addi	r1,r1,1024-STACK_FRAME_OVERHEAD
	li	r0,0
	stw	r0,0(r1)
	bl	mmu_init_secondary

	/* Now we can get our task struct and real stack pointer */

	/* Get current_thread_info and current */
	lis	r1,secondary_ti@ha
	lwz	r1,secondary_ti@l(r1)
	lwz	r2,TI_TASK(r1)

	/* Current stack pointer */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	stw	r0,0(r1)

	/* Kernel stack for exception entry in SPRG3 */
	addi	r4,r2,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG3,r4

	b	start_secondary

#endif /* CONFIG_SMP */

/*
 * Set up the initial MMU state for 47x
 *
 * We are still executing code at the virtual address
 * mappings set by the firmware for the base of RAM.
 */

head_start_47x:
	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
	mfspr	r3,SPRN_PID		/* Get PID */
	mfmsr	r4			/* Get MSR */
	andi.	r4,r4,MSR_IS@l		/* TS=1? */
	beq	1f			/* If not, leave STS=0 */
	oris	r3,r3,PPC47x_MMUCR_STS@h /* Set STS=1 */
1:	mtspr	SPRN_MMUCR,r3		/* Put MMUCR */
	sync

	/* Find the entry we are running from */
	bl	1f
1:	mflr	r23
	tlbsx	r23,0,r23
	tlbre	r24,r23,0
	tlbre	r25,r23,1
	tlbre	r26,r23,2

/*
 * Cleanup time
 */

	/* Initialize MMUCR */
	li	r5,0
	mtspr	SPRN_MMUCR,r5
	sync

clear_all_utlb_entries:
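/*
 * The loop below clears the 476 UTLB (1024 entries: 4 ways of 256
 * congruence classes) by writing an invalid word 0 everywhere. Our
 * reading of the selectors (hedged; see the 476 manual for the exact
 * tlbwe field layout): r3 addresses the way directly (0x80000000
 * plus a way field stepped by 0x20000000), r4 holds the word-0 value
 * whose EPN steps through the congruence classes by 0x01000000, and
 * each counter wraps to zero when its walk is complete.
 */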
	#; Set initial values.

	addis	r3,0,0x8000
	addi	r4,0,0
	addi	r5,0,0
	b	clear_utlb_entry

	#; Align the loop to speed things up.

	.align	6

clear_utlb_entry:

	tlbwe	r4,r3,0
	tlbwe	r5,r3,1
	tlbwe	r5,r3,2
	addis	r3,r3,0x2000
	cmpwi	r3,0
	bne	clear_utlb_entry
	addis	r3,0,0x8000
	addis	r4,r4,0x100
	cmpwi	r4,0
	bne	clear_utlb_entry

	#; Restore original entry.

	oris	r23,r23,0x8000	/* specify the way */
	tlbwe	r24,r23,0
	tlbwe	r25,r23,1
	tlbwe	r26,r23,2

/*
 * Configure and load pinned entry into TLB for the kernel core
 */

	lis	r3,PAGE_OFFSET@h
	ori	r3,r3,PAGE_OFFSET@l

	/* Kernel is at the base of RAM */
	li	r4, 0			/* Load the kernel physical address */

	/* Load the kernel PID = 0 */
	li	r0,0
	mtspr	SPRN_PID,r0
	sync

	/* Word 0 */
	clrrwi	r3,r3,12		/* Mask off the effective page number */
	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_256M

	/* Word 1 */
	clrrwi	r4,r4,12		/* Mask off the real page number */
					/* ERPN is 0 for first 4GB page */
	/* Word 2 */
	li	r5,0
	ori	r5,r5,PPC47x_TLB2_S_RWX
#ifdef CONFIG_SMP
	ori	r5,r5,PPC47x_TLB2_M
#endif

	/* We write to way 0 and bolted 0 */
	lis	r0,0x8800
	tlbwe	r3,r0,0
	tlbwe	r4,r0,1
	tlbwe	r5,r0,2

/*
 * Configure SSPCR, ISPCR and USPCR for now to search everything; we
 * can fix them up later
 */
	LOAD_REG_IMMEDIATE(r3, 0x9abcdef0)
	mtspr	SPRN_SSPCR,r3
	mtspr	SPRN_USPCR,r3
	LOAD_REG_IMMEDIATE(r3, 0x12345670)
	mtspr	SPRN_ISPCR,r3

	/* Force context change */
	mfmsr	r0
	mtspr	SPRN_SRR1, r0
	lis	r0,3f@h
	ori	r0,r0,3f@l
	mtspr	SPRN_SRR0,r0
	sync
	rfi

	/* Invalidate original entry we used */
3:
	rlwinm	r24,r24,0,21,19	/* clear the "valid" bit */
	tlbwe	r24,r23,0
	addi	r24,0,0
	tlbwe	r24,r23,1
	tlbwe	r24,r23,2
	isync			/* Clear out the shadow TLB entries */
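/*
 * Net effect of the pinned entry set up before the rfi above,
 * assuming the usual PAGE_OFFSET of 0xc0000000 (sketch only):
 *
 *	word 0: 0xc0000000 | V | 256M	// EPN, valid, 256MB page
 *	word 1: 0x00000000		// RPN 0, ERPN 0: base of RAM
 *	word 2: S_RWX [ | M on SMP ]	// kernel rwx[, coherent]
 *
 * written via the 0x88000000 selector, i.e. "way 0 and bolted 0".
 */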
#ifdef CONFIG_PPC_EARLY_DEBUG_44x
	/* Add UART mapping for early debug.
	 */

	/* Word 0 */
	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_TS | PPC47x_TLB0_1M

	/* Word 1 */
	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH

	/* Word 2 */
	li	r5,(PPC47x_TLB2_S_RW | PPC47x_TLB2_IMG)

	/* Bolted in way 0, bolt slot 5; we -hope- we don't hit the same
	 * congruence class as the kernel, and we need to make sure of
	 * it at some point
	 */
	lis	r0,0x8d00
	tlbwe	r3,r0,0
	tlbwe	r4,r0,1
	tlbwe	r5,r0,2

	/* Force context change */
	isync
#endif /* CONFIG_PPC_EARLY_DEBUG_44x */

	/* Establish the interrupt vector offsets */
	SET_IVOR(0, CriticalInput);
	SET_IVOR(1, MachineCheckA);
	SET_IVOR(2, DataStorage);
	SET_IVOR(3, InstructionStorage);
	SET_IVOR(4, ExternalInput);
	SET_IVOR(5, Alignment);
	SET_IVOR(6, Program);
	SET_IVOR(7, FloatingPointUnavailable);
	SET_IVOR(8, SystemCall);
	SET_IVOR(9, AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError47x);
	SET_IVOR(14, InstructionTLBError47x);
	SET_IVOR(15, DebugCrit);

	/* We configure icbi to invalidate 128 bytes at a time since the
	 * current 32-bit kernel code isn't too happy with icache != dcache
	 * block size
	 */
	mfspr	r3,SPRN_CCR0
	oris	r3,r3,0x0020
	mtspr	SPRN_CCR0,r3
	isync

#endif /* CONFIG_PPC_47x */

/*
 * Here we are back to code that is common between 44x and 47x
 *
 * We proceed to further kernel initialization and return to the
 * main kernel entry
 */
head_start_common:
	/* Establish the interrupt vector base */
	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
	mtspr	SPRN_IVPR,r4

	addis	r22,r22,KERNELBASE@h
	mtlr	r22
	isync
	blr

/*
 * We put a few things here that have to be page-aligned. This stuff
 * goes at the beginning of the data segment, which is page-aligned.
 */
	.data
	.align	PAGE_SHIFT
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	PAGE_SIZE

/*
 * To support >32-bit physical addresses, we use an 8KB pgdir.
 */
	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

/*
 * Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8

#ifdef CONFIG_SMP
	.align	12
temp_boot_stack:
	.space	1024
#endif /* CONFIG_SMP */