/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Boot code and exception vectors for Book3E processors
 *
 * Copyright (C) 2007 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
 */

#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/thread_info.h>
#include <asm/reg_a2.h>
#include <asm/exception-64e.h>
#include <asm/bug.h>
#include <asm/irqflags.h>
#include <asm/ptrace.h>
#include <asm/ppc-opcode.h>
#include <asm/mmu.h>
#include <asm/hw_irq.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_booke_hv_asm.h>
#include <asm/feature-fixups.h>
#include <asm/context_tracking.h>

/* XXX This will ultimately add space for a special exception save
 *     structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
 *     when taking special interrupts. For now we don't support that,
 *     special interrupts from within a non-standard level will probably
 *     blow you up
 */
#define SPECIAL_EXC_SRR0	0
#define SPECIAL_EXC_SRR1	1
#define SPECIAL_EXC_SPRG_GEN	2
#define SPECIAL_EXC_SPRG_TLB	3
#define SPECIAL_EXC_MAS0	4
#define SPECIAL_EXC_MAS1	5
#define SPECIAL_EXC_MAS2	6
#define SPECIAL_EXC_MAS3	7
#define SPECIAL_EXC_MAS6	8
#define SPECIAL_EXC_MAS7	9
#define SPECIAL_EXC_MAS5	10	/* E.HV only */
#define SPECIAL_EXC_MAS8	11	/* E.HV only */
#define SPECIAL_EXC_IRQHAPPENED	12
#define SPECIAL_EXC_DEAR	13
#define SPECIAL_EXC_ESR		14
#define SPECIAL_EXC_SOFTE	15
#define SPECIAL_EXC_CSRR0	16
#define SPECIAL_EXC_CSRR1	17
/* must be even to keep 16-byte stack alignment */
#define SPECIAL_EXC_END		18

#define SPECIAL_EXC_FRAME_SIZE	(INT_FRAME_SIZE + SPECIAL_EXC_END * 8)
#define SPECIAL_EXC_FRAME_OFFS	(INT_FRAME_SIZE - 288)

#define SPECIAL_EXC_STORE(reg, name) \
	std	reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)

#define SPECIAL_EXC_LOAD(reg, name) \
	ld	reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)

special_reg_save:
	lbz	r9,PACAIRQHAPPENED(r13)
	RECONCILE_IRQ_STATE(r3,r4)

	/*
	 * We only need (or have stack space) to save this stuff if
	 * we interrupted the kernel.
	 */
	ld	r3,_MSR(r1)
	andi.	r3,r3,MSR_PR
	bnelr

	/*
	 * Advance to the next TLB exception frame for handler
	 * types that don't do it automatically.
	 */
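	/*
	 * extlb_level_exc holds the byte size of one TLB exception
	 * frame, so the sequence below is effectively
	 * SPRG_TLB_EXFRAME += frame_size; ret_from_level_except
	 * performs the matching subtraction on the way out.
	 */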
	LOAD_REG_ADDR(r11,extlb_level_exc)
	lwz	r12,0(r11)
	mfspr	r10,SPRN_SPRG_TLB_EXFRAME
	add	r10,r10,r12
	mtspr	SPRN_SPRG_TLB_EXFRAME,r10

	/*
	 * Save registers needed to allow nesting of certain exceptions
	 * (such as TLB misses) inside special exception levels
	 */
	mfspr	r10,SPRN_SRR0
	SPECIAL_EXC_STORE(r10,SRR0)
	mfspr	r10,SPRN_SRR1
	SPECIAL_EXC_STORE(r10,SRR1)
	mfspr	r10,SPRN_SPRG_GEN_SCRATCH
	SPECIAL_EXC_STORE(r10,SPRG_GEN)
	mfspr	r10,SPRN_SPRG_TLB_SCRATCH
	SPECIAL_EXC_STORE(r10,SPRG_TLB)
	mfspr	r10,SPRN_MAS0
	SPECIAL_EXC_STORE(r10,MAS0)
	mfspr	r10,SPRN_MAS1
	SPECIAL_EXC_STORE(r10,MAS1)
	mfspr	r10,SPRN_MAS2
	SPECIAL_EXC_STORE(r10,MAS2)
	mfspr	r10,SPRN_MAS3
	SPECIAL_EXC_STORE(r10,MAS3)
	mfspr	r10,SPRN_MAS6
	SPECIAL_EXC_STORE(r10,MAS6)
	mfspr	r10,SPRN_MAS7
	SPECIAL_EXC_STORE(r10,MAS7)
BEGIN_FTR_SECTION
	mfspr	r10,SPRN_MAS5
	SPECIAL_EXC_STORE(r10,MAS5)
	mfspr	r10,SPRN_MAS8
	SPECIAL_EXC_STORE(r10,MAS8)

	/* MAS5/8 could have inappropriate values if we interrupted KVM code */
	li	r10,0
	mtspr	SPRN_MAS5,r10
	mtspr	SPRN_MAS8,r10
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
	SPECIAL_EXC_STORE(r9,IRQHAPPENED)

	mfspr	r10,SPRN_DEAR
	SPECIAL_EXC_STORE(r10,DEAR)
	mfspr	r10,SPRN_ESR
	SPECIAL_EXC_STORE(r10,ESR)

	lbz	r10,PACAIRQSOFTMASK(r13)
	SPECIAL_EXC_STORE(r10,SOFTE)
	ld	r10,_NIP(r1)
	SPECIAL_EXC_STORE(r10,CSRR0)
	ld	r10,_MSR(r1)
	SPECIAL_EXC_STORE(r10,CSRR1)

	blr

ret_from_level_except:
	ld	r3,_MSR(r1)
	andi.	r3,r3,MSR_PR
	beq	1f
	b	ret_from_except
1:

	LOAD_REG_ADDR(r11,extlb_level_exc)
	lwz	r12,0(r11)
	mfspr	r10,SPRN_SPRG_TLB_EXFRAME
	sub	r10,r10,r12
	mtspr	SPRN_SPRG_TLB_EXFRAME,r10

	/*
	 * It's possible that the special level exception interrupted a
	 * TLB miss handler, and inserted the same entry that the
	 * interrupted handler was about to insert.  On CPUs without TLB
	 * write conditional, this can result in a duplicate TLB entry.
	 * Wipe all non-bolted entries to be safe.
	 *
	 * Note that this doesn't protect against any TLB misses
	 * we may take accessing the stack from here to the end of
	 * the special level exception.  It's not clear how we can
	 * reasonably protect against that, but only CPUs with
	 * neither TLB write conditional nor bolted kernel memory
	 * are affected.  Do any such CPUs even exist?
	 */
	PPC_TLBILX_ALL(0,R0)

	REST_NVGPRS(r1)

	SPECIAL_EXC_LOAD(r10,SRR0)
	mtspr	SPRN_SRR0,r10
	SPECIAL_EXC_LOAD(r10,SRR1)
	mtspr	SPRN_SRR1,r10
	SPECIAL_EXC_LOAD(r10,SPRG_GEN)
	mtspr	SPRN_SPRG_GEN_SCRATCH,r10
	SPECIAL_EXC_LOAD(r10,SPRG_TLB)
	mtspr	SPRN_SPRG_TLB_SCRATCH,r10
	SPECIAL_EXC_LOAD(r10,MAS0)
	mtspr	SPRN_MAS0,r10
	SPECIAL_EXC_LOAD(r10,MAS1)
	mtspr	SPRN_MAS1,r10
	SPECIAL_EXC_LOAD(r10,MAS2)
	mtspr	SPRN_MAS2,r10
	SPECIAL_EXC_LOAD(r10,MAS3)
	mtspr	SPRN_MAS3,r10
	SPECIAL_EXC_LOAD(r10,MAS6)
	mtspr	SPRN_MAS6,r10
	SPECIAL_EXC_LOAD(r10,MAS7)
	mtspr	SPRN_MAS7,r10
BEGIN_FTR_SECTION
	SPECIAL_EXC_LOAD(r10,MAS5)
	mtspr	SPRN_MAS5,r10
	SPECIAL_EXC_LOAD(r10,MAS8)
	mtspr	SPRN_MAS8,r10
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)

	lbz	r6,PACAIRQSOFTMASK(r13)
	ld	r5,SOFTE(r1)

	/* Interrupts had better not already be enabled... */
	tweqi	r6,IRQS_ENABLED

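	/*
	 * r5 is the SOFTE value saved in the frame, r6 the current
	 * soft mask.  Only write the mask back (and trace) when this
	 * actually transitions to enabled.
	 */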
	andi.	r6,r5,IRQS_DISABLED
	bne	1f

	TRACE_ENABLE_INTS
	stb	r5,PACAIRQSOFTMASK(r13)
1:
	/*
	 * Restore PACAIRQHAPPENED rather than setting it based on
	 * the return MSR[EE], since we could have interrupted
	 * __check_irq_replay() or other inconsistent transitory
	 * states that must remain that way.
	 */
	SPECIAL_EXC_LOAD(r10,IRQHAPPENED)
	stb	r10,PACAIRQHAPPENED(r13)

	SPECIAL_EXC_LOAD(r10,DEAR)
	mtspr	SPRN_DEAR,r10
	SPECIAL_EXC_LOAD(r10,ESR)
	mtspr	SPRN_ESR,r10

	stdcx.	r0,0,r1		/* to clear the reservation */

	REST_4GPRS(2, r1)
	REST_4GPRS(6, r1)

	ld	r10,_CTR(r1)
	ld	r11,_XER(r1)
	mtctr	r10
	mtxer	r11

	blr

.macro ret_from_level srr0 srr1 paca_ex scratch
	bl	ret_from_level_except

	ld	r10,_LINK(r1)
	ld	r11,_CCR(r1)
	ld	r0,GPR13(r1)
	mtlr	r10
	mtcr	r11

	ld	r10,GPR10(r1)
	ld	r11,GPR11(r1)
	ld	r12,GPR12(r1)
	mtspr	\scratch,r0

	std	r10,\paca_ex+EX_R10(r13);
	std	r11,\paca_ex+EX_R11(r13);
	ld	r10,_NIP(r1)
	ld	r11,_MSR(r1)
	ld	r0,GPR0(r1)
	ld	r1,GPR1(r1)
	mtspr	\srr0,r10
	mtspr	\srr1,r11
	ld	r10,\paca_ex+EX_R10(r13)
	ld	r11,\paca_ex+EX_R11(r13)
	mfspr	r13,\scratch
.endm

ret_from_crit_except:
	ret_from_level SPRN_CSRR0 SPRN_CSRR1 PACA_EXCRIT SPRN_SPRG_CRIT_SCRATCH
	rfci

ret_from_mc_except:
	ret_from_level SPRN_MCSRR0 SPRN_MCSRR1 PACA_EXMC SPRN_SPRG_MC_SCRATCH
	rfmci

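/*
 * A note on the two return paths above: once CSRR0/1 (or MCSRR0/1) are
 * loaded we must not take another exception of the same level, so the
 * last two GPRs are bounced through the PACA exception save area rather
 * than the stack, and r13 is recovered from the scratch SPRG as the
 * very last step before rfci/rfmci.
 */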
/* Exception prolog code for all exceptions */
#define EXCEPTION_PROLOG(n, intnum, type, addition)			    \
	mtspr	SPRN_SPRG_##type##_SCRATCH,r13;	/* get spare registers */   \
	mfspr	r13,SPRN_SPRG_PACA;	/* get PACA */			    \
	std	r10,PACA_EX##type+EX_R10(r13);				    \
	std	r11,PACA_EX##type+EX_R11(r13);				    \
	mfcr	r10;			/* save CR */			    \
	mfspr	r11,SPRN_##type##_SRR1;/* what are we coming from */	    \
	DO_KVM	intnum,SPRN_##type##_SRR1;	/* KVM hook */		    \
	stw	r10,PACA_EX##type+EX_CR(r13);	/* save old CR in the PACA */ \
	addition;			/* additional code for that exc. */ \
	std	r1,PACA_EX##type+EX_R1(r13);	/* save old r1 in the PACA */ \
	type##_SET_KSTACK;		/* get special stack if necessary */\
	andi.	r10,r11,MSR_PR;		/* save stack pointer */	    \
	beq	1f;			/* branch around if supervisor */   \
	ld	r1,PACAKSAVE(r13);	/* get kernel stack coming from usr */\
1:	type##_BTB_FLUSH						    \
	cmpdi	cr1,r1,0;		/* check if SP makes sense */	    \
	bge-	cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \
	mfspr	r10,SPRN_##type##_SRR0;	/* read SRR0 before touching stack */

/* Exception type-specific macros */
#define GEN_SET_KSTACK							    \
	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack */
#define SPRN_GEN_SRR0	SPRN_SRR0
#define SPRN_GEN_SRR1	SPRN_SRR1

#define GDBELL_SET_KSTACK	GEN_SET_KSTACK
#define SPRN_GDBELL_SRR0	SPRN_GSRR0
#define SPRN_GDBELL_SRR1	SPRN_GSRR1

#define CRIT_SET_KSTACK							    \
	ld	r1,PACA_CRIT_STACK(r13);				    \
	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE
#define SPRN_CRIT_SRR0	SPRN_CSRR0
#define SPRN_CRIT_SRR1	SPRN_CSRR1

#define DBG_SET_KSTACK							    \
	ld	r1,PACA_DBG_STACK(r13);					    \
	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE
#define SPRN_DBG_SRR0	SPRN_DSRR0
#define SPRN_DBG_SRR1	SPRN_DSRR1

#define MC_SET_KSTACK							    \
	ld	r1,PACA_MC_STACK(r13);					    \
	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE
#define SPRN_MC_SRR0	SPRN_MCSRR0
#define SPRN_MC_SRR1	SPRN_MCSRR1

#ifdef CONFIG_PPC_FSL_BOOK3E
#define GEN_BTB_FLUSH			\
	START_BTB_FLUSH_SECTION		\
		beq 1f;			\
		BTB_FLUSH(r10)		\
	1:				\
	END_BTB_FLUSH_SECTION

#define CRIT_BTB_FLUSH			\
	START_BTB_FLUSH_SECTION		\
		BTB_FLUSH(r10)		\
	END_BTB_FLUSH_SECTION

#define DBG_BTB_FLUSH CRIT_BTB_FLUSH
#define MC_BTB_FLUSH CRIT_BTB_FLUSH
#define GDBELL_BTB_FLUSH GEN_BTB_FLUSH
#else
#define GEN_BTB_FLUSH
#define CRIT_BTB_FLUSH
#define DBG_BTB_FLUSH
#define MC_BTB_FLUSH
#define GDBELL_BTB_FLUSH
#endif

#define NORMAL_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n))

#define CRIT_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, CRIT, addition##_CRIT(n))

#define DBG_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, DBG, addition##_DBG(n))

#define MC_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, MC, addition##_MC(n))

#define GDBELL_EXCEPTION_PROLOG(n, intnum, addition)			    \
	EXCEPTION_PROLOG(n, intnum, GDBELL, addition##_GDBELL(n))

/* Variants of the "addition" argument for the prolog
 */
#define PROLOG_ADDITION_NONE_GEN(n)
#define PROLOG_ADDITION_NONE_GDBELL(n)
#define PROLOG_ADDITION_NONE_CRIT(n)
#define PROLOG_ADDITION_NONE_DBG(n)
#define PROLOG_ADDITION_NONE_MC(n)

#define PROLOG_ADDITION_MASKABLE_GEN(n)					    \
	lbz	r10,PACAIRQSOFTMASK(r13); /* are irqs soft-masked? */	    \
	andi.	r10,r10,IRQS_DISABLED;	/* yes -> go out of line */	    \
	bne	masked_interrupt_book3e_##n

#define PROLOG_ADDITION_2REGS_GEN(n)					    \
	std	r14,PACA_EXGEN+EX_R14(r13);				    \
	std	r15,PACA_EXGEN+EX_R15(r13)

#define PROLOG_ADDITION_1REG_GEN(n)					    \
	std	r14,PACA_EXGEN+EX_R14(r13);

#define PROLOG_ADDITION_2REGS_CRIT(n)					    \
	std	r14,PACA_EXCRIT+EX_R14(r13);				    \
	std	r15,PACA_EXCRIT+EX_R15(r13)

#define PROLOG_ADDITION_2REGS_DBG(n)					    \
	std	r14,PACA_EXDBG+EX_R14(r13);				    \
	std	r15,PACA_EXDBG+EX_R15(r13)

#define PROLOG_ADDITION_2REGS_MC(n)					    \
	std	r14,PACA_EXMC+EX_R14(r13);				    \
	std	r15,PACA_EXMC+EX_R15(r13)

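/*
 * Rough register contract at the end of any prolog above, on entry to
 * the common code below: r1 = kernel stack with a frame allocated,
 * r10 = SRR0, r11 = SRR1, r13 = PACA; the interrupted CR, r1, r10 and
 * r11 live in the PACA_EX* save area; r12, LR, CTR and XER are still
 * untouched from the interrupted context.
 */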
/* Core exception code for all exceptions except TLB misses. */
#define EXCEPTION_COMMON_LVL(n, scratch, excf)				    \
exc_##n##_common:							    \
	std	r0,GPR0(r1);		/* save r0 in stackframe */	    \
	std	r2,GPR2(r1);		/* save r2 in stackframe */	    \
	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe */    \
	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe */	    \
	std	r9,GPR9(r1);		/* save r9 in stackframe */	    \
	std	r10,_NIP(r1);		/* save SRR0 to stackframe */	    \
	std	r11,_MSR(r1);		/* save SRR1 to stackframe */	    \
	beq	2f;			/* if from kernel mode */	    \
2:	ld	r3,excf+EX_R10(r13);	/* get back r10 */		    \
	ld	r4,excf+EX_R11(r13);	/* get back r11 */		    \
	mfspr	r5,scratch;		/* get back r13 */		    \
	std	r12,GPR12(r1);		/* save r12 in stackframe */	    \
	ld	r2,PACATOC(r13);	/* get kernel TOC into r2 */	    \
	mflr	r6;			/* save LR in stackframe */	    \
	mfctr	r7;			/* save CTR in stackframe */	    \
	mfspr	r8,SPRN_XER;		/* save XER in stackframe */	    \
	ld	r9,excf+EX_R1(r13);	/* load orig r1 back from PACA */   \
	lwz	r10,excf+EX_CR(r13);	/* load orig CR back from PACA */   \
	lbz	r11,PACAIRQSOFTMASK(r13); /* get current IRQ softe */	    \
	ld	r12,exception_marker@toc(r2);				    \
	li	r0,0;							    \
	std	r3,GPR10(r1);		/* save r10 to stackframe */	    \
	std	r4,GPR11(r1);		/* save r11 to stackframe */	    \
	std	r5,GPR13(r1);		/* save it to stackframe */	    \
	std	r6,_LINK(r1);						    \
	std	r7,_CTR(r1);						    \
	std	r8,_XER(r1);						    \
	li	r3,(n)+1;		/* indicate partial regs in trap */ \
	std	r9,0(r1);		/* store stack frame back link */   \
	std	r10,_CCR(r1);		/* store orig CR in stackframe */   \
	std	r9,GPR1(r1);		/* save original r1 in stackframe */\
	std	r11,SOFTE(r1);		/* and save it to stackframe */	    \
	std	r12,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */	    \
	std	r3,_TRAP(r1);		/* set trap number */		    \
	std	r0,RESULT(r1);		/* clear regs->result */

#define EXCEPTION_COMMON(n) \
	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_GEN_SCRATCH, PACA_EXGEN)
#define EXCEPTION_COMMON_CRIT(n) \
	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_CRIT_SCRATCH, PACA_EXCRIT)
#define EXCEPTION_COMMON_MC(n) \
	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_MC_SCRATCH, PACA_EXMC)
#define EXCEPTION_COMMON_DBG(n) \
	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_DBG_SCRATCH, PACA_EXDBG)

/*
 * This is meant for exceptions that don't immediately hard-enable.  We
 * set a bit in paca->irq_happened to ensure that a subsequent call to
 * arch_local_irq_restore() will properly hard-enable and avoid the
 * fast-path, and then reconcile irq state.
 */
#define INTS_DISABLE	RECONCILE_IRQ_STATE(r3,r4)

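/*
 * In rough C terms (a sketch, not the exact macro expansion),
 * INTS_DISABLE amounts to:
 *
 *	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
 *	local_paca->irq_soft_mask = IRQS_DISABLED;
 *
 * plus the usual irq tracing hook.
 */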
/*
 * This is called by exceptions that don't use INTS_DISABLE (that did not
 * touch irq indicators in the PACA).  This will restore MSR:EE to its
 * previous value
 *
 * XXX In the long run, we may want to open-code it in order to separate the
 *     load from the wrtee, thus limiting the latency caused by the dependency
 *     but at this point, I'll favor code clarity until we have a near to final
 *     implementation
 */
#define INTS_RESTORE_HARD						    \
	ld	r11,_MSR(r1);						    \
	wrtee	r11;

/* XXX FIXME: Restore r14/r15 when necessary */
#define BAD_STACK_TRAMPOLINE(n)						    \
exc_##n##_bad_stack:							    \
	li	r1,(n);			/* get exception number */	    \
	sth	r1,PACA_TRAP_SAVE(r13);	/* store trap */		    \
	b	bad_stack_book3e;	/* bad stack error */

/* WARNING: If you change the layout of this stub, make sure you check
 *	    the debug exception handler which handles single stepping
 *	    into exceptions from userspace, and the MM code in
 *	    arch/powerpc/mm/tlb_nohash.c which patches the branch here
 *	    and would need to be updated if that branch is moved
 */
#define EXCEPTION_STUB(loc, label)					\
	. = interrupt_base_book3e + loc;				\
	nop;	/* To make debug interrupts happy */			\
	b	exc_##label##_book3e;

#define ACK_NONE(r)
#define ACK_DEC(r)							\
	lis	r,TSR_DIS@h;						\
	mtspr	SPRN_TSR,r
#define ACK_FIT(r)							\
	lis	r,TSR_FIS@h;						\
	mtspr	SPRN_TSR,r

/* Used by asynchronous interrupts that may happen in the idle loop.
 *
 * This checks if the thread was in the idle loop, and if yes, returns
 * to the caller rather than the interrupted PC.  This is to avoid a
 * race if interrupts happen before the wait instruction.
 */
#define CHECK_NAPPING()							\
	ld	r11, PACA_THREAD_INFO(r13);				\
	ld	r10,TI_LOCAL_FLAGS(r11);				\
	andi.	r9,r10,_TLF_NAPPING;					\
	beq+	1f;							\
	ld	r8,_LINK(r1);						\
	rlwinm	r7,r10,0,~_TLF_NAPPING;					\
	std	r8,_NIP(r1);						\
	std	r7,TI_LOCAL_FLAGS(r11);					\
1:


#define MASKABLE_EXCEPTION(trapnum, intnum, label, hdlr, ack)		\
	START_EXCEPTION(label);						\
	NORMAL_EXCEPTION_PROLOG(trapnum, intnum, PROLOG_ADDITION_MASKABLE)\
	EXCEPTION_COMMON(trapnum)					\
	INTS_DISABLE;							\
	ack(r8);							\
	CHECK_NAPPING();						\
	addi	r3,r1,STACK_FRAME_OVERHEAD;				\
	bl	hdlr;							\
	b	ret_from_except_lite;

/* This value is used to mark exception frames on the stack. */
	.section	".toc","aw"
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER


/*
 * And here we have the exception vectors !
 */

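/*
 * The stub slots below are 0x20 bytes apart; each one is a nop (see
 * the single-stepping warning above EXCEPTION_STUB) followed by a
 * branch to the real, out-of-line handler.  The offsets used here are
 * the same ones programmed into the IVORs by __setup_base_ivors and
 * friends at the bottom of this file.
 */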
	.text
	.balign	0x1000
	.globl interrupt_base_book3e
interrupt_base_book3e:					/* fake trap */
	EXCEPTION_STUB(0x000, machine_check)
	EXCEPTION_STUB(0x020, critical_input)		/* 0x0100 */
	EXCEPTION_STUB(0x040, debug_crit)		/* 0x0d00 */
	EXCEPTION_STUB(0x060, data_storage)		/* 0x0300 */
	EXCEPTION_STUB(0x080, instruction_storage)	/* 0x0400 */
	EXCEPTION_STUB(0x0a0, external_input)		/* 0x0500 */
	EXCEPTION_STUB(0x0c0, alignment)		/* 0x0600 */
	EXCEPTION_STUB(0x0e0, program)			/* 0x0700 */
	EXCEPTION_STUB(0x100, fp_unavailable)		/* 0x0800 */
	EXCEPTION_STUB(0x120, system_call)		/* 0x0c00 */
	EXCEPTION_STUB(0x140, ap_unavailable)		/* 0x0f20 */
	EXCEPTION_STUB(0x160, decrementer)		/* 0x0900 */
	EXCEPTION_STUB(0x180, fixed_interval)		/* 0x0980 */
	EXCEPTION_STUB(0x1a0, watchdog)			/* 0x09f0 */
	EXCEPTION_STUB(0x1c0, data_tlb_miss)
	EXCEPTION_STUB(0x1e0, instruction_tlb_miss)
	EXCEPTION_STUB(0x200, altivec_unavailable)
	EXCEPTION_STUB(0x220, altivec_assist)
	EXCEPTION_STUB(0x260, perfmon)
	EXCEPTION_STUB(0x280, doorbell)
	EXCEPTION_STUB(0x2a0, doorbell_crit)
	EXCEPTION_STUB(0x2c0, guest_doorbell)
	EXCEPTION_STUB(0x2e0, guest_doorbell_crit)
	EXCEPTION_STUB(0x300, hypercall)
	EXCEPTION_STUB(0x320, ehpriv)
	EXCEPTION_STUB(0x340, lrat_error)

	.globl __end_interrupts
__end_interrupts:

/* Critical Input Interrupt */
	START_EXCEPTION(critical_input);
	CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL,
			      PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON_CRIT(0x100)
	bl	save_nvgprs
	bl	special_reg_save
	CHECK_NAPPING();
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unknown_exception
	b	ret_from_crit_except

/* Machine Check Interrupt */
	START_EXCEPTION(machine_check);
	MC_EXCEPTION_PROLOG(0x000, BOOKE_INTERRUPT_MACHINE_CHECK,
			    PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON_MC(0x000)
	bl	save_nvgprs
	bl	special_reg_save
	CHECK_NAPPING();
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception
	b	ret_from_mc_except

/* Data Storage Interrupt */
	START_EXCEPTION(data_storage)
	NORMAL_EXCEPTION_PROLOG(0x300, BOOKE_INTERRUPT_DATA_STORAGE,
				PROLOG_ADDITION_2REGS)
	mfspr	r14,SPRN_DEAR
	mfspr	r15,SPRN_ESR
	EXCEPTION_COMMON(0x300)
	INTS_DISABLE
	b	storage_fault_common

/* Instruction Storage Interrupt */
	START_EXCEPTION(instruction_storage);
	NORMAL_EXCEPTION_PROLOG(0x400, BOOKE_INTERRUPT_INST_STORAGE,
				PROLOG_ADDITION_2REGS)
	li	r15,0
	mr	r14,r10
	EXCEPTION_COMMON(0x400)
	INTS_DISABLE
	b	storage_fault_common

/* External Input Interrupt */
	MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL,
			   external_input, do_IRQ, ACK_NONE)

/* Alignment */
	START_EXCEPTION(alignment);
	NORMAL_EXCEPTION_PROLOG(0x600, BOOKE_INTERRUPT_ALIGNMENT,
				PROLOG_ADDITION_2REGS)
	mfspr	r14,SPRN_DEAR
	mfspr	r15,SPRN_ESR
	EXCEPTION_COMMON(0x600)
	b	alignment_more	/* no room, go out of line */

/* Program Interrupt */
	START_EXCEPTION(program);
	NORMAL_EXCEPTION_PROLOG(0x700, BOOKE_INTERRUPT_PROGRAM,
				PROLOG_ADDITION_1REG)
	mfspr	r14,SPRN_ESR
	EXCEPTION_COMMON(0x700)
	INTS_DISABLE
	std	r14,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r14,PACA_EXGEN+EX_R14(r13)
	bl	save_nvgprs
	bl	program_check_exception
	b	ret_from_except

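/*
 * Most handlers in this file follow the shape seen above: a
 * type-specific PROLOG, EXCEPTION_COMMON* to build the pt_regs frame,
 * INTS_DISABLE when the C code must see interrupts reconciled as off,
 * then save_nvgprs and a call into C, ending on the matching
 * ret_from_*except path.
 */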
/* Floating Point Unavailable Interrupt */
	START_EXCEPTION(fp_unavailable);
	NORMAL_EXCEPTION_PROLOG(0x800, BOOKE_INTERRUPT_FP_UNAVAIL,
				PROLOG_ADDITION_NONE)
	/* we can probably do a shorter exception entry for that one... */
	EXCEPTION_COMMON(0x800)
	ld	r12,_MSR(r1)
	andi.	r0,r12,MSR_PR;
	beq-	1f
	bl	load_up_fpu
	b	fast_exception_return
1:	INTS_DISABLE
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	kernel_fp_unavailable_exception
	b	ret_from_except

/* Altivec Unavailable Interrupt */
	START_EXCEPTION(altivec_unavailable);
	NORMAL_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_ALTIVEC_UNAVAIL,
				PROLOG_ADDITION_NONE)
	/* we can probably do a shorter exception entry for that one... */
	EXCEPTION_COMMON(0x200)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r12,_MSR(r1)
	andi.	r0,r12,MSR_PR;
	beq-	1f
	bl	load_up_altivec
	b	fast_exception_return
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	INTS_DISABLE
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	altivec_unavailable_exception
	b	ret_from_except

/* AltiVec Assist */
	START_EXCEPTION(altivec_assist);
	NORMAL_EXCEPTION_PROLOG(0x220,
				BOOKE_INTERRUPT_ALTIVEC_ASSIST,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x220)
	INTS_DISABLE
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	bl	altivec_assist_exception
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#else
	bl	unknown_exception
#endif
	b	ret_from_except


/* Decrementer Interrupt */
	MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER,
			   decrementer, timer_interrupt, ACK_DEC)

/* Fixed Interval Timer Interrupt */
	MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT,
			   fixed_interval, unknown_exception, ACK_FIT)

/* Watchdog Timer Interrupt */
	START_EXCEPTION(watchdog);
	CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG,
			      PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON_CRIT(0x9f0)
	bl	save_nvgprs
	bl	special_reg_save
	CHECK_NAPPING();
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_BOOKE_WDT
	bl	WatchdogException
#else
	bl	unknown_exception
#endif
	b	ret_from_crit_except

/* System Call Interrupt */
	START_EXCEPTION(system_call)
	mr	r9,r13			/* keep a copy of userland r13 */
	mfspr	r11,SPRN_SRR0		/* get return address */
	mfspr	r12,SPRN_SRR1		/* get previous MSR */
	mfspr	r13,SPRN_SPRG_PACA	/* get our PACA */
	b	system_call_common

/* Auxiliary Processor Unavailable Interrupt */
	START_EXCEPTION(ap_unavailable);
	NORMAL_EXCEPTION_PROLOG(0xf20, BOOKE_INTERRUPT_AP_UNAVAIL,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0xf20)
	INTS_DISABLE
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unknown_exception
	b	ret_from_except

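/*
 * The two debug handlers that follow are the same logic twice over:
 * once for parts that deliver debug events as a critical interrupt
 * (CSRR0/1, rfci) and once for parts with dedicated debug save/restore
 * registers (DSRR0/1, rfdi).
 */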
/* Debug exception as a critical interrupt */
	START_EXCEPTION(debug_crit);
	CRIT_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG,
			      PROLOG_ADDITION_2REGS)

	/*
	 * If there is a single step or branch-taken exception in an
	 * exception entry sequence, it was probably meant to apply to
	 * the code where the exception occurred (since exception entry
	 * doesn't turn off DE automatically).  We simulate the effect
	 * of turning off DE on entry to an exception handler by turning
	 * off DE in the CSRR1 value and clearing the debug status.
	 */

	mfspr	r14,SPRN_DBSR		/* check single-step/branch taken */
	andis.	r15,r14,(DBSR_IC|DBSR_BT)@h
	beq+	1f

#ifdef CONFIG_RELOCATABLE
	ld	r15,PACATOC(r13)
	ld	r14,interrupt_base_book3e@got(r15)
	ld	r15,__end_interrupts@got(r15)
	cmpld	cr0,r10,r14
	cmpld	cr1,r10,r15
#else
	LOAD_REG_IMMEDIATE_SYM(r14, r15, interrupt_base_book3e)
	cmpld	cr0, r10, r14
	LOAD_REG_IMMEDIATE_SYM(r14, r15, __end_interrupts)
	cmpld	cr1, r10, r14
#endif
	blt+	cr0,1f
	bge+	cr1,1f

	/* here it looks like we got an inappropriate debug exception. */
	lis	r14,(DBSR_IC|DBSR_BT)@h	/* clear the event */
	rlwinm	r11,r11,0,~MSR_DE	/* clear DE in the CSRR1 value */
	mtspr	SPRN_DBSR,r14
	mtspr	SPRN_CSRR1,r11
	lwz	r10,PACA_EXCRIT+EX_CR(r13)	/* restore registers */
	ld	r1,PACA_EXCRIT+EX_R1(r13)
	ld	r14,PACA_EXCRIT+EX_R14(r13)
	ld	r15,PACA_EXCRIT+EX_R15(r13)
	mtcr	r10
	ld	r10,PACA_EXCRIT+EX_R10(r13)	/* restore registers */
	ld	r11,PACA_EXCRIT+EX_R11(r13)
	mfspr	r13,SPRN_SPRG_CRIT_SCRATCH
	rfci

	/* Normal debug exception */
	/* XXX We only handle coming from userspace for now since we can't
	 *     quite save properly an interrupted kernel state yet
	 */
1:	andi.	r14,r11,MSR_PR;		/* check for userspace again */
	beq	kernel_dbg_exc;		/* if from kernel mode */

	/* Now we mash up things to make it look like we are coming on a
	 * normal exception
	 */
	mfspr	r14,SPRN_DBSR
	EXCEPTION_COMMON_CRIT(0xd00)
	std	r14,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r14,PACA_EXCRIT+EX_R14(r13)
	ld	r15,PACA_EXCRIT+EX_R15(r13)
	bl	save_nvgprs
	bl	DebugException
	b	ret_from_except

kernel_dbg_exc:
	b	.			/* NYI */

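/*
 * Note: the CONFIG_RELOCATABLE vs fixed-address window check above
 * (and again below) decides one thing only: whether the interrupted
 * PC lies inside [interrupt_base_book3e, __end_interrupts), i.e.
 * inside our own exception entry stubs.
 */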
/* Debug exception as a debug interrupt */
	START_EXCEPTION(debug_debug);
	DBG_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG,
			     PROLOG_ADDITION_2REGS)

	/*
	 * If there is a single step or branch-taken exception in an
	 * exception entry sequence, it was probably meant to apply to
	 * the code where the exception occurred (since exception entry
	 * doesn't turn off DE automatically).  We simulate the effect
	 * of turning off DE on entry to an exception handler by turning
	 * off DE in the DSRR1 value and clearing the debug status.
	 */

	mfspr	r14,SPRN_DBSR		/* check single-step/branch taken */
	andis.	r15,r14,(DBSR_IC|DBSR_BT)@h
	beq+	1f

#ifdef CONFIG_RELOCATABLE
	ld	r15,PACATOC(r13)
	ld	r14,interrupt_base_book3e@got(r15)
	ld	r15,__end_interrupts@got(r15)
	cmpld	cr0,r10,r14
	cmpld	cr1,r10,r15
#else
	LOAD_REG_IMMEDIATE_SYM(r14, r15, interrupt_base_book3e)
	cmpld	cr0, r10, r14
	LOAD_REG_IMMEDIATE_SYM(r14, r15, __end_interrupts)
	cmpld	cr1, r10, r14
#endif
	blt+	cr0,1f
	bge+	cr1,1f

	/* here it looks like we got an inappropriate debug exception. */
	lis	r14,(DBSR_IC|DBSR_BT)@h	/* clear the event */
	rlwinm	r11,r11,0,~MSR_DE	/* clear DE in the DSRR1 value */
	mtspr	SPRN_DBSR,r14
	mtspr	SPRN_DSRR1,r11
	lwz	r10,PACA_EXDBG+EX_CR(r13)	/* restore registers */
	ld	r1,PACA_EXDBG+EX_R1(r13)
	ld	r14,PACA_EXDBG+EX_R14(r13)
	ld	r15,PACA_EXDBG+EX_R15(r13)
	mtcr	r10
	ld	r10,PACA_EXDBG+EX_R10(r13)	/* restore registers */
	ld	r11,PACA_EXDBG+EX_R11(r13)
	mfspr	r13,SPRN_SPRG_DBG_SCRATCH
	rfdi

	/* Normal debug exception */
	/* XXX We only handle coming from userspace for now since we can't
	 *     quite save properly an interrupted kernel state yet
	 */
1:	andi.	r14,r11,MSR_PR;		/* check for userspace again */
	beq	kernel_dbg_exc;		/* if from kernel mode */

	/* Now we mash up things to make it look like we are coming on a
	 * normal exception
	 */
	mfspr	r14,SPRN_DBSR
	EXCEPTION_COMMON_DBG(0xd08)
	INTS_DISABLE
	std	r14,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r14,PACA_EXDBG+EX_R14(r13)
	ld	r15,PACA_EXDBG+EX_R15(r13)
	bl	save_nvgprs
	bl	DebugException
	b	ret_from_except

	START_EXCEPTION(perfmon);
	NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x260)
	INTS_DISABLE
	CHECK_NAPPING()
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	performance_monitor_exception
	b	ret_from_except_lite

/* Doorbell interrupt */
	MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL,
			   doorbell, doorbell_exception, ACK_NONE)

/* Doorbell critical Interrupt */
	START_EXCEPTION(doorbell_crit);
	CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL,
			      PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON_CRIT(0x2a0)
	bl	save_nvgprs
	bl	special_reg_save
	CHECK_NAPPING();
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unknown_exception
	b	ret_from_crit_except

/*
 * Guest doorbell interrupt
 * This general exception uses the GSRRx save/restore registers
 */
	START_EXCEPTION(guest_doorbell);
	GDBELL_EXCEPTION_PROLOG(0x2c0, BOOKE_INTERRUPT_GUEST_DBELL,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x2c0)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	save_nvgprs
	INTS_RESTORE_HARD
	bl	unknown_exception
	b	ret_from_except

/* Guest Doorbell critical Interrupt */
	START_EXCEPTION(guest_doorbell_crit);
	CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT,
			      PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON_CRIT(0x2e0)
	bl	save_nvgprs
	bl	special_reg_save
	CHECK_NAPPING();
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unknown_exception
	b	ret_from_crit_except

/* Hypervisor call */
	START_EXCEPTION(hypercall);
	NORMAL_EXCEPTION_PROLOG(0x310, BOOKE_INTERRUPT_HV_SYSCALL,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x310)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	save_nvgprs
	INTS_RESTORE_HARD
	bl	unknown_exception
	b	ret_from_except

/* Embedded Hypervisor privileged */
	START_EXCEPTION(ehpriv);
	NORMAL_EXCEPTION_PROLOG(0x320, BOOKE_INTERRUPT_HV_PRIV,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x320)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	save_nvgprs
	INTS_RESTORE_HARD
	bl	unknown_exception
	b	ret_from_except

/* LRAT Error interrupt */
	START_EXCEPTION(lrat_error);
	NORMAL_EXCEPTION_PROLOG(0x340, BOOKE_INTERRUPT_LRAT_ERROR,
				PROLOG_ADDITION_NONE)
	EXCEPTION_COMMON(0x340)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	save_nvgprs
	INTS_RESTORE_HARD
	bl	unknown_exception
	b	ret_from_except

/*
 * An interrupt came in while soft-disabled; we mark paca->irq_happened
 * accordingly and, if the interrupt is level sensitive, we hard disable.
 * Hard disable (full_mask) corresponds to PACA_IRQ_MUST_HARD_MASK, so
 * keep these in sync.
 */
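/*
 * In rough C terms, a masked source is handled as:
 *
 *	paca->irq_happened |= paca_irq;
 *	if (full_mask)			// level-sensitive source
 *		return_msr &= ~MSR_EE;	// stay hard-disabled until replay
 *	// then restore scratch regs and rfi
 */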
.macro masked_interrupt_book3e paca_irq full_mask
	lbz	r10,PACAIRQHAPPENED(r13)
	.if \full_mask == 1
	ori	r10,r10,\paca_irq | PACA_IRQ_HARD_DIS
	.else
	ori	r10,r10,\paca_irq
	.endif
	stb	r10,PACAIRQHAPPENED(r13)

	.if \full_mask == 1
	rldicl	r10,r11,48,1		/* clear MSR_EE */
	rotldi	r11,r10,16
	mtspr	SPRN_SRR1,r11
	.endif

	lwz	r11,PACA_EXGEN+EX_CR(r13)
	mtcr	r11
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	mfspr	r13,SPRN_SPRG_GEN_SCRATCH
	rfi
	b	.
.endm

masked_interrupt_book3e_0x500:
	masked_interrupt_book3e PACA_IRQ_EE 1

masked_interrupt_book3e_0x900:
	ACK_DEC(r10);
	masked_interrupt_book3e PACA_IRQ_DEC 0

masked_interrupt_book3e_0x980:
	ACK_FIT(r10);
	masked_interrupt_book3e PACA_IRQ_DEC 0

masked_interrupt_book3e_0x280:
masked_interrupt_book3e_0x2c0:
	masked_interrupt_book3e PACA_IRQ_DBELL 0

/*
 * This is called from 0x300 and 0x400 handlers after the prologs with
 * r14 and r15 containing the fault address and error code, with the
 * original values stashed away in the PACA
 */
storage_fault_common:
	std	r14,_DAR(r1)
	std	r15,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r14,PACA_EXGEN+EX_R14(r13)
	ld	r15,PACA_EXGEN+EX_R15(r13)
	bl	do_page_fault
	cmpdi	r3,0
	bne-	1f
	b	ret_from_except_lite
1:	bl	save_nvgprs
	mr	r4,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	__bad_page_fault
	b	ret_from_except

/*
 * Alignment exception doesn't fit entirely in the 0x100 bytes so it
 * continues here.
 */
alignment_more:
	std	r14,_DAR(r1)
	std	r15,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r14,PACA_EXGEN+EX_R14(r13)
	ld	r15,PACA_EXGEN+EX_R15(r13)
	bl	save_nvgprs
	INTS_RESTORE_HARD
	bl	alignment_exception
	b	ret_from_except

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
	wrteei	0

	ld	r9, PACA_THREAD_INFO(r13)
	ld	r3,_MSR(r1)
	ld	r10,PACACURRENT(r13)
	ld	r4,TI_FLAGS(r9)
	andi.	r3,r3,MSR_PR
	beq	resume_kernel
	lwz	r3,(THREAD+THREAD_DBCR0)(r10)

	/* Check current_thread_info()->flags */
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	1f
	/*
	 * Check to see if the dbcr0 register is set up to debug.
	 * Use the internal debug mode bit to do this.
	 */
	andis.	r0,r3,DBCR0_IDM@h
	beq	restore
	mfmsr	r0
	rlwinm	r0,r0,0,~MSR_DE	/* Clear MSR.DE */
	mtmsr	r0
	mtspr	SPRN_DBCR0,r3
	li	r10, -1
	mtspr	SPRN_DBSR,r10
	b	restore
1:	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	2f
	bl	restore_interrupts
	SCHEDULE_USER
	b	ret_from_except_lite
2:
	bl	save_nvgprs
	/*
	 * Use a non volatile GPR to save and restore our thread_info flags
	 * across the call to restore_interrupts.
	 */
	mr	r30,r4
	bl	restore_interrupts
	mr	r4,r30
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_notify_resume
	b	ret_from_except

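/*
 * Returning to the kernel: first complete any stack store still owed
 * by an earlier emulated instruction (see the stdu comment below),
 * then, on preemptible kernels, give the scheduler a chance before
 * falling into the common "restore" exit path.
 */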
resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	andis.	r8,r4,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	ld	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/8	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	ldx	r0,r6,r4
	stdx	r0,r6,r3
	addi	r6,r6,8
	bdnz	2b

	/* Do real store operation to complete stdu */
	ld	r5,GPR1(r1)
	std	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
0:	ldarx	r4,0,r5
	andc	r4,r4,r11
	stdcx.	r4,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPT
	/* Check if we need to preempt */
	andi.	r0,r4,_TIF_NEED_RESCHED
	beq+	restore
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr0,r8,0
	bne	restore
	ld	r0,SOFTE(r1)
	andi.	r0,r0,IRQS_DISABLED
	bne	restore

	/*
	 * Here we are preempting the current task.  We want to make
	 * sure we are soft-disabled first and reconcile irq state.
	 */
	RECONCILE_IRQ_STATE(r3,r4)
	bl	preempt_schedule_irq

	/*
	 * arch_local_irq_restore() from preempt_schedule_irq above may
	 * enable hard interrupts, but we should disable them again when
	 * we return from the interrupt so that we don't get interrupted
	 * after loading SRR0/1.
	 */
	wrteei	0
#endif /* CONFIG_PREEMPT */

restore:
	/*
	 * This is the main kernel exit path.  First we check if we
	 * are about to re-enable interrupts
	 */
	ld	r5,SOFTE(r1)
	lbz	r6,PACAIRQSOFTMASK(r13)
	andi.	r5,r5,IRQS_DISABLED
	bne	.Lrestore_irq_off

	/* We are enabling, were we already enabled ? Yes, just return */
	andi.	r6,r6,IRQS_DISABLED
	beq	cr0,fast_exception_return

	/*
	 * We are about to soft-enable interrupts (we are hard disabled
	 * at this point).  We check if there's anything that needs to
	 * be replayed first.
	 */
	lbz	r0,PACAIRQHAPPENED(r13)
	cmpwi	cr0,r0,0
	bne-	.Lrestore_check_irq_replay

	/*
	 * Get here when nothing happened while soft-disabled, just
	 * soft-enable and move-on.  We will hard-enable as a side
	 * effect of rfi
	 */
.Lrestore_no_replay:
	TRACE_ENABLE_INTS
	li	r0,IRQS_ENABLED
	stb	r0,PACAIRQSOFTMASK(r13);

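/*
 * When we fall through here from .Lrestore_no_replay above, the soft
 * mask already says enabled while the hardware is still hard-disabled;
 * the final rfi restores MSR from the frame and re-enables MSR[EE] as
 * a side effect, which is what makes that lazy-enable path safe.
 */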
/* This is the return from load_up_fpu fast path which could do with
 * less GPR restores in fact, but for now we have a single return path
 */
fast_exception_return:
	wrteei	0
1:	mr	r0,r13
	ld	r10,_MSR(r1)
	REST_4GPRS(2, r1)
	andi.	r6,r10,MSR_PR
	REST_2GPRS(6, r1)
	beq	1f
	ACCOUNT_CPU_USER_EXIT(r13, r10, r11)
	ld	r0,GPR13(r1)

1:	stdcx.	r0,0,r1		/* to clear the reservation */

	ld	r8,_CCR(r1)
	ld	r9,_LINK(r1)
	ld	r10,_CTR(r1)
	ld	r11,_XER(r1)
	mtcr	r8
	mtlr	r9
	mtctr	r10
	mtxer	r11
	REST_2GPRS(8, r1)
	ld	r10,GPR10(r1)
	ld	r11,GPR11(r1)
	ld	r12,GPR12(r1)
	mtspr	SPRN_SPRG_GEN_SCRATCH,r0

	std	r10,PACA_EXGEN+EX_R10(r13);
	std	r11,PACA_EXGEN+EX_R11(r13);
	ld	r10,_NIP(r1)
	ld	r11,_MSR(r1)
	ld	r0,GPR0(r1)
	ld	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	mfspr	r13,SPRN_SPRG_GEN_SCRATCH
	rfi

	/*
	 * We are returning to a context with interrupts soft disabled.
	 *
	 * However, we may also be about to hard enable, so we need to
	 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
	 * or that bit can get out of sync and bad things will happen
	 */
.Lrestore_irq_off:
	ld	r3,_MSR(r1)
	lbz	r7,PACAIRQHAPPENED(r13)
	andi.	r0,r3,MSR_EE
	beq	1f
	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
	stb	r7,PACAIRQHAPPENED(r13)
1:
#if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
	/* The interrupt should not have soft enabled. */
	lbz	r7,PACAIRQSOFTMASK(r13)
1:	tdeqi	r7,IRQS_ENABLED
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
	b	fast_exception_return

	/*
	 * Something did happen, check if a re-emit is needed
	 * (this also clears paca->irq_happened)
	 */
.Lrestore_check_irq_replay:
	/* XXX: We could implement a fast path here where we check
	 * for irq_happened being just 0x01, in which case we can
	 * clear it and return.  That means that we would potentially
	 * miss a decrementer having wrapped all the way around.
	 *
	 * Still, this might be useful for things like hash_page
	 */
	bl	__check_irq_replay
	cmpwi	cr0,r3,0
	beq	.Lrestore_no_replay

	/*
	 * We need to re-emit an interrupt.  We do so by re-using our
	 * existing exception frame.  We first change the trap value,
	 * but we need to ensure we preserve the low nibble of it
	 */
	ld	r4,_TRAP(r1)
	clrldi	r4,r4,60
	or	r4,r4,r3
	std	r4,_TRAP(r1)

	/*
	 * PACA_IRQ_HARD_DIS won't always be set here, so set it now
	 * to reconcile the IRQ state.  Tracing is already accounted for.
	 */
	lbz	r4,PACAIRQHAPPENED(r13)
	ori	r4,r4,PACA_IRQ_HARD_DIS
	stb	r4,PACAIRQHAPPENED(r13)

	/*
	 * Then find the right handler and call it.  Interrupts are
	 * still soft-disabled and we keep them that way.
	 */
	cmpwi	cr0,r3,0x500
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	do_IRQ
	b	ret_from_except
1:	cmpwi	cr0,r3,0x900
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	timer_interrupt
	b	ret_from_except
#ifdef CONFIG_PPC_DOORBELL
1:
	cmpwi	cr0,r3,0x280
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	doorbell_exception
#endif /* CONFIG_PPC_DOORBELL */
1:	b	ret_from_except /* What else to do here ? */

_ASM_NOKPROBE_SYMBOL(ret_from_except);
_ASM_NOKPROBE_SYMBOL(ret_from_except_lite);
_ASM_NOKPROBE_SYMBOL(resume_kernel);
_ASM_NOKPROBE_SYMBOL(restore);
_ASM_NOKPROBE_SYMBOL(fast_exception_return);

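/*
 * A note on the replay path above: _TRAP deliberately keeps its low
 * nibble when the new interrupt number is or-ed in, because
 * ret_from_except tests bit 0 (the "partial regs" flag set via the
 * (n)+1 in EXCEPTION_COMMON) to decide whether the non-volatile GPRs
 * still need restoring.
 */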
/*
 * Trampolines used when spotting a bad kernel stack pointer in
 * the exception entry code.
 *
 * TODO: move some bits like SRR0 read to trampoline, pass PACA
 * index around, etc... to handle crit & mcheck
 */
BAD_STACK_TRAMPOLINE(0x000)
BAD_STACK_TRAMPOLINE(0x100)
BAD_STACK_TRAMPOLINE(0x200)
BAD_STACK_TRAMPOLINE(0x220)
BAD_STACK_TRAMPOLINE(0x260)
BAD_STACK_TRAMPOLINE(0x280)
BAD_STACK_TRAMPOLINE(0x2a0)
BAD_STACK_TRAMPOLINE(0x2c0)
BAD_STACK_TRAMPOLINE(0x2e0)
BAD_STACK_TRAMPOLINE(0x300)
BAD_STACK_TRAMPOLINE(0x310)
BAD_STACK_TRAMPOLINE(0x320)
BAD_STACK_TRAMPOLINE(0x340)
BAD_STACK_TRAMPOLINE(0x400)
BAD_STACK_TRAMPOLINE(0x500)
BAD_STACK_TRAMPOLINE(0x600)
BAD_STACK_TRAMPOLINE(0x700)
BAD_STACK_TRAMPOLINE(0x800)
BAD_STACK_TRAMPOLINE(0x900)
BAD_STACK_TRAMPOLINE(0x980)
BAD_STACK_TRAMPOLINE(0x9f0)
BAD_STACK_TRAMPOLINE(0xa00)
BAD_STACK_TRAMPOLINE(0xb00)
BAD_STACK_TRAMPOLINE(0xc00)
BAD_STACK_TRAMPOLINE(0xd00)
BAD_STACK_TRAMPOLINE(0xd08)
BAD_STACK_TRAMPOLINE(0xe00)
BAD_STACK_TRAMPOLINE(0xf00)
BAD_STACK_TRAMPOLINE(0xf20)

	.globl	bad_stack_book3e
bad_stack_book3e:
	/* XXX: Needs to make SPRN_SPRG_GEN depend on exception type */
	mfspr	r10,SPRN_SRR0;		/* read SRR0 before touching stack */
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r10,_NIP(r1)
	std	r11,_MSR(r1)
	ld	r10,PACA_EXGEN+EX_R1(r13) /* FIXME for crit & mcheck */
	lwz	r11,PACA_EXGEN+EX_CR(r13) /* FIXME for crit & mcheck */
	std	r10,GPR1(r1)
	std	r11,_CCR(r1)
	mfspr	r10,SPRN_DEAR
	mfspr	r11,SPRN_ESR
	std	r10,_DAR(r1)
	std	r11,_DSISR(r1)
	std	r0,GPR0(r1);		/* save r0 in stackframe */	    \
	std	r2,GPR2(r1);		/* save r2 in stackframe */	    \
	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe */    \
	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe */	    \
	std	r9,GPR9(r1);		/* save r9 in stackframe */	    \
	ld	r3,PACA_EXGEN+EX_R10(r13);/* get back r10 */		    \
	ld	r4,PACA_EXGEN+EX_R11(r13);/* get back r11 */		    \
	mfspr	r5,SPRN_SPRG_GEN_SCRATCH;/* get back r13 XXX can be wrong */ \
	std	r3,GPR10(r1);		/* save r10 to stackframe */	    \
	std	r4,GPR11(r1);		/* save r11 to stackframe */	    \
	std	r12,GPR12(r1);		/* save r12 in stackframe */	    \
	std	r5,GPR13(r1);		/* save it to stackframe */	    \
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_10GPRS(14,r1)
	SAVE_8GPRS(24,r1)
	lhz	r12,PACA_TRAP_SAVE(r13)
	std	r12,_TRAP(r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	kernel_bad_stack
	b	1b

/*
 * Setup the initial TLB for a core.  This current implementation
 * assumes that whatever we are running off will not conflict with
 * the new mapping at PAGE_OFFSET.
 */
_GLOBAL(initial_tlb_book3e)

	/* Look for the first TLB with IPROT set */
	mfspr	r4,SPRN_TLB0CFG
	andi.	r3,r4,TLBnCFG_IPROT
	lis	r3,MAS0_TLBSEL(0)@h
	bne	found_iprot

	mfspr	r4,SPRN_TLB1CFG
	andi.	r3,r4,TLBnCFG_IPROT
	lis	r3,MAS0_TLBSEL(1)@h
	bne	found_iprot

	mfspr	r4,SPRN_TLB2CFG
	andi.	r3,r4,TLBnCFG_IPROT
	lis	r3,MAS0_TLBSEL(2)@h
	bne	found_iprot

	lis	r3,MAS0_TLBSEL(3)@h
	mfspr	r4,SPRN_TLB3CFG
	/* fall through */

found_iprot:
	andi.	r5,r4,TLBnCFG_HES
	bne	have_hes

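/*
 * No hardware entry select (TLBnCFG_HES, presumably "hardware entry
 * select"): we have to find and preserve the TLB entry we are
 * executing from by hand.  The numbered steps below swap the boot-time
 * mapping for the final PAGE_OFFSET linear mapping.
 */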
	mflr	r8			/* save LR */
/* 1. Find the index of the entry we're executing in
 *
 * r3 = MAS0_TLBSEL (for the iprot array)
 * r4 = SPRN_TLBnCFG
 */
	bl	invstr				/* Find our address */
invstr:	mflr	r6				/* Make it accessible */
	mfmsr	r7
	rlwinm	r5,r7,27,31,31			/* extract MSR[IS] */
	mfspr	r7,SPRN_PID
	slwi	r7,r7,16
	or	r7,r7,r5
	mtspr	SPRN_MAS6,r7
	tlbsx	0,r6				/* search MSR[IS], SPID=PID */

	mfspr	r3,SPRN_MAS0
	rlwinm	r5,r3,16,20,31			/* Extract MAS0(Entry) */

	mfspr	r7,SPRN_MAS1			/* Ensure IPROT is set */
	oris	r7,r7,MAS1_IPROT@h
	mtspr	SPRN_MAS1,r7
	tlbwe

/* 2. Invalidate all entries except the entry we're executing in
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in
 * r4 = SPRN_TLBnCFG
 * r5 = ESEL of entry we are running in
 */
	andi.	r4,r4,TLBnCFG_N_ENTRY		/* Extract # entries */
	li	r6,0				/* Set Entry counter to 0 */
1:	mr	r7,r3				/* Set MAS0(TLBSEL) */
	rlwimi	r7,r6,16,4,15			/* Setup MAS0 = TLBSEL | ESEL(r6) */
	mtspr	SPRN_MAS0,r7
	tlbre
	mfspr	r7,SPRN_MAS1
	rlwinm	r7,r7,0,2,31			/* Clear MAS1 Valid and IPROT */
	cmpw	r5,r6
	beq	skpinv				/* Don't update the current execution TLB */
	mtspr	SPRN_MAS1,r7
	tlbwe
	isync
skpinv:	addi	r6,r6,1				/* Increment */
	cmpw	r6,r4				/* Are we done? */
	bne	1b				/* If not, repeat */

	/* Invalidate all TLBs */
	PPC_TLBILX_ALL(0,R0)
	sync
	isync

/* 3. Setup a temp mapping and jump to it
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in
 * r5 = ESEL of entry we are running in
 */
	andi.	r7,r5,0x1	/* pick a non-zero entry other than ours */
	addi	r7,r7,0x1
	mr	r4,r3		/* Set MAS0(TLBSEL) = 1 */
	mtspr	SPRN_MAS0,r4
	tlbre

	rlwimi	r4,r7,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r7) */
	mtspr	SPRN_MAS0,r4

	mfspr	r7,SPRN_MAS1
	xori	r6,r7,MAS1_TS	/* Setup TMP mapping in the other Address space */
	mtspr	SPRN_MAS1,r6

	tlbwe

	mfmsr	r6
	xori	r6,r6,MSR_IS
	mtspr	SPRN_SRR1,r6
	bl	1f		/* Find our address */
1:	mflr	r6
	addi	r6,r6,(2f - 1b)
	mtspr	SPRN_SRR0,r6
	rfi
2:

/* 4. Clear out PIDs & Search info
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
 * r5 = MAS3
 */
	li	r6,0
	mtspr	SPRN_MAS6,r6
	mtspr	SPRN_PID,r6

/* 5. Invalidate mapping we started in
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
 * r5 = MAS3
 */
	mtspr	SPRN_MAS0,r3
	tlbre
	mfspr	r6,SPRN_MAS1
	rlwinm	r6,r6,0,2,31		/* clear IPROT and VALID */
	mtspr	SPRN_MAS1,r6
	tlbwe
	sync
	isync

/* 6. Setup KERNELBASE mapping in TLB[0]
 *
 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
 * r5 = MAS3
 */
	rlwinm	r3,r3,0,16,3	/* clear ESEL */
	mtspr	SPRN_MAS0,r3
	lis	r6,(MAS1_VALID|MAS1_IPROT)@h
	ori	r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
	mtspr	SPRN_MAS1,r6

	LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | MAS2_M_IF_NEEDED)
	mtspr	SPRN_MAS2,r6

	rlwinm	r5,r5,0,0,25
	ori	r5,r5,MAS3_SR | MAS3_SW | MAS3_SX
	mtspr	SPRN_MAS3,r5
	li	r5,-1
	rlwinm	r5,r5,0,0,25

	tlbwe

/* 7. Jump to KERNELBASE mapping
 *
 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
 */
	/* Now we branch to the new virtual address mapped by this entry */
	bl	1f		/* Find our address */
1:	mflr	r6
	addi	r6,r6,(2f - 1b)
	tovirt(r6,r6)
	lis	r7,MSR_KERNEL@h
	ori	r7,r7,MSR_KERNEL@l
	mtspr	SPRN_SRR0,r6
	mtspr	SPRN_SRR1,r7
	rfi			/* start execution out of TLB1[0] entry */
2:

/* 8. Clear out the temp mapping
 *
 * r4 = MAS0 w/TLBSEL & ESEL for the entry we are running in
 */
	mtspr	SPRN_MAS0,r4
	tlbre
	mfspr	r5,SPRN_MAS1
	rlwinm	r5,r5,0,2,31		/* clear IPROT and VALID */
	mtspr	SPRN_MAS1,r5
	tlbwe
	sync
	isync

	/* We translate LR and return */
	tovirt(r8,r8)
	mtlr	r8
	blr

have_hes:
	/* Setup MAS 0,1,2,3 and 7 for tlbwe of a 1G entry that maps the
	 * kernel linear mapping.  We also set MAS8 once for all here though
	 * that will have to be made dependent on whether we are running under
	 * a hypervisor I suppose.
	 */

	/* BEWARE, MAGIC
	 * This code is called as an ordinary function on the boot CPU.  But to
	 * avoid duplication, this code is also used in SCOM bringup of
	 * secondary CPUs.  We read the code between the initial_tlb_code_start
	 * and initial_tlb_code_end labels one instruction at a time and RAM it
	 * into the new core via SCOM.  That doesn't process branches, so there
	 * must be none between those two labels.  It also means if this code
	 * ever takes any parameters, the SCOM code must also be updated to
	 * provide them.
	 */
	.globl a2_tlbinit_code_start
a2_tlbinit_code_start:

	ori	r11,r3,MAS0_WQ_ALLWAYS
	oris	r11,r11,MAS0_ESEL(3)@h /* Use way 3: workaround A2 erratum 376 */
	mtspr	SPRN_MAS0,r11
	lis	r3,(MAS1_VALID | MAS1_IPROT)@h
	ori	r3,r3,BOOK3E_PAGESZ_1GB << MAS1_TSIZE_SHIFT
	mtspr	SPRN_MAS1,r3
	LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET | MAS2_M)
	mtspr	SPRN_MAS2,r3
	li	r3,MAS3_SR | MAS3_SW | MAS3_SX
	mtspr	SPRN_MAS7_MAS3,r3
	li	r3,0
	mtspr	SPRN_MAS8,r3

	/* Write the TLB entry */
	tlbwe

	.globl a2_tlbinit_after_linear_map
a2_tlbinit_after_linear_map:

	/* Now we branch to the new virtual address mapped by this entry */
	LOAD_REG_IMMEDIATE_SYM(r3, r5, 1f)
	mtctr	r3
	bctr

1:	/* We are now running at PAGE_OFFSET, clean the TLB of everything
	 * else (including IPROTed things left by firmware)
	 * r4 = TLBnCFG
	 * r3 = current address (more or less)
	 */

	li	r5,0
	mtspr	SPRN_MAS6,r5
	tlbsx	0,r3

	rlwinm	r9,r4,0,TLBnCFG_N_ENTRY
	rlwinm	r10,r4,8,0xff
	addi	r10,r10,-1	/* Get inner loop mask */

	li	r3,1

	mfspr	r5,SPRN_MAS1
	rlwinm	r5,r5,0,(~(MAS1_VALID|MAS1_IPROT))

	mfspr	r6,SPRN_MAS2
	rldicr	r6,r6,0,51	/* Extract EPN */

	mfspr	r7,SPRN_MAS0
	rlwinm	r7,r7,0,0xffff0fff	/* Clear HES and WQ */

	rlwinm	r8,r7,16,0xfff	/* Extract ESEL */

2:	add	r4,r3,r8
	and	r4,r4,r10

	rlwimi	r7,r4,16,MAS0_ESEL_MASK

	mtspr	SPRN_MAS0,r7
	mtspr	SPRN_MAS1,r5
	mtspr	SPRN_MAS2,r6
	tlbwe

	addi	r3,r3,1
	and.	r4,r3,r10

	bne	3f
	addis	r6,r6,(1<<30)@h
3:
	cmpw	r3,r9
	blt	2b

	.globl	a2_tlbinit_after_iprot_flush
a2_tlbinit_after_iprot_flush:

	PPC_TLBILX(0,0,R0)
	sync
	isync

	.globl a2_tlbinit_code_end
a2_tlbinit_code_end:

	/* We translate LR and return */
	mflr	r3
	tovirt(r3,r3)
	mtlr	r3
	blr

/*
 * Main entry (boot CPU, thread 0)
 *
 * We enter here from head_64.S, possibly after the prom_init trampoline
 * with r3 and r4 already saved to r31 and r30 respectively and in 64-bit
 * mode.  Anything else is as it was left by the bootloader
 *
 * Initial requirements of this port:
 *
 * - Kernel loaded at 0 physical
 * - A good lump of memory mapped 0:0 by UTLB entry 0
 * - MSR:IS & MSR:DS set to 0
 *
 * Note that some of the above requirements will be relaxed in the future
 * as the kernel becomes smarter at dealing with different initial conditions
 * but for now you have to be careful
 */
_GLOBAL(start_initialization_book3e)
	mflr	r28

	/* First, we need to setup some initial TLBs to map the kernel
	 * text, data and bss at PAGE_OFFSET.  We don't have a real mode
	 * and always use AS 0, so we just set it up to match our link
	 * address and never use 0 based addresses.
	 */
	bl	initial_tlb_book3e

	/* Init global core bits */
	bl	init_core_book3e

	/* Init per-thread bits */
	bl	init_thread_book3e

	/* Return to common init code */
	tovirt(r28,r28)
	mtlr	r28
	blr


/*
 * Secondary core/processor entry
 *
 * This is entered for thread 0 of a secondary core, all other threads
 * are expected to be stopped.  It's similar to start_initialization_book3e
 * except that it's generally entered from the holding loop in head_64.S
 * after CPUs have been gathered by Open Firmware.
 *
 * We assume we are in 32-bit mode running with whatever TLB entry was
 * set for us by the firmware or POR engine.
 */
_GLOBAL(book3e_secondary_core_init_tlb_set)
	li	r4,1
	b	generic_secondary_smp_init

_GLOBAL(book3e_secondary_core_init)
	mflr	r28

	/* Do we need to setup initial TLB entry ? */
	cmplwi	r4,0
	bne	2f

	/* Setup TLB for this core */
	bl	initial_tlb_book3e

	/* We can return from the above running at a different
	 * address, so recalculate r2 (TOC)
	 */
	bl	relative_toc

	/* Init global core bits */
2:	bl	init_core_book3e

	/* Init per-thread bits */
3:	bl	init_thread_book3e

	/* Return to common init code at proper virtual address.
	 *
	 * Due to various previous assumptions, we know we entered this
	 * function at either the final PAGE_OFFSET mapping or using a
	 * 1:1 mapping at 0, so we don't bother doing a complicated check
	 * here, we just ensure the return address has the right top bits.
	 *
	 * Note that if we ever want to be smarter about where we can be
	 * started from, we have to be careful that by the time we reach
	 * the code below we may already be running at a different location
	 * than the one we were called from since initial_tlb_book3e can
	 * have moved us already.
	 */
	cmpdi	cr0,r28,0
	blt	1f
	lis	r3,PAGE_OFFSET@highest
	sldi	r3,r3,32
	or	r28,r28,r3
1:	mtlr	r28
	blr

_GLOBAL(book3e_secondary_thread_init)
	mflr	r28
	b	3b

	.globl init_core_book3e
init_core_book3e:
	/* Establish the interrupt vector base */
	tovirt(r2,r2)
	LOAD_REG_ADDR(r3, interrupt_base_book3e)
	mtspr	SPRN_IVPR,r3
	sync
	blr

init_thread_book3e:
	lis	r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h
	mtspr	SPRN_EPCR,r3

	/* Make sure interrupts are off */
	wrteei	0

	/* disable all timers and clear out status */
	li	r3,0
	mtspr	SPRN_TCR,r3
	mfspr	r3,SPRN_TSR
	mtspr	SPRN_TSR,r3

	blr

_GLOBAL(__setup_base_ivors)
	SET_IVOR(0, 0x020) /* Critical Input */
	SET_IVOR(1, 0x000) /* Machine Check */
	SET_IVOR(2, 0x060) /* Data Storage */
	SET_IVOR(3, 0x080) /* Instruction Storage */
	SET_IVOR(4, 0x0a0) /* External Input */
	SET_IVOR(5, 0x0c0) /* Alignment */
	SET_IVOR(6, 0x0e0) /* Program */
	SET_IVOR(7, 0x100) /* FP Unavailable */
	SET_IVOR(8, 0x120) /* System Call */
	SET_IVOR(9, 0x140) /* Auxiliary Processor Unavailable */
	SET_IVOR(10, 0x160) /* Decrementer */
	SET_IVOR(11, 0x180) /* Fixed Interval Timer */
	SET_IVOR(12, 0x1a0) /* Watchdog Timer */
	SET_IVOR(13, 0x1c0) /* Data TLB Error */
	SET_IVOR(14, 0x1e0) /* Instruction TLB Error */
	SET_IVOR(15, 0x040) /* Debug */

	sync

	blr

_GLOBAL(setup_altivec_ivors)
	SET_IVOR(32, 0x200) /* AltiVec Unavailable */
	SET_IVOR(33, 0x220) /* AltiVec Assist */
	blr

_GLOBAL(setup_perfmon_ivor)
	SET_IVOR(35, 0x260) /* Performance Monitor */
	blr

_GLOBAL(setup_doorbell_ivors)
	SET_IVOR(36, 0x280) /* Processor Doorbell */
	SET_IVOR(37, 0x2a0) /* Processor Doorbell Crit */
	blr

_GLOBAL(setup_ehv_ivors)
	SET_IVOR(40, 0x300) /* Embedded Hypervisor System Call */
	SET_IVOR(41, 0x320) /* Embedded Hypervisor Privilege */
	SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */
	SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */
	blr

_GLOBAL(setup_lrat_ivor)
	SET_IVOR(42, 0x340) /* LRAT Error */
	blr