/*
 * Exception handling for Microblaze
 *
 * Rewritten interrupt handling
 *
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 *
 * uClinux customisation (C) 2005 John Williams
 *
 * MMU code derived from arch/ppc/kernel/head_4xx.S:
 *	Copyright (C) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 *		Initial PowerPC version.
 *	Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *		Rewritten for PReP
 *	Copyright (C) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *		Low-level exception handlers, MMU support, and rewrite.
 *	Copyright (C) 1997 Dan Malek <dmalek@jlc.net>
 *		PowerPC 8xx modifications.
 *	Copyright (C) 1998-1999 TiVo, Inc.
 *		PowerPC 403GCX modifications.
 *	Copyright (C) 1999 Grant Erickson <grant@lcse.umn.edu>
 *		PowerPC 403GCX/405GP modifications.
 *	Copyright 2000 MontaVista Software Inc.
 *		PPC405 modifications
 *	PowerPC 403GCX/405GP modifications.
 *	Author: MontaVista Software, Inc.
 *		frank_rowand@mvista.com or source@mvista.com
 *		debbie_chu@mvista.com
 *
 * Original code
 * Copyright (C) 2004 Xilinx, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

/*
 * Here are the handlers which don't require enabling translation
 * and calling other kernel code thus we can keep their design very simple
 * and do all processing in real mode.
All what they need is a valid current 43 * (that is an issue for the CONFIG_REGISTER_TASK_PTR case) 44 * This handlers use r3,r4,r5,r6 and optionally r[current] to work therefore 45 * these registers are saved/restored 46 * The handlers which require translation are in entry.S --KAA 47 * 48 * Microblaze HW Exception Handler 49 * - Non self-modifying exception handler for the following exception conditions 50 * - Unalignment 51 * - Instruction bus error 52 * - Data bus error 53 * - Illegal instruction opcode 54 * - Divide-by-zero 55 * 56 * - Privileged instruction exception (MMU) 57 * - Data storage exception (MMU) 58 * - Instruction storage exception (MMU) 59 * - Data TLB miss exception (MMU) 60 * - Instruction TLB miss exception (MMU) 61 * 62 * Note we disable interrupts during exception handling, otherwise we will 63 * possibly get multiple re-entrancy if interrupt handles themselves cause 64 * exceptions. JW 65 */ 66 67#include <asm/exceptions.h> 68#include <asm/unistd.h> 69#include <asm/page.h> 70 71#include <asm/entry.h> 72#include <asm/current.h> 73#include <linux/linkage.h> 74 75#include <asm/mmu.h> 76#include <asm/pgtable.h> 77#include <asm/signal.h> 78#include <asm/asm-offsets.h> 79 80/* Helpful Macros */ 81#define NUM_TO_REG(num) r ## num 82 83#ifdef CONFIG_MMU 84 #define RESTORE_STATE \ 85 lwi r5, r1, 0; \ 86 mts rmsr, r5; \ 87 nop; \ 88 lwi r3, r1, PT_R3; \ 89 lwi r4, r1, PT_R4; \ 90 lwi r5, r1, PT_R5; \ 91 lwi r6, r1, PT_R6; \ 92 lwi r11, r1, PT_R11; \ 93 lwi r31, r1, PT_R31; \ 94 lwi r1, r0, TOPHYS(r0_ram + 0); 95#endif /* CONFIG_MMU */ 96 97#define LWREG_NOP \ 98 bri ex_handler_unhandled; \ 99 nop; 100 101#define SWREG_NOP \ 102 bri ex_handler_unhandled; \ 103 nop; 104 105/* FIXME this is weird - for noMMU kernel is not possible to use brid 106 * instruction which can shorten executed time 107 */ 108 109/* r3 is the source */ 110#define R3_TO_LWREG_V(regnum) \ 111 swi r3, r1, 4 * regnum; \ 112 bri ex_handler_done; 113 114/* r3 is the source */ 
115#define R3_TO_LWREG(regnum) \ 116 or NUM_TO_REG (regnum), r0, r3; \ 117 bri ex_handler_done; 118 119/* r3 is the target */ 120#define SWREG_TO_R3_V(regnum) \ 121 lwi r3, r1, 4 * regnum; \ 122 bri ex_sw_tail; 123 124/* r3 is the target */ 125#define SWREG_TO_R3(regnum) \ 126 or r3, r0, NUM_TO_REG (regnum); \ 127 bri ex_sw_tail; 128 129#ifdef CONFIG_MMU 130 #define R3_TO_LWREG_VM_V(regnum) \ 131 brid ex_lw_end_vm; \ 132 swi r3, r7, 4 * regnum; 133 134 #define R3_TO_LWREG_VM(regnum) \ 135 brid ex_lw_end_vm; \ 136 or NUM_TO_REG (regnum), r0, r3; 137 138 #define SWREG_TO_R3_VM_V(regnum) \ 139 brid ex_sw_tail_vm; \ 140 lwi r3, r7, 4 * regnum; 141 142 #define SWREG_TO_R3_VM(regnum) \ 143 brid ex_sw_tail_vm; \ 144 or r3, r0, NUM_TO_REG (regnum); 145 146 /* Shift right instruction depending on available configuration */ 147 #if CONFIG_XILINX_MICROBLAZE0_USE_BARREL > 0 148 #define BSRLI(rD, rA, imm) \ 149 bsrli rD, rA, imm 150 #elif CONFIG_XILINX_MICROBLAZE0_USE_DIV > 0 151 #define BSRLI(rD, rA, imm) \ 152 ori rD, r0, (1 << imm); \ 153 idivu rD, rD, rA 154 #else 155 #define BSRLI(rD, rA, imm) BSRLI ## imm (rD, rA) 156 /* Only the used shift constants defined here - add more if needed */ 157 #define BSRLI2(rD, rA) \ 158 srl rD, rA; /* << 1 */ \ 159 srl rD, rD; /* << 2 */ 160 #define BSRLI10(rD, rA) \ 161 srl rD, rA; /* << 1 */ \ 162 srl rD, rD; /* << 2 */ \ 163 srl rD, rD; /* << 3 */ \ 164 srl rD, rD; /* << 4 */ \ 165 srl rD, rD; /* << 5 */ \ 166 srl rD, rD; /* << 6 */ \ 167 srl rD, rD; /* << 7 */ \ 168 srl rD, rD; /* << 8 */ \ 169 srl rD, rD; /* << 9 */ \ 170 srl rD, rD /* << 10 */ 171 #define BSRLI20(rD, rA) \ 172 BSRLI10(rD, rA); \ 173 BSRLI10(rD, rD) 174 #endif 175#endif /* CONFIG_MMU */ 176 177.extern other_exception_handler /* Defined in exception.c */ 178 179/* 180 * hw_exception_handler - Handler for exceptions 181 * 182 * Exception handler notes: 183 * - Handles all exceptions 184 * - Does not handle unaligned exceptions during load into r17, r1, r0. 
 * - Does not handle unaligned exceptions during store from r17 (cannot be
 *   done) and r1 (slows down common case)
 *
 *  Relevant register structures
 *
 *  EAR - |----|----|----|----|----|----|----|----|
 *      - <  ##   32 bit faulting address     ##  >
 *
 *  ESR - |----|----|----|----|----| - | - |-----|-----|
 *      -                            W   S   REG   EXC
 *
 *
 * STACK FRAME STRUCTURE (for NO_MMU)
 * ---------------------------------
 *
 *      +-------------+         + 0
 *      |     MSR     |
 *      +-------------+         + 4
 *      |     r1      |
 *      |      .      |
 *      |      .      |
 *      |      .      |
 *      |      .      |
 *      |     r18     |
 *      +-------------+         + 76
 *      |      .      |
 *      |      .      |
 *
 * The NO_MMU kernel uses the same r0_ram-pointed space - see vmlinux.lds.S -
 * for storing register values.  The old style stored the values on the stack,
 * but in case of a failure you lost the information about the registers.
 * Currently you can inspect the register values at a fixed place in memory.
 * Compared with the previous solution the speed should be the same.
 *
 * The MMU exception handler works differently from the no-MMU kernel.
 * It uses a jump table to dispatch on the exception cause.  This approach
 * suits the MMU kernel because the MMU-related exceptions are handled by
 * asm code in this file; everything else, except the unaligned exception,
 * is handled by C code.
224 */ 225 226/* 227 * every of these handlers is entered having R3/4/5/6/11/current saved on stack 228 * and clobbered so care should be taken to restore them if someone is going to 229 * return from exception 230 */ 231 232/* wrappers to restore state before coming to entry.S */ 233 234#ifdef CONFIG_MMU 235.section .rodata 236.align 4 237_MB_HW_ExceptionVectorTable: 238/* 0 - Undefined */ 239 .long TOPHYS(ex_handler_unhandled) 240/* 1 - Unaligned data access exception */ 241 .long TOPHYS(handle_unaligned_ex) 242/* 2 - Illegal op-code exception */ 243 .long TOPHYS(full_exception_trapw) 244/* 3 - Instruction bus error exception */ 245 .long TOPHYS(full_exception_trapw) 246/* 4 - Data bus error exception */ 247 .long TOPHYS(full_exception_trapw) 248/* 5 - Divide by zero exception */ 249 .long TOPHYS(full_exception_trapw) 250/* 6 - Floating point unit exception */ 251 .long TOPHYS(full_exception_trapw) 252/* 7 - Privileged instruction exception */ 253 .long TOPHYS(full_exception_trapw) 254/* 8 - 15 - Undefined */ 255 .long TOPHYS(ex_handler_unhandled) 256 .long TOPHYS(ex_handler_unhandled) 257 .long TOPHYS(ex_handler_unhandled) 258 .long TOPHYS(ex_handler_unhandled) 259 .long TOPHYS(ex_handler_unhandled) 260 .long TOPHYS(ex_handler_unhandled) 261 .long TOPHYS(ex_handler_unhandled) 262 .long TOPHYS(ex_handler_unhandled) 263/* 16 - Data storage exception */ 264 .long TOPHYS(handle_data_storage_exception) 265/* 17 - Instruction storage exception */ 266 .long TOPHYS(handle_instruction_storage_exception) 267/* 18 - Data TLB miss exception */ 268 .long TOPHYS(handle_data_tlb_miss_exception) 269/* 19 - Instruction TLB miss exception */ 270 .long TOPHYS(handle_instruction_tlb_miss_exception) 271/* 20 - 31 - Undefined */ 272 .long TOPHYS(ex_handler_unhandled) 273 .long TOPHYS(ex_handler_unhandled) 274 .long TOPHYS(ex_handler_unhandled) 275 .long TOPHYS(ex_handler_unhandled) 276 .long TOPHYS(ex_handler_unhandled) 277 .long TOPHYS(ex_handler_unhandled) 278 .long 
TOPHYS(ex_handler_unhandled) 279 .long TOPHYS(ex_handler_unhandled) 280 .long TOPHYS(ex_handler_unhandled) 281 .long TOPHYS(ex_handler_unhandled) 282 .long TOPHYS(ex_handler_unhandled) 283 .long TOPHYS(ex_handler_unhandled) 284#endif 285 286.global _hw_exception_handler 287.section .text 288.align 4 289.ent _hw_exception_handler 290_hw_exception_handler: 291#ifndef CONFIG_MMU 292 addik r1, r1, -(EX_HANDLER_STACK_SIZ); /* Create stack frame */ 293#else 294 swi r1, r0, TOPHYS(r0_ram + 0); /* GET_SP */ 295 /* Save date to kernel memory. Here is the problem 296 * when you came from user space */ 297 ori r1, r0, TOPHYS(r0_ram + 28); 298#endif 299 swi r3, r1, PT_R3 300 swi r4, r1, PT_R4 301 swi r5, r1, PT_R5 302 swi r6, r1, PT_R6 303 304#ifdef CONFIG_MMU 305 swi r11, r1, PT_R11 306 swi r31, r1, PT_R31 307 lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)) /* get saved current */ 308#endif 309 310 mfs r5, rmsr; 311 nop 312 swi r5, r1, 0; 313 mfs r4, resr 314 nop 315 mfs r3, rear; 316 nop 317 318#ifndef CONFIG_MMU 319 andi r5, r4, 0x1000; /* Check ESR[DS] */ 320 beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */ 321 mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ 322 nop 323not_in_delay_slot: 324 swi r17, r1, PT_R17 325#endif 326 327 andi r5, r4, 0x1F; /* Extract ESR[EXC] */ 328 329#ifdef CONFIG_MMU 330 /* Calculate exception vector offset = r5 << 2 */ 331 addk r6, r5, r5; /* << 1 */ 332 addk r6, r6, r6; /* << 2 */ 333 334#ifdef DEBUG 335/* counting which exception happen */ 336 lwi r5, r0, 0x200 + TOPHYS(r0_ram) 337 addi r5, r5, 1 338 swi r5, r0, 0x200 + TOPHYS(r0_ram) 339 lwi r5, r6, 0x200 + TOPHYS(r0_ram) 340 addi r5, r5, 1 341 swi r5, r6, 0x200 + TOPHYS(r0_ram) 342#endif 343/* end */ 344 /* Load the HW Exception vector */ 345 lwi r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable) 346 bra r6 347 348full_exception_trapw: 349 RESTORE_STATE 350 bri full_exception_trap 351#else 352 /* Exceptions enabled here. 
This will allow nested exceptions */ 353 mfs r6, rmsr; 354 nop 355 swi r6, r1, 0; /* RMSR_OFFSET */ 356 ori r6, r6, 0x100; /* Turn ON the EE bit */ 357 andi r6, r6, ~2; /* Disable interrupts */ 358 mts rmsr, r6; 359 nop 360 361 xori r6, r5, 1; /* 00001 = Unaligned Exception */ 362 /* Jump to unalignment exception handler */ 363 beqi r6, handle_unaligned_ex; 364 365handle_other_ex: /* Handle Other exceptions here */ 366 /* Save other volatiles before we make procedure calls below */ 367 swi r7, r1, PT_R7 368 swi r8, r1, PT_R8 369 swi r9, r1, PT_R9 370 swi r10, r1, PT_R10 371 swi r11, r1, PT_R11 372 swi r12, r1, PT_R12 373 swi r14, r1, PT_R14 374 swi r15, r1, PT_R15 375 swi r18, r1, PT_R18 376 377 or r5, r1, r0 378 andi r6, r4, 0x1F; /* Load ESR[EC] */ 379 lwi r7, r0, PER_CPU(KM) /* MS: saving current kernel mode to regs */ 380 swi r7, r1, PT_MODE 381 mfs r7, rfsr 382 nop 383 addk r8, r17, r0; /* Load exception address */ 384 bralid r15, full_exception; /* Branch to the handler */ 385 nop; 386 mts rfsr, r0; /* Clear sticky fsr */ 387 nop 388 389 /* 390 * Trigger execution of the signal handler by enabling 391 * interrupts and calling an invalid syscall. 392 */ 393 mfs r5, rmsr; 394 nop 395 ori r5, r5, 2; 396 mts rmsr, r5; /* enable interrupt */ 397 nop 398 addi r12, r0, __NR_syscalls; 399 brki r14, 0x08; 400 mfs r5, rmsr; /* disable interrupt */ 401 nop 402 andi r5, r5, ~2; 403 mts rmsr, r5; 404 nop 405 406 lwi r7, r1, PT_R7 407 lwi r8, r1, PT_R8 408 lwi r9, r1, PT_R9 409 lwi r10, r1, PT_R10 410 lwi r11, r1, PT_R11 411 lwi r12, r1, PT_R12 412 lwi r14, r1, PT_R14 413 lwi r15, r1, PT_R15 414 lwi r18, r1, PT_R18 415 416 bri ex_handler_done; /* Complete exception handling */ 417#endif 418 419/* 0x01 - Unaligned data access exception 420 * This occurs when a word access is not aligned on a word boundary, 421 * or when a 16-bit access is not aligned on a 16-bit boundary. 
422 * This handler perform the access, and returns, except for MMU when 423 * the unaligned address is last on a 4k page or the physical address is 424 * not found in the page table, in which case unaligned_data_trap is called. 425 */ 426handle_unaligned_ex: 427 /* Working registers already saved: R3, R4, R5, R6 428 * R4 = ESR 429 * R3 = EAR 430 */ 431#ifdef CONFIG_MMU 432 andi r6, r4, 0x1000 /* Check ESR[DS] */ 433 beqi r6, _no_delayslot /* Branch if ESR[DS] not set */ 434 mfs r17, rbtr; /* ESR[DS] set - return address in BTR */ 435 nop 436_no_delayslot: 437 /* jump to high level unaligned handler */ 438 RESTORE_STATE; 439 bri unaligned_data_trap 440#endif 441 andi r6, r4, 0x3E0; /* Mask and extract the register operand */ 442 srl r6, r6; /* r6 >> 5 */ 443 srl r6, r6; 444 srl r6, r6; 445 srl r6, r6; 446 srl r6, r6; 447 /* Store the register operand in a temporary location */ 448 sbi r6, r0, TOPHYS(ex_reg_op); 449 450 andi r6, r4, 0x400; /* Extract ESR[S] */ 451 bnei r6, ex_sw; 452ex_lw: 453 andi r6, r4, 0x800; /* Extract ESR[W] */ 454 beqi r6, ex_lhw; 455 lbui r5, r3, 0; /* Exception address in r3 */ 456 /* Load a word, byte-by-byte from destination address 457 and save it in tmp space */ 458 sbi r5, r0, TOPHYS(ex_tmp_data_loc_0); 459 lbui r5, r3, 1; 460 sbi r5, r0, TOPHYS(ex_tmp_data_loc_1); 461 lbui r5, r3, 2; 462 sbi r5, r0, TOPHYS(ex_tmp_data_loc_2); 463 lbui r5, r3, 3; 464 sbi r5, r0, TOPHYS(ex_tmp_data_loc_3); 465 /* Get the destination register value into r4 */ 466 lwi r4, r0, TOPHYS(ex_tmp_data_loc_0); 467 bri ex_lw_tail; 468ex_lhw: 469 lbui r5, r3, 0; /* Exception address in r3 */ 470 /* Load a half-word, byte-by-byte from destination 471 address and save it in tmp space */ 472 sbi r5, r0, TOPHYS(ex_tmp_data_loc_0); 473 lbui r5, r3, 1; 474 sbi r5, r0, TOPHYS(ex_tmp_data_loc_1); 475 /* Get the destination register value into r4 */ 476 lhui r4, r0, TOPHYS(ex_tmp_data_loc_0); 477ex_lw_tail: 478 /* Get the destination register number into r5 */ 479 lbui r5, 
r0, TOPHYS(ex_reg_op); 480 /* Form load_word jump table offset (lw_table + (8 * regnum)) */ 481 la r6, r0, TOPHYS(lw_table); 482 addk r5, r5, r5; 483 addk r5, r5, r5; 484 addk r5, r5, r5; 485 addk r5, r5, r6; 486 bra r5; 487ex_lw_end: /* Exception handling of load word, ends */ 488ex_sw: 489 /* Get the destination register number into r5 */ 490 lbui r5, r0, TOPHYS(ex_reg_op); 491 /* Form store_word jump table offset (sw_table + (8 * regnum)) */ 492 la r6, r0, TOPHYS(sw_table); 493 add r5, r5, r5; 494 add r5, r5, r5; 495 add r5, r5, r5; 496 add r5, r5, r6; 497 bra r5; 498ex_sw_tail: 499 mfs r6, resr; 500 nop 501 andi r6, r6, 0x800; /* Extract ESR[W] */ 502 beqi r6, ex_shw; 503 /* Get the word - delay slot */ 504 swi r4, r0, TOPHYS(ex_tmp_data_loc_0); 505 /* Store the word, byte-by-byte into destination address */ 506 lbui r4, r0, TOPHYS(ex_tmp_data_loc_0); 507 sbi r4, r3, 0; 508 lbui r4, r0, TOPHYS(ex_tmp_data_loc_1); 509 sbi r4, r3, 1; 510 lbui r4, r0, TOPHYS(ex_tmp_data_loc_2); 511 sbi r4, r3, 2; 512 lbui r4, r0, TOPHYS(ex_tmp_data_loc_3); 513 sbi r4, r3, 3; 514 bri ex_handler_done; 515 516ex_shw: 517 /* Store the lower half-word, byte-by-byte into destination address */ 518 swi r4, r0, TOPHYS(ex_tmp_data_loc_0); 519 lbui r4, r0, TOPHYS(ex_tmp_data_loc_2); 520 sbi r4, r3, 0; 521 lbui r4, r0, TOPHYS(ex_tmp_data_loc_3); 522 sbi r4, r3, 1; 523ex_sw_end: /* Exception handling of store word, ends. */ 524 525ex_handler_done: 526#ifndef CONFIG_MMU 527 lwi r5, r1, 0 /* RMSR */ 528 mts rmsr, r5 529 nop 530 lwi r3, r1, PT_R3 531 lwi r4, r1, PT_R4 532 lwi r5, r1, PT_R5 533 lwi r6, r1, PT_R6 534 lwi r17, r1, PT_R17 535 536 rted r17, 0 537 addik r1, r1, (EX_HANDLER_STACK_SIZ); /* Restore stack frame */ 538#else 539 RESTORE_STATE; 540 rted r17, 0 541 nop 542#endif 543 544#ifdef CONFIG_MMU 545 /* Exception vector entry code. This code runs with address translation 546 * turned off (i.e. using physical addresses). */ 547 548 /* Exception vectors. 
*/ 549 550 /* 0x10 - Data Storage Exception 551 * This happens for just a few reasons. U0 set (but we don't do that), 552 * or zone protection fault (user violation, write to protected page). 553 * If this is just an update of modified status, we do that quickly 554 * and exit. Otherwise, we call heavyweight functions to do the work. 555 */ 556 handle_data_storage_exception: 557 /* Working registers already saved: R3, R4, R5, R6 558 * R3 = ESR 559 */ 560 mfs r11, rpid 561 nop 562 /* If we are faulting a kernel address, we have to use the 563 * kernel page tables. 564 */ 565 ori r5, r0, CONFIG_KERNEL_START 566 cmpu r5, r3, r5 567 bgti r5, ex3 568 /* First, check if it was a zone fault (which means a user 569 * tried to access a kernel or read-protected page - always 570 * a SEGV). All other faults here must be stores, so no 571 * need to check ESR_S as well. */ 572 andi r4, r4, 0x800 /* ESR_Z - zone protection */ 573 bnei r4, ex2 574 575 ori r4, r0, swapper_pg_dir 576 mts rpid, r0 /* TLB will have 0 TID */ 577 nop 578 bri ex4 579 580 /* Get the PGD for the current thread. */ 581 ex3: 582 /* First, check if it was a zone fault (which means a user 583 * tried to access a kernel or read-protected page - always 584 * a SEGV). All other faults here must be stores, so no 585 * need to check ESR_S as well. 
*/ 586 andi r4, r4, 0x800 /* ESR_Z */ 587 bnei r4, ex2 588 /* get current task address */ 589 addi r4 ,CURRENT_TASK, TOPHYS(0); 590 lwi r4, r4, TASK_THREAD+PGDIR 591 ex4: 592 tophys(r4,r4) 593 BSRLI(r5,r3,20) /* Create L1 (pgdir/pmd) address */ 594 andi r5, r5, 0xffc 595/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */ 596 or r4, r4, r5 597 lwi r4, r4, 0 /* Get L1 entry */ 598 andi r5, r4, 0xfffff000 /* Extract L2 (pte) base address */ 599 beqi r5, ex2 /* Bail if no table */ 600 601 tophys(r5,r5) 602 BSRLI(r6,r3,10) /* Compute PTE address */ 603 andi r6, r6, 0xffc 604 andi r5, r5, 0xfffff003 605 or r5, r5, r6 606 lwi r4, r5, 0 /* Get Linux PTE */ 607 608 andi r6, r4, _PAGE_RW /* Is it writeable? */ 609 beqi r6, ex2 /* Bail if not */ 610 611 /* Update 'changed' */ 612 ori r4, r4, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE 613 swi r4, r5, 0 /* Update Linux page table */ 614 615 /* Most of the Linux PTE is ready to load into the TLB LO. 616 * We set ZSEL, where only the LS-bit determines user access. 617 * We set execute, because we don't have the granularity to 618 * properly set this at the page level (Linux problem). 619 * If shared is set, we cause a zero PID->TID load. 620 * Many of these bits are software only. Bits we don't set 621 * here we (properly should) assume have the appropriate value. 622 */ 623 andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */ 624 ori r4, r4, _PAGE_HWEXEC /* make it executable */ 625 626 /* find the TLB index that caused the fault. It has to be here*/ 627 mts rtlbsx, r3 628 nop 629 mfs r5, rtlbx /* DEBUG: TBD */ 630 nop 631 mts rtlblo, r4 /* Load TLB LO */ 632 nop 633 /* Will sync shadow TLBs */ 634 635 /* Done...restore registers and get out of here. */ 636 mts rpid, r11 637 nop 638 bri 4 639 640 RESTORE_STATE; 641 rted r17, 0 642 nop 643 ex2: 644 /* The bailout. Restore registers to pre-exception conditions 645 * and call the heavyweights to help us out. 
*/ 646 mts rpid, r11 647 nop 648 bri 4 649 RESTORE_STATE; 650 bri page_fault_data_trap 651 652 653 /* 0x11 - Instruction Storage Exception 654 * This is caused by a fetch from non-execute or guarded pages. */ 655 handle_instruction_storage_exception: 656 /* Working registers already saved: R3, R4, R5, R6 657 * R3 = ESR 658 */ 659 660 RESTORE_STATE; 661 bri page_fault_instr_trap 662 663 /* 0x12 - Data TLB Miss Exception 664 * As the name implies, translation is not in the MMU, so search the 665 * page tables and fix it. The only purpose of this function is to 666 * load TLB entries from the page table if they exist. 667 */ 668 handle_data_tlb_miss_exception: 669 /* Working registers already saved: R3, R4, R5, R6 670 * R3 = EAR, R4 = ESR 671 */ 672 mfs r11, rpid 673 nop 674 675 /* If we are faulting a kernel address, we have to use the 676 * kernel page tables. */ 677 ori r6, r0, CONFIG_KERNEL_START 678 cmpu r4, r3, r6 679 bgti r4, ex5 680 ori r4, r0, swapper_pg_dir 681 mts rpid, r0 /* TLB will have 0 TID */ 682 nop 683 bri ex6 684 685 /* Get the PGD for the current thread. */ 686 ex5: 687 /* get current task address */ 688 addi r4 ,CURRENT_TASK, TOPHYS(0); 689 lwi r4, r4, TASK_THREAD+PGDIR 690 ex6: 691 tophys(r4,r4) 692 BSRLI(r5,r3,20) /* Create L1 (pgdir/pmd) address */ 693 andi r5, r5, 0xffc 694/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */ 695 or r4, r4, r5 696 lwi r4, r4, 0 /* Get L1 entry */ 697 andi r5, r4, 0xfffff000 /* Extract L2 (pte) base address */ 698 beqi r5, ex7 /* Bail if no table */ 699 700 tophys(r5,r5) 701 BSRLI(r6,r3,10) /* Compute PTE address */ 702 andi r6, r6, 0xffc 703 andi r5, r5, 0xfffff003 704 or r5, r5, r6 705 lwi r4, r5, 0 /* Get Linux PTE */ 706 707 andi r6, r4, _PAGE_PRESENT 708 beqi r6, ex7 709 710 ori r4, r4, _PAGE_ACCESSED 711 swi r4, r5, 0 712 713 /* Most of the Linux PTE is ready to load into the TLB LO. 714 * We set ZSEL, where only the LS-bit determines user access. 
715 * We set execute, because we don't have the granularity to 716 * properly set this at the page level (Linux problem). 717 * If shared is set, we cause a zero PID->TID load. 718 * Many of these bits are software only. Bits we don't set 719 * here we (properly should) assume have the appropriate value. 720 */ 721 brid finish_tlb_load 722 andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */ 723 ex7: 724 /* The bailout. Restore registers to pre-exception conditions 725 * and call the heavyweights to help us out. 726 */ 727 mts rpid, r11 728 nop 729 bri 4 730 RESTORE_STATE; 731 bri page_fault_data_trap 732 733 /* 0x13 - Instruction TLB Miss Exception 734 * Nearly the same as above, except we get our information from 735 * different registers and bailout to a different point. 736 */ 737 handle_instruction_tlb_miss_exception: 738 /* Working registers already saved: R3, R4, R5, R6 739 * R3 = ESR 740 */ 741 mfs r11, rpid 742 nop 743 744 /* If we are faulting a kernel address, we have to use the 745 * kernel page tables. 746 */ 747 ori r4, r0, CONFIG_KERNEL_START 748 cmpu r4, r3, r4 749 bgti r4, ex8 750 ori r4, r0, swapper_pg_dir 751 mts rpid, r0 /* TLB will have 0 TID */ 752 nop 753 bri ex9 754 755 /* Get the PGD for the current thread. 
*/ 756 ex8: 757 /* get current task address */ 758 addi r4 ,CURRENT_TASK, TOPHYS(0); 759 lwi r4, r4, TASK_THREAD+PGDIR 760 ex9: 761 tophys(r4,r4) 762 BSRLI(r5,r3,20) /* Create L1 (pgdir/pmd) address */ 763 andi r5, r5, 0xffc 764/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */ 765 or r4, r4, r5 766 lwi r4, r4, 0 /* Get L1 entry */ 767 andi r5, r4, 0xfffff000 /* Extract L2 (pte) base address */ 768 beqi r5, ex10 /* Bail if no table */ 769 770 tophys(r5,r5) 771 BSRLI(r6,r3,10) /* Compute PTE address */ 772 andi r6, r6, 0xffc 773 andi r5, r5, 0xfffff003 774 or r5, r5, r6 775 lwi r4, r5, 0 /* Get Linux PTE */ 776 777 andi r6, r4, _PAGE_PRESENT 778 beqi r6, ex10 779 780 ori r4, r4, _PAGE_ACCESSED 781 swi r4, r5, 0 782 783 /* Most of the Linux PTE is ready to load into the TLB LO. 784 * We set ZSEL, where only the LS-bit determines user access. 785 * We set execute, because we don't have the granularity to 786 * properly set this at the page level (Linux problem). 787 * If shared is set, we cause a zero PID->TID load. 788 * Many of these bits are software only. Bits we don't set 789 * here we (properly should) assume have the appropriate value. 790 */ 791 brid finish_tlb_load 792 andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */ 793 ex10: 794 /* The bailout. Restore registers to pre-exception conditions 795 * and call the heavyweights to help us out. 796 */ 797 mts rpid, r11 798 nop 799 bri 4 800 RESTORE_STATE; 801 bri page_fault_instr_trap 802 803/* Both the instruction and data TLB miss get to this point to load the TLB. 804 * r3 - EA of fault 805 * r4 - TLB LO (info from Linux PTE) 806 * r5, r6 - available to use 807 * PID - loaded with proper value when we get here 808 * Upon exit, we reload everything and RFI. 809 * A common place to load the TLB. 810 */ 811 tlb_index: 812 .long 1 /* MS: storing last used tlb index */ 813 finish_tlb_load: 814 /* MS: load the last used TLB index. 
*/ 815 lwi r5, r0, TOPHYS(tlb_index) 816 addik r5, r5, 1 /* MS: inc tlb_index -> use next one */ 817 818/* MS: FIXME this is potential fault, because this is mask not count */ 819 andi r5, r5, (MICROBLAZE_TLB_SIZE-1) 820 ori r6, r0, 1 821 cmp r31, r5, r6 822 blti r31, ex12 823 addik r5, r6, 1 824 ex12: 825 /* MS: save back current TLB index */ 826 swi r5, r0, TOPHYS(tlb_index) 827 828 ori r4, r4, _PAGE_HWEXEC /* make it executable */ 829 mts rtlbx, r5 /* MS: save current TLB */ 830 nop 831 mts rtlblo, r4 /* MS: save to TLB LO */ 832 nop 833 834 /* Create EPN. This is the faulting address plus a static 835 * set of bits. These are size, valid, E, U0, and ensure 836 * bits 20 and 21 are zero. 837 */ 838 andi r3, r3, 0xfffff000 839 ori r3, r3, 0x0c0 840 mts rtlbhi, r3 /* Load TLB HI */ 841 nop 842 843 /* Done...restore registers and get out of here. */ 844 mts rpid, r11 845 nop 846 bri 4 847 RESTORE_STATE; 848 rted r17, 0 849 nop 850 851 /* extern void giveup_fpu(struct task_struct *prev) 852 * 853 * The MicroBlaze processor may have an FPU, so this should not just 854 * return: TBD. 855 */ 856 .globl giveup_fpu; 857 .align 4; 858 giveup_fpu: 859 bralid r15,0 /* TBD */ 860 nop 861 862 /* At present, this routine just hangs. - extern void abort(void) */ 863 .globl abort; 864 .align 4; 865 abort: 866 br r0 867 868 .globl set_context; 869 .align 4; 870 set_context: 871 mts rpid, r5 /* Shadow TLBs are automatically */ 872 nop 873 bri 4 /* flushed by changing PID */ 874 rtsd r15,8 875 nop 876 877#endif 878.end _hw_exception_handler 879 880#ifdef CONFIG_MMU 881/* Unaligned data access exception last on a 4k page for MMU. 882 * When this is called, we are in virtual mode with exceptions enabled 883 * and registers 1-13,15,17,18 saved. 884 * 885 * R3 = ESR 886 * R4 = EAR 887 * R7 = pointer to saved registers (struct pt_regs *regs) 888 * 889 * This handler perform the access, and returns via ret_from_exc. 
890 */ 891.global _unaligned_data_exception 892.ent _unaligned_data_exception 893_unaligned_data_exception: 894 andi r8, r3, 0x3E0; /* Mask and extract the register operand */ 895 BSRLI(r8,r8,2); /* r8 >> 2 = register operand * 8 */ 896 andi r6, r3, 0x400; /* Extract ESR[S] */ 897 bneid r6, ex_sw_vm; 898 andi r6, r3, 0x800; /* Extract ESR[W] - delay slot */ 899ex_lw_vm: 900 beqid r6, ex_lhw_vm; 901load1: lbui r5, r4, 0; /* Exception address in r4 - delay slot */ 902/* Load a word, byte-by-byte from destination address and save it in tmp space*/ 903 la r6, r0, ex_tmp_data_loc_0; 904 sbi r5, r6, 0; 905load2: lbui r5, r4, 1; 906 sbi r5, r6, 1; 907load3: lbui r5, r4, 2; 908 sbi r5, r6, 2; 909load4: lbui r5, r4, 3; 910 sbi r5, r6, 3; 911 brid ex_lw_tail_vm; 912/* Get the destination register value into r3 - delay slot */ 913 lwi r3, r6, 0; 914ex_lhw_vm: 915 /* Load a half-word, byte-by-byte from destination address and 916 * save it in tmp space */ 917 la r6, r0, ex_tmp_data_loc_0; 918 sbi r5, r6, 0; 919load5: lbui r5, r4, 1; 920 sbi r5, r6, 1; 921 lhui r3, r6, 0; /* Get the destination register value into r3 */ 922ex_lw_tail_vm: 923 /* Form load_word jump table offset (lw_table_vm + (8 * regnum)) */ 924 addik r5, r8, lw_table_vm; 925 bra r5; 926ex_lw_end_vm: /* Exception handling of load word, ends */ 927 brai ret_from_exc; 928ex_sw_vm: 929/* Form store_word jump table offset (sw_table_vm + (8 * regnum)) */ 930 addik r5, r8, sw_table_vm; 931 bra r5; 932ex_sw_tail_vm: 933 la r5, r0, ex_tmp_data_loc_0; 934 beqid r6, ex_shw_vm; 935 swi r3, r5, 0; /* Get the word - delay slot */ 936 /* Store the word, byte-by-byte into destination address */ 937 lbui r3, r5, 0; 938store1: sbi r3, r4, 0; 939 lbui r3, r5, 1; 940store2: sbi r3, r4, 1; 941 lbui r3, r5, 2; 942store3: sbi r3, r4, 2; 943 lbui r3, r5, 3; 944 brid ret_from_exc; 945store4: sbi r3, r4, 3; /* Delay slot */ 946ex_shw_vm: 947 /* Store the lower half-word, byte-by-byte into destination address */ 948 lbui r3, r5, 2; 
949store5: sbi r3, r4, 0; 950 lbui r3, r5, 3; 951 brid ret_from_exc; 952store6: sbi r3, r4, 1; /* Delay slot */ 953ex_sw_end_vm: /* Exception handling of store word, ends. */ 954 955/* We have to prevent cases that get/put_user macros get unaligned pointer 956 * to bad page area. We have to find out which origin instruction caused it 957 * and called fixup for that origin instruction not instruction in unaligned 958 * handler */ 959ex_unaligned_fixup: 960 ori r5, r7, 0 /* setup pointer to pt_regs */ 961 lwi r6, r7, PT_PC; /* faulting address is one instruction above */ 962 addik r6, r6, -4 /* for finding proper fixup */ 963 swi r6, r7, PT_PC; /* a save back it to PT_PC */ 964 addik r7, r0, SIGSEGV 965 /* call bad_page_fault for finding aligned fixup, fixup address is saved 966 * in PT_PC which is used as return address from exception */ 967 la r15, r0, ret_from_exc-8 /* setup return address */ 968 brid bad_page_fault 969 nop 970 971/* We prevent all load/store because it could failed any attempt to access */ 972.section __ex_table,"a"; 973 .word load1,ex_unaligned_fixup; 974 .word load2,ex_unaligned_fixup; 975 .word load3,ex_unaligned_fixup; 976 .word load4,ex_unaligned_fixup; 977 .word load5,ex_unaligned_fixup; 978 .word store1,ex_unaligned_fixup; 979 .word store2,ex_unaligned_fixup; 980 .word store3,ex_unaligned_fixup; 981 .word store4,ex_unaligned_fixup; 982 .word store5,ex_unaligned_fixup; 983 .word store6,ex_unaligned_fixup; 984.previous; 985.end _unaligned_data_exception 986#endif /* CONFIG_MMU */ 987 988.global ex_handler_unhandled 989ex_handler_unhandled: 990/* FIXME add handle function for unhandled exception - dump register */ 991 bri 0 992 993/* 994 * hw_exception_handler Jump Table 995 * - Contains code snippets for each register that caused the unalign exception 996 * - Hence exception handler is NOT self-modifying 997 * - Separate table for load exceptions and store exceptions. 
998 * - Each table is of size: (8 * 32) = 256 bytes 999 */ 1000 1001.section .text 1002.align 4 1003lw_table: 1004lw_r0: R3_TO_LWREG (0); 1005lw_r1: LWREG_NOP; 1006lw_r2: R3_TO_LWREG (2); 1007lw_r3: R3_TO_LWREG_V (3); 1008lw_r4: R3_TO_LWREG_V (4); 1009lw_r5: R3_TO_LWREG_V (5); 1010lw_r6: R3_TO_LWREG_V (6); 1011lw_r7: R3_TO_LWREG (7); 1012lw_r8: R3_TO_LWREG (8); 1013lw_r9: R3_TO_LWREG (9); 1014lw_r10: R3_TO_LWREG (10); 1015lw_r11: R3_TO_LWREG (11); 1016lw_r12: R3_TO_LWREG (12); 1017lw_r13: R3_TO_LWREG (13); 1018lw_r14: R3_TO_LWREG (14); 1019lw_r15: R3_TO_LWREG (15); 1020lw_r16: R3_TO_LWREG (16); 1021lw_r17: LWREG_NOP; 1022lw_r18: R3_TO_LWREG (18); 1023lw_r19: R3_TO_LWREG (19); 1024lw_r20: R3_TO_LWREG (20); 1025lw_r21: R3_TO_LWREG (21); 1026lw_r22: R3_TO_LWREG (22); 1027lw_r23: R3_TO_LWREG (23); 1028lw_r24: R3_TO_LWREG (24); 1029lw_r25: R3_TO_LWREG (25); 1030lw_r26: R3_TO_LWREG (26); 1031lw_r27: R3_TO_LWREG (27); 1032lw_r28: R3_TO_LWREG (28); 1033lw_r29: R3_TO_LWREG (29); 1034lw_r30: R3_TO_LWREG (30); 1035#ifdef CONFIG_MMU 1036lw_r31: R3_TO_LWREG_V (31); 1037#else 1038lw_r31: R3_TO_LWREG (31); 1039#endif 1040 1041sw_table: 1042sw_r0: SWREG_TO_R3 (0); 1043sw_r1: SWREG_NOP; 1044sw_r2: SWREG_TO_R3 (2); 1045sw_r3: SWREG_TO_R3_V (3); 1046sw_r4: SWREG_TO_R3_V (4); 1047sw_r5: SWREG_TO_R3_V (5); 1048sw_r6: SWREG_TO_R3_V (6); 1049sw_r7: SWREG_TO_R3 (7); 1050sw_r8: SWREG_TO_R3 (8); 1051sw_r9: SWREG_TO_R3 (9); 1052sw_r10: SWREG_TO_R3 (10); 1053sw_r11: SWREG_TO_R3 (11); 1054sw_r12: SWREG_TO_R3 (12); 1055sw_r13: SWREG_TO_R3 (13); 1056sw_r14: SWREG_TO_R3 (14); 1057sw_r15: SWREG_TO_R3 (15); 1058sw_r16: SWREG_TO_R3 (16); 1059sw_r17: SWREG_NOP; 1060sw_r18: SWREG_TO_R3 (18); 1061sw_r19: SWREG_TO_R3 (19); 1062sw_r20: SWREG_TO_R3 (20); 1063sw_r21: SWREG_TO_R3 (21); 1064sw_r22: SWREG_TO_R3 (22); 1065sw_r23: SWREG_TO_R3 (23); 1066sw_r24: SWREG_TO_R3 (24); 1067sw_r25: SWREG_TO_R3 (25); 1068sw_r26: SWREG_TO_R3 (26); 1069sw_r27: SWREG_TO_R3 (27); 1070sw_r28: SWREG_TO_R3 (28); 1071sw_r29: 
SWREG_TO_R3 (29); 1072sw_r30: SWREG_TO_R3 (30); 1073#ifdef CONFIG_MMU 1074sw_r31: SWREG_TO_R3_V (31); 1075#else 1076sw_r31: SWREG_TO_R3 (31); 1077#endif 1078 1079#ifdef CONFIG_MMU 1080lw_table_vm: 1081lw_r0_vm: R3_TO_LWREG_VM (0); 1082lw_r1_vm: R3_TO_LWREG_VM_V (1); 1083lw_r2_vm: R3_TO_LWREG_VM_V (2); 1084lw_r3_vm: R3_TO_LWREG_VM_V (3); 1085lw_r4_vm: R3_TO_LWREG_VM_V (4); 1086lw_r5_vm: R3_TO_LWREG_VM_V (5); 1087lw_r6_vm: R3_TO_LWREG_VM_V (6); 1088lw_r7_vm: R3_TO_LWREG_VM_V (7); 1089lw_r8_vm: R3_TO_LWREG_VM_V (8); 1090lw_r9_vm: R3_TO_LWREG_VM_V (9); 1091lw_r10_vm: R3_TO_LWREG_VM_V (10); 1092lw_r11_vm: R3_TO_LWREG_VM_V (11); 1093lw_r12_vm: R3_TO_LWREG_VM_V (12); 1094lw_r13_vm: R3_TO_LWREG_VM_V (13); 1095lw_r14_vm: R3_TO_LWREG_VM (14); 1096lw_r15_vm: R3_TO_LWREG_VM_V (15); 1097lw_r16_vm: R3_TO_LWREG_VM (16); 1098lw_r17_vm: R3_TO_LWREG_VM_V (17); 1099lw_r18_vm: R3_TO_LWREG_VM_V (18); 1100lw_r19_vm: R3_TO_LWREG_VM (19); 1101lw_r20_vm: R3_TO_LWREG_VM (20); 1102lw_r21_vm: R3_TO_LWREG_VM (21); 1103lw_r22_vm: R3_TO_LWREG_VM (22); 1104lw_r23_vm: R3_TO_LWREG_VM (23); 1105lw_r24_vm: R3_TO_LWREG_VM (24); 1106lw_r25_vm: R3_TO_LWREG_VM (25); 1107lw_r26_vm: R3_TO_LWREG_VM (26); 1108lw_r27_vm: R3_TO_LWREG_VM (27); 1109lw_r28_vm: R3_TO_LWREG_VM (28); 1110lw_r29_vm: R3_TO_LWREG_VM (29); 1111lw_r30_vm: R3_TO_LWREG_VM (30); 1112lw_r31_vm: R3_TO_LWREG_VM_V (31); 1113 1114sw_table_vm: 1115sw_r0_vm: SWREG_TO_R3_VM (0); 1116sw_r1_vm: SWREG_TO_R3_VM_V (1); 1117sw_r2_vm: SWREG_TO_R3_VM_V (2); 1118sw_r3_vm: SWREG_TO_R3_VM_V (3); 1119sw_r4_vm: SWREG_TO_R3_VM_V (4); 1120sw_r5_vm: SWREG_TO_R3_VM_V (5); 1121sw_r6_vm: SWREG_TO_R3_VM_V (6); 1122sw_r7_vm: SWREG_TO_R3_VM_V (7); 1123sw_r8_vm: SWREG_TO_R3_VM_V (8); 1124sw_r9_vm: SWREG_TO_R3_VM_V (9); 1125sw_r10_vm: SWREG_TO_R3_VM_V (10); 1126sw_r11_vm: SWREG_TO_R3_VM_V (11); 1127sw_r12_vm: SWREG_TO_R3_VM_V (12); 1128sw_r13_vm: SWREG_TO_R3_VM_V (13); 1129sw_r14_vm: SWREG_TO_R3_VM (14); 1130sw_r15_vm: SWREG_TO_R3_VM_V (15); 1131sw_r16_vm: SWREG_TO_R3_VM 
(16); 1132sw_r17_vm: SWREG_TO_R3_VM_V (17); 1133sw_r18_vm: SWREG_TO_R3_VM_V (18); 1134sw_r19_vm: SWREG_TO_R3_VM (19); 1135sw_r20_vm: SWREG_TO_R3_VM (20); 1136sw_r21_vm: SWREG_TO_R3_VM (21); 1137sw_r22_vm: SWREG_TO_R3_VM (22); 1138sw_r23_vm: SWREG_TO_R3_VM (23); 1139sw_r24_vm: SWREG_TO_R3_VM (24); 1140sw_r25_vm: SWREG_TO_R3_VM (25); 1141sw_r26_vm: SWREG_TO_R3_VM (26); 1142sw_r27_vm: SWREG_TO_R3_VM (27); 1143sw_r28_vm: SWREG_TO_R3_VM (28); 1144sw_r29_vm: SWREG_TO_R3_VM (29); 1145sw_r30_vm: SWREG_TO_R3_VM (30); 1146sw_r31_vm: SWREG_TO_R3_VM_V (31); 1147#endif /* CONFIG_MMU */ 1148 1149/* Temporary data structures used in the handler */ 1150.section .data 1151.align 4 1152ex_tmp_data_loc_0: 1153 .byte 0 1154ex_tmp_data_loc_1: 1155 .byte 0 1156ex_tmp_data_loc_2: 1157 .byte 0 1158ex_tmp_data_loc_3: 1159 .byte 0 1160ex_reg_op: 1161 .byte 0 1162