/*
 * Exception handling for Microblaze
 *
 * Rewritten interrupt handling
 *
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 *
 * uClinux customisation (C) 2005 John Williams
 *
 * MMU code derived from arch/ppc/kernel/head_4xx.S:
 *	Copyright (C) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 *		Initial PowerPC version.
 *	Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *		Rewritten for PReP
 *	Copyright (C) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *		Low-level exception handlers, MMU support, and rewrite.
 *	Copyright (C) 1997 Dan Malek <dmalek@jlc.net>
 *		PowerPC 8xx modifications.
 *	Copyright (C) 1998-1999 TiVo, Inc.
 *		PowerPC 403GCX modifications.
 *	Copyright (C) 1999 Grant Erickson <grant@lcse.umn.edu>
 *		PowerPC 403GCX/405GP modifications.
 *	Copyright 2000 MontaVista Software Inc.
 *		PPC405 modifications
 *	PowerPC 403GCX/405GP modifications.
 *	Author: MontaVista Software, Inc.
 *		frank_rowand@mvista.com or source@mvista.com
 *		debbie_chu@mvista.com
 *
 * Original code
 * Copyright (C) 2004 Xilinx, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

/*
 * Here are the handlers which don't require enabling translation
 * and calling other kernel code, thus we can keep their design very simple
 * and do all processing in real mode. All they need is a valid current
 * (that is an issue for the CONFIG_REGISTER_TASK_PTR case).
 * These handlers use r3, r4, r5, r6 and optionally r[current] to work,
 * therefore these registers are saved/restored.
 * The handlers which require translation are in entry.S --KAA
 *
 * Microblaze HW Exception Handler
 * - Non self-modifying exception handler for the following exception conditions
 *   - Unalignment
 *   - Instruction bus error
 *   - Data bus error
 *   - Illegal instruction opcode
 *   - Divide-by-zero
 *
 *   - Privileged instruction exception (MMU)
 *   - Data storage exception (MMU)
 *   - Instruction storage exception (MMU)
 *   - Data TLB miss exception (MMU)
 *   - Instruction TLB miss exception (MMU)
 *
 * Note we disable interrupts during exception handling, otherwise we will
 * possibly get multiple re-entrancy if interrupt handlers themselves cause
 * exceptions. JW
 */

#include <asm/exceptions.h>
#include <asm/unistd.h>
#include <asm/page.h>

#include <asm/entry.h>
#include <asm/current.h>
#include <linux/linkage.h>

#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/signal.h>
#include <asm/registers.h>
#include <asm/asm-offsets.h>

#undef DEBUG

/* Helpful Macros */
#define NUM_TO_REG(num)		r ## num

#ifdef CONFIG_MMU
	#define RESTORE_STATE			\
		lwi	r5, r1, 0;		\
		mts	rmsr, r5;		\
		nop;				\
		lwi	r3, r1, PT_R3;		\
		lwi	r4, r1, PT_R4;		\
		lwi	r5, r1, PT_R5;		\
		lwi	r6, r1, PT_R6;		\
		lwi	r11, r1, PT_R11;	\
		lwi	r31, r1, PT_R31;	\
		lwi	r1, r1, PT_R1;
#endif /* CONFIG_MMU */

#define LWREG_NOP			\
	bri	ex_handler_unhandled;	\
	nop;

#define SWREG_NOP			\
	bri	ex_handler_unhandled;	\
	nop;

/* FIXME this is weird - for the noMMU kernel it is not possible to use the
 * brid instruction, which could shorten the execution time
 */

/* r3 is the source */
#define R3_TO_LWREG_V(regnum)			\
	swi	r3, r1, 4 * regnum;		\
	bri	ex_handler_done;

/* r3 is the source */
#define R3_TO_LWREG(regnum)			\
	or	NUM_TO_REG (regnum), r0, r3;	\
	bri	ex_handler_done;

/* r3 is the target */
#define SWREG_TO_R3_V(regnum)			\
	lwi	r3, r1, 4 * regnum;		\
	bri	ex_sw_tail;

/* r3 is the target */
#define SWREG_TO_R3(regnum)			\
	or	r3, r0, NUM_TO_REG (regnum);	\
	bri	ex_sw_tail;

#ifdef CONFIG_MMU
	#define R3_TO_LWREG_VM_V(regnum)	\
		brid	ex_lw_end_vm;		\
		swi	r3, r7, 4 * regnum;

	#define R3_TO_LWREG_VM(regnum)		\
		brid	ex_lw_end_vm;		\
		or	NUM_TO_REG (regnum), r0, r3;

	#define SWREG_TO_R3_VM_V(regnum)	\
		brid	ex_sw_tail_vm;		\
		lwi	r3, r7, 4 * regnum;

	#define SWREG_TO_R3_VM(regnum)		\
		brid	ex_sw_tail_vm;		\
		or	r3, r0, NUM_TO_REG (regnum);

	/* Shift right instruction depending on available configuration */
	#if CONFIG_XILINX_MICROBLAZE0_USE_BARREL > 0
	#define BSRLI(rD, rA, imm)	\
		bsrli	rD, rA, imm
	#else
	#define BSRLI(rD, rA, imm)	BSRLI ## imm (rD, rA)
	/* Only the used shift constants defined here - add more if needed */
	#define BSRLI2(rD, rA)				\
		srl rD, rA;		/* << 1 */	\
		srl rD, rD;		/* << 2 */
	#define BSRLI10(rD, rA)				\
		srl rD, rA;		/* << 1 */	\
		srl rD, rD;		/* << 2 */	\
		srl rD, rD;		/* << 3 */	\
		srl rD, rD;		/* << 4 */	\
		srl rD, rD;		/* << 5 */	\
		srl rD, rD;		/* << 6 */	\
		srl rD, rD;		/* << 7 */	\
		srl rD, rD;		/* << 8 */	\
		srl rD, rD;		/* << 9 */	\
		srl rD, rD		/* << 10 */
	#define BSRLI20(rD, rA)		\
		BSRLI10(rD, rA);	\
		BSRLI10(rD, rD)
	#endif
#endif /* CONFIG_MMU */

.extern other_exception_handler /* Defined in exception.c */

/*
 * hw_exception_handler - Handler for exceptions
 *
 * Exception handler notes:
 * - Handles all exceptions
 * - Does not handle unaligned exceptions during load into r17, r1, r0.
 * - Does not handle unaligned exceptions during store from r17 (cannot be
 *   done) and r1 (slows down common case)
 *
 *  Relevant register structures
 *
 *  EAR - |----|----|----|----|----|----|----|----|
 *      - <  ##   32 bit faulting address     ##  >
 *
 *  ESR - |----|----|----|----|----| - | - |-----|-----|
 *      -                            W   S   REG   EXC
 *
 *
 * STACK FRAME STRUCTURE (for NO_MMU)
 * ---------------------------------
 *
 *      +-------------+         + 0
 *      |     MSR     |
 *      +-------------+         + 4
 *      |     r1      |
 *      |      .      |
 *      |      .      |
 *      |      .      |
 *      |      .      |
 *      |     r18     |
 *      +-------------+         + 76
 *      |      .      |
 *      |      .      |
 *
 * The MMU kernel uses the 'pt_pool_space' area for storing the register
 * values. The noMMU style was to store the values on the stack, but in case
 * of a failure the information about the registers was lost. Now the register
 * values can be found at a fixed place in memory. Compared with the previous
 * solution the speed should be the same.
 *
 * The MMU exception handling differs from the noMMU kernel. The exception
 * handler uses a jump table to direct what happens next. For the MMU kernel
 * this approach is better because the MMU-related exceptions are handled by
 * asm code in this file. By comparison, everything except the unaligned
 * exception is handled by C code.
 */

/*
 * Each of these handlers is entered having R3/4/5/6/11/current saved on stack
 * and clobbered, so care should be taken to restore them if someone is going
 * to return from the exception
 */

/* wrappers to restore state before coming to entry.S */
#ifdef CONFIG_MMU
.section .data
.align 4
pt_pool_space:
	.space	PT_SIZE

#ifdef DEBUG
/* Create space for exception counting. */
.section .data
.global exception_debug_table
.align 4
exception_debug_table:
	/* Look at the exception vector table. There are 32 exceptions * word size */
	.space	(32 * 4)
#endif /* DEBUG */

.section .rodata
.align 4
_MB_HW_ExceptionVectorTable:
/* 0 - Undefined */
	.long	TOPHYS(ex_handler_unhandled)
/* 1 - Unaligned data access exception */
	.long	TOPHYS(handle_unaligned_ex)
/* 2 - Illegal op-code exception */
	.long	TOPHYS(full_exception_trapw)
/* 3 - Instruction bus error exception */
	.long	TOPHYS(full_exception_trapw)
/* 4 - Data bus error exception */
	.long	TOPHYS(full_exception_trapw)
/* 5 - Divide by zero exception */
	.long	TOPHYS(full_exception_trapw)
/* 6 - Floating point unit exception */
	.long	TOPHYS(full_exception_trapw)
/* 7 - Privileged instruction exception */
	.long	TOPHYS(full_exception_trapw)
/* 8 - 15 - Undefined */
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
/* 16 - Data storage exception */
	.long	TOPHYS(handle_data_storage_exception)
/* 17 - Instruction storage exception */
	.long	TOPHYS(handle_instruction_storage_exception)
/* 18 - Data TLB miss exception */
	.long	TOPHYS(handle_data_tlb_miss_exception)
/* 19 - Instruction TLB miss exception */
	.long	TOPHYS(handle_instruction_tlb_miss_exception)
/* 20 - 31 - Undefined */
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
	.long	TOPHYS(ex_handler_unhandled)
#endif
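
/* Example of the dispatch below (for illustration): a data TLB miss raises
 * ESR[EXC] = 18, so the entry code computes the vector offset 18 * 4 = 72,
 * loads that word from _MB_HW_ExceptionVectorTable and branches to
 * handle_data_tlb_miss_exception.
 */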

.global _hw_exception_handler
.section .text
.align 4
.ent _hw_exception_handler
_hw_exception_handler:
#ifndef CONFIG_MMU
	addik	r1, r1, -(EX_HANDLER_STACK_SIZ);	/* Create stack frame */
#else
	swi	r1, r0, TOPHYS(pt_pool_space + PT_R1);	/* GET_SP */
	/* Save data to kernel memory. Here is the problem
	 * when you came from user space */
	ori	r1, r0, TOPHYS(pt_pool_space);
#endif
	swi	r3, r1, PT_R3
	swi	r4, r1, PT_R4
	swi	r5, r1, PT_R5
	swi	r6, r1, PT_R6

#ifdef CONFIG_MMU
	swi	r11, r1, PT_R11
	swi	r31, r1, PT_R31
	lwi	r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE))	/* get saved current */
#endif

	mfs	r5, rmsr;
	nop
	swi	r5, r1, 0;
	mfs	r4, resr
	nop
	mfs	r3, rear;
	nop

#ifndef CONFIG_MMU
	andi	r5, r4, 0x1000;		/* Check ESR[DS] */
	beqi	r5, not_in_delay_slot;	/* Branch if ESR[DS] not set */
	mfs	r17, rbtr;	/* ESR[DS] set - return address in BTR */
	nop
not_in_delay_slot:
	swi	r17, r1, PT_R17
#endif

	andi	r5, r4, 0x1F;		/* Extract ESR[EXC] */

#ifdef CONFIG_MMU
	/* Calculate exception vector offset = r5 << 2 */
	addk	r6, r5, r5;		/* << 1 */
	addk	r6, r6, r6;		/* << 2 */

#ifdef DEBUG
/* count which exceptions happen */
	lwi	r5, r0, TOPHYS(exception_debug_table)
	addi	r5, r5, 1
	swi	r5, r0, TOPHYS(exception_debug_table)
	lwi	r5, r6, TOPHYS(exception_debug_table)
	addi	r5, r5, 1
	swi	r5, r6, TOPHYS(exception_debug_table)
#endif
/* end */
	/* Load the HW Exception vector */
	lwi	r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable)
	bra	r6

full_exception_trapw:
	RESTORE_STATE
	bri	full_exception_trap
#else
	/* Exceptions enabled here. This will allow nested exceptions */
	mfs	r6, rmsr;
	nop
	swi	r6, r1, 0;		/* RMSR_OFFSET */
	ori	r6, r6, 0x100;		/* Turn ON the EE bit */
	andi	r6, r6, ~2;		/* Disable interrupts */
	mts	rmsr, r6;
	nop

	xori	r6, r5, 1;		/* 00001 = Unaligned Exception */
	/* Jump to unalignment exception handler */
	beqi	r6, handle_unaligned_ex;

handle_other_ex: /* Handle Other exceptions here */
	/* Save other volatiles before we make procedure calls below */
	swi	r7, r1, PT_R7
	swi	r8, r1, PT_R8
	swi	r9, r1, PT_R9
	swi	r10, r1, PT_R10
	swi	r11, r1, PT_R11
	swi	r12, r1, PT_R12
	swi	r14, r1, PT_R14
	swi	r15, r1, PT_R15
	swi	r18, r1, PT_R18

	or	r5, r1, r0
	andi	r6, r4, 0x1F;		/* Load ESR[EC] */
	lwi	r7, r0, PER_CPU(KM)	/* MS: saving current kernel mode to regs */
	swi	r7, r1, PT_MODE
	mfs	r7, rfsr
	nop
	addk	r8, r17, r0;		/* Load exception address */
	bralid	r15, full_exception;	/* Branch to the handler */
	nop;
	mts	rfsr, r0;		/* Clear sticky fsr */
	nop

	/*
	 * Trigger execution of the signal handler by enabling
	 * interrupts and calling an invalid syscall.
	 */
	mfs	r5, rmsr;
	nop
	ori	r5, r5, 2;
	mts	rmsr, r5;		/* enable interrupt */
	nop
	addi	r12, r0, __NR_syscalls;
	brki	r14, 0x08;
	mfs	r5, rmsr;		/* disable interrupt */
	nop
	andi	r5, r5, ~2;
	mts	rmsr, r5;
	nop

	lwi	r7, r1, PT_R7
	lwi	r8, r1, PT_R8
	lwi	r9, r1, PT_R9
	lwi	r10, r1, PT_R10
	lwi	r11, r1, PT_R11
	lwi	r12, r1, PT_R12
	lwi	r14, r1, PT_R14
	lwi	r15, r1, PT_R15
	lwi	r18, r1, PT_R18

	bri	ex_handler_done;	/* Complete exception handling */
#endif

/* 0x01 - Unaligned data access exception
 * This occurs when a word access is not aligned on a word boundary,
 * or when a 16-bit access is not aligned on a 16-bit boundary.
 * This handler performs the access and returns, except for the MMU case when
 * the unaligned address is last on a 4k page or the physical address is
 * not found in the page table, in which case unaligned_data_trap is called.
 */
handle_unaligned_ex:
	/* Working registers already saved: R3, R4, R5, R6
	 * R4 = ESR
	 * R3 = EAR
	 */
#ifdef CONFIG_MMU
	andi	r6, r4, 0x1000			/* Check ESR[DS] */
	beqi	r6, _no_delayslot		/* Branch if ESR[DS] not set */
	mfs	r17, rbtr;	/* ESR[DS] set - return address in BTR */
	nop
_no_delayslot:
	/* jump to high level unaligned handler */
	RESTORE_STATE;
	bri	unaligned_data_trap
#endif
	andi	r6, r4, 0x3E0;	/* Mask and extract the register operand */
	srl	r6, r6;		/* r6 >> 5 */
	srl	r6, r6;
	srl	r6, r6;
	srl	r6, r6;
	srl	r6, r6;
	/* Store the register operand in a temporary location */
	sbi	r6, r0, TOPHYS(ex_reg_op);

	andi	r6, r4, 0x400;	/* Extract ESR[S] */
	bnei	r6, ex_sw;
ex_lw:
	andi	r6, r4, 0x800;	/* Extract ESR[W] */
	beqi	r6, ex_lhw;
	lbui	r5, r3, 0;	/* Exception address in r3 */
	/* Load a word, byte-by-byte from destination address
		and save it in tmp space */
	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_0);
	lbui	r5, r3, 1;
	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_1);
	lbui	r5, r3, 2;
	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_2);
	lbui	r5, r3, 3;
	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_3);
	/* Get the destination register value into r4 */
	lwi	r4, r0, TOPHYS(ex_tmp_data_loc_0);
	bri	ex_lw_tail;
ex_lhw:
	lbui	r5, r3, 0;	/* Exception address in r3 */
	/* Load a half-word, byte-by-byte from destination
		address and save it in tmp space */
	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_0);
	lbui	r5, r3, 1;
	sbi	r5, r0, TOPHYS(ex_tmp_data_loc_1);
	/* Get the destination register value into r4 */
	lhui	r4, r0, TOPHYS(ex_tmp_data_loc_0);
ex_lw_tail:
	/* Get the destination register number into r5 */
	lbui	r5, r0, TOPHYS(ex_reg_op);
	/* Form load_word jump table offset (lw_table + (8 * regnum)) */
	addik	r6, r0, TOPHYS(lw_table);
	addk	r5, r5, r5;
	addk	r5, r5, r5;
	addk	r5, r5, r5;
	addk	r5, r5, r6;
	bra	r5;
ex_lw_end: /* Exception handling of load word, ends */
ex_sw:
	/* Get the destination register number into r5 */
	lbui	r5, r0, TOPHYS(ex_reg_op);
	/* Form store_word jump table offset (sw_table + (8 * regnum)) */
	addik	r6, r0, TOPHYS(sw_table);
	add	r5, r5, r5;
	add	r5, r5, r5;
	add	r5, r5, r5;
	add	r5, r5, r6;
	bra	r5;
ex_sw_tail:
	mfs	r6, resr;
	nop
	andi	r6, r6, 0x800;	/* Extract ESR[W] */
	beqi	r6, ex_shw;
	/* Get the word - delay slot */
	swi	r4, r0, TOPHYS(ex_tmp_data_loc_0);
	/* Store the word, byte-by-byte into destination address */
	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_0);
	sbi	r4, r3, 0;
	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_1);
	sbi	r4, r3, 1;
	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_2);
	sbi	r4, r3, 2;
	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_3);
	sbi	r4, r3, 3;
	bri	ex_handler_done;

ex_shw:
	/* Store the lower half-word, byte-by-byte into destination address */
	swi	r4, r0, TOPHYS(ex_tmp_data_loc_0);
	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_2);
	sbi	r4, r3, 0;
	lbui	r4, r0, TOPHYS(ex_tmp_data_loc_3);
	sbi	r4, r3, 1;
ex_sw_end: /* Exception handling of store word, ends. */
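
/* Worked example for the jump tables used above (for illustration): an
 * unaligned word store such as "swi r20, ..." reports ESR[S] = 1, ESR[W] = 1
 * and the register operand 20 in the bits selected by the 0x3E0 mask.
 * (ESR & 0x3E0) >> 5 gives 20, and since every jump table entry is two 4-byte
 * instructions (8 bytes), the handler branches to sw_table + 8 * 20, i.e. the
 * sw_r20 snippet.
 */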

ex_handler_done:
#ifndef CONFIG_MMU
	lwi	r5, r1, 0 /* RMSR */
	mts	rmsr, r5
	nop
	lwi	r3, r1, PT_R3
	lwi	r4, r1, PT_R4
	lwi	r5, r1, PT_R5
	lwi	r6, r1, PT_R6
	lwi	r17, r1, PT_R17

	rted	r17, 0
	addik	r1, r1, (EX_HANDLER_STACK_SIZ); /* Restore stack frame */
#else
	RESTORE_STATE;
	rted	r17, 0
	nop
#endif

#ifdef CONFIG_MMU
	/* Exception vector entry code. This code runs with address translation
	 * turned off (i.e. using physical addresses). */

	/* Exception vectors. */

	/* 0x10 - Data Storage Exception
	 * This happens for just a few reasons. U0 set (but we don't do that),
	 * or zone protection fault (user violation, write to protected page).
	 * If this is just an update of modified status, we do that quickly
	 * and exit. Otherwise, we call heavyweight functions to do the work.
	 */
	handle_data_storage_exception:
		/* Working registers already saved: R3, R4, R5, R6
		 * R3 = ESR
		 */
		mfs	r11, rpid
		nop
		/* If we are faulting a kernel address, we have to use the
		 * kernel page tables.
		 */
		ori	r5, r0, CONFIG_KERNEL_START
		cmpu	r5, r3, r5
		bgti	r5, ex3
		/* First, check if it was a zone fault (which means a user
		 * tried to access a kernel or read-protected page - always
		 * a SEGV). All other faults here must be stores, so no
		 * need to check ESR_S as well. */
		andi	r4, r4, ESR_DIZ		/* ESR_Z - zone protection */
		bnei	r4, ex2

		ori	r4, r0, swapper_pg_dir
		mts	rpid, r0		/* TLB will have 0 TID */
		nop
		bri	ex4

		/* Get the PGD for the current thread. */
	ex3:
		/* First, check if it was a zone fault (which means a user
		 * tried to access a kernel or read-protected page - always
		 * a SEGV). All other faults here must be stores, so no
		 * need to check ESR_S as well. */
		andi	r4, r4, ESR_DIZ		/* ESR_Z */
		bnei	r4, ex2
		/* get current task address */
		addi	r4, CURRENT_TASK, TOPHYS(0);
		lwi	r4, r4, TASK_THREAD+PGDIR
	ex4:
		tophys(r4,r4)
		/* Create L1 (pgdir/pmd) address */
		BSRLI(r5,r3, PGDIR_SHIFT - 2)
		andi	r5, r5, PAGE_SIZE - 4
/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
		or	r4, r4, r5
		lwi	r4, r4, 0		/* Get L1 entry */
		andi	r5, r4, PAGE_MASK	/* Extract L2 (pte) base address */
		beqi	r5, ex2			/* Bail if no table */

		tophys(r5,r5)
		BSRLI(r6,r3,PTE_SHIFT)		/* Compute PTE address */
		andi	r6, r6, PAGE_SIZE - 4
		or	r5, r5, r6
		lwi	r4, r5, 0		/* Get Linux PTE */

		andi	r6, r4, _PAGE_RW	/* Is it writeable? */
		beqi	r6, ex2			/* Bail if not */

		/* Update 'changed' */
		ori	r4, r4, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
		swi	r4, r5, 0		/* Update Linux page table */

		/* Most of the Linux PTE is ready to load into the TLB LO.
		 * We set ZSEL, where only the LS-bit determines user access.
		 * We set execute, because we don't have the granularity to
		 * properly set this at the page level (Linux problem).
		 * If shared is set, we cause a zero PID->TID load.
		 * Many of these bits are software only. Bits we don't set
		 * here we (properly should) assume have the appropriate value.
		 */
/* Ignore memory coherent, just LSB on ZSEL is used + EX/WR */
		andi	r4, r4, PAGE_MASK | TLB_EX | TLB_WR | \
						TLB_ZSEL(1) | TLB_ATTR_MASK
		ori	r4, r4, _PAGE_HWEXEC	/* make it executable */

		/* find the TLB index that caused the fault. It has to be here */
		mts	rtlbsx, r3
		nop
		mfs	r5, rtlbx		/* DEBUG: TBD */
		nop
		mts	rtlblo, r4		/* Load TLB LO */
		nop
						/* Will sync shadow TLBs */

		/* Done...restore registers and get out of here. */
		mts	rpid, r11
		nop
		bri	4

		RESTORE_STATE;
		rted	r17, 0
		nop
	ex2:
		/* The bailout. Restore registers to pre-exception conditions
		 * and call the heavyweights to help us out. */
		mts	rpid, r11
		nop
		bri	4
		RESTORE_STATE;
		bri	page_fault_data_trap


	/* 0x11 - Instruction Storage Exception
	 * This is caused by a fetch from non-execute or guarded pages. */
	handle_instruction_storage_exception:
		/* Working registers already saved: R3, R4, R5, R6
		 * R3 = ESR
		 */

		RESTORE_STATE;
		bri	page_fault_instr_trap

	/* 0x12 - Data TLB Miss Exception
	 * As the name implies, translation is not in the MMU, so search the
	 * page tables and fix it. The only purpose of this function is to
	 * load TLB entries from the page table if they exist.
	 */
	handle_data_tlb_miss_exception:
		/* Working registers already saved: R3, R4, R5, R6
		 * R3 = EAR, R4 = ESR
		 */
		mfs	r11, rpid
		nop

		/* If we are faulting a kernel address, we have to use the
		 * kernel page tables. */
		ori	r6, r0, CONFIG_KERNEL_START
		cmpu	r4, r3, r6
		bgti	r4, ex5
		ori	r4, r0, swapper_pg_dir
		mts	rpid, r0		/* TLB will have 0 TID */
		nop
		bri	ex6

		/* Get the PGD for the current thread. */
	ex5:
		/* get current task address */
		addi	r4, CURRENT_TASK, TOPHYS(0);
		lwi	r4, r4, TASK_THREAD+PGDIR
	ex6:
		tophys(r4,r4)
		/* Create L1 (pgdir/pmd) address */
		BSRLI(r5,r3, PGDIR_SHIFT - 2)
		andi	r5, r5, PAGE_SIZE - 4
/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
		or	r4, r4, r5
		lwi	r4, r4, 0		/* Get L1 entry */
		andi	r5, r4, PAGE_MASK	/* Extract L2 (pte) base address */
		beqi	r5, ex7			/* Bail if no table */

		tophys(r5,r5)
		BSRLI(r6,r3,PTE_SHIFT)		/* Compute PTE address */
		andi	r6, r6, PAGE_SIZE - 4
		or	r5, r5, r6
		lwi	r4, r5, 0		/* Get Linux PTE */

		andi	r6, r4, _PAGE_PRESENT
		beqi	r6, ex7

		ori	r4, r4, _PAGE_ACCESSED
		swi	r4, r5, 0

		/* Most of the Linux PTE is ready to load into the TLB LO.
		 * We set ZSEL, where only the LS-bit determines user access.
		 * We set execute, because we don't have the granularity to
		 * properly set this at the page level (Linux problem).
		 * If shared is set, we cause a zero PID->TID load.
		 * Many of these bits are software only. Bits we don't set
		 * here we (properly should) assume have the appropriate value.
		 */
		brid	finish_tlb_load
		andi	r4, r4, PAGE_MASK | TLB_EX | TLB_WR | \
						TLB_ZSEL(1) | TLB_ATTR_MASK
	ex7:
		/* The bailout. Restore registers to pre-exception conditions
		 * and call the heavyweights to help us out.
		 */
		mts	rpid, r11
		nop
		bri	4
		RESTORE_STATE;
		bri	page_fault_data_trap

	/* 0x13 - Instruction TLB Miss Exception
	 * Nearly the same as above, except we get our information from
	 * different registers and bailout to a different point.
	 */
	handle_instruction_tlb_miss_exception:
		/* Working registers already saved: R3, R4, R5, R6
		 * R3 = ESR
		 */
		mfs	r11, rpid
		nop

		/* If we are faulting a kernel address, we have to use the
		 * kernel page tables.
		 */
		ori	r4, r0, CONFIG_KERNEL_START
		cmpu	r4, r3, r4
		bgti	r4, ex8
		ori	r4, r0, swapper_pg_dir
		mts	rpid, r0		/* TLB will have 0 TID */
		nop
		bri	ex9

		/* Get the PGD for the current thread. */
	ex8:
		/* get current task address */
		addi	r4, CURRENT_TASK, TOPHYS(0);
		lwi	r4, r4, TASK_THREAD+PGDIR
	ex9:
		tophys(r4,r4)
		/* Create L1 (pgdir/pmd) address */
		BSRLI(r5,r3, PGDIR_SHIFT - 2)
		andi	r5, r5, PAGE_SIZE - 4
/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
		or	r4, r4, r5
		lwi	r4, r4, 0		/* Get L1 entry */
		andi	r5, r4, PAGE_MASK	/* Extract L2 (pte) base address */
		beqi	r5, ex10		/* Bail if no table */

		tophys(r5,r5)
		BSRLI(r6,r3,PTE_SHIFT)		/* Compute PTE address */
		andi	r6, r6, PAGE_SIZE - 4
		or	r5, r5, r6
		lwi	r4, r5, 0		/* Get Linux PTE */

		andi	r6, r4, _PAGE_PRESENT
		beqi	r6, ex10

		ori	r4, r4, _PAGE_ACCESSED
		swi	r4, r5, 0

		/* Most of the Linux PTE is ready to load into the TLB LO.
		 * We set ZSEL, where only the LS-bit determines user access.
		 * We set execute, because we don't have the granularity to
		 * properly set this at the page level (Linux problem).
		 * If shared is set, we cause a zero PID->TID load.
		 * Many of these bits are software only. Bits we don't set
		 * here we (properly should) assume have the appropriate value.
		 */
		brid	finish_tlb_load
		andi	r4, r4, PAGE_MASK | TLB_EX | TLB_WR | \
						TLB_ZSEL(1) | TLB_ATTR_MASK
	ex10:
		/* The bailout. Restore registers to pre-exception conditions
		 * and call the heavyweights to help us out.
		 */
		mts	rpid, r11
		nop
		bri	4
		RESTORE_STATE;
		bri	page_fault_instr_trap

/* Both the instruction and data TLB miss get to this point to load the TLB.
 *	r3 - EA of fault
 *	r4 - TLB LO (info from Linux PTE)
 *	r5, r6 - available to use
 *	PID - loaded with proper value when we get here
 *	Upon exit, we reload everything and RFI.
 * A common place to load the TLB.
 */
.section .data
.align 4
.global tlb_skip
	tlb_skip:
		.long	MICROBLAZE_TLB_SKIP
	tlb_index:
		/* MS: storing last used tlb index */
		.long	MICROBLAZE_TLB_SIZE/2
.previous
	finish_tlb_load:
		/* MS: load the last used TLB index. */
		lwi	r5, r0, TOPHYS(tlb_index)
		addik	r5, r5, 1 /* MS: inc tlb_index -> use next one */

/* MS: FIXME this is a potential fault, because this is a mask, not a count */
		andi	r5, r5, MICROBLAZE_TLB_SIZE - 1
		ori	r6, r0, 1
		cmp	r31, r5, r6
		blti	r31, ex12
		lwi	r5, r0, TOPHYS(tlb_skip)
	ex12:
		/* MS: save back current TLB index */
		swi	r5, r0, TOPHYS(tlb_index)

		ori	r4, r4, _PAGE_HWEXEC	/* make it executable */
		mts	rtlbx, r5		/* MS: save current TLB */
		nop
		mts	rtlblo, r4		/* MS: save to TLB LO */
		nop

		/* Create EPN. This is the faulting address plus a static
		 * set of bits. These are size, valid, E, U0, and ensure
		 * bits 20 and 21 are zero.
		 */
		andi	r3, r3, PAGE_MASK
#ifdef CONFIG_MICROBLAZE_64K_PAGES
		ori	r3, r3, TLB_VALID | TLB_PAGESZ(PAGESZ_64K)
#elif CONFIG_MICROBLAZE_16K_PAGES
		ori	r3, r3, TLB_VALID | TLB_PAGESZ(PAGESZ_16K)
#else
		ori	r3, r3, TLB_VALID | TLB_PAGESZ(PAGESZ_4K)
#endif
		mts	rtlbhi, r3		/* Load TLB HI */
		nop

		/* Done...restore registers and get out of here. */
		mts	rpid, r11
		nop
		bri	4
		RESTORE_STATE;
		rted	r17, 0
		nop

	/* extern void giveup_fpu(struct task_struct *prev)
	 *
	 * The MicroBlaze processor may have an FPU, so this should not just
	 * return: TBD.
	 */
	.globl giveup_fpu;
	.align 4;
	giveup_fpu:
		bralid	r15,0 /* TBD */
		nop

	/* At present, this routine just hangs. - extern void abort(void) */
	.globl abort;
	.align 4;
	abort:
		br	r0

	.globl set_context;
	.align	4;
	set_context:
		mts	rpid, r5	/* Shadow TLBs are automatically */
		nop
		bri	4		/* flushed by changing PID */
		rtsd	r15,8
		nop

#endif
.end _hw_exception_handler

#ifdef CONFIG_MMU
/* Unaligned data access exception last on a 4k page for MMU.
 * When this is called, we are in virtual mode with exceptions enabled
 * and registers 1-13,15,17,18 saved.
 *
 * R3 = ESR
 * R4 = EAR
 * R7 = pointer to saved registers (struct pt_regs *regs)
 *
 * This handler performs the access, and returns via ret_from_exc.
 */
.global _unaligned_data_exception
.ent _unaligned_data_exception
_unaligned_data_exception:
	andi	r8, r3, 0x3E0;	/* Mask and extract the register operand */
	BSRLI(r8,r8,2);		/* r8 >> 2 = register operand * 8 */
	andi	r6, r3, 0x400;	/* Extract ESR[S] */
	bneid	r6, ex_sw_vm;
	andi	r6, r3, 0x800;	/* Extract ESR[W] - delay slot */
ex_lw_vm:
	beqid	r6, ex_lhw_vm;
load1:	lbui	r5, r4, 0;	/* Exception address in r4 - delay slot */
/* Load a word, byte-by-byte from destination address and save it in tmp space*/
	addik	r6, r0, ex_tmp_data_loc_0;
	sbi	r5, r6, 0;
load2:	lbui	r5, r4, 1;
	sbi	r5, r6, 1;
load3:	lbui	r5, r4, 2;
	sbi	r5, r6, 2;
load4:	lbui	r5, r4, 3;
	sbi	r5, r6, 3;
	brid	ex_lw_tail_vm;
/* Get the destination register value into r3 - delay slot */
	lwi	r3, r6, 0;
ex_lhw_vm:
	/* Load a half-word, byte-by-byte from destination address and
	 * save it in tmp space */
	addik	r6, r0, ex_tmp_data_loc_0;
	sbi	r5, r6, 0;
load5:	lbui	r5, r4, 1;
	sbi	r5, r6, 1;
	lhui	r3, r6, 0;	/* Get the destination register value into r3 */
ex_lw_tail_vm:
	/* Form load_word jump table offset (lw_table_vm + (8 * regnum)) */
	addik	r5, r8, lw_table_vm;
	bra	r5;
ex_lw_end_vm:			/* Exception handling of load word, ends */
	brai	ret_from_exc;
ex_sw_vm:
/* Form store_word jump table offset (sw_table_vm + (8 * regnum)) */
	addik	r5, r8, sw_table_vm;
	bra	r5;
ex_sw_tail_vm:
	addik	r5, r0, ex_tmp_data_loc_0;
	beqid	r6, ex_shw_vm;
	swi	r3, r5, 0;	/* Get the word - delay slot */
	/* Store the word, byte-by-byte into destination address */
	lbui	r3, r5, 0;
store1:	sbi	r3, r4, 0;
	lbui	r3, r5, 1;
store2:	sbi	r3, r4, 1;
	lbui	r3, r5, 2;
store3:	sbi	r3, r4, 2;
	lbui	r3, r5, 3;
	brid	ret_from_exc;
store4:	sbi	r3, r4, 3;	/* Delay slot */
ex_shw_vm:
	/* Store the lower half-word, byte-by-byte into destination address */
#ifdef __MICROBLAZEEL__
	lbui	r3, r5, 0;
store5:	sbi	r3, r4, 0;
	lbui	r3, r5, 1;
	brid	ret_from_exc;
store6:	sbi	r3, r4, 1;	/* Delay slot */
#else
	lbui	r3, r5, 2;
store5:	sbi	r3, r4, 0;
	lbui	r3, r5, 3;
	brid	ret_from_exc;
store6:	sbi	r3, r4, 1;	/* Delay slot */
#endif

ex_sw_end_vm:			/* Exception handling of store word, ends. */

/* We have to prevent the case where the get/put_user macros get an unaligned
 * pointer to a bad page area. We have to find out which original instruction
 * caused it and call the fixup for that original instruction, not for the
 * instruction in the unaligned handler.
 */
ex_unaligned_fixup:
	ori	r5, r7, 0 /* setup pointer to pt_regs */
	lwi	r6, r7, PT_PC; /* faulting address is one instruction above */
	addik	r6, r6, -4 /* for finding proper fixup */
	swi	r6, r7, PT_PC; /* and save it back to PT_PC */
	addik	r7, r0, SIGSEGV
	/* call bad_page_fault for finding aligned fixup, fixup address is saved
	 * in PT_PC which is used as return address from exception */
	addik	r15, r0, ret_from_exc-8 /* setup return address */
	brid	bad_page_fault
	nop

/* We list all loads/stores here because any of these accesses could fail */
.section __ex_table,"a";
	.word	load1,ex_unaligned_fixup;
	.word	load2,ex_unaligned_fixup;
	.word	load3,ex_unaligned_fixup;
	.word	load4,ex_unaligned_fixup;
	.word	load5,ex_unaligned_fixup;
	.word	store1,ex_unaligned_fixup;
	.word	store2,ex_unaligned_fixup;
	.word	store3,ex_unaligned_fixup;
	.word	store4,ex_unaligned_fixup;
	.word	store5,ex_unaligned_fixup;
	.word	store6,ex_unaligned_fixup;
.previous;
.end _unaligned_data_exception
#endif /* CONFIG_MMU */

.global ex_handler_unhandled
ex_handler_unhandled:
/* FIXME add handle function for unhandled exception - dump registers */
	bri 0

/*
 * hw_exception_handler Jump Table
 * - Contains code snippets for each register that caused the unaligned
 *   exception
 * - Hence the exception handler is NOT self-modifying
 * - Separate table for load exceptions and store exceptions.
 * - Each table is of size: (8 * 32) = 256 bytes
 */

.section .text
.align 4
lw_table:
lw_r0:		R3_TO_LWREG	(0);
lw_r1:		LWREG_NOP;
lw_r2:		R3_TO_LWREG	(2);
lw_r3:		R3_TO_LWREG_V	(3);
lw_r4:		R3_TO_LWREG_V	(4);
lw_r5:		R3_TO_LWREG_V	(5);
lw_r6:		R3_TO_LWREG_V	(6);
lw_r7:		R3_TO_LWREG	(7);
lw_r8:		R3_TO_LWREG	(8);
lw_r9:		R3_TO_LWREG	(9);
lw_r10:		R3_TO_LWREG	(10);
lw_r11:		R3_TO_LWREG	(11);
lw_r12:		R3_TO_LWREG	(12);
lw_r13:		R3_TO_LWREG	(13);
lw_r14:		R3_TO_LWREG	(14);
lw_r15:		R3_TO_LWREG	(15);
lw_r16:		R3_TO_LWREG	(16);
lw_r17:		LWREG_NOP;
lw_r18:		R3_TO_LWREG	(18);
lw_r19:		R3_TO_LWREG	(19);
lw_r20:		R3_TO_LWREG	(20);
lw_r21:		R3_TO_LWREG	(21);
lw_r22:		R3_TO_LWREG	(22);
lw_r23:		R3_TO_LWREG	(23);
lw_r24:		R3_TO_LWREG	(24);
lw_r25:		R3_TO_LWREG	(25);
lw_r26:		R3_TO_LWREG	(26);
lw_r27:		R3_TO_LWREG	(27);
lw_r28:		R3_TO_LWREG	(28);
lw_r29:		R3_TO_LWREG	(29);
lw_r30:		R3_TO_LWREG	(30);
#ifdef CONFIG_MMU
lw_r31:		R3_TO_LWREG_V	(31);
#else
lw_r31:		R3_TO_LWREG	(31);
#endif

sw_table:
sw_r0:		SWREG_TO_R3	(0);
sw_r1:		SWREG_NOP;
sw_r2:		SWREG_TO_R3	(2);
sw_r3:		SWREG_TO_R3_V	(3);
sw_r4:		SWREG_TO_R3_V	(4);
sw_r5:		SWREG_TO_R3_V	(5);
sw_r6:		SWREG_TO_R3_V	(6);
sw_r7:		SWREG_TO_R3	(7);
sw_r8:		SWREG_TO_R3	(8);
sw_r9:		SWREG_TO_R3	(9);
sw_r10:		SWREG_TO_R3	(10);
sw_r11:		SWREG_TO_R3	(11);
sw_r12:		SWREG_TO_R3	(12);
sw_r13:		SWREG_TO_R3	(13);
sw_r14:		SWREG_TO_R3	(14);
sw_r15:		SWREG_TO_R3	(15);
sw_r16:		SWREG_TO_R3	(16);
sw_r17:		SWREG_NOP;
sw_r18:		SWREG_TO_R3	(18);
sw_r19:		SWREG_TO_R3	(19);
sw_r20:		SWREG_TO_R3	(20);
sw_r21:		SWREG_TO_R3	(21);
sw_r22:		SWREG_TO_R3	(22);
sw_r23:		SWREG_TO_R3	(23);
sw_r24:		SWREG_TO_R3	(24);
sw_r25:		SWREG_TO_R3	(25);
sw_r26:		SWREG_TO_R3	(26);
sw_r27:		SWREG_TO_R3	(27);
sw_r28:		SWREG_TO_R3	(28);
sw_r29:		SWREG_TO_R3	(29);
sw_r30:		SWREG_TO_R3	(30);
#ifdef CONFIG_MMU
sw_r31:		SWREG_TO_R3_V	(31);
#else
sw_r31:		SWREG_TO_R3	(31);
#endif

#ifdef CONFIG_MMU
lw_table_vm:
lw_r0_vm:	R3_TO_LWREG_VM		(0);
lw_r1_vm:	R3_TO_LWREG_VM_V	(1);
lw_r2_vm:	R3_TO_LWREG_VM_V	(2);
lw_r3_vm:	R3_TO_LWREG_VM_V	(3);
lw_r4_vm:	R3_TO_LWREG_VM_V	(4);
lw_r5_vm:	R3_TO_LWREG_VM_V	(5);
lw_r6_vm:	R3_TO_LWREG_VM_V	(6);
lw_r7_vm:	R3_TO_LWREG_VM_V	(7);
lw_r8_vm:	R3_TO_LWREG_VM_V	(8);
lw_r9_vm:	R3_TO_LWREG_VM_V	(9);
lw_r10_vm:	R3_TO_LWREG_VM_V	(10);
lw_r11_vm:	R3_TO_LWREG_VM_V	(11);
lw_r12_vm:	R3_TO_LWREG_VM_V	(12);
lw_r13_vm:	R3_TO_LWREG_VM_V	(13);
lw_r14_vm:	R3_TO_LWREG_VM_V	(14);
lw_r15_vm:	R3_TO_LWREG_VM_V	(15);
lw_r16_vm:	R3_TO_LWREG_VM_V	(16);
lw_r17_vm:	R3_TO_LWREG_VM_V	(17);
lw_r18_vm:	R3_TO_LWREG_VM_V	(18);
lw_r19_vm:	R3_TO_LWREG_VM_V	(19);
lw_r20_vm:	R3_TO_LWREG_VM_V	(20);
lw_r21_vm:	R3_TO_LWREG_VM_V	(21);
lw_r22_vm:	R3_TO_LWREG_VM_V	(22);
lw_r23_vm:	R3_TO_LWREG_VM_V	(23);
lw_r24_vm:	R3_TO_LWREG_VM_V	(24);
lw_r25_vm:	R3_TO_LWREG_VM_V	(25);
lw_r26_vm:	R3_TO_LWREG_VM_V	(26);
lw_r27_vm:	R3_TO_LWREG_VM_V	(27);
lw_r28_vm:	R3_TO_LWREG_VM_V	(28);
lw_r29_vm:	R3_TO_LWREG_VM_V	(29);
lw_r30_vm:	R3_TO_LWREG_VM_V	(30);
lw_r31_vm:	R3_TO_LWREG_VM_V	(31);

sw_table_vm:
sw_r0_vm:	SWREG_TO_R3_VM		(0);
sw_r1_vm:	SWREG_TO_R3_VM_V	(1);
sw_r2_vm:	SWREG_TO_R3_VM_V	(2);
sw_r3_vm:	SWREG_TO_R3_VM_V	(3);
sw_r4_vm:	SWREG_TO_R3_VM_V	(4);
sw_r5_vm:	SWREG_TO_R3_VM_V	(5);
sw_r6_vm:	SWREG_TO_R3_VM_V	(6);
sw_r7_vm:	SWREG_TO_R3_VM_V	(7);
sw_r8_vm:	SWREG_TO_R3_VM_V	(8);
sw_r9_vm:	SWREG_TO_R3_VM_V	(9);
sw_r10_vm:	SWREG_TO_R3_VM_V	(10);
sw_r11_vm:	SWREG_TO_R3_VM_V	(11);
sw_r12_vm:	SWREG_TO_R3_VM_V	(12);
sw_r13_vm:	SWREG_TO_R3_VM_V	(13);
sw_r14_vm:	SWREG_TO_R3_VM_V	(14);
sw_r15_vm:	SWREG_TO_R3_VM_V	(15);
sw_r16_vm:	SWREG_TO_R3_VM_V	(16);
sw_r17_vm:	SWREG_TO_R3_VM_V	(17);
sw_r18_vm:	SWREG_TO_R3_VM_V	(18);
sw_r19_vm:	SWREG_TO_R3_VM_V	(19);
sw_r20_vm:	SWREG_TO_R3_VM_V	(20);
sw_r21_vm:	SWREG_TO_R3_VM_V	(21);
sw_r22_vm:	SWREG_TO_R3_VM_V	(22);
sw_r23_vm:	SWREG_TO_R3_VM_V	(23);
sw_r24_vm:	SWREG_TO_R3_VM_V	(24);
sw_r25_vm:	SWREG_TO_R3_VM_V	(25);
sw_r26_vm:	SWREG_TO_R3_VM_V	(26);
sw_r27_vm:	SWREG_TO_R3_VM_V	(27);
sw_r28_vm:	SWREG_TO_R3_VM_V	(28);
sw_r29_vm:	SWREG_TO_R3_VM_V	(29);
sw_r30_vm:	SWREG_TO_R3_VM_V	(30);
sw_r31_vm:	SWREG_TO_R3_VM_V	(31);
#endif /* CONFIG_MMU */

/* Temporary data structures used in the handler */
.section .data
.align 4
ex_tmp_data_loc_0:
	.byte 0
ex_tmp_data_loc_1:
	.byte 0
ex_tmp_data_loc_2:
	.byte 0
ex_tmp_data_loc_3:
	.byte 0
ex_reg_op:
	.byte 0