/*
 * arch/sh/kernel/cpu/sh3/entry.S
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2003 - 2006  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpu/mmu_context.h>
#include <asm/unistd.h>

! NOTE:
! GNU as (as of 2.9.1) changes bf/s into bt/s and bra when the branch
! target is too far away, but this causes an illegal slot exception.

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * NOTE: This code uses a convention that instructions in the delay slot
 * of a transfer-control instruction are indented by an extra space, thus:
 *
 *	jmp	@k0		! control-transfer instruction
 *	 ldc	k1, ssr		! delay slot
 *
 * Stack layout in 'ret_from_syscall':
 *	ptrace needs to have all regs on the stack.
 *	If the order here is changed, it needs to be
 *	updated in ptrace.c and ptrace.h
 *
 *	r0
 *	...
 *	r15 = stack pointer
 *	spc
 *	pr
 *	ssr
 *	gbr
 *	mach
 *	macl
 *	syscall #
 *
 */
#if defined(CONFIG_KGDB_NMI)
NMI_VEC = 0x1c0			! Must catch early for debounce
#endif

/* Offsets to the stack */
OFF_R0	=  0		/* Return value. New ABI also arg4 */
OFF_R1	=  4		/* New ABI: arg5 */
OFF_R2	=  8		/* New ABI: arg6 */
OFF_R3	=  12		/* New ABI: syscall_nr */
OFF_R4	=  16		/* New ABI: arg0 */
OFF_R5	=  20		/* New ABI: arg1 */
OFF_R6	=  24		/* New ABI: arg2 */
OFF_R7	=  28		/* New ABI: arg3 */
OFF_SP	=  (15*4)
OFF_PC	=  (16*4)
OFF_SR	=  (16*4+8)
OFF_TRA	=  (16*4+6*4)


#define k0	r0
#define k1	r1
#define k2	r2
#define k3	r3
#define k4	r4

#define g_imask		r6	/* r6_bank1 */
#define k_g_imask	r6_bank	/* r6_bank1 */
#define current		r7	/* r7_bank1 */

#include <asm/entry-macros.S>

/*
 * Kernel mode register usage:
 *	k0	scratch
 *	k1	scratch
 *	k2	scratch (Exception code)
 *	k3	scratch (Return address)
 *	k4	scratch
 *	k5	reserved
 *	k6	Global Interrupt Mask (0--15 << 4)
 *	k7	CURRENT_THREAD_INFO (pointer to current thread_info)
 */

!
! TLB Miss / Initial Page write exception handling
!			_and_
! TLB hits, but the access violates the protection.
! It can be a valid access, such as stack growth and/or C-O-W.
!
!
! Find the pmd/pte entry and load the TLB.
! If it's not found, raise an address error (SEGV).
!
! Although this could be written in assembly language (and it'd be faster),
! this first version depends *much* on the C implementation.
!
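
/*
 * A rough sketch of the C entry points used by call_dpf/call_dae below.
 * These prototypes are an assumption based on the argument registers set
 * up here (SH passes the first arguments in r4/r5/r6), not a definitive
 * reference:
 *
 *	asmlinkage int  __do_page_fault(struct pt_regs *regs,
 *					unsigned long writeaccess,
 *					unsigned long address);
 *	asmlinkage void do_page_fault(struct pt_regs *regs,
 *				      unsigned long writeaccess,
 *				      unsigned long address);
 *
 * call_dpf tries __do_page_fault() first (the C fast path that walks the
 * pmd/pte and loads the TLB, per the note above); a non-zero return makes
 * it fall through to the full do_page_fault() path.
 */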

#if defined(CONFIG_MMU)
	.align	2
ENTRY(tlb_miss_load)
	bra	call_dpf
	 mov	#0, r5

	.align	2
ENTRY(tlb_miss_store)
	bra	call_dpf
	 mov	#1, r5

	.align	2
ENTRY(initial_page_write)
	bra	call_dpf
	 mov	#1, r5

	.align	2
ENTRY(tlb_protection_violation_load)
	bra	call_dpf
	 mov	#0, r5

	.align	2
ENTRY(tlb_protection_violation_store)
	bra	call_dpf
	 mov	#1, r5

call_dpf:
	mov.l	1f, r0
	mov	r5, r8
	mov.l	@r0, r6
	mov	r6, r9
	mov.l	2f, r0
	sts	pr, r10
	jsr	@r0
	 mov	r15, r4
	!
	tst	r0, r0
	bf/s	0f
	 lds	r10, pr
	rts
	 nop
0:	mov.l	3f, r0
	mov	r9, r6
	mov	r8, r5
	jmp	@r0
	 mov	r15, r4

	.align	2
1:	.long	MMU_TEA
2:	.long	__do_page_fault
3:	.long	do_page_fault

	.align	2
ENTRY(address_error_load)
	bra	call_dae
	 mov	#0, r5		! writeaccess = 0

	.align	2
ENTRY(address_error_store)
	bra	call_dae
	 mov	#1, r5		! writeaccess = 1

	.align	2
call_dae:
	mov.l	1f, r0
	mov.l	@r0, r6		! address
	mov.l	2f, r0
	jmp	@r0
	 mov	r15, r4		! regs

	.align	2
1:	.long	MMU_TEA
2:	.long	do_address_error
#endif /* CONFIG_MMU */

#if defined(CONFIG_SH_STANDARD_BIOS)
	/* Unwind the stack and jmp to the debug entry */
ENTRY(sh_bios_handler)
	mov.l	@r15+, r0
	mov.l	@r15+, r1
	mov.l	@r15+, r2
	mov.l	@r15+, r3
	mov.l	@r15+, r4
	mov.l	@r15+, r5
	mov.l	@r15+, r6
	mov.l	@r15+, r7
	stc	sr, r8
	mov.l	1f, r9		! BL=1, RB=1, IMASK=0x0F
	or	r9, r8
	ldc	r8, sr		! here, change the register bank
	mov.l	@r15+, r8
	mov.l	@r15+, r9
	mov.l	@r15+, r10
	mov.l	@r15+, r11
	mov.l	@r15+, r12
	mov.l	@r15+, r13
	mov.l	@r15+, r14
	mov.l	@r15+, k0
	ldc.l	@r15+, spc
	lds.l	@r15+, pr
	mov.l	@r15+, k1
	ldc.l	@r15+, gbr
	lds.l	@r15+, mach
	lds.l	@r15+, macl
	mov	k0, r15
	!
	mov.l	2f, k0
	mov.l	@k0, k0
	jmp	@k0
	 ldc	k1, ssr
	.align	2
1:	.long	0x300000f0
2:	.long	gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */

restore_all:
	mov.l	@r15+, r0
	mov.l	@r15+, r1
	mov.l	@r15+, r2
	mov.l	@r15+, r3
	mov.l	@r15+, r4
	mov.l	@r15+, r5
	mov.l	@r15+, r6
	mov.l	@r15+, r7
	!
	stc	sr, r8
	mov.l	7f, r9
	or	r9, r8			! BL=1, RB=1
	ldc	r8, sr			! here, change the register bank
	!
	mov.l	@r15+, r8
	mov.l	@r15+, r9
	mov.l	@r15+, r10
	mov.l	@r15+, r11
	mov.l	@r15+, r12
	mov.l	@r15+, r13
	mov.l	@r15+, r14
	mov.l	@r15+, k4		! original stack pointer
	ldc.l	@r15+, spc
	lds.l	@r15+, pr
	mov.l	@r15+, k3		! original SR
	ldc.l	@r15+, gbr
	lds.l	@r15+, mach
	lds.l	@r15+, macl
	add	#4, r15			! Skip syscall number
	!
#ifdef CONFIG_SH_DSP
	mov.l	@r15+, k0		! DSP mode marker
	mov.l	5f, k1
	cmp/eq	k0, k1			! Do we have a DSP stack frame?
	bf	skip_restore

	stc	sr, k0			! Enable CPU DSP mode
	or	k1, k0			! (within kernel it may be disabled)
	ldc	k0, sr
	mov	r2, k0			! Backup r2

	! Restore DSP registers from stack
	mov	r15, r2
	movs.l	@r2+, a1
	movs.l	@r2+, a0g
	movs.l	@r2+, a1g
	movs.l	@r2+, m0
	movs.l	@r2+, m1
	mov	r2, r15

	lds.l	@r15+, a0
	lds.l	@r15+, x0
	lds.l	@r15+, x1
	lds.l	@r15+, y0
	lds.l	@r15+, y1
	lds.l	@r15+, dsr
	ldc.l	@r15+, rs
	ldc.l	@r15+, re
	ldc.l	@r15+, mod

	mov	k0, r2			! Restore r2
skip_restore:
#endif
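	!
	! A sketch of the SR rebuild that follows, inferred from the code
	! itself rather than from separate documentation: SR.IMASK occupies
	! bits 7..4.  The saved SR (k3) first has its IMASK field cleared;
	! if the saved IMASK was 0xf (all interrupts blocked) that value is
	! kept, otherwise the live global interrupt mask in g_imask
	! (r6_bank1) is merged in instead.  In C-like terms:
	!
	!	new_ssr = (saved_sr & ~0xf0) |
	!		  (((saved_sr & 0xf0) == 0xf0) ? 0xf0 : g_imask);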
	!
	! Calculate new SR value
	mov	k3, k2			! original SR value
	mov	#0xf0, k1
	extu.b	k1, k1
	not	k1, k1
	and	k1, k2			! Mask original SR value
	!
	mov	k3, k0			! Calculate IMASK-bits
	shlr2	k0
	and	#0x3c, k0
	cmp/eq	#0x3c, k0
	bt/s	6f
	 shll2	k0
	mov	g_imask, k0
	!
6:	or	k0, k2			! Set the IMASK-bits
	ldc	k2, ssr
	!
#if defined(CONFIG_KGDB_NMI)
	! Clear in_nmi
	mov.l	6f, k0
	mov	#0, k1
	mov.b	k1, @k0
#endif
	mov.l	@r15+, k2		! restore EXPEVT
	mov	k4, r15
	rte
	 nop

	.align	2
5:	.long	0x00001000	! DSP
#ifdef CONFIG_KGDB_NMI
6:	.long	in_nmi
#endif
7:	.long	0x30000000

! common exception handler
#include "../../entry-common.S"

! Exception Vector Base
!
!	Should be aligned page boundary.
!
	.balign	4096,0,4096
ENTRY(vbr_base)
	.long	0
!
	.balign	256,0,256
general_exception:
	mov.l	1f, k2
	mov.l	2f, k3
#ifdef CONFIG_CPU_SUBTYPE_SHX3
	mov.l	@k2, k2

	! Is EXPEVT larger than 0x800?
	mov	#0x8, k0
	shll8	k0
	cmp/hs	k0, k2
	bf	0f

	! then add 0x580 (k2 is 0xd80 or 0xda0)
	mov	#0x58, k0
	shll2	k0
	shll2	k0
	add	k0, k2
0:
	bra	handle_exception
	 nop
#else
	bra	handle_exception
	 mov.l	@k2, k2
#endif
	.align	2
1:	.long	EXPEVT
2:	.long	ret_from_exception
!
!

	.balign	1024,0,1024
tlb_miss:
	mov.l	1f, k2
	mov.l	4f, k3
	bra	handle_exception
	 mov.l	@k2, k2
!
	.balign	512,0,512
interrupt:
	mov.l	2f, k2
	mov.l	3f, k3
#if defined(CONFIG_KGDB_NMI)
	! Debounce (filter nested NMI)
	mov.l	@k2, k0
	mov.l	5f, k1
	cmp/eq	k1, k0
	bf	0f
	mov.l	6f, k1
	tas.b	@k1
	bt	0f
	rte
	 nop
	.align	2
5:	.long	NMI_VEC
6:	.long	in_nmi
0:
#endif /* defined(CONFIG_KGDB_NMI) */
	bra	handle_exception
	 mov	#-1, k2		! interrupt exception marker

	.align	2
1:	.long	EXPEVT
2:	.long	INTEVT
3:	.long	ret_from_irq
4:	.long	ret_from_exception

!
!
	.align	2
ENTRY(handle_exception)
	! Using k0, k1 for scratch registers (r0_bank1, r1_bank),
	! save all registers onto stack.
	!
	stc	ssr, k0		! Is it from kernel space?
	shll	k0		! Check MD bit (bit30) by shifting it into...
	shll	k0		!       ...the T bit
	bt/s	1f		! It's a kernel to kernel transition.
	 mov	r15, k0		! save original stack to k0
	/* User space to kernel */
	mov	#(THREAD_SIZE >> 10), k1
	shll8	k1		! k1 := THREAD_SIZE
	shll2	k1		! (mov #imm only takes 8 bits, hence the shifts)
	add	current, k1
	mov	k1, r15		! change to kernel stack
	!
1:	mov.l	2f, k1
	!
#ifdef CONFIG_SH_DSP
	mov.l	r2, @-r15	! Save r2, we need another reg
	stc	sr, k4
	mov.l	1f, r2
	tst	r2, k4		! Check if in DSP mode
	mov.l	@r15+, r2	! Restore r2 now
	bt/s	skip_save
	 mov	#0, k4		! Set marker for no stack frame

	mov	r2, k4		! Backup r2 (in k4) for later

	! Save DSP registers on stack
	stc.l	mod, @-r15
	stc.l	re, @-r15
	stc.l	rs, @-r15
	sts.l	dsr, @-r15
	sts.l	y1, @-r15
	sts.l	y0, @-r15
	sts.l	x1, @-r15
	sts.l	x0, @-r15
	sts.l	a0, @-r15

	! GAS is broken, does not generate correct "movs.l Ds,@-As" instr.

	! FIXME: Make sure that this is still the case with newer toolchains,
	! as we're not at all interested in supporting ancient toolchains at
	! this point. -- PFM.

	mov	r15, r2
	.word	0xf653		! movs.l	a1, @-r2
	.word	0xf6f3		! movs.l	a0g, @-r2
	.word	0xf6d3		! movs.l	a1g, @-r2
	.word	0xf6c3		! movs.l	m0, @-r2
	.word	0xf6e3		! movs.l	m1, @-r2
	mov	r2, r15

	mov	k4, r2		! Restore r2
	mov.l	1f, k4		! Force DSP stack frame
skip_save:
	mov.l	k4, @-r15	! Push DSP mode marker onto stack
#endif
	! Save the user registers on the stack.
	mov.l	k2, @-r15	! EXPEVT

	mov	#-1, k4
	mov.l	k4, @-r15	! set TRA (default: -1)
	!
	sts.l	macl, @-r15
	sts.l	mach, @-r15
	stc.l	gbr, @-r15
	stc.l	ssr, @-r15
	sts.l	pr, @-r15
	stc.l	spc, @-r15
	!
	lds	k3, pr		! Set the return address to pr
	!
	mov.l	k0, @-r15	! save original stack
	mov.l	r14, @-r15
	mov.l	r13, @-r15
	mov.l	r12, @-r15
	mov.l	r11, @-r15
	mov.l	r10, @-r15
	mov.l	r9, @-r15
	mov.l	r8, @-r15
	!
	stc	sr, r8		! Back to normal register bank, and
	or	k1, r8		! Block all interrupts
	mov.l	3f, k1
	and	k1, r8		! ...
	ldc	r8, sr		! ...changed here.
	!
	mov.l	r7, @-r15
	mov.l	r6, @-r15
	mov.l	r5, @-r15
	mov.l	r4, @-r15
	mov.l	r3, @-r15
	mov.l	r2, @-r15
	mov.l	r1, @-r15
	mov.l	r0, @-r15

	/*
	 * This gets a bit tricky: in the INTEVT case we don't want to use
	 * the VBR offset as a destination in the jump call table, since all
	 * of the destinations are the same. In this case, (interrupt) sets
	 * a marker in r2 (now r2_bank since SR.RB changed), which we check
	 * to determine the exception type. For all other exceptions, we
	 * forcibly read EXPEVT from memory and fix up the jump address; in
	 * the interrupt exception case we jump to do_IRQ() and defer the
	 * INTEVT read until there. As a bonus, we can also clean up the
	 * SR.RB checks that do_IRQ() was doing.
	 */
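	/*
	 * A sketch of the dispatch below, derived from this code path (not
	 * from separate documentation): r2_bank holds either the interrupt
	 * marker (-1, caught by the cmp/pz test) or the EXPEVT exception
	 * code.  Exception codes are multiples of 0x20, so shifting right
	 * by three turns the code into a byte offset into the table of
	 * 32-bit handler pointers, roughly:
	 *
	 *	offset  = expevt >> 3;		// == (expevt >> 5) * 4
	 *	handler = *(exception_handling_table + offset);
	 */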
	stc	r2_bank, r8
	cmp/pz	r8
	bf	interrupt_exception
	shlr2	r8
	shlr	r8
	mov.l	4f, r9
	add	r8, r9
	mov.l	@r9, r9
	jmp	@r9
	 nop
	rts
	 nop

	.align	2
1:	.long	0x00001000	! DSP=1
2:	.long	0x000080f0	! FD=1, IMASK=15
3:	.long	0xcfffffff	! RB=0, BL=0
4:	.long	exception_handling_table

interrupt_exception:
	mov.l	1f, r9
	mov.l	2f, r4
	mov.l	@r4, r4
	jmp	@r9
	 mov	r15, r5
	rts
	 nop

	.align	2
1:	.long	do_IRQ
2:	.long	INTEVT

	.align	2
ENTRY(exception_none)
	rts
	 nop