/*
 * Low-level exception handling
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004 - 2008 by Tensilica Inc.
 * Copyright (C) 2015 Cadence Design Systems Inc.
 *
 * Chris Zankel <chris@zankel.net>
 *
 */

#include <linux/linkage.h>
#include <linux/pgtable.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/processor.h>
#include <asm/coprocessor.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/page.h>
#include <asm/signal.h>
#include <asm/tlbflush.h>
#include <variant/tie-asm.h>

/*
 * Macro to find first bit set in WINDOWSTART from the left + 1
 *
 * 100....0 -> 1
 * 010....0 -> 2
 * 000....1 -> WSBITS
 */

	.macro	ffs_ws bit mask

#if XCHAL_HAVE_NSA
	nsau	\bit, \mask			# 32-WSBITS ... 31 (32 iff 0)
	addi	\bit, \bit, WSBITS - 32 + 1	# highest bit set -> return 1
#else
	movi	\bit, WSBITS
#if WSBITS > 16
	_bltui	\mask, 0x10000, 99f
	addi	\bit, \bit, -16
	extui	\mask, \mask, 16, 16
#endif
#if WSBITS > 8
99:	_bltui	\mask, 0x100, 99f
	addi	\bit, \bit, -8
	srli	\mask, \mask, 8
#endif
99:	_bltui	\mask, 0x10, 99f
	addi	\bit, \bit, -4
	srli	\mask, \mask, 4
99:	_bltui	\mask, 0x4, 99f
	addi	\bit, \bit, -2
	srli	\mask, \mask, 2
99:	_bltui	\mask, 0x2, 99f
	addi	\bit, \bit, -1
99:

#endif
	.endm


	.macro	irq_save flags tmp
#if XTENSA_FAKE_NMI
#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
	rsr	\flags, ps
	extui	\tmp, \flags, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
	bgei	\tmp, LOCKLEVEL, 99f
	rsil	\tmp, LOCKLEVEL
99:
#else
	movi	\tmp, LOCKLEVEL
	rsr	\flags, ps
	or	\flags, \flags, \tmp
	xsr	\flags, ps
	rsync
#endif
#else
	rsil	\flags, LOCKLEVEL
#endif
	.endm

/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */

/*
 * First-level exception handler for user exceptions.
 * Save some special registers, extra states and all registers in the AR
 * register file that were in use in the user task, and jump to the common
 * exception code.
 * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
 * save them for kernel exceptions).
 *
 * Entry condition for user_exception:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original value in depc
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _user_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _user_exception might be at an odd address. Don't use call0..call12
 */
	.literal_position

ENTRY(user_exception)

	/* Save a1, a2, a3, and set SP. */
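	/* Per the entry conditions above, a2 already holds the new stack
	 * pointer while the interrupted task's a2 is parked in DEPC; it is
	 * fetched back from there below.
	 */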
	rsr	a0, depc
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	.globl _user_exception
_user_exception:

	/* Save SAR and turn off single stepping */

	movi	a2, 0
	wsr	a2, depc		# terminate user stack trace with 0
	rsr	a3, sar
	xsr	a2, icountlevel
	s32i	a3, a1, PT_SAR
	s32i	a2, a1, PT_ICOUNTLEVEL

#if XCHAL_HAVE_THREADPTR
	rur	a2, threadptr
	s32i	a2, a1, PT_THREADPTR
#endif

	/* Rotate ws so that the current windowbase is at bit0. */
	/* Assume ws = xxwww1yyyy. Rotate ws right so that a2 = yyyyxxwww1 */

#if defined(USER_SUPPORT_WINDOWED)
	rsr	a2, windowbase
	rsr	a3, windowstart
	ssr	a2
	s32i	a2, a1, PT_WINDOWBASE
	s32i	a3, a1, PT_WINDOWSTART
	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2
	srli	a2, a2, 32-WSBITS
	s32i	a2, a1, PT_WMASK	# needed for restoring registers
#else
	movi	a2, 0
	movi	a3, 1
	s32i	a2, a1, PT_WINDOWBASE
	s32i	a3, a1, PT_WINDOWSTART
	s32i	a3, a1, PT_WMASK
#endif

	/* Save only live registers. */

UABI_W	_bbsi.l	a2, 1, .Lsave_window_registers
	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
UABI_W	_bbsi.l	a2, 2, .Lsave_window_registers
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
UABI_W	_bbsi.l	a2, 3, .Lsave_window_registers
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15

#if defined(USER_SUPPORT_WINDOWED)
	/* If only one valid frame skip saving regs. */

	beqi	a2, 1, common_exception

	/* Save the remaining registers.
	 * We have to save all registers up to the first '1' from
	 * the right, except the current frame (bit 0).
	 * Assume a2 is:  001001000110001
	 * All register frames starting from the top field to the marked '1'
	 * must be saved.
	 */
.Lsave_window_registers:
	addi	a3, a2, -1		# eliminate '1' in bit 0: yyyyxxww0
	neg	a3, a3			# yyyyxxww0 -> YYYYXXWW1+1
	and	a3, a3, a2		# max. only one bit is set

	/* Find number of frames to save */

	ffs_ws	a0, a3			# number of frames to the '1' from left

	/* Store information into WMASK:
	 * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
	 * bits 4...: number of valid 4-register frames
	 */

	slli	a3, a0, 4		# number of frames to save in bits 8..4
	extui	a2, a2, 0, 4		# mask for the first 16 registers
	or	a2, a3, a2
	s32i	a2, a1, PT_WMASK	# needed when we restore the reg-file

	/* Save 4 registers at a time */

1:	rotw	-1
	s32i	a0, a5, PT_AREG_END - 16
	s32i	a1, a5, PT_AREG_END - 12
	s32i	a2, a5, PT_AREG_END - 8
	s32i	a3, a5, PT_AREG_END - 4
	addi	a0, a4, -1
	addi	a1, a5, -16
	_bnez	a0, 1b

	/* WINDOWBASE still in SAR! */

	rsr	a2, sar			# original WINDOWBASE
	movi	a3, 1
	ssl	a2
	sll	a3, a3
	wsr	a3, windowstart		# set corresponding WINDOWSTART bit
	wsr	a2, windowbase		# and WINDOWBASE
	rsync

	/* We are back to the original stack pointer (a1) */
#endif
	/* Now, jump to the common exception handler. */

	j	common_exception

ENDPROC(user_exception)

/*
 * First-level exception handler for kernel exceptions
 * Save special registers and the live window frame.
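 * Unlike the user-exception path we don't save WINDOWBASE and WINDOWSTART
 * here; only the rotated WINDOWSTART (PT_WMASK) is recorded, for use by
 * kernel_exception_exit.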
 * Note: Even though we change the stack pointer, we don't have to do a
 *	 MOVSP here, as we do that when we return from the exception.
 *	 (See comment in the kernel exception exit code)
 *
 * Entry condition for kernel_exception:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _kernel_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _kernel_exception might be at an odd address. Don't use call0..call12
 */

ENTRY(kernel_exception)

	/* Save a1, a2, a3, and set SP. */

	rsr	a0, depc		# get a2
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	.globl _kernel_exception
_kernel_exception:

	/* Save SAR and turn off single stepping */

	movi	a2, 0
	rsr	a3, sar
	xsr	a2, icountlevel
	s32i	a3, a1, PT_SAR
	s32i	a2, a1, PT_ICOUNTLEVEL

#if defined(__XTENSA_WINDOWED_ABI__)
	/* Rotate ws so that the current windowbase is at bit0. */
	/* Assume ws = xxwww1yyyy. Rotate ws right so that a2 = yyyyxxwww1 */

	rsr	a2, windowbase		# don't need to save these, we only
	rsr	a3, windowstart		# need shifted windowstart: windowmask
	ssr	a2
	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2
	srli	a2, a2, 32-WSBITS
	s32i	a2, a1, PT_WMASK	# needed for kernel_exception_exit
#endif

	/* Save only the live window-frame */

KABI_W	_bbsi.l	a2, 1, 1f
	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
KABI_W	_bbsi.l	a2, 2, 1f
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
KABI_W	_bbsi.l	a2, 3, 1f
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15

#ifdef __XTENSA_WINDOWED_ABI__
	_bnei	a2, 1, 1f
	/* Copy spill slots of a0 and a1 to imitate movsp
	 * in order to keep exception stack continuous
	 */
	l32i	a3, a1, PT_KERNEL_SIZE
	l32i	a0, a1, PT_KERNEL_SIZE + 4
	s32e	a3, a1, -16
	s32e	a0, a1, -12
#endif
1:
	l32i	a0, a1, PT_AREG0	# restore saved a0
	wsr	a0, depc

/*
 * This is the common exception handler.
 * We get here from the user exception handler or simply by falling through
 * from the kernel exception handler.
 * Save the remaining special registers, switch to kernel mode, and jump
 * to the second-level exception handler.
 *
 */

common_exception:

	/* Save some registers, disable loops and clear the syscall flag. */

	rsr	a2, debugcause
	rsr	a3, epc1
	s32i	a2, a1, PT_DEBUGCAUSE
	s32i	a3, a1, PT_PC

	movi	a2, NO_SYSCALL
	rsr	a3, excvaddr
	s32i	a2, a1, PT_SYSCALL
	movi	a2, 0
	s32i	a3, a1, PT_EXCVADDR
#if XCHAL_HAVE_LOOPS
	xsr	a2, lcount
	s32i	a2, a1, PT_LCOUNT
#endif

#if XCHAL_HAVE_EXCLUSIVE
	/* Clear exclusive access monitor set by interrupted code */
	clrex
#endif

	/* It is now safe to restore the EXC_TABLE_FIXUP variable. */
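	/* Writing 0 to EXC_TABLE_FIXUP below ends the double-exception
	 * critical section: a nested exception from here on is dispatched
	 * normally instead of through a registered fixup handler.
	 */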
	rsr	a2, exccause
	movi	a3, 0
	rsr	a0, excsave1
	s32i	a2, a1, PT_EXCCAUSE
	s32i	a3, a0, EXC_TABLE_FIXUP

	/* All unrecoverable states are saved on stack, now, and a1 is valid.
	 * Now we can allow exceptions again. In case we've got an interrupt
	 * PS.INTLEVEL is set to LOCKLEVEL disabling further interrupts,
	 * otherwise it's left unchanged.
	 *
	 * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
	 */

	rsr	a3, ps
	s32i	a3, a1, PT_PS		# save ps

#if XTENSA_FAKE_NMI
	/* Correct PS needs to be saved in the PT_PS:
	 * - in case of exception or level-1 interrupt it's in the PS,
	 *   and is already saved.
	 * - in case of medium level interrupt it's in the excsave2.
	 */
	movi	a0, EXCCAUSE_MAPPED_NMI
	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
	beq	a2, a0, .Lmedium_level_irq
	bnei	a2, EXCCAUSE_LEVEL1_INTERRUPT, .Lexception
	beqz	a3, .Llevel1_irq	# level-1 IRQ sets ps.intlevel to 0

.Lmedium_level_irq:
	rsr	a0, excsave2
	s32i	a0, a1, PT_PS		# save medium-level interrupt ps
	bgei	a3, LOCKLEVEL, .Lexception

.Llevel1_irq:
	movi	a3, LOCKLEVEL

.Lexception:
KABI_W	movi	a0, PS_WOE_MASK
KABI_W	or	a3, a3, a0
#else
	addi	a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT
	movi	a0, LOCKLEVEL
	extui	a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
					# a3 = PS.INTLEVEL
	moveqz	a3, a0, a2		# a3 = LOCKLEVEL iff interrupt
KABI_W	movi	a2, PS_WOE_MASK
KABI_W	or	a3, a3, a2
#endif

	/* restore return address (or 0 if return to userspace) */
	rsr	a0, depc
	wsr	a3, ps
	rsync				# PS.WOE => rsync => overflow

	/* Save lbeg, lend */
#if XCHAL_HAVE_LOOPS
	rsr	a4, lbeg
	rsr	a3, lend
	s32i	a4, a1, PT_LBEG
	s32i	a3, a1, PT_LEND
#endif

	/* Save SCOMPARE1 */

#if XCHAL_HAVE_S32C1I
	rsr	a3, scompare1
	s32i	a3, a1, PT_SCOMPARE1
#endif

	/* Save optional registers. */

	save_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT

#ifdef CONFIG_TRACE_IRQFLAGS
	rsr	abi_tmp0, ps
	extui	abi_tmp0, abi_tmp0, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
	beqz	abi_tmp0, 1f
	abi_call trace_hardirqs_off
1:
#endif
#ifdef CONFIG_CONTEXT_TRACKING
	l32i	abi_tmp0, a1, PT_PS
	bbci.l	abi_tmp0, PS_UM_BIT, 1f
	abi_call context_tracking_user_exit
1:
#endif

	/* Go to second-level dispatcher. Set up parameters to pass to the
	 * exception handler and call the exception handler.
	 */

	l32i	abi_arg1, a1, PT_EXCCAUSE	# pass EXCCAUSE
	rsr	abi_tmp0, excsave1
	addx4	abi_tmp0, abi_arg1, abi_tmp0
	l32i	abi_tmp0, abi_tmp0, EXC_TABLE_DEFAULT	# load handler
	mov	abi_arg0, a1			# pass stack frame

	/* Call the second-level handler */

	abi_callx abi_tmp0

	/* Jump here for exception exit */
	.global common_exception_return
common_exception_return:

#if XTENSA_FAKE_NMI
	l32i	abi_tmp0, a1, PT_EXCCAUSE
	movi	abi_tmp1, EXCCAUSE_MAPPED_NMI
	l32i	abi_saved1, a1, PT_PS
	beq	abi_tmp0, abi_tmp1, .Lrestore_state
#endif
.Ltif_loop:
	irq_save abi_tmp0, abi_tmp1
#ifdef CONFIG_TRACE_IRQFLAGS
	abi_call trace_hardirqs_off
#endif

	/* Jump if we are returning from kernel exceptions. */
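	/* Note: TI_FLAGS is reloaded on every pass through .Ltif_loop, since
	 * the callees below (do_notify_resume, schedule) may set new work
	 * bits before we finally restore state.
	 */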
	l32i	abi_saved1, a1, PT_PS
	GET_THREAD_INFO(abi_tmp0, a1)
	l32i	abi_saved0, abi_tmp0, TI_FLAGS
	_bbci.l	abi_saved1, PS_UM_BIT, .Lexit_tif_loop_kernel

	/* Specific to a user exception exit:
	 * We need to check some flags for signal handling and rescheduling,
	 * and have to restore WB and WS, extra states, and all registers
	 * in the register file that were in use in the user task.
	 * Note that we don't disable interrupts here.
	 */

	_bbsi.l	abi_saved0, TIF_NEED_RESCHED, .Lresched
	movi	abi_tmp0, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL
	bnone	abi_saved0, abi_tmp0, .Lexit_tif_loop_user

	l32i	abi_tmp0, a1, PT_DEPC
	bgeui	abi_tmp0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lrestore_state

	/* Call do_signal() */

#ifdef CONFIG_TRACE_IRQFLAGS
	abi_call trace_hardirqs_on
#endif
	rsil	abi_tmp0, 0
	mov	abi_arg0, a1
	abi_call do_notify_resume	# int do_notify_resume(struct pt_regs*)
	j	.Ltif_loop

.Lresched:
#ifdef CONFIG_TRACE_IRQFLAGS
	abi_call trace_hardirqs_on
#endif
	rsil	abi_tmp0, 0
	abi_call schedule		# void schedule (void)
	j	.Ltif_loop

.Lexit_tif_loop_kernel:
#ifdef CONFIG_PREEMPTION
	_bbci.l	abi_saved0, TIF_NEED_RESCHED, .Lrestore_state

	/* Check current_thread_info->preempt_count */

	l32i	abi_tmp1, abi_tmp0, TI_PRE_COUNT
	bnez	abi_tmp1, .Lrestore_state
	abi_call preempt_schedule_irq
#endif
	j	.Lrestore_state

.Lexit_tif_loop_user:
#ifdef CONFIG_CONTEXT_TRACKING
	abi_call context_tracking_user_enter
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	_bbci.l	abi_saved0, TIF_DB_DISABLED, 1f
	abi_call restore_dbreak
1:
#endif
#ifdef CONFIG_DEBUG_TLB_SANITY
	l32i	abi_tmp0, a1, PT_DEPC
	bgeui	abi_tmp0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lrestore_state
	abi_call check_tlb_sanity
#endif

.Lrestore_state:
#ifdef CONFIG_TRACE_IRQFLAGS
	extui	abi_tmp0, abi_saved1, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
	bgei	abi_tmp0, LOCKLEVEL, 1f
	abi_call trace_hardirqs_on
1:
#endif
	/*
	 * Restore optional registers.
	 * abi_arg* are used as temporary registers here.
	 */

	load_xtregs_opt a1 abi_tmp0 abi_arg0 abi_arg1 abi_arg2 abi_arg3 PT_XTREGS_OPT

	/* Restore SCOMPARE1 */

#if XCHAL_HAVE_S32C1I
	l32i	abi_tmp0, a1, PT_SCOMPARE1
	wsr	abi_tmp0, scompare1
#endif
	wsr	abi_saved1, ps		/* disable interrupts */
	_bbci.l	abi_saved1, PS_UM_BIT, kernel_exception_exit

user_exception_exit:

	/* Restore the state of the task and return from the exception. */

#if defined(USER_SUPPORT_WINDOWED)
	/* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */

	l32i	a2, a1, PT_WINDOWBASE
	l32i	a3, a1, PT_WINDOWSTART
	wsr	a1, depc		# use DEPC as temp storage
	wsr	a3, windowstart		# restore WINDOWSTART
	ssr	a2			# preserve user's WB in the SAR
	wsr	a2, windowbase		# switch to user's saved WB
	rsync
	rsr	a1, depc		# restore stack pointer
	l32i	a2, a1, PT_WMASK	# register frames saved (in bits 4...9)
	rotw	-1			# we restore a4..a7
	_bltui	a6, 16, .Lclear_regs	# only have to restore current window?

	/* The working registers are a0 and a3. We are restoring to
	 * a4..a7. Be careful not to destroy what we have just restored.
	 * Note: wmask has the format YYYYM:
	 *       Y: number of registers saved in groups of 4
	 *       M: 4 bit mask of first 16 registers
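	 * (Example, assuming a hypothetical wmask of 0b100101: M = 0101 and
	 * Y = 2, i.e. two more 4-register groups remain to be reloaded.)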
	 */

	mov	a2, a6
	mov	a3, a5

1:	rotw	-1			# a0..a3 become a4..a7
	addi	a3, a7, -4*4		# next iteration
	addi	a2, a6, -16		# decrementing Y in WMASK
	l32i	a4, a3, PT_AREG_END + 0
	l32i	a5, a3, PT_AREG_END + 4
	l32i	a6, a3, PT_AREG_END + 8
	l32i	a7, a3, PT_AREG_END + 12
	_bgeui	a2, 16, 1b

	/* Clear unrestored registers (don't leak anything to user-land) */

.Lclear_regs:
	rsr	a0, windowbase
	rsr	a3, sar
	sub	a3, a0, a3
	beqz	a3, 2f
	extui	a3, a3, 0, WBBITS

1:	rotw	-1
	addi	a3, a7, -1
	movi	a4, 0
	movi	a5, 0
	movi	a6, 0
	movi	a7, 0
	bgei	a3, 1, 1b

	/* We are back where we were when we started.
	 * Note: a2 still contains WMASK (if we've returned to the original
	 *       frame where we had loaded a2), or at least the lower 4 bits
	 *       (if we have restored WSBITS-1 frames).
	 */
2:
#else
	movi	a2, 1
#endif
#if XCHAL_HAVE_THREADPTR
	l32i	a3, a1, PT_THREADPTR
	wur	a3, threadptr
#endif

	j	common_exception_exit

	/* This is the kernel exception exit.
	 * We avoided doing a MOVSP when we entered the exception, but we
	 * have to do it here.
	 */

kernel_exception_exit:

#if defined(__XTENSA_WINDOWED_ABI__)
	/* Check if we have to do a movsp.
	 *
	 * We only have to do a movsp if the previous window-frame has
	 * been spilled to the *temporary* exception stack instead of the
	 * task's stack. This is the case if the corresponding bit in
	 * WINDOWSTART for the previous window-frame was set before
	 * (not spilled) but is zero now (spilled).
	 * If this bit is zero, all other bits except the one for the
	 * current window frame are also zero. So, we can use a simple test:
	 * 'and' WINDOWSTART and WINDOWSTART-1:
	 *
	 *  (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
	 *
	 * The result is zero only if one bit was set.
	 *
	 * (Note: We might have gone through several task switches before
	 *        we come back to the current task, so WINDOWBASE might be
	 *        different from the time the exception occurred.)
	 */

	/* Test WINDOWSTART before and after the exception.
	 * We actually have WMASK, so we only have to test if it is 1 or not.
	 */

	l32i	a2, a1, PT_WMASK
	_beqi	a2, 1, common_exception_exit	# Spilled before exception, jump

	/* Test WINDOWSTART now. If spilled, do the movsp */

	rsr	a3, windowstart
	addi	a0, a3, -1
	and	a3, a3, a0
	_bnez	a3, common_exception_exit

	/* Do a movsp (we returned from a call4, so we have at least a0..a7) */

	addi	a0, a1, -16
	l32i	a3, a0, 0
	l32i	a4, a0, 4
	s32i	a3, a1, PT_KERNEL_SIZE + 0
	s32i	a4, a1, PT_KERNEL_SIZE + 4
	l32i	a3, a0, 8
	l32i	a4, a0, 12
	s32i	a3, a1, PT_KERNEL_SIZE + 8
	s32i	a4, a1, PT_KERNEL_SIZE + 12

	/* Common exception exit.
	 * We restore the special registers and the current window frame, and
	 * return from the exception.
	 *
	 * Note: We expect a2 to hold PT_WMASK
	 */
#else
	movi	a2, 1
#endif

common_exception_exit:

	/* Restore address registers. */
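	/* Only the quads recorded live in PT_WMASK were saved on entry:
	 * a set bit 1/2/3 in a2 means a4-a7/a8-a11/a12-a15 belong to other
	 * live window frames and must be left untouched here.
	 */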
	_bbsi.l	a2, 1, 1f
	l32i	a4, a1, PT_AREG4
	l32i	a5, a1, PT_AREG5
	l32i	a6, a1, PT_AREG6
	l32i	a7, a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	l32i	a8, a1, PT_AREG8
	l32i	a9, a1, PT_AREG9
	l32i	a10, a1, PT_AREG10
	l32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	l32i	a12, a1, PT_AREG12
	l32i	a13, a1, PT_AREG13
	l32i	a14, a1, PT_AREG14
	l32i	a15, a1, PT_AREG15

	/* Restore PC, SAR */

1:	l32i	a2, a1, PT_PC
	l32i	a3, a1, PT_SAR
	wsr	a2, epc1
	wsr	a3, sar

	/* Restore LBEG, LEND, LCOUNT */
#if XCHAL_HAVE_LOOPS
	l32i	a2, a1, PT_LBEG
	l32i	a3, a1, PT_LEND
	wsr	a2, lbeg
	l32i	a2, a1, PT_LCOUNT
	wsr	a3, lend
	wsr	a2, lcount
#endif

	/* We control single stepping through the ICOUNTLEVEL register. */

	l32i	a2, a1, PT_ICOUNTLEVEL
	movi	a3, -2
	wsr	a2, icountlevel
	wsr	a3, icount

	/* Check if it was double exception. */

	l32i	a0, a1, PT_DEPC
	l32i	a3, a1, PT_AREG3
	l32i	a2, a1, PT_AREG2
	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	/* Restore a0...a3 and return */

	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfe

1:	wsr	a0, depc
	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfde

ENDPROC(kernel_exception)

/*
 * Debug exception handler.
 *
 * Currently, we don't support KGDB, so only user applications can be debugged.
 *
 * When we get here, a0 is trashed and saved to excsave[debuglevel]
 */

	.literal_position

ENTRY(debug_exception)

	rsr	a0, SREG_EPS + XCHAL_DEBUGLEVEL
	bbsi.l	a0, PS_EXCM_BIT, .Ldebug_exception_in_exception	# exception mode

	/* Set EPC1 and EXCCAUSE */

	wsr	a2, depc		# save a2 temporarily
	rsr	a2, SREG_EPC + XCHAL_DEBUGLEVEL
	wsr	a2, epc1

	movi	a2, EXCCAUSE_MAPPED_DEBUG
	wsr	a2, exccause

	/* Restore PS to the value before the debug exc but with PS.EXCM set. */

	movi	a2, 1 << PS_EXCM_BIT
	or	a2, a0, a2
	wsr	a2, ps

	/* Switch to kernel/user stack, restore jump vector, and save a0 */

	bbsi.l	a2, PS_UM_BIT, .Ldebug_exception_user	# jump if user mode
	addi	a2, a1, -16 - PT_KERNEL_SIZE		# assume kernel stack

.Ldebug_exception_continue:
	l32i	a0, a3, DT_DEBUG_SAVE
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG0
	movi	a0, 0
	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
	xsr	a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
	xsr	a0, depc
	s32i	a3, a2, PT_AREG3
	s32i	a0, a2, PT_AREG2
	mov	a1, a2

	/* Debug exception is handled as an exception, so interrupts will
	 * likely be enabled in the common exception handler. Disable
	 * preemption if we have HW breakpoints to preserve DEBUGCAUSE.DBNUM
	 * meaning.
	 */
#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_HAVE_HW_BREAKPOINT)
	GET_THREAD_INFO(a2, a1)
	l32i	a3, a2, TI_PRE_COUNT
	addi	a3, a3, 1
	s32i	a3, a2, TI_PRE_COUNT
#endif

	rsr	a2, ps
	bbsi.l	a2, PS_UM_BIT, _user_exception
	j	_kernel_exception

.Ldebug_exception_user:
	rsr	a2, excsave1
	l32i	a2, a2, EXC_TABLE_KSTK	# load kernel stack pointer
	j	.Ldebug_exception_continue

.Ldebug_exception_in_exception:
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/* Debug exception while in exception mode. This may happen when the
	 * window overflow/underflow handler or a fast exception handler hits
	 * a data breakpoint, in which case we save and disable all data
	 * breakpoints, single-step the faulting instruction, and restore
	 * the data breakpoints afterwards.
	 */
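	/* Mechanics: .Ldebug_save_dbreak below clears every DBREAKC register,
	 * then parks ICOUNT = -2 with ICOUNTLEVEL raised above EXCM level, so
	 * the re-executed instruction immediately retriggers a debug
	 * exception whose handler path restores the saved state.
	 */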
	bbci.l	a0, PS_UM_BIT, .Ldebug_exception_in_exception	# jump if kernel mode

	rsr	a0, debugcause
	bbsi.l	a0, DEBUGCAUSE_DBREAK_BIT, .Ldebug_save_dbreak

	.set	_index, 0
	.rept	XCHAL_NUM_DBREAK
	l32i	a0, a3, DT_DBREAKC_SAVE + _index * 4
	wsr	a0, SREG_DBREAKC + _index
	.set	_index, _index + 1
	.endr

	l32i	a0, a3, DT_ICOUNT_LEVEL_SAVE
	wsr	a0, icountlevel

	l32i	a0, a3, DT_ICOUNT_SAVE
	xsr	a0, icount

	l32i	a0, a3, DT_DEBUG_SAVE
	xsr	a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
	rfi	XCHAL_DEBUGLEVEL

.Ldebug_save_dbreak:
	.set	_index, 0
	.rept	XCHAL_NUM_DBREAK
	movi	a0, 0
	xsr	a0, SREG_DBREAKC + _index
	s32i	a0, a3, DT_DBREAKC_SAVE + _index * 4
	.set	_index, _index + 1
	.endr

	movi	a0, XCHAL_EXCM_LEVEL + 1
	xsr	a0, icountlevel
	s32i	a0, a3, DT_ICOUNT_LEVEL_SAVE

	movi	a0, 0xfffffffe
	xsr	a0, icount
	s32i	a0, a3, DT_ICOUNT_SAVE

	l32i	a0, a3, DT_DEBUG_SAVE
	xsr	a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
	rfi	XCHAL_DEBUGLEVEL
#else
	/* Debug exception while in exception mode. Should not happen. */
	j	.Ldebug_exception_in_exception	// FIXME!!
#endif

ENDPROC(debug_exception)

/*
 * We get here in case of an unrecoverable exception.
 * The only thing we can do is to be nice and print a panic message.
 * We only produce a single stack frame for panic, so ???
 *
 *
 * Entry conditions:
 *
 *   - a0 contains the caller address; original value saved in excsave1.
 *   - the original a0 contains a valid return address (backtrace) or 0.
 *   - a2 contains a valid stack pointer
 *
 * Notes:
 *
 *   - If the stack pointer could be invalid, the caller has to set up a
 *     dummy stack pointer (e.g. the stack of the init_task)
 *
 *   - If the return address could be invalid, the caller has to set it
 *     to 0, so the backtrace would stop.
 *
 */
	.align 4
unrecoverable_text:
	.ascii	"Unrecoverable error in exception handler\0"

	.literal_position

ENTRY(unrecoverable_exception)

#if XCHAL_HAVE_WINDOWED
	movi	a0, 1
	movi	a1, 0

	wsr	a0, windowstart
	wsr	a1, windowbase
	rsync
#endif

	movi	a1, KERNEL_PS_WOE_MASK | LOCKLEVEL
	wsr	a1, ps
	rsync

	movi	a1, init_task
	movi	a0, 0
	addi	a1, a1, PT_REGS_OFFSET

	movi	abi_arg0, unrecoverable_text
	abi_call panic

1:	j	1b

ENDPROC(unrecoverable_exception)

/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */

	__XTENSA_HANDLER
	.literal_position

#ifdef SUPPORT_WINDOWED
/*
 * Fast-handler for alloca exceptions
 *
 *  The ALLOCA handler is entered when user code executes the MOVSP
 *  instruction and the caller's frame is not in the register file.
 *
 *  This algorithm was taken from Ross Morley's RTOS Porting Layer:
 *
 *    /home/ross/rtos/porting/XtensaRTOS-PortingLayer-20090507/xtensa_vectors.S
 *
 *  It leverages the existing window spill/fill routines and their support for
 *  double exceptions. The 'movsp' instruction will only cause an exception if
 *  the next window needs to be loaded.
 *  In fact this ALLOCA exception may be replaced at some point by changing
 *  the hardware to do an underflow exception of the proper size instead.
 *
 *  This algorithm simply backs out the register changes started by the user
 *  exception handler, makes it appear that we have started a window underflow
 *  by rotating the window back and then setting the old window base (OWB) in
 *  the 'ps' register with the rolled back window base. The 'movsp' instruction
 *  will be re-executed and this time, since the next window frame is in the
 *  active AR registers, it won't cause an exception.
 *
 *  If the WindowUnderflow code gets a TLB miss, the page will get mapped
 *  and the partial WindowUnderflow will be handled in the double exception
 *  handler.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_alloca)
	rsr	a0, windowbase
	rotw	-1
	rsr	a2, ps
	extui	a3, a2, PS_OWB_SHIFT, PS_OWB_WIDTH
	xor	a3, a3, a4
	l32i	a4, a6, PT_AREG0
	l32i	a1, a6, PT_DEPC
	rsr	a6, depc
	wsr	a1, depc
	slli	a3, a3, PS_OWB_SHIFT
	xor	a2, a2, a3
	wsr	a2, ps
	rsync

	_bbci.l	a4, 31, 4f
	rotw	-1
	_bbci.l	a8, 30, 8f
	rotw	-1
	j	_WindowUnderflow12
8:	j	_WindowUnderflow8
4:	j	_WindowUnderflow4
ENDPROC(fast_alloca)
#endif

#ifdef CONFIG_USER_ABI_CALL0_PROBE
/*
 * fast illegal instruction handler.
 *
 * This is used to fix up user PS.WOE on the exception caused
 * by the first window-related opcode. If PS.WOE is already set
 * it goes directly to the common user exception handler.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 */

ENTRY(fast_illegal_instruction_user)

	rsr	a0, ps
	bbsi.l	a0, PS_WOE_BIT, 1f
	s32i	a3, a2, PT_AREG3
	movi	a3, PS_WOE_MASK
	or	a0, a0, a3
	wsr	a0, ps
#ifdef CONFIG_USER_ABI_CALL0_PROBE
	GET_THREAD_INFO(a3, a2)
	rsr	a0, epc1
	s32i	a0, a3, TI_PS_WOE_FIX_ADDR
#endif
	l32i	a3, a2, PT_AREG3
	l32i	a0, a2, PT_AREG0
	rsr	a2, depc
	rfe
1:
	call0	user_exception

ENDPROC(fast_illegal_instruction_user)
#endif

	/*
	 * fast system calls.
	 *
	 * WARNING: The kernel doesn't save the entire user context before
	 * handling a fast system call. These functions are small and short,
	 * usually offering some functionality not available to user tasks.
	 *
	 * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
	 *
	 * Entry condition:
	 *
	 *   a0:	trashed, original value saved on stack (PT_AREG0)
	 *   a1:	a1
	 *   a2:	new stack pointer, original in DEPC
	 *   a3:	a3
	 *   depc:	a2, original value saved on stack (PT_DEPC)
	 *   excsave_1:	dispatch table
	 */

ENTRY(fast_syscall_user)

	/* Skip syscall. */
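	/* EPC1 still points at the 3-byte SYSCALL instruction; advance it
	 * so that rfe resumes right after the syscall.
	 */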
	rsr	a0, epc1
	addi	a0, a0, 3
	wsr	a0, epc1

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

	rsr	a0, depc			# get syscall-nr
	_beqz	a0, fast_syscall_spill_registers
	_beqi	a0, __NR_xtensa, fast_syscall_xtensa

	call0	user_exception

ENDPROC(fast_syscall_user)

ENTRY(fast_syscall_unrecoverable)

	/* Restore all states. */

	l32i	a0, a2, PT_AREG0	# restore a0
	xsr	a2, depc		# restore a2, depc

	wsr	a0, excsave1
	call0	unrecoverable_exception

ENDPROC(fast_syscall_unrecoverable)

/*
 * sysxtensa syscall handler
 *
 * int sysxtensa (SYS_XTENSA_ATOMIC_SET,     ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_ADD,     ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
 *        a2            a6                   a3    a4      a5
 *
 * Entry condition:
 *
 *   a0:	a2 (syscall-nr), original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in a0 and DEPC
 *   a3:	a3
 *   a4..a15:	unchanged
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Note: we don't have to save a2; a2 holds the return value
 */

	.literal_position

#ifdef CONFIG_FAST_SYSCALL_XTENSA

ENTRY(fast_syscall_xtensa)

	s32i	a7, a2, PT_AREG7	# we need an additional register
	movi	a7, 4			# sizeof(unsigned int)
	access_ok a3, a7, a0, a2, .Leac	# a0: scratch reg, a2: sp

	_bgeui	a6, SYS_XTENSA_COUNT, .Lill
	_bnei	a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp

	/* Fall through for ATOMIC_CMP_SWP. */

.Lswp:	/* Atomic compare and swap */

EX(.Leac) l32i	a0, a3, 0		# read old value
	bne	a0, a4, 1f		# same as old value? jump
EX(.Leac) s32i	a5, a3, 0		# different, modify value
	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, 1			# and return 1
	rfe

1:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, 0			# return 0
	rfe

.Lnswp:	/* Atomic set, add, and exg_add. */

EX(.Leac) l32i	a7, a3, 0		# orig
	addi	a6, a6, -SYS_XTENSA_ATOMIC_SET
	add	a0, a4, a7		# + arg
	moveqz	a0, a4, a6		# set
	addi	a6, a6, SYS_XTENSA_ATOMIC_SET
EX(.Leac) s32i	a0, a3, 0		# write new value

	mov	a0, a2
	mov	a2, a7
	l32i	a7, a0, PT_AREG7	# restore a7
	l32i	a0, a0, PT_AREG0	# restore a0
	rfe

.Leac:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -EFAULT
	rfe

.Lill:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -EINVAL
	rfe

ENDPROC(fast_syscall_xtensa)

#else /* CONFIG_FAST_SYSCALL_XTENSA */

ENTRY(fast_syscall_xtensa)

	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -ENOSYS
	rfe

ENDPROC(fast_syscall_xtensa)

#endif /* CONFIG_FAST_SYSCALL_XTENSA */


/* fast_syscall_spill_registers.
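 *
 * Spill all live register windows of the current task, except the one
 * currently in use, out to the task's stack.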
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
 */

#if defined(CONFIG_FAST_SYSCALL_SPILL_REGISTERS) && \
		defined(USER_SUPPORT_WINDOWED)

ENTRY(fast_syscall_spill_registers)

	/* Register a FIXUP handler (pass current wb as a parameter) */

	xsr	a3, excsave1
	movi	a0, fast_syscall_spill_registers_fixup
	s32i	a0, a3, EXC_TABLE_FIXUP
	rsr	a0, windowbase
	s32i	a0, a3, EXC_TABLE_PARAM
	xsr	a3, excsave1		# restore a3 and excsave_1

	/* Save a3, a4 and SAR on stack. */

	rsr	a0, sar
	s32i	a3, a2, PT_AREG3
	s32i	a0, a2, PT_SAR

	/* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */

	s32i	a4, a2, PT_AREG4
	s32i	a7, a2, PT_AREG7
	s32i	a8, a2, PT_AREG8
	s32i	a11, a2, PT_AREG11
	s32i	a12, a2, PT_AREG12
	s32i	a15, a2, PT_AREG15

	/*
	 * Rotate ws so that the current windowbase is at bit 0.
	 * Assume ws = xxxwww1yy (www1 current window frame).
	 * Rotate ws right so that a4 = yyxxxwww1.
	 */

	rsr	a0, windowbase
	rsr	a3, windowstart		# a3 = xxxwww1yy
	ssr	a0			# holds WB
	slli	a0, a3, WSBITS
	or	a3, a3, a0		# a3 = xxxwww1yyxxxwww1yy
	srl	a3, a3			# a3 = 00xxxwww1yyxxxwww1

	/* We are done if there are no more than the current register frame. */

	extui	a3, a3, 1, WSBITS-1	# a3 = 0yyxxxwww
	movi	a0, (1 << (WSBITS-1))
	_beqz	a3, .Lnospill		# only one active frame? jump

	/* We want 1 at the top, so that we return to the current windowbase */

	or	a3, a3, a0		# 1yyxxxwww

	/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */

	wsr	a3, windowstart		# save shifted windowstart
	neg	a0, a3
	and	a3, a0, a3		# first bit set from right: 000010000

	ffs_ws	a0, a3			# a0: shifts to skip empty frames
	movi	a3, WSBITS
	sub	a0, a3, a0		# WSBITS-a0:number of 0-bits from right
	ssr	a0			# save in SAR for later.

	rsr	a3, windowbase
	add	a3, a3, a0
	wsr	a3, windowbase
	rsync

	rsr	a3, windowstart
	srl	a3, a3			# shift windowstart

	/* WB is now just one frame below the oldest frame in the register
	   window. WS is shifted so the oldest frame is in bit 0, thus, WB
	   and WS differ by one 4-register frame. */

	/* Save frames. Depending on what call was used (call4, call8,
	 * call12), we have to save 4, 8, or 12 registers.
	 */

.Lloop: _bbsi.l	a3, 1, .Lc4
	_bbci.l	a3, 2, .Lc12

.Lc8:	s32e	a4, a13, -16
	l32e	a4, a5, -12
	s32e	a8, a4, -32
	s32e	a5, a13, -12
	s32e	a6, a13, -8
	s32e	a7, a13, -4
	s32e	a9, a4, -28
	s32e	a10, a4, -24
	s32e	a11, a4, -20
	srli	a11, a3, 2		# shift windowbase by 2
	rotw	2
	_bnei	a3, 1, .Lloop
	j	.Lexit

.Lc4:	s32e	a4, a9, -16
	s32e	a5, a9, -12
	s32e	a6, a9, -8
	s32e	a7, a9, -4

	srli	a7, a3, 1
	rotw	1
	_bnei	a3, 1, .Lloop
	j	.Lexit

.Lc12:	_bbci.l	a3, 3, .Linvalid_mask	# bit 3 shouldn't be zero!
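	/* The distance to the next WINDOWSTART bit encodes each frame's
	 * call type: +1 -> call4, +2 -> call8, +3 -> call12; the branches
	 * above dispatch on exactly that.
	 */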
	/* 12-register frame (call12) */

	l32e	a0, a5, -12
	s32e	a8, a0, -48
	mov	a8, a0

	s32e	a9, a8, -44
	s32e	a10, a8, -40
	s32e	a11, a8, -36
	s32e	a12, a8, -32
	s32e	a13, a8, -28
	s32e	a14, a8, -24
	s32e	a15, a8, -20
	srli	a15, a3, 3

	/* The stack pointer for a4..a7 is out of reach, so we rotate the
	 * window, grab the stack pointer, and rotate back.
	 * Alternatively, we could also use the following approach, but that
	 * makes the fixup routine much more complicated:
	 * rotw	1
	 * s32e	a0, a13, -16
	 * ...
	 * rotw 2
	 */

	rotw	1
	mov	a4, a13
	rotw	-1

	s32e	a4, a8, -16
	s32e	a5, a8, -12
	s32e	a6, a8, -8
	s32e	a7, a8, -4

	rotw	3

	_beqi	a3, 1, .Lexit
	j	.Lloop

.Lexit:

	/* Done. Do the final rotation and set WS */

	rotw	1
	rsr	a3, windowbase
	ssl	a3
	movi	a3, 1
	sll	a3, a3
	wsr	a3, windowstart
.Lnospill:

	/* Advance PC, restore registers and SAR, and return from exception. */

	l32i	a3, a2, PT_SAR
	l32i	a0, a2, PT_AREG0
	wsr	a3, sar
	l32i	a3, a2, PT_AREG3

	/* Restore clobbered registers. */

	l32i	a4, a2, PT_AREG4
	l32i	a7, a2, PT_AREG7
	l32i	a8, a2, PT_AREG8
	l32i	a11, a2, PT_AREG11
	l32i	a12, a2, PT_AREG12
	l32i	a15, a2, PT_AREG15

	movi	a2, 0
	rfe

.Linvalid_mask:

	/* We get here because of an unrecoverable error in the window
	 * registers, so set up a dummy frame and kill the user application.
	 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
	 */

	movi	a0, 1
	movi	a1, 0

	wsr	a0, windowstart
	wsr	a1, windowbase
	rsync

	movi	a0, 0

	rsr	a3, excsave1
	l32i	a1, a3, EXC_TABLE_KSTK

	movi	a4, KERNEL_PS_WOE_MASK | LOCKLEVEL
	wsr	a4, ps
	rsync

	movi	abi_arg0, SIGSEGV
	abi_call make_task_dead

	/* shouldn't return, so panic */

	wsr	a0, excsave1
	call0	unrecoverable_exception		# should not return
1:	j	1b


ENDPROC(fast_syscall_spill_registers)

/* Fixup handler.
 *
 * We get here if the spill routine causes an exception, e.g. tlb miss.
 * We basically restore WINDOWBASE and WINDOWSTART to the condition when
 * we entered the spill routine and jump to the user exception handler.
 *
 * Note that we only need to restore the bits in windowstart that have not
 * been spilled yet by the _spill_registers routine. Luckily, a3 contains a
 * rotated windowstart with only those bits set for frames that haven't been
 * spilled yet. Because a3 is rotated such that bit 0 represents the register
 * frame for the current windowbase - 1, we need to rotate a3 left by the
 * value of the current windowbase + 1 and move it to windowstart.
 *
 * a0: value of depc, original value in depc
 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
 * a3: exctable, original value in excsave1
 */

ENTRY(fast_syscall_spill_registers_fixup)

	rsr	a2, windowbase	# get current windowbase (a2 is saved)
	xsr	a0, depc	# restore depc and a0
	ssl	a2		# set shift (32 - WB)

	/* We need to make sure the current registers (a0-a3) are preserved.
	 * To do this, we simply set the bit for the current window frame
	 * in WS, so that the exception handlers save them to the task stack.
	 *
	 * Note: we use a3 to set the windowbase, so we take special care
	 * of it, saving it in the original _spill_registers frame across
	 * the exception handler call.
	 */

	xsr	a3, excsave1	# get spill-mask
	slli	a3, a3, 1	# shift left by one
	addi	a3, a3, 1	# set the bit for the current window frame

	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2	# a2 = xxwww1yyxxxwww1yy......
	wsr	a2, windowstart	# set corrected windowstart

	srli	a3, a3, 1
	rsr	a2, excsave1
	l32i	a2, a2, EXC_TABLE_DOUBLE_SAVE	# restore a2
	xsr	a2, excsave1
	s32i	a3, a2, EXC_TABLE_DOUBLE_SAVE	# save a3
	l32i	a3, a2, EXC_TABLE_PARAM	# original WB (in user task)
	xsr	a2, excsave1

	/* Return to the original (user task) WINDOWBASE.
	 * We leave the following frame behind:
	 * a0, a1, a2	same
	 * a3:		trashed (saved in EXC_TABLE_DOUBLE_SAVE)
	 * depc:	depc (we have to return to that address)
	 * excsave_1:	exctable
	 */

	wsr	a3, windowbase
	rsync

	/* We are now in the original frame when we entered _spill_registers:
	 *  a0: return address
	 *  a1: used, stack pointer
	 *  a2: kernel stack pointer
	 *  a3: available
	 *  depc: exception address
	 *  excsave: exctable
	 * Note: This frame might be the same as above.
	 */

	/* Setup stack pointer. */

	addi	a2, a2, -PT_USER_SIZE
	s32i	a0, a2, PT_AREG0

	/* Make sure we return to this fixup handler. */

	movi	a3, fast_syscall_spill_registers_fixup_return
	s32i	a3, a2, PT_DEPC		# setup depc

	/* Jump to the exception handler. */

	rsr	a3, excsave1
	rsr	a0, exccause
	addx4	a0, a0, a3		# find entry in table
	l32i	a0, a0, EXC_TABLE_FAST_USER	# load handler
	l32i	a3, a3, EXC_TABLE_DOUBLE_SAVE
	jx	a0

ENDPROC(fast_syscall_spill_registers_fixup)

ENTRY(fast_syscall_spill_registers_fixup_return)

	/* When we return here, all registers have been restored (a2: DEPC) */

	wsr	a2, depc		# exception address

	/* Restore fixup handler. */

	rsr	a2, excsave1
	s32i	a3, a2, EXC_TABLE_DOUBLE_SAVE
	movi	a3, fast_syscall_spill_registers_fixup
	s32i	a3, a2, EXC_TABLE_FIXUP
	rsr	a3, windowbase
	s32i	a3, a2, EXC_TABLE_PARAM
	l32i	a2, a2, EXC_TABLE_KSTK

	/* Load WB at the time the exception occurred. */

	rsr	a3, sar			# WB is still in SAR
	neg	a3, a3
	wsr	a3, windowbase
	rsync

	rsr	a3, excsave1
	l32i	a3, a3, EXC_TABLE_DOUBLE_SAVE

	rfde

ENDPROC(fast_syscall_spill_registers_fixup_return)

#else /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */

ENTRY(fast_syscall_spill_registers)

	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -ENOSYS
	rfe

ENDPROC(fast_syscall_spill_registers)

#endif /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */

#ifdef CONFIG_MMU
/*
 * We should never get here. Bail out!
 */

ENTRY(fast_second_level_miss_double_kernel)

1:
	call0	unrecoverable_exception		# should not return
1:	j	1b

ENDPROC(fast_second_level_miss_double_kernel)

/* First-level entry handler for user, kernel, and double 2nd-level
 * TLB miss exceptions.  Note that for now, user and kernel miss
 * exceptions share the same entry point and are handled identically.
 *
 * An old, less-efficient C version of this function used to exist.
 * We include it below, interleaved as comments, for reference.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_second_level_miss)

	/* Save a1 and a3. Note: we don't expect a double exception. */

	s32i	a1, a2, PT_AREG1
	s32i	a3, a2, PT_AREG3

	/* We need to map the page of PTEs for the user task.  Find
	 * the pointer to that page.  Also, it's possible for tsk->mm
	 * to be NULL while tsk->active_mm is nonzero if we faulted on
	 * a vmalloc address.  In that rare case, we must use
	 * active_mm instead to avoid a fault in this handler.  See
	 *
	 * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
	 *   (or search Internet on "mm vs. active_mm")
	 *
	 *	if (!mm)
	 *		mm = tsk->active_mm;
	 *	pgd = pgd_offset (mm, regs->excvaddr);
	 *	pmd = pmd_offset (pgd, regs->excvaddr);
	 *	pmdval = *pmd;
	 */

	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
	beqz	a0, .Lfast_second_level_miss_no_mm

.Lfast_second_level_miss_continue:
	rsr	a3, excvaddr		# fault address
	_PGD_OFFSET(a0, a3, a1)
	l32i	a0, a0, 0		# read pmdval
	beqz	a0, .Lfast_second_level_miss_no_pmd

	/* Read ptevaddr and convert to top of page-table page.
	 *
	 *	vpnval = read_ptevaddr_register() & PAGE_MASK;
	 *	vpnval += DTLB_WAY_PGTABLE;
	 *	pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
	 *	write_dtlb_entry (pteval, vpnval);
	 *
	 * The messy computation for 'pteval' above really simplifies
	 * into the following:
	 *
	 * pteval = ((pmdval - PAGE_OFFSET + PHYS_OFFSET) & PAGE_MASK)
	 *                 | PAGE_DIRECTORY
	 */

	movi	a1, (PHYS_OFFSET - PAGE_OFFSET) & 0xffffffff
	add	a0, a0, a1		# pmdval - PAGE_OFFSET
	extui	a1, a0, 0, PAGE_SHIFT	# ... & PAGE_MASK
	xor	a0, a0, a1

	movi	a1, _PAGE_DIRECTORY
	or	a0, a0, a1		# ... | PAGE_DIRECTORY

	/*
	 * We utilize all three wired-ways (7-9) to hold pmd translations.
	 * Memory regions are mapped to the DTLBs according to bits 28 and 29.
	 * This allows mapping the three most common regions to three
	 * different DTLBs:
	 *  0,1 -> way 7	program (0040.0000) and virtual (c000.0000)
	 *  2   -> way 8	shared libraries (2000.0000)
	 *  3   -> way 9	stack (3000.0000)
	 */

	extui	a3, a3, 28, 2		# addr. bit 28 and 29	0,1,2,3
	rsr	a1, ptevaddr
	addx2	a3, a3, a3		# ->			0,3,6,9
	srli	a1, a1, PAGE_SHIFT
	extui	a3, a3, 2, 2		# ->			0,0,1,2
	slli	a1, a1, PAGE_SHIFT	# ptevaddr & PAGE_MASK
	addi	a3, a3, DTLB_WAY_PGD
	add	a1, a1, a3		# ... + way_number

.Lfast_second_level_miss_wdtlb:
	wdtlb	a0, a1
	dsync

	/* Exit critical section. */
.Lfast_second_level_miss_skip_wdtlb:
	rsr	a3, excsave1
	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore the working registers, and return. */

	l32i	a0, a2, PT_AREG0
	l32i	a1, a2, PT_AREG1
	l32i	a3, a2, PT_AREG3
	l32i	a2, a2, PT_DEPC

	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	/* Restore excsave1 and return. */
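	/* A PT_DEPC below VALID_DOUBLE_EXCEPTION_ADDRESS marks a regular
	 * exception: the interrupted a2 is still in DEPC and a plain rfe
	 * suffices. Otherwise DEPC must be swapped back and rfde used.
	 */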
	rsr	a2, depc
	rfe

	/* Return from double exception. */

1:	xsr	a2, depc
	esync
	rfde

.Lfast_second_level_miss_no_mm:
	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
	bnez	a0, .Lfast_second_level_miss_continue

	/* Even more unlikely case active_mm == 0.
	 * We can get here with NMI in the middle of context_switch that
	 * touches vmalloc area.
	 */
	movi	a0, init_mm
	j	.Lfast_second_level_miss_continue

.Lfast_second_level_miss_no_pmd:
#if (DCACHE_WAY_SIZE > PAGE_SIZE)

	/* Special case for cache aliasing.
	 * We (should) only get here if a clear_user_page, copy_user_page
	 * or one of the aliased cache flush functions got preempted by
	 * another task. Re-establish a temporary mapping to the
	 * TLBTEMP_BASE areas.
	 */

	/* We shouldn't be in a double exception */

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lfast_second_level_miss_slow

	/* Make sure the exception originated in the special functions */

	movi	a0, __tlbtemp_mapping_start
	rsr	a3, epc1
	bltu	a3, a0, .Lfast_second_level_miss_slow
	movi	a0, __tlbtemp_mapping_end
	bgeu	a3, a0, .Lfast_second_level_miss_slow

	/* Check if excvaddr was in one of the TLBTEMP_BASE areas. */

	movi	a3, TLBTEMP_BASE_1
	rsr	a0, excvaddr
	bltu	a0, a3, .Lfast_second_level_miss_slow

	addi	a1, a0, -TLBTEMP_SIZE
	bgeu	a1, a3, .Lfast_second_level_miss_slow

	/* Check if we have to restore an ITLB mapping. */

	movi	a1, __tlbtemp_mapping_itlb
	rsr	a3, epc1
	sub	a3, a3, a1

	/* Calculate VPN */

	movi	a1, PAGE_MASK
	and	a1, a1, a0

	/* Jump for ITLB entry */

	bgez	a3, 1f

	/* We can use up to two TLBTEMP areas, one for src and one for dst. */

	extui	a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1
	add	a1, a3, a1

	/* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */

	mov	a0, a6
	movnez	a0, a7, a3
	j	.Lfast_second_level_miss_wdtlb

	/* ITLB entry. We only use dst in a6. */

1:	witlb	a6, a1
	isync
	j	.Lfast_second_level_miss_skip_wdtlb


#endif	// DCACHE_WAY_SIZE > PAGE_SIZE

	/* Invalid PGD, default exception handling */
.Lfast_second_level_miss_slow:

	rsr	a1, depc
	s32i	a1, a2, PT_AREG2
	mov	a1, a2

	rsr	a2, ps
	bbsi.l	a2, PS_UM_BIT, 1f
	call0	_kernel_exception
1:	call0	_user_exception

ENDPROC(fast_second_level_miss)

/*
 * StoreProhibitedException
 *
 * Update the pte and invalidate the itlb mapping for this pte.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_store_prohibited)

	/* Save a1 and a3. */
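	/* Fast path: mark the PTE accessed/dirty/hw-writable and rewrite the
	 * DTLB entry in place; anything unusual (no mm, missing or read-only
	 * PTE) falls through to the C fault handler via .Lfast_store_slow.
	 */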
	s32i	a1, a2, PT_AREG1
	s32i	a3, a2, PT_AREG3

	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
	beqz	a0, .Lfast_store_no_mm

.Lfast_store_continue:
	rsr	a1, excvaddr		# fault address
	_PGD_OFFSET(a0, a1, a3)
	l32i	a0, a0, 0
	beqz	a0, .Lfast_store_slow

	/*
	 * Note that we test _PAGE_WRITABLE_BIT only if PTE is present
	 * and is not PAGE_NONE. See pgtable.h for possible PTE layouts.
	 */

	_PTE_OFFSET(a0, a1, a3)
	l32i	a3, a0, 0		# read pteval
	movi	a1, _PAGE_CA_INVALID
	ball	a3, a1, .Lfast_store_slow
	bbci.l	a3, _PAGE_WRITABLE_BIT, .Lfast_store_slow

	movi	a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
	or	a3, a3, a1
	rsr	a1, excvaddr
	s32i	a3, a0, 0

	/* We need to flush the cache if we have page coloring. */
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
	dhwb	a0, 0
#endif
	pdtlb	a0, a1
	wdtlb	a3, a0

	/* Exit critical section. */

	movi	a0, 0
	rsr	a3, excsave1
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore the working registers, and return. */

	l32i	a3, a2, PT_AREG3
	l32i	a1, a2, PT_AREG1
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_DEPC

	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
	rsr	a2, depc
	rfe

	/* Double exception. Restore FIXUP handler and return. */

1:	xsr	a2, depc
	esync
	rfde

.Lfast_store_no_mm:
	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
	j	.Lfast_store_continue

	/* If there was a problem, handle fault in C */
.Lfast_store_slow:
	rsr	a1, excvaddr
	pdtlb	a0, a1
	bbci.l	a0, DTLB_HIT_BIT, 1f
	idtlb	a0
1:
	rsr	a3, depc	# still holds a2
	s32i	a3, a2, PT_AREG2
	mov	a1, a2

	rsr	a2, ps
	bbsi.l	a2, PS_UM_BIT, 1f
	call0	_kernel_exception
1:	call0	_user_exception

ENDPROC(fast_store_prohibited)

#endif /* CONFIG_MMU */

	.text
/*
 * System Calls.
 *
 * void system_call (struct pt_regs* regs, int exccause)
 *                            a2                 a3
 */
	.literal_position

ENTRY(system_call)

#if defined(__XTENSA_WINDOWED_ABI__)
	abi_entry_default
#elif defined(__XTENSA_CALL0_ABI__)
	abi_entry(12)

	s32i	a0, sp, 0
	s32i	abi_saved0, sp, 4
	s32i	abi_saved1, sp, 8
	mov	abi_saved0, a2
#else
#error Unsupported Xtensa ABI
#endif

	/* regs->syscall = regs->areg[2] */

	l32i	a7, abi_saved0, PT_AREG2
	s32i	a7, abi_saved0, PT_SYSCALL

	GET_THREAD_INFO(a4, a1)
	l32i	abi_saved1, a4, TI_FLAGS
	movi	a4, _TIF_WORK_MASK
	and	abi_saved1, abi_saved1, a4
	beqz	abi_saved1, 1f

	mov	abi_arg0, abi_saved0
	abi_call do_syscall_trace_enter
	beqz	abi_rv, .Lsyscall_exit
	l32i	a7, abi_saved0, PT_SYSCALL

1:
	/* syscall = sys_call_table[syscall_nr] */

	movi	a4, sys_call_table
	movi	a5, __NR_syscalls
	movi	abi_rv, -ENOSYS
	bgeu	a7, a5, 1f

	addx4	a4, a7, a4
	l32i	abi_tmp0, a4, 0

	/* Load args: arg0 - arg5 are passed via regs. */
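	/* Xtensa syscall calling convention: the syscall number arrives in
	 * a2 and the six arguments in a6, a3, a4, a5, a8, a9, in that order,
	 * as the loads below show.
	 */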
	l32i	abi_arg0, abi_saved0, PT_AREG6
	l32i	abi_arg1, abi_saved0, PT_AREG3
	l32i	abi_arg2, abi_saved0, PT_AREG4
	l32i	abi_arg3, abi_saved0, PT_AREG5
	l32i	abi_arg4, abi_saved0, PT_AREG8
	l32i	abi_arg5, abi_saved0, PT_AREG9

	abi_callx abi_tmp0

1:	/* regs->areg[2] = return_value */

	s32i	abi_rv, abi_saved0, PT_AREG2
	bnez	abi_saved1, 1f
.Lsyscall_exit:
#if defined(__XTENSA_WINDOWED_ABI__)
	abi_ret_default
#elif defined(__XTENSA_CALL0_ABI__)
	l32i	a0, sp, 0
	l32i	abi_saved0, sp, 4
	l32i	abi_saved1, sp, 8
	abi_ret(12)
#else
#error Unsupported Xtensa ABI
#endif

1:
	mov	abi_arg0, abi_saved0
	abi_call do_syscall_trace_leave
	j	.Lsyscall_exit

ENDPROC(system_call)

/*
 * Spill live registers on the kernel stack macro.
 *
 * Entry condition: ps.woe is set, ps.excm is cleared
 * Exit condition: windowstart has single bit set
 * May clobber: a12, a13
 */
	.macro	spill_registers_kernel

#if XCHAL_NUM_AREGS > 16
	call12	1f
	_j	2f
	retw
	.align	4
1:
	_entry	a1, 48
	addi	a12, a0, 3
#if XCHAL_NUM_AREGS > 32
	.rept	(XCHAL_NUM_AREGS - 32) / 12
	_entry	a1, 48
	mov	a12, a0
	.endr
#endif
	_entry	a1, 16
#if XCHAL_NUM_AREGS % 12 == 0
	mov	a8, a8
#elif XCHAL_NUM_AREGS % 12 == 4
	mov	a12, a12
#elif XCHAL_NUM_AREGS % 12 == 8
	mov	a4, a4
#endif
	retw
2:
#else
	mov	a12, a12
#endif
	.endm

/*
 * Task switch.
 *
 * struct task*  _switch_to (struct task* prev, struct task* next)
 *         a2                          a2                 a3
 */

ENTRY(_switch_to)

#if defined(__XTENSA_WINDOWED_ABI__)
	abi_entry(XTENSA_SPILL_STACK_RESERVE)
#elif defined(__XTENSA_CALL0_ABI__)
	abi_entry(16)

	s32i	a12, sp, 0
	s32i	a13, sp, 4
	s32i	a14, sp, 8
	s32i	a15, sp, 12
#else
#error Unsupported Xtensa ABI
#endif
	mov	a11, a3			# preserve 'next' (a3)

	l32i	a4, a2, TASK_THREAD_INFO
	l32i	a5, a3, TASK_THREAD_INFO

	save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER

#if THREAD_RA > 1020 || THREAD_SP > 1020
	addi	a10, a2, TASK_THREAD
	s32i	a0, a10, THREAD_RA - TASK_THREAD	# save return address
	s32i	a1, a10, THREAD_SP - TASK_THREAD	# save stack pointer
#else
	s32i	a0, a2, THREAD_RA	# save return address
	s32i	a1, a2, THREAD_SP	# save stack pointer
#endif

#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
	movi	a6, __stack_chk_guard
	l32i	a8, a3, TASK_STACK_CANARY
	s32i	a8, a6, 0
#endif

	/* Disable ints while we manipulate the stack pointer. */

	irq_save a14, a3
	rsync

	/* Switch CPENABLE */

#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
	l32i	a3, a5, THREAD_CPENABLE
#ifdef CONFIG_SMP
	beqz	a3, 1f
	memw			# pairs with memw (2) in fast_coprocessor
	l32i	a6, a5, THREAD_CP_OWNER_CPU
	l32i	a7, a5, THREAD_CPU
	beq	a6, a7, 1f	# load 0 into CPENABLE if current CPU is not the owner
	movi	a3, 0
1:
#endif
	wsr	a3, cpenable
#endif

#if XCHAL_HAVE_EXCLUSIVE
	l32i	a3, a5, THREAD_ATOMCTL8
	getex	a3
	s32i	a3, a4, THREAD_ATOMCTL8
#endif

	/* Flush register file. */
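	/* Per the macro's exit condition, spill_registers_kernel leaves
	 * WINDOWSTART with a single bit set, i.e. all of 'prev's windows
	 * are safely in memory before we switch stacks below.
	 */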
#if defined(__XTENSA_WINDOWED_ABI__)
	spill_registers_kernel
#endif

	/* Set kernel stack (and leave critical section)
	 * Note: It's safe to set it here. The stack will not be overwritten
	 *       because the kernel stack will only be loaded again after
	 *       we return from kernel space.
	 */

	rsr	a3, excsave1		# exc_table
	addi	a7, a5, PT_REGS_OFFSET
	s32i	a7, a3, EXC_TABLE_KSTK

	/* restore context of the task 'next' */

	l32i	a0, a11, THREAD_RA	# restore return address
	l32i	a1, a11, THREAD_SP	# restore stack pointer

	load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER

	wsr	a14, ps
	rsync

#if defined(__XTENSA_WINDOWED_ABI__)
	abi_ret(XTENSA_SPILL_STACK_RESERVE)
#elif defined(__XTENSA_CALL0_ABI__)
	l32i	a12, sp, 0
	l32i	a13, sp, 4
	l32i	a14, sp, 8
	l32i	a15, sp, 12
	abi_ret(16)
#else
#error Unsupported Xtensa ABI
#endif

ENDPROC(_switch_to)

ENTRY(ret_from_fork)

	/* void schedule_tail (struct task_struct *prev)
	 * Note: prev is still in abi_arg0 (return value from fake call frame)
	 */
	abi_call schedule_tail

	mov	abi_arg0, a1
	abi_call do_syscall_trace_leave
	j	common_exception_return

ENDPROC(ret_from_fork)

/*
 * Kernel thread creation helper
 * On entry, set up by copy_thread: abi_saved0 = thread_fn,
 * abi_saved1 = thread_fn arg. Left from _switch_to: abi_arg0 = prev
 */
ENTRY(ret_from_kernel_thread)

	abi_call schedule_tail
	mov	abi_arg0, abi_saved1
	abi_callx abi_saved0
	j	common_exception_return

ENDPROC(ret_from_kernel_thread)

#ifdef CONFIG_HIBERNATION

	.bss
	.align	4
.Lsaved_regs:
#if defined(__XTENSA_WINDOWED_ABI__)
	.fill	2, 4
#elif defined(__XTENSA_CALL0_ABI__)
	.fill	6, 4
#else
#error Unsupported Xtensa ABI
#endif
	.align	XCHAL_NCP_SA_ALIGN
.Lsaved_user_regs:
	.fill	XTREGS_USER_SIZE, 1

	.previous

ENTRY(swsusp_arch_suspend)

	abi_entry_default

	movi	a2, .Lsaved_regs
	movi	a3, .Lsaved_user_regs
	s32i	a0, a2, 0
	s32i	a1, a2, 4
	save_xtregs_user a3 a4 a5 a6 a7 a8 0
#if defined(__XTENSA_WINDOWED_ABI__)
	spill_registers_kernel
#elif defined(__XTENSA_CALL0_ABI__)
	s32i	a12, a2, 8
	s32i	a13, a2, 12
	s32i	a14, a2, 16
	s32i	a15, a2, 20
#else
#error Unsupported Xtensa ABI
#endif
	abi_call swsusp_save
	mov	a2, abi_rv
	abi_ret_default

ENDPROC(swsusp_arch_suspend)

ENTRY(swsusp_arch_resume)

	abi_entry_default

#if defined(__XTENSA_WINDOWED_ABI__)
	spill_registers_kernel
#endif

	movi	a2, restore_pblist
	l32i	a2, a2, 0

.Lcopy_pbe:
	l32i	a3, a2, PBE_ADDRESS
	l32i	a4, a2, PBE_ORIG_ADDRESS

	__loopi	a3, a9, PAGE_SIZE, 16
	l32i	a5, a3, 0
	l32i	a6, a3, 4
	l32i	a7, a3, 8
	l32i	a8, a3, 12
	addi	a3, a3, 16
	s32i	a5, a4, 0
	s32i	a6, a4, 4
	s32i	a7, a4, 8
	s32i	a8, a4, 12
	addi	a4, a4, 16
	__endl	a3, a9

	l32i	a2, a2, PBE_NEXT
	bnez	a2, .Lcopy_pbe

	movi	a2, .Lsaved_regs
	movi	a3, .Lsaved_user_regs
	l32i	a0, a2, 0
	l32i	a1, a2, 4
	load_xtregs_user a3 a4 a5 a6 a7 a8 0
#if defined(__XTENSA_CALL0_ABI__)
	l32i	a12, a2, 8
	l32i	a13, a2, 12
	l32i	a14, a2, 16
	l32i	a15, a2, 20
#endif
	movi	a2, 0
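	/* a2 is the return value: report success to the caller */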
	abi_ret_default

ENDPROC(swsusp_arch_resume)

#endif