1/* 2 * Low-level exception handling 3 * 4 * This file is subject to the terms and conditions of the GNU General Public 5 * License. See the file "COPYING" in the main directory of this archive 6 * for more details. 7 * 8 * Copyright (C) 2004 - 2008 by Tensilica Inc. 9 * Copyright (C) 2015 Cadence Design Systems Inc. 10 * 11 * Chris Zankel <chris@zankel.net> 12 * 13 */ 14 15#include <linux/linkage.h> 16#include <linux/pgtable.h> 17#include <asm/asm-offsets.h> 18#include <asm/asmmacro.h> 19#include <asm/processor.h> 20#include <asm/coprocessor.h> 21#include <asm/thread_info.h> 22#include <asm/asm-uaccess.h> 23#include <asm/unistd.h> 24#include <asm/ptrace.h> 25#include <asm/current.h> 26#include <asm/page.h> 27#include <asm/signal.h> 28#include <asm/tlbflush.h> 29#include <variant/tie-asm.h> 30 31/* Unimplemented features. */ 32 33#undef KERNEL_STACK_OVERFLOW_CHECK 34 35/* Not well tested. 36 * 37 * - fast_coprocessor 38 */ 39 40/* 41 * Macro to find first bit set in WINDOWBASE from the left + 1 42 * 43 * 100....0 -> 1 44 * 010....0 -> 2 45 * 000....1 -> WSBITS 46 */ 47 48 .macro ffs_ws bit mask 49 50#if XCHAL_HAVE_NSA 51 nsau \bit, \mask # 32-WSBITS ... 31 (32 iff 0) 52 addi \bit, \bit, WSBITS - 32 + 1 # uppest bit set -> return 1 53#else 54 movi \bit, WSBITS 55#if WSBITS > 16 56 _bltui \mask, 0x10000, 99f 57 addi \bit, \bit, -16 58 extui \mask, \mask, 16, 16 59#endif 60#if WSBITS > 8 6199: _bltui \mask, 0x100, 99f 62 addi \bit, \bit, -8 63 srli \mask, \mask, 8 64#endif 6599: _bltui \mask, 0x10, 99f 66 addi \bit, \bit, -4 67 srli \mask, \mask, 4 6899: _bltui \mask, 0x4, 99f 69 addi \bit, \bit, -2 70 srli \mask, \mask, 2 7199: _bltui \mask, 0x2, 99f 72 addi \bit, \bit, -1 7399: 74 75#endif 76 .endm 77 78 79 .macro irq_save flags tmp 80#if XTENSA_FAKE_NMI 81#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL 82 rsr \flags, ps 83 extui \tmp, \flags, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH 84 bgei \tmp, LOCKLEVEL, 99f 85 rsil \tmp, LOCKLEVEL 8699: 87#else 88 movi \tmp, LOCKLEVEL 89 rsr \flags, ps 90 or \flags, \flags, \tmp 91 xsr \flags, ps 92 rsync 93#endif 94#else 95 rsil \flags, LOCKLEVEL 96#endif 97 .endm 98 99/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */ 100 101/* 102 * First-level exception handler for user exceptions. 103 * Save some special registers, extra states and all registers in the AR 104 * register file that were in use in the user task, and jump to the common 105 * exception code. 106 * We save SAR (used to calculate WMASK), and WB and WS (we don't have to 107 * save them for kernel exceptions). 108 * 109 * Entry condition for user_exception: 110 * 111 * a0: trashed, original value saved on stack (PT_AREG0) 112 * a1: a1 113 * a2: new stack pointer, original value in depc 114 * a3: a3 115 * depc: a2, original value saved on stack (PT_DEPC) 116 * excsave1: dispatch table 117 * 118 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC 119 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception 120 * 121 * Entry condition for _user_exception: 122 * 123 * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC 124 * excsave has been restored, and 125 * stack pointer (a1) has been set. 126 * 127 * Note: _user_exception might be at an odd address. Don't use call0..call12 128 */ 129 .literal_position 130 131ENTRY(user_exception) 132 133 /* Save a1, a2, a3, and set SP. */ 134 135 rsr a0, depc 136 s32i a1, a2, PT_AREG1 137 s32i a0, a2, PT_AREG2 138 s32i a3, a2, PT_AREG3 139 mov a1, a2 140 141 .globl _user_exception 142_user_exception: 143 144 /* Save SAR and turn off single stepping */ 145 146 movi a2, 0 147 wsr a2, depc # terminate user stack trace with 0 148 rsr a3, sar 149 xsr a2, icountlevel 150 s32i a3, a1, PT_SAR 151 s32i a2, a1, PT_ICOUNTLEVEL 152 153#if XCHAL_HAVE_THREADPTR 154 rur a2, threadptr 155 s32i a2, a1, PT_THREADPTR 156#endif 157 158 /* Rotate ws so that the current windowbase is at bit0. */ 159 /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */ 160 161#if defined(USER_SUPPORT_WINDOWED) 162 rsr a2, windowbase 163 rsr a3, windowstart 164 ssr a2 165 s32i a2, a1, PT_WINDOWBASE 166 s32i a3, a1, PT_WINDOWSTART 167 slli a2, a3, 32-WSBITS 168 src a2, a3, a2 169 srli a2, a2, 32-WSBITS 170 s32i a2, a1, PT_WMASK # needed for restoring registers 171#else 172 movi a2, 0 173 movi a3, 1 174 s32i a2, a1, PT_WINDOWBASE 175 s32i a3, a1, PT_WINDOWSTART 176 s32i a3, a1, PT_WMASK 177#endif 178 179 /* Save only live registers. */ 180 181UABI_W _bbsi.l a2, 1, 1f 182 s32i a4, a1, PT_AREG4 183 s32i a5, a1, PT_AREG5 184 s32i a6, a1, PT_AREG6 185 s32i a7, a1, PT_AREG7 186UABI_W _bbsi.l a2, 2, 1f 187 s32i a8, a1, PT_AREG8 188 s32i a9, a1, PT_AREG9 189 s32i a10, a1, PT_AREG10 190 s32i a11, a1, PT_AREG11 191UABI_W _bbsi.l a2, 3, 1f 192 s32i a12, a1, PT_AREG12 193 s32i a13, a1, PT_AREG13 194 s32i a14, a1, PT_AREG14 195 s32i a15, a1, PT_AREG15 196 197#if defined(USER_SUPPORT_WINDOWED) 198 _bnei a2, 1, 1f # only one valid frame? 199 200 /* Only one valid frame, skip saving regs. */ 201 202 j 2f 203 204 /* Save the remaining registers. 205 * We have to save all registers up to the first '1' from 206 * the right, except the current frame (bit 0). 207 * Assume a2 is: 001001000110001 208 * All register frames starting from the top field to the marked '1' 209 * must be saved. 210 */ 211 2121: addi a3, a2, -1 # eliminate '1' in bit 0: yyyyxxww0 213 neg a3, a3 # yyyyxxww0 -> YYYYXXWW1+1 214 and a3, a3, a2 # max. only one bit is set 215 216 /* Find number of frames to save */ 217 218 ffs_ws a0, a3 # number of frames to the '1' from left 219 220 /* Store information into WMASK: 221 * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart, 222 * bits 4...: number of valid 4-register frames 223 */ 224 225 slli a3, a0, 4 # number of frames to save in bits 8..4 226 extui a2, a2, 0, 4 # mask for the first 16 registers 227 or a2, a3, a2 228 s32i a2, a1, PT_WMASK # needed when we restore the reg-file 229 230 /* Save 4 registers at a time */ 231 2321: rotw -1 233 s32i a0, a5, PT_AREG_END - 16 234 s32i a1, a5, PT_AREG_END - 12 235 s32i a2, a5, PT_AREG_END - 8 236 s32i a3, a5, PT_AREG_END - 4 237 addi a0, a4, -1 238 addi a1, a5, -16 239 _bnez a0, 1b 240 241 /* WINDOWBASE still in SAR! */ 242 243 rsr a2, sar # original WINDOWBASE 244 movi a3, 1 245 ssl a2 246 sll a3, a3 247 wsr a3, windowstart # set corresponding WINDOWSTART bit 248 wsr a2, windowbase # and WINDOWSTART 249 rsync 250 251 /* We are back to the original stack pointer (a1) */ 252#endif 2532: /* Now, jump to the common exception handler. */ 254 255 j common_exception 256 257ENDPROC(user_exception) 258 259/* 260 * First-level exit handler for kernel exceptions 261 * Save special registers and the live window frame. 262 * Note: Even though we changes the stack pointer, we don't have to do a 263 * MOVSP here, as we do that when we return from the exception. 264 * (See comment in the kernel exception exit code) 265 * 266 * Entry condition for kernel_exception: 267 * 268 * a0: trashed, original value saved on stack (PT_AREG0) 269 * a1: a1 270 * a2: new stack pointer, original in DEPC 271 * a3: a3 272 * depc: a2, original value saved on stack (PT_DEPC) 273 * excsave_1: dispatch table 274 * 275 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC 276 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception 277 * 278 * Entry condition for _kernel_exception: 279 * 280 * a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC 281 * excsave has been restored, and 282 * stack pointer (a1) has been set. 283 * 284 * Note: _kernel_exception might be at an odd address. Don't use call0..call12 285 */ 286 287ENTRY(kernel_exception) 288 289 /* Save a1, a2, a3, and set SP. */ 290 291 rsr a0, depc # get a2 292 s32i a1, a2, PT_AREG1 293 s32i a0, a2, PT_AREG2 294 s32i a3, a2, PT_AREG3 295 mov a1, a2 296 297 .globl _kernel_exception 298_kernel_exception: 299 300 /* Save SAR and turn off single stepping */ 301 302 movi a2, 0 303 rsr a3, sar 304 xsr a2, icountlevel 305 s32i a3, a1, PT_SAR 306 s32i a2, a1, PT_ICOUNTLEVEL 307 308#if defined(__XTENSA_WINDOWED_ABI__) 309 /* Rotate ws so that the current windowbase is at bit0. */ 310 /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */ 311 312 rsr a2, windowbase # don't need to save these, we only 313 rsr a3, windowstart # need shifted windowstart: windowmask 314 ssr a2 315 slli a2, a3, 32-WSBITS 316 src a2, a3, a2 317 srli a2, a2, 32-WSBITS 318 s32i a2, a1, PT_WMASK # needed for kernel_exception_exit 319#endif 320 321 /* Save only the live window-frame */ 322 323KABI_W _bbsi.l a2, 1, 1f 324 s32i a4, a1, PT_AREG4 325 s32i a5, a1, PT_AREG5 326 s32i a6, a1, PT_AREG6 327 s32i a7, a1, PT_AREG7 328KABI_W _bbsi.l a2, 2, 1f 329 s32i a8, a1, PT_AREG8 330 s32i a9, a1, PT_AREG9 331 s32i a10, a1, PT_AREG10 332 s32i a11, a1, PT_AREG11 333KABI_W _bbsi.l a2, 3, 1f 334 s32i a12, a1, PT_AREG12 335 s32i a13, a1, PT_AREG13 336 s32i a14, a1, PT_AREG14 337 s32i a15, a1, PT_AREG15 338 339#ifdef __XTENSA_WINDOWED_ABI__ 340 _bnei a2, 1, 1f 341 /* Copy spill slots of a0 and a1 to imitate movsp 342 * in order to keep exception stack continuous 343 */ 344 l32i a3, a1, PT_SIZE 345 l32i a0, a1, PT_SIZE + 4 346 s32e a3, a1, -16 347 s32e a0, a1, -12 348#endif 3491: 350 l32i a0, a1, PT_AREG0 # restore saved a0 351 wsr a0, depc 352 353#ifdef KERNEL_STACK_OVERFLOW_CHECK 354 355 /* Stack overflow check, for debugging */ 356 extui a2, a1, TASK_SIZE_BITS,XX 357 movi a3, SIZE?? 358 _bge a2, a3, out_of_stack_panic 359 360#endif 361 362/* 363 * This is the common exception handler. 364 * We get here from the user exception handler or simply by falling through 365 * from the kernel exception handler. 366 * Save the remaining special registers, switch to kernel mode, and jump 367 * to the second-level exception handler. 368 * 369 */ 370 371common_exception: 372 373 /* Save some registers, disable loops and clear the syscall flag. */ 374 375 rsr a2, debugcause 376 rsr a3, epc1 377 s32i a2, a1, PT_DEBUGCAUSE 378 s32i a3, a1, PT_PC 379 380 movi a2, NO_SYSCALL 381 rsr a3, excvaddr 382 s32i a2, a1, PT_SYSCALL 383 movi a2, 0 384 s32i a3, a1, PT_EXCVADDR 385#if XCHAL_HAVE_LOOPS 386 xsr a2, lcount 387 s32i a2, a1, PT_LCOUNT 388#endif 389 390#if XCHAL_HAVE_EXCLUSIVE 391 /* Clear exclusive access monitor set by interrupted code */ 392 clrex 393#endif 394 395 /* It is now save to restore the EXC_TABLE_FIXUP variable. */ 396 397 rsr a2, exccause 398 movi a3, 0 399 rsr a0, excsave1 400 s32i a2, a1, PT_EXCCAUSE 401 s32i a3, a0, EXC_TABLE_FIXUP 402 403 /* All unrecoverable states are saved on stack, now, and a1 is valid. 404 * Now we can allow exceptions again. In case we've got an interrupt 405 * PS.INTLEVEL is set to LOCKLEVEL disabling furhter interrupts, 406 * otherwise it's left unchanged. 407 * 408 * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X) 409 */ 410 411 rsr a3, ps 412 s32i a3, a1, PT_PS # save ps 413 414#if XTENSA_FAKE_NMI 415 /* Correct PS needs to be saved in the PT_PS: 416 * - in case of exception or level-1 interrupt it's in the PS, 417 * and is already saved. 418 * - in case of medium level interrupt it's in the excsave2. 419 */ 420 movi a0, EXCCAUSE_MAPPED_NMI 421 extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH 422 beq a2, a0, .Lmedium_level_irq 423 bnei a2, EXCCAUSE_LEVEL1_INTERRUPT, .Lexception 424 beqz a3, .Llevel1_irq # level-1 IRQ sets ps.intlevel to 0 425 426.Lmedium_level_irq: 427 rsr a0, excsave2 428 s32i a0, a1, PT_PS # save medium-level interrupt ps 429 bgei a3, LOCKLEVEL, .Lexception 430 431.Llevel1_irq: 432 movi a3, LOCKLEVEL 433 434.Lexception: 435KABI_W movi a0, PS_WOE_MASK 436KABI_W or a3, a3, a0 437#else 438 addi a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT 439 movi a0, LOCKLEVEL 440 extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH 441 # a3 = PS.INTLEVEL 442 moveqz a3, a0, a2 # a3 = LOCKLEVEL iff interrupt 443KABI_W movi a2, PS_WOE_MASK 444KABI_W or a3, a3, a2 445 rsr a2, exccause 446#endif 447 448 /* restore return address (or 0 if return to userspace) */ 449 rsr a0, depc 450 wsr a3, ps 451 rsync # PS.WOE => rsync => overflow 452 453 /* Save lbeg, lend */ 454#if XCHAL_HAVE_LOOPS 455 rsr a4, lbeg 456 rsr a3, lend 457 s32i a4, a1, PT_LBEG 458 s32i a3, a1, PT_LEND 459#endif 460 461 /* Save SCOMPARE1 */ 462 463#if XCHAL_HAVE_S32C1I 464 rsr a3, scompare1 465 s32i a3, a1, PT_SCOMPARE1 466#endif 467 468 /* Save optional registers. */ 469 470 save_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT 471 472 /* Go to second-level dispatcher. Set up parameters to pass to the 473 * exception handler and call the exception handler. 474 */ 475 476 rsr a4, excsave1 477 addx4 a4, a2, a4 478 l32i a4, a4, EXC_TABLE_DEFAULT # load handler 479 mov abi_arg1, a2 # pass EXCCAUSE 480 mov abi_arg0, a1 # pass stack frame 481 482 /* Call the second-level handler */ 483 484 abi_callx a4 485 486 /* Jump here for exception exit */ 487 .global common_exception_return 488common_exception_return: 489 490#if XTENSA_FAKE_NMI 491 l32i a2, a1, PT_EXCCAUSE 492 movi a3, EXCCAUSE_MAPPED_NMI 493 beq a2, a3, .LNMIexit 494#endif 4951: 496 irq_save a2, a3 497#ifdef CONFIG_TRACE_IRQFLAGS 498 abi_call trace_hardirqs_off 499#endif 500 501 /* Jump if we are returning from kernel exceptions. */ 502 503 l32i abi_saved1, a1, PT_PS 504 GET_THREAD_INFO(a2, a1) 505 l32i a4, a2, TI_FLAGS 506 _bbci.l abi_saved1, PS_UM_BIT, 6f 507 508 /* Specific to a user exception exit: 509 * We need to check some flags for signal handling and rescheduling, 510 * and have to restore WB and WS, extra states, and all registers 511 * in the register file that were in use in the user task. 512 * Note that we don't disable interrupts here. 513 */ 514 515 _bbsi.l a4, TIF_NEED_RESCHED, 3f 516 movi a2, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NOTIFY_SIGNAL 517 bnone a4, a2, 5f 518 5192: l32i a4, a1, PT_DEPC 520 bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f 521 522 /* Call do_signal() */ 523 524#ifdef CONFIG_TRACE_IRQFLAGS 525 abi_call trace_hardirqs_on 526#endif 527 rsil a2, 0 528 mov abi_arg0, a1 529 abi_call do_notify_resume # int do_notify_resume(struct pt_regs*) 530 j 1b 531 5323: /* Reschedule */ 533 534#ifdef CONFIG_TRACE_IRQFLAGS 535 abi_call trace_hardirqs_on 536#endif 537 rsil a2, 0 538 abi_call schedule # void schedule (void) 539 j 1b 540 541#ifdef CONFIG_PREEMPTION 5426: 543 _bbci.l a4, TIF_NEED_RESCHED, 4f 544 545 /* Check current_thread_info->preempt_count */ 546 547 l32i a4, a2, TI_PRE_COUNT 548 bnez a4, 4f 549 abi_call preempt_schedule_irq 550 j 4f 551#endif 552 553#if XTENSA_FAKE_NMI 554.LNMIexit: 555 l32i abi_saved1, a1, PT_PS 556 _bbci.l abi_saved1, PS_UM_BIT, 4f 557#endif 558 5595: 560#ifdef CONFIG_HAVE_HW_BREAKPOINT 561 _bbci.l a4, TIF_DB_DISABLED, 7f 562 abi_call restore_dbreak 5637: 564#endif 565#ifdef CONFIG_DEBUG_TLB_SANITY 566 l32i a4, a1, PT_DEPC 567 bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f 568 abi_call check_tlb_sanity 569#endif 5706: 5714: 572#ifdef CONFIG_TRACE_IRQFLAGS 573 extui a4, abi_saved1, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH 574 bgei a4, LOCKLEVEL, 1f 575 abi_call trace_hardirqs_on 5761: 577#endif 578 /* Restore optional registers. */ 579 580 load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT 581 582 /* Restore SCOMPARE1 */ 583 584#if XCHAL_HAVE_S32C1I 585 l32i a2, a1, PT_SCOMPARE1 586 wsr a2, scompare1 587#endif 588 wsr abi_saved1, ps /* disable interrupts */ 589 590 _bbci.l abi_saved1, PS_UM_BIT, kernel_exception_exit 591 592user_exception_exit: 593 594 /* Restore the state of the task and return from the exception. */ 595 596#if defined(USER_SUPPORT_WINDOWED) 597 /* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */ 598 599 l32i a2, a1, PT_WINDOWBASE 600 l32i a3, a1, PT_WINDOWSTART 601 wsr a1, depc # use DEPC as temp storage 602 wsr a3, windowstart # restore WINDOWSTART 603 ssr a2 # preserve user's WB in the SAR 604 wsr a2, windowbase # switch to user's saved WB 605 rsync 606 rsr a1, depc # restore stack pointer 607 l32i a2, a1, PT_WMASK # register frames saved (in bits 4...9) 608 rotw -1 # we restore a4..a7 609 _bltui a6, 16, 1f # only have to restore current window? 610 611 /* The working registers are a0 and a3. We are restoring to 612 * a4..a7. Be careful not to destroy what we have just restored. 613 * Note: wmask has the format YYYYM: 614 * Y: number of registers saved in groups of 4 615 * M: 4 bit mask of first 16 registers 616 */ 617 618 mov a2, a6 619 mov a3, a5 620 6212: rotw -1 # a0..a3 become a4..a7 622 addi a3, a7, -4*4 # next iteration 623 addi a2, a6, -16 # decrementing Y in WMASK 624 l32i a4, a3, PT_AREG_END + 0 625 l32i a5, a3, PT_AREG_END + 4 626 l32i a6, a3, PT_AREG_END + 8 627 l32i a7, a3, PT_AREG_END + 12 628 _bgeui a2, 16, 2b 629 630 /* Clear unrestored registers (don't leak anything to user-land */ 631 6321: rsr a0, windowbase 633 rsr a3, sar 634 sub a3, a0, a3 635 beqz a3, 2f 636 extui a3, a3, 0, WBBITS 637 6381: rotw -1 639 addi a3, a7, -1 640 movi a4, 0 641 movi a5, 0 642 movi a6, 0 643 movi a7, 0 644 bgei a3, 1, 1b 645 646 /* We are back were we were when we started. 647 * Note: a2 still contains WMASK (if we've returned to the original 648 * frame where we had loaded a2), or at least the lower 4 bits 649 * (if we have restored WSBITS-1 frames). 650 */ 6512: 652#else 653 movi a2, 1 654#endif 655#if XCHAL_HAVE_THREADPTR 656 l32i a3, a1, PT_THREADPTR 657 wur a3, threadptr 658#endif 659 660 j common_exception_exit 661 662 /* This is the kernel exception exit. 663 * We avoided to do a MOVSP when we entered the exception, but we 664 * have to do it here. 665 */ 666 667kernel_exception_exit: 668 669#if defined(__XTENSA_WINDOWED_ABI__) 670 /* Check if we have to do a movsp. 671 * 672 * We only have to do a movsp if the previous window-frame has 673 * been spilled to the *temporary* exception stack instead of the 674 * task's stack. This is the case if the corresponding bit in 675 * WINDOWSTART for the previous window-frame was set before 676 * (not spilled) but is zero now (spilled). 677 * If this bit is zero, all other bits except the one for the 678 * current window frame are also zero. So, we can use a simple test: 679 * 'and' WINDOWSTART and WINDOWSTART-1: 680 * 681 * (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]* 682 * 683 * The result is zero only if one bit was set. 684 * 685 * (Note: We might have gone through several task switches before 686 * we come back to the current task, so WINDOWBASE might be 687 * different from the time the exception occurred.) 688 */ 689 690 /* Test WINDOWSTART before and after the exception. 691 * We actually have WMASK, so we only have to test if it is 1 or not. 692 */ 693 694 l32i a2, a1, PT_WMASK 695 _beqi a2, 1, common_exception_exit # Spilled before exception,jump 696 697 /* Test WINDOWSTART now. If spilled, do the movsp */ 698 699 rsr a3, windowstart 700 addi a0, a3, -1 701 and a3, a3, a0 702 _bnez a3, common_exception_exit 703 704 /* Do a movsp (we returned from a call4, so we have at least a0..a7) */ 705 706 addi a0, a1, -16 707 l32i a3, a0, 0 708 l32i a4, a0, 4 709 s32i a3, a1, PT_SIZE+0 710 s32i a4, a1, PT_SIZE+4 711 l32i a3, a0, 8 712 l32i a4, a0, 12 713 s32i a3, a1, PT_SIZE+8 714 s32i a4, a1, PT_SIZE+12 715 716 /* Common exception exit. 717 * We restore the special register and the current window frame, and 718 * return from the exception. 719 * 720 * Note: We expect a2 to hold PT_WMASK 721 */ 722#else 723 movi a2, 1 724#endif 725 726common_exception_exit: 727 728 /* Restore address registers. */ 729 730 _bbsi.l a2, 1, 1f 731 l32i a4, a1, PT_AREG4 732 l32i a5, a1, PT_AREG5 733 l32i a6, a1, PT_AREG6 734 l32i a7, a1, PT_AREG7 735 _bbsi.l a2, 2, 1f 736 l32i a8, a1, PT_AREG8 737 l32i a9, a1, PT_AREG9 738 l32i a10, a1, PT_AREG10 739 l32i a11, a1, PT_AREG11 740 _bbsi.l a2, 3, 1f 741 l32i a12, a1, PT_AREG12 742 l32i a13, a1, PT_AREG13 743 l32i a14, a1, PT_AREG14 744 l32i a15, a1, PT_AREG15 745 746 /* Restore PC, SAR */ 747 7481: l32i a2, a1, PT_PC 749 l32i a3, a1, PT_SAR 750 wsr a2, epc1 751 wsr a3, sar 752 753 /* Restore LBEG, LEND, LCOUNT */ 754#if XCHAL_HAVE_LOOPS 755 l32i a2, a1, PT_LBEG 756 l32i a3, a1, PT_LEND 757 wsr a2, lbeg 758 l32i a2, a1, PT_LCOUNT 759 wsr a3, lend 760 wsr a2, lcount 761#endif 762 763 /* We control single stepping through the ICOUNTLEVEL register. */ 764 765 l32i a2, a1, PT_ICOUNTLEVEL 766 movi a3, -2 767 wsr a2, icountlevel 768 wsr a3, icount 769 770 /* Check if it was double exception. */ 771 772 l32i a0, a1, PT_DEPC 773 l32i a3, a1, PT_AREG3 774 l32i a2, a1, PT_AREG2 775 _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f 776 777 /* Restore a0...a3 and return */ 778 779 l32i a0, a1, PT_AREG0 780 l32i a1, a1, PT_AREG1 781 rfe 782 7831: wsr a0, depc 784 l32i a0, a1, PT_AREG0 785 l32i a1, a1, PT_AREG1 786 rfde 787 788ENDPROC(kernel_exception) 789 790/* 791 * Debug exception handler. 792 * 793 * Currently, we don't support KGDB, so only user application can be debugged. 794 * 795 * When we get here, a0 is trashed and saved to excsave[debuglevel] 796 */ 797 798 .literal_position 799 800ENTRY(debug_exception) 801 802 rsr a0, SREG_EPS + XCHAL_DEBUGLEVEL 803 bbsi.l a0, PS_EXCM_BIT, 1f # exception mode 804 805 /* Set EPC1 and EXCCAUSE */ 806 807 wsr a2, depc # save a2 temporarily 808 rsr a2, SREG_EPC + XCHAL_DEBUGLEVEL 809 wsr a2, epc1 810 811 movi a2, EXCCAUSE_MAPPED_DEBUG 812 wsr a2, exccause 813 814 /* Restore PS to the value before the debug exc but with PS.EXCM set.*/ 815 816 movi a2, 1 << PS_EXCM_BIT 817 or a2, a0, a2 818 wsr a2, ps 819 820 /* Switch to kernel/user stack, restore jump vector, and save a0 */ 821 822 bbsi.l a2, PS_UM_BIT, 2f # jump if user mode 823 824 addi a2, a1, -16-PT_SIZE # assume kernel stack 8253: 826 l32i a0, a3, DT_DEBUG_SAVE 827 s32i a1, a2, PT_AREG1 828 s32i a0, a2, PT_AREG0 829 movi a0, 0 830 s32i a0, a2, PT_DEPC # mark it as a regular exception 831 xsr a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL 832 xsr a0, depc 833 s32i a3, a2, PT_AREG3 834 s32i a0, a2, PT_AREG2 835 mov a1, a2 836 837 /* Debug exception is handled as an exception, so interrupts will 838 * likely be enabled in the common exception handler. Disable 839 * preemption if we have HW breakpoints to preserve DEBUGCAUSE.DBNUM 840 * meaning. 841 */ 842#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_HAVE_HW_BREAKPOINT) 843 GET_THREAD_INFO(a2, a1) 844 l32i a3, a2, TI_PRE_COUNT 845 addi a3, a3, 1 846 s32i a3, a2, TI_PRE_COUNT 847#endif 848 849 rsr a2, ps 850 bbsi.l a2, PS_UM_BIT, _user_exception 851 j _kernel_exception 852 8532: rsr a2, excsave1 854 l32i a2, a2, EXC_TABLE_KSTK # load kernel stack pointer 855 j 3b 856 857#ifdef CONFIG_HAVE_HW_BREAKPOINT 858 /* Debug exception while in exception mode. This may happen when 859 * window overflow/underflow handler or fast exception handler hits 860 * data breakpoint, in which case save and disable all data 861 * breakpoints, single-step faulting instruction and restore data 862 * breakpoints. 863 */ 8641: 865 bbci.l a0, PS_UM_BIT, 1b # jump if kernel mode 866 867 rsr a0, debugcause 868 bbsi.l a0, DEBUGCAUSE_DBREAK_BIT, .Ldebug_save_dbreak 869 870 .set _index, 0 871 .rept XCHAL_NUM_DBREAK 872 l32i a0, a3, DT_DBREAKC_SAVE + _index * 4 873 wsr a0, SREG_DBREAKC + _index 874 .set _index, _index + 1 875 .endr 876 877 l32i a0, a3, DT_ICOUNT_LEVEL_SAVE 878 wsr a0, icountlevel 879 880 l32i a0, a3, DT_ICOUNT_SAVE 881 xsr a0, icount 882 883 l32i a0, a3, DT_DEBUG_SAVE 884 xsr a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL 885 rfi XCHAL_DEBUGLEVEL 886 887.Ldebug_save_dbreak: 888 .set _index, 0 889 .rept XCHAL_NUM_DBREAK 890 movi a0, 0 891 xsr a0, SREG_DBREAKC + _index 892 s32i a0, a3, DT_DBREAKC_SAVE + _index * 4 893 .set _index, _index + 1 894 .endr 895 896 movi a0, XCHAL_EXCM_LEVEL + 1 897 xsr a0, icountlevel 898 s32i a0, a3, DT_ICOUNT_LEVEL_SAVE 899 900 movi a0, 0xfffffffe 901 xsr a0, icount 902 s32i a0, a3, DT_ICOUNT_SAVE 903 904 l32i a0, a3, DT_DEBUG_SAVE 905 xsr a3, SREG_EXCSAVE + XCHAL_DEBUGLEVEL 906 rfi XCHAL_DEBUGLEVEL 907#else 908 /* Debug exception while in exception mode. Should not happen. */ 9091: j 1b // FIXME!! 910#endif 911 912ENDPROC(debug_exception) 913 914/* 915 * We get here in case of an unrecoverable exception. 916 * The only thing we can do is to be nice and print a panic message. 917 * We only produce a single stack frame for panic, so ??? 918 * 919 * 920 * Entry conditions: 921 * 922 * - a0 contains the caller address; original value saved in excsave1. 923 * - the original a0 contains a valid return address (backtrace) or 0. 924 * - a2 contains a valid stackpointer 925 * 926 * Notes: 927 * 928 * - If the stack pointer could be invalid, the caller has to setup a 929 * dummy stack pointer (e.g. the stack of the init_task) 930 * 931 * - If the return address could be invalid, the caller has to set it 932 * to 0, so the backtrace would stop. 933 * 934 */ 935 .align 4 936unrecoverable_text: 937 .ascii "Unrecoverable error in exception handler\0" 938 939 .literal_position 940 941ENTRY(unrecoverable_exception) 942 943#if XCHAL_HAVE_WINDOWED 944 movi a0, 1 945 movi a1, 0 946 947 wsr a0, windowstart 948 wsr a1, windowbase 949 rsync 950#endif 951 952 movi a1, KERNEL_PS_WOE_MASK | LOCKLEVEL 953 wsr a1, ps 954 rsync 955 956 movi a1, init_task 957 movi a0, 0 958 addi a1, a1, PT_REGS_OFFSET 959 960 movi abi_arg0, unrecoverable_text 961 abi_call panic 962 9631: j 1b 964 965ENDPROC(unrecoverable_exception) 966 967/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */ 968 969 __XTENSA_HANDLER 970 .literal_position 971 972#ifdef SUPPORT_WINDOWED 973/* 974 * Fast-handler for alloca exceptions 975 * 976 * The ALLOCA handler is entered when user code executes the MOVSP 977 * instruction and the caller's frame is not in the register file. 978 * 979 * This algorithm was taken from the Ross Morley's RTOS Porting Layer: 980 * 981 * /home/ross/rtos/porting/XtensaRTOS-PortingLayer-20090507/xtensa_vectors.S 982 * 983 * It leverages the existing window spill/fill routines and their support for 984 * double exceptions. The 'movsp' instruction will only cause an exception if 985 * the next window needs to be loaded. In fact this ALLOCA exception may be 986 * replaced at some point by changing the hardware to do a underflow exception 987 * of the proper size instead. 988 * 989 * This algorithm simply backs out the register changes started by the user 990 * exception handler, makes it appear that we have started a window underflow 991 * by rotating the window back and then setting the old window base (OWB) in 992 * the 'ps' register with the rolled back window base. The 'movsp' instruction 993 * will be re-executed and this time since the next window frames is in the 994 * active AR registers it won't cause an exception. 995 * 996 * If the WindowUnderflow code gets a TLB miss the page will get mapped 997 * the partial WindowUnderflow will be handled in the double exception 998 * handler. 999 * 1000 * Entry condition: 1001 * 1002 * a0: trashed, original value saved on stack (PT_AREG0) 1003 * a1: a1 1004 * a2: new stack pointer, original in DEPC 1005 * a3: a3 1006 * depc: a2, original value saved on stack (PT_DEPC) 1007 * excsave_1: dispatch table 1008 * 1009 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC 1010 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception 1011 */ 1012 1013ENTRY(fast_alloca) 1014 rsr a0, windowbase 1015 rotw -1 1016 rsr a2, ps 1017 extui a3, a2, PS_OWB_SHIFT, PS_OWB_WIDTH 1018 xor a3, a3, a4 1019 l32i a4, a6, PT_AREG0 1020 l32i a1, a6, PT_DEPC 1021 rsr a6, depc 1022 wsr a1, depc 1023 slli a3, a3, PS_OWB_SHIFT 1024 xor a2, a2, a3 1025 wsr a2, ps 1026 rsync 1027 1028 _bbci.l a4, 31, 4f 1029 rotw -1 1030 _bbci.l a8, 30, 8f 1031 rotw -1 1032 j _WindowUnderflow12 10338: j _WindowUnderflow8 10344: j _WindowUnderflow4 1035ENDPROC(fast_alloca) 1036#endif 1037 1038#ifdef CONFIG_USER_ABI_CALL0_PROBE 1039/* 1040 * fast illegal instruction handler. 1041 * 1042 * This is used to fix up user PS.WOE on the exception caused 1043 * by the first opcode related to register window. If PS.WOE is 1044 * already set it goes directly to the common user exception handler. 1045 * 1046 * Entry condition: 1047 * 1048 * a0: trashed, original value saved on stack (PT_AREG0) 1049 * a1: a1 1050 * a2: new stack pointer, original in DEPC 1051 * a3: a3 1052 * depc: a2, original value saved on stack (PT_DEPC) 1053 * excsave_1: dispatch table 1054 */ 1055 1056ENTRY(fast_illegal_instruction_user) 1057 1058 rsr a0, ps 1059 bbsi.l a0, PS_WOE_BIT, 1f 1060 s32i a3, a2, PT_AREG3 1061 movi a3, PS_WOE_MASK 1062 or a0, a0, a3 1063 wsr a0, ps 1064 l32i a3, a2, PT_AREG3 1065 l32i a0, a2, PT_AREG0 1066 rsr a2, depc 1067 rfe 10681: 1069 call0 user_exception 1070 1071ENDPROC(fast_illegal_instruction_user) 1072#endif 1073 1074 /* 1075 * fast system calls. 1076 * 1077 * WARNING: The kernel doesn't save the entire user context before 1078 * handling a fast system call. These functions are small and short, 1079 * usually offering some functionality not available to user tasks. 1080 * 1081 * BE CAREFUL TO PRESERVE THE USER'S CONTEXT. 1082 * 1083 * Entry condition: 1084 * 1085 * a0: trashed, original value saved on stack (PT_AREG0) 1086 * a1: a1 1087 * a2: new stack pointer, original in DEPC 1088 * a3: a3 1089 * depc: a2, original value saved on stack (PT_DEPC) 1090 * excsave_1: dispatch table 1091 */ 1092 1093ENTRY(fast_syscall_user) 1094 1095 /* Skip syscall. */ 1096 1097 rsr a0, epc1 1098 addi a0, a0, 3 1099 wsr a0, epc1 1100 1101 l32i a0, a2, PT_DEPC 1102 bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable 1103 1104 rsr a0, depc # get syscall-nr 1105 _beqz a0, fast_syscall_spill_registers 1106 _beqi a0, __NR_xtensa, fast_syscall_xtensa 1107 1108 call0 user_exception 1109 1110ENDPROC(fast_syscall_user) 1111 1112ENTRY(fast_syscall_unrecoverable) 1113 1114 /* Restore all states. */ 1115 1116 l32i a0, a2, PT_AREG0 # restore a0 1117 xsr a2, depc # restore a2, depc 1118 1119 wsr a0, excsave1 1120 call0 unrecoverable_exception 1121 1122ENDPROC(fast_syscall_unrecoverable) 1123 1124/* 1125 * sysxtensa syscall handler 1126 * 1127 * int sysxtensa (SYS_XTENSA_ATOMIC_SET, ptr, val, unused); 1128 * int sysxtensa (SYS_XTENSA_ATOMIC_ADD, ptr, val, unused); 1129 * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val, unused); 1130 * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval); 1131 * a2 a6 a3 a4 a5 1132 * 1133 * Entry condition: 1134 * 1135 * a0: a2 (syscall-nr), original value saved on stack (PT_AREG0) 1136 * a1: a1 1137 * a2: new stack pointer, original in a0 and DEPC 1138 * a3: a3 1139 * a4..a15: unchanged 1140 * depc: a2, original value saved on stack (PT_DEPC) 1141 * excsave_1: dispatch table 1142 * 1143 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC 1144 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception 1145 * 1146 * Note: we don't have to save a2; a2 holds the return value 1147 */ 1148 1149 .literal_position 1150 1151#ifdef CONFIG_FAST_SYSCALL_XTENSA 1152 1153ENTRY(fast_syscall_xtensa) 1154 1155 s32i a7, a2, PT_AREG7 # we need an additional register 1156 movi a7, 4 # sizeof(unsigned int) 1157 access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp 1158 1159 _bgeui a6, SYS_XTENSA_COUNT, .Lill 1160 _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp 1161 1162 /* Fall through for ATOMIC_CMP_SWP. */ 1163 1164.Lswp: /* Atomic compare and swap */ 1165 1166EX(.Leac) l32i a0, a3, 0 # read old value 1167 bne a0, a4, 1f # same as old value? jump 1168EX(.Leac) s32i a5, a3, 0 # different, modify value 1169 l32i a7, a2, PT_AREG7 # restore a7 1170 l32i a0, a2, PT_AREG0 # restore a0 1171 movi a2, 1 # and return 1 1172 rfe 1173 11741: l32i a7, a2, PT_AREG7 # restore a7 1175 l32i a0, a2, PT_AREG0 # restore a0 1176 movi a2, 0 # return 0 (note that we cannot set 1177 rfe 1178 1179.Lnswp: /* Atomic set, add, and exg_add. */ 1180 1181EX(.Leac) l32i a7, a3, 0 # orig 1182 addi a6, a6, -SYS_XTENSA_ATOMIC_SET 1183 add a0, a4, a7 # + arg 1184 moveqz a0, a4, a6 # set 1185 addi a6, a6, SYS_XTENSA_ATOMIC_SET 1186EX(.Leac) s32i a0, a3, 0 # write new value 1187 1188 mov a0, a2 1189 mov a2, a7 1190 l32i a7, a0, PT_AREG7 # restore a7 1191 l32i a0, a0, PT_AREG0 # restore a0 1192 rfe 1193 1194.Leac: l32i a7, a2, PT_AREG7 # restore a7 1195 l32i a0, a2, PT_AREG0 # restore a0 1196 movi a2, -EFAULT 1197 rfe 1198 1199.Lill: l32i a7, a2, PT_AREG7 # restore a7 1200 l32i a0, a2, PT_AREG0 # restore a0 1201 movi a2, -EINVAL 1202 rfe 1203 1204ENDPROC(fast_syscall_xtensa) 1205 1206#else /* CONFIG_FAST_SYSCALL_XTENSA */ 1207 1208ENTRY(fast_syscall_xtensa) 1209 1210 l32i a0, a2, PT_AREG0 # restore a0 1211 movi a2, -ENOSYS 1212 rfe 1213 1214ENDPROC(fast_syscall_xtensa) 1215 1216#endif /* CONFIG_FAST_SYSCALL_XTENSA */ 1217 1218 1219/* fast_syscall_spill_registers. 1220 * 1221 * Entry condition: 1222 * 1223 * a0: trashed, original value saved on stack (PT_AREG0) 1224 * a1: a1 1225 * a2: new stack pointer, original in DEPC 1226 * a3: a3 1227 * depc: a2, original value saved on stack (PT_DEPC) 1228 * excsave_1: dispatch table 1229 * 1230 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler. 1231 */ 1232 1233#if defined(CONFIG_FAST_SYSCALL_SPILL_REGISTERS) && \ 1234 defined(USER_SUPPORT_WINDOWED) 1235 1236ENTRY(fast_syscall_spill_registers) 1237 1238 /* Register a FIXUP handler (pass current wb as a parameter) */ 1239 1240 xsr a3, excsave1 1241 movi a0, fast_syscall_spill_registers_fixup 1242 s32i a0, a3, EXC_TABLE_FIXUP 1243 rsr a0, windowbase 1244 s32i a0, a3, EXC_TABLE_PARAM 1245 xsr a3, excsave1 # restore a3 and excsave_1 1246 1247 /* Save a3, a4 and SAR on stack. */ 1248 1249 rsr a0, sar 1250 s32i a3, a2, PT_AREG3 1251 s32i a0, a2, PT_SAR 1252 1253 /* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */ 1254 1255 s32i a4, a2, PT_AREG4 1256 s32i a7, a2, PT_AREG7 1257 s32i a8, a2, PT_AREG8 1258 s32i a11, a2, PT_AREG11 1259 s32i a12, a2, PT_AREG12 1260 s32i a15, a2, PT_AREG15 1261 1262 /* 1263 * Rotate ws so that the current windowbase is at bit 0. 1264 * Assume ws = xxxwww1yy (www1 current window frame). 1265 * Rotate ws right so that a4 = yyxxxwww1. 1266 */ 1267 1268 rsr a0, windowbase 1269 rsr a3, windowstart # a3 = xxxwww1yy 1270 ssr a0 # holds WB 1271 slli a0, a3, WSBITS 1272 or a3, a3, a0 # a3 = xxxwww1yyxxxwww1yy 1273 srl a3, a3 # a3 = 00xxxwww1yyxxxwww1 1274 1275 /* We are done if there are no more than the current register frame. */ 1276 1277 extui a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww 1278 movi a0, (1 << (WSBITS-1)) 1279 _beqz a3, .Lnospill # only one active frame? jump 1280 1281 /* We want 1 at the top, so that we return to the current windowbase */ 1282 1283 or a3, a3, a0 # 1yyxxxwww 1284 1285 /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */ 1286 1287 wsr a3, windowstart # save shifted windowstart 1288 neg a0, a3 1289 and a3, a0, a3 # first bit set from right: 000010000 1290 1291 ffs_ws a0, a3 # a0: shifts to skip empty frames 1292 movi a3, WSBITS 1293 sub a0, a3, a0 # WSBITS-a0:number of 0-bits from right 1294 ssr a0 # save in SAR for later. 1295 1296 rsr a3, windowbase 1297 add a3, a3, a0 1298 wsr a3, windowbase 1299 rsync 1300 1301 rsr a3, windowstart 1302 srl a3, a3 # shift windowstart 1303 1304 /* WB is now just one frame below the oldest frame in the register 1305 window. WS is shifted so the oldest frame is in bit 0, thus, WB 1306 and WS differ by one 4-register frame. */ 1307 1308 /* Save frames. Depending what call was used (call4, call8, call12), 1309 * we have to save 4,8. or 12 registers. 1310 */ 1311 1312 1313.Lloop: _bbsi.l a3, 1, .Lc4 1314 _bbci.l a3, 2, .Lc12 1315 1316.Lc8: s32e a4, a13, -16 1317 l32e a4, a5, -12 1318 s32e a8, a4, -32 1319 s32e a5, a13, -12 1320 s32e a6, a13, -8 1321 s32e a7, a13, -4 1322 s32e a9, a4, -28 1323 s32e a10, a4, -24 1324 s32e a11, a4, -20 1325 srli a11, a3, 2 # shift windowbase by 2 1326 rotw 2 1327 _bnei a3, 1, .Lloop 1328 j .Lexit 1329 1330.Lc4: s32e a4, a9, -16 1331 s32e a5, a9, -12 1332 s32e a6, a9, -8 1333 s32e a7, a9, -4 1334 1335 srli a7, a3, 1 1336 rotw 1 1337 _bnei a3, 1, .Lloop 1338 j .Lexit 1339 1340.Lc12: _bbci.l a3, 3, .Linvalid_mask # bit 2 shouldn't be zero! 1341 1342 /* 12-register frame (call12) */ 1343 1344 l32e a0, a5, -12 1345 s32e a8, a0, -48 1346 mov a8, a0 1347 1348 s32e a9, a8, -44 1349 s32e a10, a8, -40 1350 s32e a11, a8, -36 1351 s32e a12, a8, -32 1352 s32e a13, a8, -28 1353 s32e a14, a8, -24 1354 s32e a15, a8, -20 1355 srli a15, a3, 3 1356 1357 /* The stack pointer for a4..a7 is out of reach, so we rotate the 1358 * window, grab the stackpointer, and rotate back. 1359 * Alternatively, we could also use the following approach, but that 1360 * makes the fixup routine much more complicated: 1361 * rotw 1 1362 * s32e a0, a13, -16 1363 * ... 1364 * rotw 2 1365 */ 1366 1367 rotw 1 1368 mov a4, a13 1369 rotw -1 1370 1371 s32e a4, a8, -16 1372 s32e a5, a8, -12 1373 s32e a6, a8, -8 1374 s32e a7, a8, -4 1375 1376 rotw 3 1377 1378 _beqi a3, 1, .Lexit 1379 j .Lloop 1380 1381.Lexit: 1382 1383 /* Done. Do the final rotation and set WS */ 1384 1385 rotw 1 1386 rsr a3, windowbase 1387 ssl a3 1388 movi a3, 1 1389 sll a3, a3 1390 wsr a3, windowstart 1391.Lnospill: 1392 1393 /* Advance PC, restore registers and SAR, and return from exception. */ 1394 1395 l32i a3, a2, PT_SAR 1396 l32i a0, a2, PT_AREG0 1397 wsr a3, sar 1398 l32i a3, a2, PT_AREG3 1399 1400 /* Restore clobbered registers. */ 1401 1402 l32i a4, a2, PT_AREG4 1403 l32i a7, a2, PT_AREG7 1404 l32i a8, a2, PT_AREG8 1405 l32i a11, a2, PT_AREG11 1406 l32i a12, a2, PT_AREG12 1407 l32i a15, a2, PT_AREG15 1408 1409 movi a2, 0 1410 rfe 1411 1412.Linvalid_mask: 1413 1414 /* We get here because of an unrecoverable error in the window 1415 * registers, so set up a dummy frame and kill the user application. 1416 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer. 1417 */ 1418 1419 movi a0, 1 1420 movi a1, 0 1421 1422 wsr a0, windowstart 1423 wsr a1, windowbase 1424 rsync 1425 1426 movi a0, 0 1427 1428 rsr a3, excsave1 1429 l32i a1, a3, EXC_TABLE_KSTK 1430 1431 movi a4, KERNEL_PS_WOE_MASK | LOCKLEVEL 1432 wsr a4, ps 1433 rsync 1434 1435 movi abi_arg0, SIGSEGV 1436 abi_call make_task_dead 1437 1438 /* shouldn't return, so panic */ 1439 1440 wsr a0, excsave1 1441 call0 unrecoverable_exception # should not return 14421: j 1b 1443 1444 1445ENDPROC(fast_syscall_spill_registers) 1446 1447/* Fixup handler. 1448 * 1449 * We get here if the spill routine causes an exception, e.g. tlb miss. 1450 * We basically restore WINDOWBASE and WINDOWSTART to the condition when 1451 * we entered the spill routine and jump to the user exception handler. 1452 * 1453 * Note that we only need to restore the bits in windowstart that have not 1454 * been spilled yet by the _spill_register routine. Luckily, a3 contains a 1455 * rotated windowstart with only those bits set for frames that haven't been 1456 * spilled yet. Because a3 is rotated such that bit 0 represents the register 1457 * frame for the current windowbase - 1, we need to rotate a3 left by the 1458 * value of the current windowbase + 1 and move it to windowstart. 1459 * 1460 * a0: value of depc, original value in depc 1461 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE 1462 * a3: exctable, original value in excsave1 1463 */ 1464 1465ENTRY(fast_syscall_spill_registers_fixup) 1466 1467 rsr a2, windowbase # get current windowbase (a2 is saved) 1468 xsr a0, depc # restore depc and a0 1469 ssl a2 # set shift (32 - WB) 1470 1471 /* We need to make sure the current registers (a0-a3) are preserved. 1472 * To do this, we simply set the bit for the current window frame 1473 * in WS, so that the exception handlers save them to the task stack. 1474 * 1475 * Note: we use a3 to set the windowbase, so we take a special care 1476 * of it, saving it in the original _spill_registers frame across 1477 * the exception handler call. 1478 */ 1479 1480 xsr a3, excsave1 # get spill-mask 1481 slli a3, a3, 1 # shift left by one 1482 addi a3, a3, 1 # set the bit for the current window frame 1483 1484 slli a2, a3, 32-WSBITS 1485 src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy...... 1486 wsr a2, windowstart # set corrected windowstart 1487 1488 srli a3, a3, 1 1489 rsr a2, excsave1 1490 l32i a2, a2, EXC_TABLE_DOUBLE_SAVE # restore a2 1491 xsr a2, excsave1 1492 s32i a3, a2, EXC_TABLE_DOUBLE_SAVE # save a3 1493 l32i a3, a2, EXC_TABLE_PARAM # original WB (in user task) 1494 xsr a2, excsave1 1495 1496 /* Return to the original (user task) WINDOWBASE. 1497 * We leave the following frame behind: 1498 * a0, a1, a2 same 1499 * a3: trashed (saved in EXC_TABLE_DOUBLE_SAVE) 1500 * depc: depc (we have to return to that address) 1501 * excsave_1: exctable 1502 */ 1503 1504 wsr a3, windowbase 1505 rsync 1506 1507 /* We are now in the original frame when we entered _spill_registers: 1508 * a0: return address 1509 * a1: used, stack pointer 1510 * a2: kernel stack pointer 1511 * a3: available 1512 * depc: exception address 1513 * excsave: exctable 1514 * Note: This frame might be the same as above. 1515 */ 1516 1517 /* Setup stack pointer. */ 1518 1519 addi a2, a2, -PT_USER_SIZE 1520 s32i a0, a2, PT_AREG0 1521 1522 /* Make sure we return to this fixup handler. */ 1523 1524 movi a3, fast_syscall_spill_registers_fixup_return 1525 s32i a3, a2, PT_DEPC # setup depc 1526 1527 /* Jump to the exception handler. */ 1528 1529 rsr a3, excsave1 1530 rsr a0, exccause 1531 addx4 a0, a0, a3 # find entry in table 1532 l32i a0, a0, EXC_TABLE_FAST_USER # load handler 1533 l32i a3, a3, EXC_TABLE_DOUBLE_SAVE 1534 jx a0 1535 1536ENDPROC(fast_syscall_spill_registers_fixup) 1537 1538ENTRY(fast_syscall_spill_registers_fixup_return) 1539 1540 /* When we return here, all registers have been restored (a2: DEPC) */ 1541 1542 wsr a2, depc # exception address 1543 1544 /* Restore fixup handler. */ 1545 1546 rsr a2, excsave1 1547 s32i a3, a2, EXC_TABLE_DOUBLE_SAVE 1548 movi a3, fast_syscall_spill_registers_fixup 1549 s32i a3, a2, EXC_TABLE_FIXUP 1550 rsr a3, windowbase 1551 s32i a3, a2, EXC_TABLE_PARAM 1552 l32i a2, a2, EXC_TABLE_KSTK 1553 1554 /* Load WB at the time the exception occurred. */ 1555 1556 rsr a3, sar # WB is still in SAR 1557 neg a3, a3 1558 wsr a3, windowbase 1559 rsync 1560 1561 rsr a3, excsave1 1562 l32i a3, a3, EXC_TABLE_DOUBLE_SAVE 1563 1564 rfde 1565 1566ENDPROC(fast_syscall_spill_registers_fixup_return) 1567 1568#else /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */ 1569 1570ENTRY(fast_syscall_spill_registers) 1571 1572 l32i a0, a2, PT_AREG0 # restore a0 1573 movi a2, -ENOSYS 1574 rfe 1575 1576ENDPROC(fast_syscall_spill_registers) 1577 1578#endif /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */ 1579 1580#ifdef CONFIG_MMU 1581/* 1582 * We should never get here. Bail out! 1583 */ 1584 1585ENTRY(fast_second_level_miss_double_kernel) 1586 15871: 1588 call0 unrecoverable_exception # should not return 15891: j 1b 1590 1591ENDPROC(fast_second_level_miss_double_kernel) 1592 1593/* First-level entry handler for user, kernel, and double 2nd-level 1594 * TLB miss exceptions. Note that for now, user and kernel miss 1595 * exceptions share the same entry point and are handled identically. 1596 * 1597 * An old, less-efficient C version of this function used to exist. 1598 * We include it below, interleaved as comments, for reference. 1599 * 1600 * Entry condition: 1601 * 1602 * a0: trashed, original value saved on stack (PT_AREG0) 1603 * a1: a1 1604 * a2: new stack pointer, original in DEPC 1605 * a3: a3 1606 * depc: a2, original value saved on stack (PT_DEPC) 1607 * excsave_1: dispatch table 1608 * 1609 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC 1610 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception 1611 */ 1612 1613ENTRY(fast_second_level_miss) 1614 1615 /* Save a1 and a3. Note: we don't expect a double exception. */ 1616 1617 s32i a1, a2, PT_AREG1 1618 s32i a3, a2, PT_AREG3 1619 1620 /* We need to map the page of PTEs for the user task. Find 1621 * the pointer to that page. Also, it's possible for tsk->mm 1622 * to be NULL while tsk->active_mm is nonzero if we faulted on 1623 * a vmalloc address. In that rare case, we must use 1624 * active_mm instead to avoid a fault in this handler. See 1625 * 1626 * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html 1627 * (or search Internet on "mm vs. active_mm") 1628 * 1629 * if (!mm) 1630 * mm = tsk->active_mm; 1631 * pgd = pgd_offset (mm, regs->excvaddr); 1632 * pmd = pmd_offset (pgd, regs->excvaddr); 1633 * pmdval = *pmd; 1634 */ 1635 1636 GET_CURRENT(a1,a2) 1637 l32i a0, a1, TASK_MM # tsk->mm 1638 beqz a0, 9f 1639 16408: rsr a3, excvaddr # fault address 1641 _PGD_OFFSET(a0, a3, a1) 1642 l32i a0, a0, 0 # read pmdval 1643 beqz a0, 2f 1644 1645 /* Read ptevaddr and convert to top of page-table page. 1646 * 1647 * vpnval = read_ptevaddr_register() & PAGE_MASK; 1648 * vpnval += DTLB_WAY_PGTABLE; 1649 * pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL); 1650 * write_dtlb_entry (pteval, vpnval); 1651 * 1652 * The messy computation for 'pteval' above really simplifies 1653 * into the following: 1654 * 1655 * pteval = ((pmdval - PAGE_OFFSET + PHYS_OFFSET) & PAGE_MASK) 1656 * | PAGE_DIRECTORY 1657 */ 1658 1659 movi a1, (PHYS_OFFSET - PAGE_OFFSET) & 0xffffffff 1660 add a0, a0, a1 # pmdval - PAGE_OFFSET 1661 extui a1, a0, 0, PAGE_SHIFT # ... & PAGE_MASK 1662 xor a0, a0, a1 1663 1664 movi a1, _PAGE_DIRECTORY 1665 or a0, a0, a1 # ... | PAGE_DIRECTORY 1666 1667 /* 1668 * We utilize all three wired-ways (7-9) to hold pmd translations. 1669 * Memory regions are mapped to the DTLBs according to bits 28 and 29. 1670 * This allows to map the three most common regions to three different 1671 * DTLBs: 1672 * 0,1 -> way 7 program (0040.0000) and virtual (c000.0000) 1673 * 2 -> way 8 shared libaries (2000.0000) 1674 * 3 -> way 0 stack (3000.0000) 1675 */ 1676 1677 extui a3, a3, 28, 2 # addr. bit 28 and 29 0,1,2,3 1678 rsr a1, ptevaddr 1679 addx2 a3, a3, a3 # -> 0,3,6,9 1680 srli a1, a1, PAGE_SHIFT 1681 extui a3, a3, 2, 2 # -> 0,0,1,2 1682 slli a1, a1, PAGE_SHIFT # ptevaddr & PAGE_MASK 1683 addi a3, a3, DTLB_WAY_PGD 1684 add a1, a1, a3 # ... + way_number 1685 16863: wdtlb a0, a1 1687 dsync 1688 1689 /* Exit critical section. */ 1690 16914: rsr a3, excsave1 1692 movi a0, 0 1693 s32i a0, a3, EXC_TABLE_FIXUP 1694 1695 /* Restore the working registers, and return. */ 1696 1697 l32i a0, a2, PT_AREG0 1698 l32i a1, a2, PT_AREG1 1699 l32i a3, a2, PT_AREG3 1700 l32i a2, a2, PT_DEPC 1701 1702 bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f 1703 1704 /* Restore excsave1 and return. */ 1705 1706 rsr a2, depc 1707 rfe 1708 1709 /* Return from double exception. */ 1710 17111: xsr a2, depc 1712 esync 1713 rfde 1714 17159: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0 1716 bnez a0, 8b 1717 1718 /* Even more unlikely case active_mm == 0. 1719 * We can get here with NMI in the middle of context_switch that 1720 * touches vmalloc area. 1721 */ 1722 movi a0, init_mm 1723 j 8b 1724 1725#if (DCACHE_WAY_SIZE > PAGE_SIZE) 1726 17272: /* Special case for cache aliasing. 1728 * We (should) only get here if a clear_user_page, copy_user_page 1729 * or the aliased cache flush functions got preemptively interrupted 1730 * by another task. Re-establish temporary mapping to the 1731 * TLBTEMP_BASE areas. 1732 */ 1733 1734 /* We shouldn't be in a double exception */ 1735 1736 l32i a0, a2, PT_DEPC 1737 bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f 1738 1739 /* Make sure the exception originated in the special functions */ 1740 1741 movi a0, __tlbtemp_mapping_start 1742 rsr a3, epc1 1743 bltu a3, a0, 2f 1744 movi a0, __tlbtemp_mapping_end 1745 bgeu a3, a0, 2f 1746 1747 /* Check if excvaddr was in one of the TLBTEMP_BASE areas. */ 1748 1749 movi a3, TLBTEMP_BASE_1 1750 rsr a0, excvaddr 1751 bltu a0, a3, 2f 1752 1753 addi a1, a0, -TLBTEMP_SIZE 1754 bgeu a1, a3, 2f 1755 1756 /* Check if we have to restore an ITLB mapping. */ 1757 1758 movi a1, __tlbtemp_mapping_itlb 1759 rsr a3, epc1 1760 sub a3, a3, a1 1761 1762 /* Calculate VPN */ 1763 1764 movi a1, PAGE_MASK 1765 and a1, a1, a0 1766 1767 /* Jump for ITLB entry */ 1768 1769 bgez a3, 1f 1770 1771 /* We can use up to two TLBTEMP areas, one for src and one for dst. */ 1772 1773 extui a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1 1774 add a1, a3, a1 1775 1776 /* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */ 1777 1778 mov a0, a6 1779 movnez a0, a7, a3 1780 j 3b 1781 1782 /* ITLB entry. We only use dst in a6. */ 1783 17841: witlb a6, a1 1785 isync 1786 j 4b 1787 1788 1789#endif // DCACHE_WAY_SIZE > PAGE_SIZE 1790 1791 17922: /* Invalid PGD, default exception handling */ 1793 1794 rsr a1, depc 1795 s32i a1, a2, PT_AREG2 1796 mov a1, a2 1797 1798 rsr a2, ps 1799 bbsi.l a2, PS_UM_BIT, 1f 1800 call0 _kernel_exception 18011: call0 _user_exception 1802 1803ENDPROC(fast_second_level_miss) 1804 1805/* 1806 * StoreProhibitedException 1807 * 1808 * Update the pte and invalidate the itlb mapping for this pte. 1809 * 1810 * Entry condition: 1811 * 1812 * a0: trashed, original value saved on stack (PT_AREG0) 1813 * a1: a1 1814 * a2: new stack pointer, original in DEPC 1815 * a3: a3 1816 * depc: a2, original value saved on stack (PT_DEPC) 1817 * excsave_1: dispatch table 1818 * 1819 * PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC 1820 * < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception 1821 */ 1822 1823ENTRY(fast_store_prohibited) 1824 1825 /* Save a1 and a3. */ 1826 1827 s32i a1, a2, PT_AREG1 1828 s32i a3, a2, PT_AREG3 1829 1830 GET_CURRENT(a1,a2) 1831 l32i a0, a1, TASK_MM # tsk->mm 1832 beqz a0, 9f 1833 18348: rsr a1, excvaddr # fault address 1835 _PGD_OFFSET(a0, a1, a3) 1836 l32i a0, a0, 0 1837 beqz a0, 2f 1838 1839 /* 1840 * Note that we test _PAGE_WRITABLE_BIT only if PTE is present 1841 * and is not PAGE_NONE. See pgtable.h for possible PTE layouts. 1842 */ 1843 1844 _PTE_OFFSET(a0, a1, a3) 1845 l32i a3, a0, 0 # read pteval 1846 movi a1, _PAGE_CA_INVALID 1847 ball a3, a1, 2f 1848 bbci.l a3, _PAGE_WRITABLE_BIT, 2f 1849 1850 movi a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE 1851 or a3, a3, a1 1852 rsr a1, excvaddr 1853 s32i a3, a0, 0 1854 1855 /* We need to flush the cache if we have page coloring. */ 1856#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK 1857 dhwb a0, 0 1858#endif 1859 pdtlb a0, a1 1860 wdtlb a3, a0 1861 1862 /* Exit critical section. */ 1863 1864 movi a0, 0 1865 rsr a3, excsave1 1866 s32i a0, a3, EXC_TABLE_FIXUP 1867 1868 /* Restore the working registers, and return. */ 1869 1870 l32i a3, a2, PT_AREG3 1871 l32i a1, a2, PT_AREG1 1872 l32i a0, a2, PT_AREG0 1873 l32i a2, a2, PT_DEPC 1874 1875 bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f 1876 1877 rsr a2, depc 1878 rfe 1879 1880 /* Double exception. Restore FIXUP handler and return. */ 1881 18821: xsr a2, depc 1883 esync 1884 rfde 1885 18869: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0 1887 j 8b 1888 18892: /* If there was a problem, handle fault in C */ 1890 1891 rsr a3, depc # still holds a2 1892 s32i a3, a2, PT_AREG2 1893 mov a1, a2 1894 1895 rsr a2, ps 1896 bbsi.l a2, PS_UM_BIT, 1f 1897 call0 _kernel_exception 18981: call0 _user_exception 1899 1900ENDPROC(fast_store_prohibited) 1901 1902#endif /* CONFIG_MMU */ 1903 1904 .text 1905/* 1906 * System Calls. 1907 * 1908 * void system_call (struct pt_regs* regs, int exccause) 1909 * a2 a3 1910 */ 1911 .literal_position 1912 1913ENTRY(system_call) 1914 1915#if defined(__XTENSA_WINDOWED_ABI__) 1916 abi_entry_default 1917#elif defined(__XTENSA_CALL0_ABI__) 1918 abi_entry(12) 1919 1920 s32i a0, sp, 0 1921 s32i abi_saved0, sp, 4 1922 s32i abi_saved1, sp, 8 1923 mov abi_saved0, a2 1924#else 1925#error Unsupported Xtensa ABI 1926#endif 1927 1928 /* regs->syscall = regs->areg[2] */ 1929 1930 l32i a7, abi_saved0, PT_AREG2 1931 s32i a7, abi_saved0, PT_SYSCALL 1932 1933 GET_THREAD_INFO(a4, a1) 1934 l32i abi_saved1, a4, TI_FLAGS 1935 movi a4, _TIF_WORK_MASK 1936 and abi_saved1, abi_saved1, a4 1937 beqz abi_saved1, 1f 1938 1939 mov abi_arg0, abi_saved0 1940 abi_call do_syscall_trace_enter 1941 beqz abi_rv, .Lsyscall_exit 1942 l32i a7, abi_saved0, PT_SYSCALL 1943 19441: 1945 /* syscall = sys_call_table[syscall_nr] */ 1946 1947 movi a4, sys_call_table 1948 movi a5, __NR_syscalls 1949 movi abi_rv, -ENOSYS 1950 bgeu a7, a5, 1f 1951 1952 addx4 a4, a7, a4 1953 l32i abi_tmp0, a4, 0 1954 1955 /* Load args: arg0 - arg5 are passed via regs. */ 1956 1957 l32i abi_arg0, abi_saved0, PT_AREG6 1958 l32i abi_arg1, abi_saved0, PT_AREG3 1959 l32i abi_arg2, abi_saved0, PT_AREG4 1960 l32i abi_arg3, abi_saved0, PT_AREG5 1961 l32i abi_arg4, abi_saved0, PT_AREG8 1962 l32i abi_arg5, abi_saved0, PT_AREG9 1963 1964 abi_callx abi_tmp0 1965 19661: /* regs->areg[2] = return_value */ 1967 1968 s32i abi_rv, abi_saved0, PT_AREG2 1969 bnez abi_saved1, 1f 1970.Lsyscall_exit: 1971#if defined(__XTENSA_WINDOWED_ABI__) 1972 abi_ret_default 1973#elif defined(__XTENSA_CALL0_ABI__) 1974 l32i a0, sp, 0 1975 l32i abi_saved0, sp, 4 1976 l32i abi_saved1, sp, 8 1977 abi_ret(12) 1978#else 1979#error Unsupported Xtensa ABI 1980#endif 1981 19821: 1983 mov abi_arg0, abi_saved0 1984 abi_call do_syscall_trace_leave 1985 j .Lsyscall_exit 1986 1987ENDPROC(system_call) 1988 1989/* 1990 * Spill live registers on the kernel stack macro. 1991 * 1992 * Entry condition: ps.woe is set, ps.excm is cleared 1993 * Exit condition: windowstart has single bit set 1994 * May clobber: a12, a13 1995 */ 1996 .macro spill_registers_kernel 1997 1998#if XCHAL_NUM_AREGS > 16 1999 call12 1f 2000 _j 2f 2001 retw 2002 .align 4 20031: 2004 _entry a1, 48 2005 addi a12, a0, 3 2006#if XCHAL_NUM_AREGS > 32 2007 .rept (XCHAL_NUM_AREGS - 32) / 12 2008 _entry a1, 48 2009 mov a12, a0 2010 .endr 2011#endif 2012 _entry a1, 16 2013#if XCHAL_NUM_AREGS % 12 == 0 2014 mov a8, a8 2015#elif XCHAL_NUM_AREGS % 12 == 4 2016 mov a12, a12 2017#elif XCHAL_NUM_AREGS % 12 == 8 2018 mov a4, a4 2019#endif 2020 retw 20212: 2022#else 2023 mov a12, a12 2024#endif 2025 .endm 2026 2027/* 2028 * Task switch. 2029 * 2030 * struct task* _switch_to (struct task* prev, struct task* next) 2031 * a2 a2 a3 2032 */ 2033 2034ENTRY(_switch_to) 2035 2036#if defined(__XTENSA_WINDOWED_ABI__) 2037 abi_entry(XTENSA_SPILL_STACK_RESERVE) 2038#elif defined(__XTENSA_CALL0_ABI__) 2039 abi_entry(16) 2040 2041 s32i a12, sp, 0 2042 s32i a13, sp, 4 2043 s32i a14, sp, 8 2044 s32i a15, sp, 12 2045#else 2046#error Unsupported Xtensa ABI 2047#endif 2048 mov a11, a3 # and 'next' (a3) 2049 2050 l32i a4, a2, TASK_THREAD_INFO 2051 l32i a5, a3, TASK_THREAD_INFO 2052 2053 save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER 2054 2055#if THREAD_RA > 1020 || THREAD_SP > 1020 2056 addi a10, a2, TASK_THREAD 2057 s32i a0, a10, THREAD_RA - TASK_THREAD # save return address 2058 s32i a1, a10, THREAD_SP - TASK_THREAD # save stack pointer 2059#else 2060 s32i a0, a2, THREAD_RA # save return address 2061 s32i a1, a2, THREAD_SP # save stack pointer 2062#endif 2063 2064#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) 2065 movi a6, __stack_chk_guard 2066 l32i a8, a3, TASK_STACK_CANARY 2067 s32i a8, a6, 0 2068#endif 2069 2070 /* Disable ints while we manipulate the stack pointer. */ 2071 2072 irq_save a14, a3 2073 rsync 2074 2075 /* Switch CPENABLE */ 2076 2077#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS) 2078 l32i a3, a5, THREAD_CPENABLE 2079 xsr a3, cpenable 2080 s32i a3, a4, THREAD_CPENABLE 2081#endif 2082 2083#if XCHAL_HAVE_EXCLUSIVE 2084 l32i a3, a5, THREAD_ATOMCTL8 2085 getex a3 2086 s32i a3, a4, THREAD_ATOMCTL8 2087#endif 2088 2089 /* Flush register file. */ 2090 2091#if defined(__XTENSA_WINDOWED_ABI__) 2092 spill_registers_kernel 2093#endif 2094 2095 /* Set kernel stack (and leave critical section) 2096 * Note: It's save to set it here. The stack will not be overwritten 2097 * because the kernel stack will only be loaded again after 2098 * we return from kernel space. 2099 */ 2100 2101 rsr a3, excsave1 # exc_table 2102 addi a7, a5, PT_REGS_OFFSET 2103 s32i a7, a3, EXC_TABLE_KSTK 2104 2105 /* restore context of the task 'next' */ 2106 2107 l32i a0, a11, THREAD_RA # restore return address 2108 l32i a1, a11, THREAD_SP # restore stack pointer 2109 2110 load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER 2111 2112 wsr a14, ps 2113 rsync 2114 2115#if defined(__XTENSA_WINDOWED_ABI__) 2116 abi_ret(XTENSA_SPILL_STACK_RESERVE) 2117#elif defined(__XTENSA_CALL0_ABI__) 2118 l32i a12, sp, 0 2119 l32i a13, sp, 4 2120 l32i a14, sp, 8 2121 l32i a15, sp, 12 2122 abi_ret(16) 2123#else 2124#error Unsupported Xtensa ABI 2125#endif 2126 2127ENDPROC(_switch_to) 2128 2129ENTRY(ret_from_fork) 2130 2131 /* void schedule_tail (struct task_struct *prev) 2132 * Note: prev is still in abi_arg0 (return value from fake call frame) 2133 */ 2134 abi_call schedule_tail 2135 2136 mov abi_arg0, a1 2137 abi_call do_syscall_trace_leave 2138 j common_exception_return 2139 2140ENDPROC(ret_from_fork) 2141 2142/* 2143 * Kernel thread creation helper 2144 * On entry, set up by copy_thread: abi_saved0 = thread_fn, 2145 * abi_saved1 = thread_fn arg. Left from _switch_to: abi_arg0 = prev 2146 */ 2147ENTRY(ret_from_kernel_thread) 2148 2149 abi_call schedule_tail 2150 mov abi_arg0, abi_saved1 2151 abi_callx abi_saved0 2152 j common_exception_return 2153 2154ENDPROC(ret_from_kernel_thread) 2155