/*
 * Low-level exception handling
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004 - 2008 by Tensilica Inc.
 * Copyright (C) 2015 Cadence Design Systems Inc.
 *
 * Chris Zankel <chris@zankel.net>
 *
 */

#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/coprocessor.h>
#include <asm/thread_info.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/signal.h>
#include <asm/tlbflush.h>
#include <variant/tie-asm.h>

/* Unimplemented features. */

#undef KERNEL_STACK_OVERFLOW_CHECK

/* Not well tested.
 *
 * - fast_coprocessor
 */

/*
 * Macro to find the first bit set in WINDOWSTART from the left + 1
 *
 * 100....0 -> 1
 * 010....0 -> 2
 * 000....1 -> WSBITS
 */

        .macro  ffs_ws bit mask

#if XCHAL_HAVE_NSA
        nsau    \bit, \mask                     # 32-WSBITS ... 31 (32 iff 0)
        addi    \bit, \bit, WSBITS - 32 + 1     # uppermost bit set -> return 1
#else
        movi    \bit, WSBITS
#if WSBITS > 16
        _bltui  \mask, 0x10000, 99f
        addi    \bit, \bit, -16
        extui   \mask, \mask, 16, 16
#endif
#if WSBITS > 8
99:     _bltui  \mask, 0x100, 99f
        addi    \bit, \bit, -8
        srli    \mask, \mask, 8
#endif
99:     _bltui  \mask, 0x10, 99f
        addi    \bit, \bit, -4
        srli    \mask, \mask, 4
99:     _bltui  \mask, 0x4, 99f
        addi    \bit, \bit, -2
        srli    \mask, \mask, 2
99:     _bltui  \mask, 0x2, 99f
        addi    \bit, \bit, -1
99:

#endif
        .endm
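
/*
 * For reference, a hedged C model of ffs_ws (not part of the build; it
 * assumes a nonzero, WSBITS-wide mask and the WSBITS constant from this
 * file):
 *
 *	static inline unsigned ffs_ws(unsigned mask)
 *	{
 *		// for a top bit at position p, __builtin_clz(mask) is
 *		// 31 - p, so this returns WSBITS - p: 1 for the leftmost
 *		// mask bit, WSBITS for bit 0, matching the table above
 *		return WSBITS - (31 - __builtin_clz(mask));
 *	}
 */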

        .macro  irq_save flags tmp
#if XTENSA_FAKE_NMI
#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
        rsr     \flags, ps
        extui   \tmp, \flags, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
        bgei    \tmp, LOCKLEVEL, 99f
        rsil    \tmp, LOCKLEVEL
99:
#else
        movi    \tmp, LOCKLEVEL
        rsr     \flags, ps
        or      \flags, \flags, \tmp
        xsr     \flags, ps
        rsync
#endif
#else
        rsil    \flags, LOCKLEVEL
#endif
        .endm

/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */

/*
 * First-level exception handler for user exceptions.
 * Save some special registers, extra states and all registers in the AR
 * register file that were in use in the user task, and jump to the common
 * exception code.
 * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
 * save them for kernel exceptions).
 *
 * Entry condition for user_exception:
 *
 *   a0:        trashed, original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original value in depc
 *   a3:        a3
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave1:  dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _user_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _user_exception might be at an odd address. Don't use call0..call12
 */

ENTRY(user_exception)

        /* Save a1, a2, a3, and set SP. */

        rsr     a0, depc
        s32i    a1, a2, PT_AREG1
        s32i    a0, a2, PT_AREG2
        s32i    a3, a2, PT_AREG3
        mov     a1, a2

        .globl _user_exception
_user_exception:

        /* Save SAR and turn off single stepping */

        movi    a2, 0
        wsr     a2, depc                # terminate user stack trace with 0
        rsr     a3, sar
        xsr     a2, icountlevel
        s32i    a3, a1, PT_SAR
        s32i    a2, a1, PT_ICOUNTLEVEL

#if XCHAL_HAVE_THREADPTR
        rur     a2, threadptr
        s32i    a2, a1, PT_THREADPTR
#endif

        /* Rotate ws so that the current windowbase is at bit0. */
        /* Assume ws = xxwww1yyyy. Rotate ws right so that a2 = yyyyxxwww1 */

        rsr     a2, windowbase
        rsr     a3, windowstart
        ssr     a2
        s32i    a2, a1, PT_WINDOWBASE
        s32i    a3, a1, PT_WINDOWSTART
        slli    a2, a3, 32-WSBITS
        src     a2, a3, a2
        srli    a2, a2, 32-WSBITS
        s32i    a2, a1, PT_WMASK        # needed for restoring registers

        /* Save only live registers. */

        _bbsi.l a2, 1, 1f
        s32i    a4, a1, PT_AREG4
        s32i    a5, a1, PT_AREG5
        s32i    a6, a1, PT_AREG6
        s32i    a7, a1, PT_AREG7
        _bbsi.l a2, 2, 1f
        s32i    a8, a1, PT_AREG8
        s32i    a9, a1, PT_AREG9
        s32i    a10, a1, PT_AREG10
        s32i    a11, a1, PT_AREG11
        _bbsi.l a2, 3, 1f
        s32i    a12, a1, PT_AREG12
        s32i    a13, a1, PT_AREG13
        s32i    a14, a1, PT_AREG14
        s32i    a15, a1, PT_AREG15
        _bnei   a2, 1, 1f               # only one valid frame?

        /* Only one valid frame, skip saving regs. */

        j       2f

        /* Save the remaining registers.
         * We have to save all registers up to the first '1' from
         * the right, except the current frame (bit 0).
         * Assume a2 is: 001001000110001
         * All register frames starting from the top field to the marked '1'
         * must be saved.
         */

1:      addi    a3, a2, -1              # eliminate '1' in bit 0: yyyyxxww0
        neg     a3, a3                  # yyyyxxww0 -> YYYYXXWW1+1
        and     a3, a3, a2              # max. only one bit is set

        /* Find number of frames to save */

        ffs_ws  a0, a3                  # number of frames to the '1' from left

        /* Store information into WMASK:
         * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
         * bits 4...: number of valid 4-register frames
         */

        slli    a3, a0, 4               # number of frames to save in bits 8..4
        extui   a2, a2, 0, 4            # mask for the first 16 registers
        or      a2, a3, a2
        s32i    a2, a1, PT_WMASK        # needed when we restore the reg-file

        /* Save 4 registers at a time */

1:      rotw    -1
        s32i    a0, a5, PT_AREG_END - 16
        s32i    a1, a5, PT_AREG_END - 12
        s32i    a2, a5, PT_AREG_END - 8
        s32i    a3, a5, PT_AREG_END - 4
        addi    a0, a4, -1
        addi    a1, a5, -16
        _bnez   a0, 1b

        /* WINDOWBASE still in SAR! */

        rsr     a2, sar                 # original WINDOWBASE
        movi    a3, 1
        ssl     a2
        sll     a3, a3
        wsr     a3, windowstart         # set corresponding WINDOWSTART bit
        wsr     a2, windowbase          # and WINDOWBASE
        rsync

        /* We are back to the original stack pointer (a1) */

2:      /* Now, jump to the common exception handler. */

        j       common_exception

ENDPROC(user_exception)
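
/*
 * Hedged C model of the WINDOWSTART rotation and the WMASK encoding built
 * above (reference only; assumes WSBITS from this file, a rotation amount
 * 0 < wb < WSBITS, and that the single-frame case ws_rot == 1 was already
 * branched out, as the asm does):
 *
 *	unsigned ws_rot = ((ws >> wb) | (ws << (WSBITS - wb)))
 *			  & ((1u << WSBITS) - 1);  // current frame -> bit 0
 *	unsigned rest   = ws_rot - 1;              // clear bit 0 (always set)
 *	unsigned lowest = rest & -rest;            // oldest unsaved frame bit
 *	unsigned frames = WSBITS - (31 - __builtin_clz(lowest)); // ffs_ws
 *	unsigned wmask  = (frames << 4) | (ws_rot & 0xf);
 */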

/*
 * First-level exception handler for kernel exceptions
 * Save special registers and the live window frame.
 * Note: Even though we change the stack pointer, we don't have to do a
 *       MOVSP here, as we do that when we return from the exception.
 *       (See comment in the kernel exception exit code)
 *
 * Entry condition for kernel_exception:
 *
 *   a0:        trashed, original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original in DEPC
 *   a3:        a3
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave_1: dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _kernel_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _kernel_exception might be at an odd address. Don't use call0..call12
 */

ENTRY(kernel_exception)

        /* Save a1, a2, a3, and set SP. */

        rsr     a0, depc                # get a2
        s32i    a1, a2, PT_AREG1
        s32i    a0, a2, PT_AREG2
        s32i    a3, a2, PT_AREG3
        mov     a1, a2

        .globl _kernel_exception
_kernel_exception:

        /* Save SAR and turn off single stepping */

        movi    a2, 0
        rsr     a3, sar
        xsr     a2, icountlevel
        s32i    a3, a1, PT_SAR
        s32i    a2, a1, PT_ICOUNTLEVEL

        /* Rotate ws so that the current windowbase is at bit0. */
        /* Assume ws = xxwww1yyyy. Rotate ws right so that a2 = yyyyxxwww1 */

        rsr     a2, windowbase          # don't need to save these, we only
        rsr     a3, windowstart         # need shifted windowstart: windowmask
        ssr     a2
        slli    a2, a3, 32-WSBITS
        src     a2, a3, a2
        srli    a2, a2, 32-WSBITS
        s32i    a2, a1, PT_WMASK        # needed for kernel_exception_exit

        /* Save only the live window-frame */

        _bbsi.l a2, 1, 1f
        s32i    a4, a1, PT_AREG4
        s32i    a5, a1, PT_AREG5
        s32i    a6, a1, PT_AREG6
        s32i    a7, a1, PT_AREG7
        _bbsi.l a2, 2, 1f
        s32i    a8, a1, PT_AREG8
        s32i    a9, a1, PT_AREG9
        s32i    a10, a1, PT_AREG10
        s32i    a11, a1, PT_AREG11
        _bbsi.l a2, 3, 1f
        s32i    a12, a1, PT_AREG12
        s32i    a13, a1, PT_AREG13
        s32i    a14, a1, PT_AREG14
        s32i    a15, a1, PT_AREG15

        _bnei   a2, 1, 1f

        /* Copy spill slots of a0 and a1 to imitate movsp
         * in order to keep the exception stack continuous
         */
        l32i    a3, a1, PT_SIZE
        l32i    a0, a1, PT_SIZE + 4
        s32e    a3, a1, -16
        s32e    a0, a1, -12
1:
        l32i    a0, a1, PT_AREG0        # restore saved a0
        wsr     a0, depc

#ifdef KERNEL_STACK_OVERFLOW_CHECK

        /* Stack overflow check, for debugging */
        extui   a2, a1, TASK_SIZE_BITS,XX
        movi    a3, SIZE??
        _bge    a2, a3, out_of_stack_panic

#endif

/*
 * This is the common exception handler.
 * We get here from the user exception handler or simply by falling through
 * from the kernel exception handler.
 * Save the remaining special registers, switch to kernel mode, and jump
 * to the second-level exception handler.
 *
 */

common_exception:

        /* Save some registers, disable loops and clear the syscall flag. */

        rsr     a2, debugcause
        rsr     a3, epc1
        s32i    a2, a1, PT_DEBUGCAUSE
        s32i    a3, a1, PT_PC

        movi    a2, -1
        rsr     a3, excvaddr
        s32i    a2, a1, PT_SYSCALL
        movi    a2, 0
        s32i    a3, a1, PT_EXCVADDR
        xsr     a2, lcount
        s32i    a2, a1, PT_LCOUNT

        /* It is now safe to restore the EXC_TABLE_FIXUP variable. */

        rsr     a2, exccause
        movi    a3, 0
        rsr     a0, excsave1
        s32i    a2, a1, PT_EXCCAUSE
        s32i    a3, a0, EXC_TABLE_FIXUP

        /* All unrecoverable states are saved on stack, now, and a1 is valid.
         * Now we can allow exceptions again. In case we've got an interrupt,
         * PS.INTLEVEL is set to LOCKLEVEL, disabling further interrupts;
         * otherwise it's left unchanged.
         *
         * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
         */

        rsr     a3, ps
        s32i    a3, a1, PT_PS           # save ps

#if XTENSA_FAKE_NMI
        /* Correct PS needs to be saved in the PT_PS:
         * - in case of an exception or a level-1 interrupt it's in the PS,
         *   and is already saved.
         * - in case of a medium-level interrupt it's in EXCSAVE2.
         */
        movi    a0, EXCCAUSE_MAPPED_NMI
        extui   a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
        beq     a2, a0, .Lmedium_level_irq
        bnei    a2, EXCCAUSE_LEVEL1_INTERRUPT, .Lexception
        beqz    a3, .Llevel1_irq        # level-1 IRQ sets ps.intlevel to 0

.Lmedium_level_irq:
        rsr     a0, excsave2
        s32i    a0, a1, PT_PS           # save medium-level interrupt ps
        bgei    a3, LOCKLEVEL, .Lexception

.Llevel1_irq:
        movi    a3, LOCKLEVEL

.Lexception:
        movi    a0, 1 << PS_WOE_BIT
        or      a3, a3, a0
#else
        addi    a2, a2, -EXCCAUSE_LEVEL1_INTERRUPT
        movi    a0, LOCKLEVEL
        extui   a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
                                        # a3 = PS.INTLEVEL
        moveqz  a3, a0, a2              # a3 = LOCKLEVEL iff interrupt
        movi    a2, 1 << PS_WOE_BIT
        or      a3, a3, a2
        rsr     a2, exccause
#endif
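
/*
 * Hedged C model of the #else branch above (reference only): keep the
 * interrupted context's PS.INTLEVEL for genuine exceptions, raise it to
 * LOCKLEVEL for level-1 interrupts, and always set PS.WOE. The moveqz
 * implements the conditional without a branch.
 *
 *	unsigned level = (exccause == EXCCAUSE_LEVEL1_INTERRUPT)
 *		? LOCKLEVEL
 *		: (ps >> PS_INTLEVEL_SHIFT) & ((1 << PS_INTLEVEL_WIDTH) - 1);
 *	unsigned ps_new = level | (1 << PS_WOE_BIT); // EXCM/UM/RING/OWB = 0
 */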

        /* restore return address (or 0 if return to userspace) */
        rsr     a0, depc
        wsr     a3, ps
        rsync                           # PS.WOE => rsync => overflow

        /* Save lbeg, lend */

        rsr     a4, lbeg
        rsr     a3, lend
        s32i    a4, a1, PT_LBEG
        s32i    a3, a1, PT_LEND

        /* Save SCOMPARE1 */

#if XCHAL_HAVE_S32C1I
        rsr     a3, scompare1
        s32i    a3, a1, PT_SCOMPARE1
#endif

        /* Save optional registers. */

        save_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT

        /* Go to second-level dispatcher. Set up parameters to pass to the
         * exception handler and call the exception handler.
         */

        rsr     a4, excsave1
        mov     a6, a1                  # pass stack frame
        mov     a7, a2                  # pass EXCCAUSE
        addx4   a4, a2, a4
        l32i    a4, a4, EXC_TABLE_DEFAULT       # load handler

        /* Call the second-level handler */

        callx4  a4
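
/*
 * Hedged C model of the dispatch above (reference only; the table layout
 * is the one established by the EXC_TABLE_* offsets):
 *
 *	typedef void (*exc_handler_t)(struct pt_regs *regs, int exccause);
 *	exc_handler_t fn = *(exc_handler_t *)
 *		(excsave1 + exccause * 4 + EXC_TABLE_DEFAULT);
 *	fn(regs, exccause);	// windowed call4 ABI: args land in a6/a7
 */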

        /* Jump here for exception exit */
        .global common_exception_return
common_exception_return:

#if XTENSA_FAKE_NMI
        l32i    a2, a1, PT_EXCCAUSE
        movi    a3, EXCCAUSE_MAPPED_NMI
        beq     a2, a3, .LNMIexit
#endif
1:
        irq_save a2, a3
#ifdef CONFIG_TRACE_IRQFLAGS
        movi    a4, trace_hardirqs_off
        callx4  a4
#endif

        /* Jump if we are returning from kernel exceptions. */

        l32i    a3, a1, PT_PS
        GET_THREAD_INFO(a2, a1)
        l32i    a4, a2, TI_FLAGS
        _bbci.l a3, PS_UM_BIT, 6f

        /* Specific to a user exception exit:
         * We need to check some flags for signal handling and rescheduling,
         * and have to restore WB and WS, extra states, and all registers
         * in the register file that were in use in the user task.
         * Note that we don't disable interrupts here.
         */

        _bbsi.l a4, TIF_NEED_RESCHED, 3f
        _bbsi.l a4, TIF_NOTIFY_RESUME, 2f
        _bbci.l a4, TIF_SIGPENDING, 5f

2:      l32i    a4, a1, PT_DEPC
        bgeui   a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f

        /* Call do_notify_resume() */

#ifdef CONFIG_TRACE_IRQFLAGS
        movi    a4, trace_hardirqs_on
        callx4  a4
#endif
        rsil    a2, 0
        movi    a4, do_notify_resume    # int do_notify_resume(struct pt_regs*)
        mov     a6, a1
        callx4  a4
        j       1b

3:      /* Reschedule */

#ifdef CONFIG_TRACE_IRQFLAGS
        movi    a4, trace_hardirqs_on
        callx4  a4
#endif
        rsil    a2, 0
        movi    a4, schedule            # void schedule (void)
        callx4  a4
        j       1b

#ifdef CONFIG_PREEMPT
6:
        _bbci.l a4, TIF_NEED_RESCHED, 4f

        /* Check current_thread_info->preempt_count */

        l32i    a4, a2, TI_PRE_COUNT
        bnez    a4, 4f
        movi    a4, preempt_schedule_irq
        callx4  a4
        j       1b
#endif

#if XTENSA_FAKE_NMI
.LNMIexit:
        l32i    a3, a1, PT_PS
        _bbci.l a3, PS_UM_BIT, 4f
#endif

5:
#ifdef CONFIG_DEBUG_TLB_SANITY
        l32i    a4, a1, PT_DEPC
        bgeui   a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
        movi    a4, check_tlb_sanity
        callx4  a4
#endif
6:
4:
#ifdef CONFIG_TRACE_IRQFLAGS
        extui   a4, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH
        bgei    a4, LOCKLEVEL, 1f
        movi    a4, trace_hardirqs_on
        callx4  a4
1:
#endif
        /* Restore optional registers. */

        load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT

        /* Restore SCOMPARE1 */

#if XCHAL_HAVE_S32C1I
        l32i    a2, a1, PT_SCOMPARE1
        wsr     a2, scompare1
#endif
        wsr     a3, ps                  /* disable interrupts */

        _bbci.l a3, PS_UM_BIT, kernel_exception_exit

user_exception_exit:

        /* Restore the state of the task and return from the exception. */

        /* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */

        l32i    a2, a1, PT_WINDOWBASE
        l32i    a3, a1, PT_WINDOWSTART
        wsr     a1, depc                # use DEPC as temp storage
        wsr     a3, windowstart         # restore WINDOWSTART
        ssr     a2                      # preserve user's WB in the SAR
        wsr     a2, windowbase          # switch to user's saved WB
        rsync
        rsr     a1, depc                # restore stack pointer
        l32i    a2, a1, PT_WMASK        # register frames saved (in bits 4...9)
        rotw    -1                      # we restore a4..a7
        _bltui  a6, 16, 1f              # only have to restore current window?

        /* The working registers are a0 and a3. We are restoring to
         * a4..a7. Be careful not to destroy what we have just restored.
         * Note: wmask has the format YYYYM:
         *       Y: number of 4-register frames saved
         *       M: 4-bit mask of the first 16 registers
         */

        mov     a2, a6
        mov     a3, a5

2:      rotw    -1                      # a0..a3 become a4..a7
        addi    a3, a7, -4*4            # next iteration
        addi    a2, a6, -16             # decrementing Y in WMASK
        l32i    a4, a3, PT_AREG_END + 0
        l32i    a5, a3, PT_AREG_END + 4
        l32i    a6, a3, PT_AREG_END + 8
        l32i    a7, a3, PT_AREG_END + 12
        _bgeui  a2, 16, 2b

        /* Clear unrestored registers (don't leak anything to user-land) */

1:      rsr     a0, windowbase
        rsr     a3, sar
        sub     a3, a0, a3
        beqz    a3, 2f
        extui   a3, a3, 0, WBBITS

1:      rotw    -1
        addi    a3, a7, -1
        movi    a4, 0
        movi    a5, 0
        movi    a6, 0
        movi    a7, 0
        bgei    a3, 1, 1b

        /* We are back where we were when we started.
         * Note: a2 still contains WMASK (if we've returned to the original
         *       frame where we had loaded a2), or at least the lower 4 bits
         *       (if we have restored WSBITS-1 frames).
         */
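
/*
 * Hedged C model of the clearing loop above (reference only; wb_kernel,
 * wb_user and clear_frame() are illustrative names): every frame between
 * the kernel's WINDOWBASE and the user's WINDOWBASE (still held in SAR)
 * is zeroed so no kernel values can leak to user space.
 *
 *	unsigned todo = (wb_kernel - wb_user) & ((1u << WBBITS) - 1);
 *	while (todo--)
 *		clear_frame();		// rotw -1; a4..a7 = 0
 */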

2:
#if XCHAL_HAVE_THREADPTR
        l32i    a3, a1, PT_THREADPTR
        wur     a3, threadptr
#endif

        j       common_exception_exit

        /* This is the kernel exception exit.
         * We avoided doing a MOVSP when we entered the exception, but we
         * have to do it here.
         */

kernel_exception_exit:

        /* Check if we have to do a movsp.
         *
         * We only have to do a movsp if the previous window-frame has
         * been spilled to the *temporary* exception stack instead of the
         * task's stack. This is the case if the corresponding bit in
         * WINDOWSTART for the previous window-frame was set before
         * (not spilled) but is zero now (spilled).
         * If this bit is zero, all other bits except the one for the
         * current window frame are also zero. So, we can use a simple test:
         * 'and' WINDOWSTART and WINDOWSTART-1:
         *
         *  (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
         *
         * The result is zero only if one bit was set.
         *
         * (Note: We might have gone through several task switches before
         *        we come back to the current task, so WINDOWBASE might be
         *        different from the time the exception occurred.)
         */

        /* Test WINDOWSTART before and after the exception.
         * We actually have WMASK, so we only have to test if it is 1 or not.
         */

        l32i    a2, a1, PT_WMASK
        _beqi   a2, 1, common_exception_exit    # Spilled before exception, jump

        /* Test WINDOWSTART now. If spilled, do the movsp */

        rsr     a3, windowstart
        addi    a0, a3, -1
        and     a3, a3, a0
        _bnez   a3, common_exception_exit

        /* Do a movsp (we returned from a call4, so we have at least a0..a7) */

        addi    a0, a1, -16
        l32i    a3, a0, 0
        l32i    a4, a0, 4
        s32i    a3, a1, PT_SIZE+0
        s32i    a4, a1, PT_SIZE+4
        l32i    a3, a0, 8
        l32i    a4, a0, 12
        s32i    a3, a1, PT_SIZE+8
        s32i    a4, a1, PT_SIZE+12

        /* Common exception exit.
         * We restore the special register and the current window frame, and
         * return from the exception.
         *
         * Note: We expect a2 to hold PT_WMASK
         */

common_exception_exit:

        /* Restore address registers. */

        _bbsi.l a2, 1, 1f
        l32i    a4, a1, PT_AREG4
        l32i    a5, a1, PT_AREG5
        l32i    a6, a1, PT_AREG6
        l32i    a7, a1, PT_AREG7
        _bbsi.l a2, 2, 1f
        l32i    a8, a1, PT_AREG8
        l32i    a9, a1, PT_AREG9
        l32i    a10, a1, PT_AREG10
        l32i    a11, a1, PT_AREG11
        _bbsi.l a2, 3, 1f
        l32i    a12, a1, PT_AREG12
        l32i    a13, a1, PT_AREG13
        l32i    a14, a1, PT_AREG14
        l32i    a15, a1, PT_AREG15

        /* Restore PC, SAR */

1:      l32i    a2, a1, PT_PC
        l32i    a3, a1, PT_SAR
        wsr     a2, epc1
        wsr     a3, sar

        /* Restore LBEG, LEND, LCOUNT */

        l32i    a2, a1, PT_LBEG
        l32i    a3, a1, PT_LEND
        wsr     a2, lbeg
        l32i    a2, a1, PT_LCOUNT
        wsr     a3, lend
        wsr     a2, lcount

        /* We control single stepping through the ICOUNTLEVEL register. */

        l32i    a2, a1, PT_ICOUNTLEVEL
        movi    a3, -2
        wsr     a2, icountlevel
        wsr     a3, icount

        /* Check if it was a double exception. */

        l32i    a0, a1, PT_DEPC
        l32i    a3, a1, PT_AREG3
        l32i    a2, a1, PT_AREG2
        _bgeui  a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

        /* Restore a0...a3 and return */

        l32i    a0, a1, PT_AREG0
        l32i    a1, a1, PT_AREG1
        rfe

1:      wsr     a0, depc
        l32i    a0, a1, PT_AREG0
        l32i    a1, a1, PT_AREG1
        rfde

ENDPROC(kernel_exception)
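
/*
 * The WINDOWSTART test in kernel_exception_exit above is the classic
 * power-of-two check; a hedged C model (reference only):
 *
 *	// zero iff exactly one bit is set, i.e. only the current frame
 *	// is live and the previous frame was spilled to the stack
 *	int spilled = (ws & (ws - 1)) == 0;
 */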

/*
 * Debug exception handler.
 *
 * Currently, we don't support KGDB, so only user applications can be debugged.
 *
 * When we get here, a0 is trashed and saved to excsave[debuglevel]
 */

ENTRY(debug_exception)

        rsr     a0, SREG_EPS + XCHAL_DEBUGLEVEL
        bbsi.l  a0, PS_EXCM_BIT, 1f     # exception mode

        /* Set EPC1 and EXCCAUSE */

        wsr     a2, depc                # save a2 temporarily
        rsr     a2, SREG_EPC + XCHAL_DEBUGLEVEL
        wsr     a2, epc1

        movi    a2, EXCCAUSE_MAPPED_DEBUG
        wsr     a2, exccause

        /* Restore PS to the value before the debug exc but with PS.EXCM set.*/

        movi    a2, 1 << PS_EXCM_BIT
        or      a2, a0, a2
        movi    a0, debug_exception     # restore a3, debug jump vector
        wsr     a2, ps
        xsr     a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL

        /* Switch to kernel/user stack, restore jump vector, and save a0 */

        bbsi.l  a2, PS_UM_BIT, 2f       # jump if user mode

        addi    a2, a1, -16-PT_SIZE     # assume kernel stack
        s32i    a0, a2, PT_AREG0
        movi    a0, 0
        s32i    a1, a2, PT_AREG1
        s32i    a0, a2, PT_DEPC         # mark it as a regular exception
        xsr     a0, depc
        s32i    a3, a2, PT_AREG3
        s32i    a0, a2, PT_AREG2
        mov     a1, a2
        j       _kernel_exception

2:      rsr     a2, excsave1
        l32i    a2, a2, EXC_TABLE_KSTK  # load kernel stack pointer
        s32i    a0, a2, PT_AREG0
        movi    a0, 0
        s32i    a1, a2, PT_AREG1
        s32i    a0, a2, PT_DEPC
        xsr     a0, depc
        s32i    a3, a2, PT_AREG3
        s32i    a0, a2, PT_AREG2
        mov     a1, a2
        j       _user_exception

        /* Debug exception while in exception mode. */
1:      j       1b                      // FIXME!!

ENDPROC(debug_exception)

/*
 * We get here in case of an unrecoverable exception.
 * The only thing we can do is to be nice and print a panic message.
 * We only produce a single stack frame for panic, so ???
 *
 *
 * Entry conditions:
 *
 *   - a0 contains the caller address; original value saved in excsave1.
 *   - the original a0 contains a valid return address (backtrace) or 0.
 *   - a2 contains a valid stack pointer
 *
 * Notes:
 *
 *   - If the stack pointer could be invalid, the caller has to set up a
 *     dummy stack pointer (e.g. the stack of the init_task)
 *
 *   - If the return address could be invalid, the caller has to set it
 *     to 0, so the backtrace would stop.
 *
 */
        .align 4
unrecoverable_text:
        .ascii "Unrecoverable error in exception handler\0"

ENTRY(unrecoverable_exception)

        movi    a0, 1
        movi    a1, 0

        wsr     a0, windowstart
        wsr     a1, windowbase
        rsync

        movi    a1, (1 << PS_WOE_BIT) | LOCKLEVEL
        wsr     a1, ps
        rsync

        movi    a1, init_task
        movi    a0, 0
        addi    a1, a1, PT_REGS_OFFSET

        movi    a4, panic
        movi    a6, unrecoverable_text

        callx4  a4

1:      j       1b

ENDPROC(unrecoverable_exception)

/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */

/*
 * Fast-handler for alloca exceptions
 *
 *  The ALLOCA handler is entered when user code executes the MOVSP
 *  instruction and the caller's frame is not in the register file.
 *
 *  This algorithm was taken from Ross Morley's RTOS Porting Layer:
 *
 *	/home/ross/rtos/porting/XtensaRTOS-PortingLayer-20090507/xtensa_vectors.S
 *
 *  It leverages the existing window spill/fill routines and their support
 *  for double exceptions. The 'movsp' instruction will only cause an
 *  exception if the next window needs to be loaded. In fact this ALLOCA
 *  exception may be replaced at some point by changing the hardware to do
 *  an underflow exception of the proper size instead.
 *
 *  This algorithm simply backs out the register changes started by the user
 *  exception handler, makes it appear that we have started a window underflow
 *  by rotating the window back and then setting the old window base (OWB) in
 *  the 'ps' register with the rolled back window base. The 'movsp'
 *  instruction will be re-executed and this time, since the next window
 *  frame is in the active AR registers, it won't cause an exception.
 *
 *  If the WindowUnderflow code gets a TLB miss, the page will get mapped
 *  and the partial WindowUnderflow will be handled in the double exception
 *  handler.
 *
 * Entry condition:
 *
 *   a0:        trashed, original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original in DEPC
 *   a3:        a3
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave_1: dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_alloca)
        rsr     a0, windowbase
        rotw    -1
        rsr     a2, ps
        extui   a3, a2, PS_OWB_SHIFT, PS_OWB_WIDTH
        xor     a3, a3, a4
        l32i    a4, a6, PT_AREG0
        l32i    a1, a6, PT_DEPC
        rsr     a6, depc
        wsr     a1, depc
        slli    a3, a3, PS_OWB_SHIFT
        xor     a2, a2, a3
        wsr     a2, ps
        rsync

        _bbci.l a4, 31, 4f
        rotw    -1
        _bbci.l a8, 30, 8f
        rotw    -1
        j       _WindowUnderflow12
8:      j       _WindowUnderflow8
4:      j       _WindowUnderflow4
ENDPROC(fast_alloca)
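
/*
 * For context, a hedged sketch of the kind of user code that reaches
 * fast_alloca (reference only; use() is a placeholder): the compiler
 * emits movsp when it moves the stack pointer for dynamically sized
 * frames, and the exception fires when the caller's frame has already
 * been spilled out of the register file.
 *
 *	void f(int n)
 *	{
 *		char buf[n];	// VLA/alloca -> sp adjusted via movsp
 *		use(buf);
 *	}
 */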

/*
 * Fast system calls.
 *
 * WARNING: The kernel doesn't save the entire user context before
 * handling a fast system call. These functions are small and short,
 * usually offering some functionality not available to user tasks.
 *
 * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
 *
 * Entry condition:
 *
 *   a0:        trashed, original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original in DEPC
 *   a3:        a3
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave_1: dispatch table
 */

ENTRY(fast_syscall_kernel)

        /* Skip syscall. */

        rsr     a0, epc1
        addi    a0, a0, 3
        wsr     a0, epc1

        l32i    a0, a2, PT_DEPC
        bgeui   a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

        rsr     a0, depc                # get syscall-nr
        _beqz   a0, fast_syscall_spill_registers
        _beqi   a0, __NR_xtensa, fast_syscall_xtensa

        j       kernel_exception

ENDPROC(fast_syscall_kernel)

ENTRY(fast_syscall_user)

        /* Skip syscall. */

        rsr     a0, epc1
        addi    a0, a0, 3
        wsr     a0, epc1

        l32i    a0, a2, PT_DEPC
        bgeui   a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

        rsr     a0, depc                # get syscall-nr
        _beqz   a0, fast_syscall_spill_registers
        _beqi   a0, __NR_xtensa, fast_syscall_xtensa

        j       user_exception

ENDPROC(fast_syscall_user)
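
/*
 * Hedged sketch of how user space reaches the fast path (reference only;
 * the helper name is illustrative): the syscall number travels in a2,
 * and 0 selects the register-spill fast call. The "epc1 += 3" above then
 * steps over the 3-byte syscall opcode.
 *
 *	static inline void xtensa_spill_windows(void)
 *	{
 *		register unsigned long nr __asm__("a2") = 0;
 *		__asm__ volatile ("syscall" : "+a"(nr) :: "memory");
 *	}
 */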

ENTRY(fast_syscall_unrecoverable)

        /* Restore all states. */

        l32i    a0, a2, PT_AREG0        # restore a0
        xsr     a2, depc                # restore a2, depc

        wsr     a0, excsave1
        movi    a0, unrecoverable_exception
        callx0  a0

ENDPROC(fast_syscall_unrecoverable)

/*
 * sysxtensa syscall handler
 *
 * int sysxtensa (SYS_XTENSA_ATOMIC_SET,     ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_ADD,     ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
 *        a2            a6                   a3    a4      a5
 *
 * Entry condition:
 *
 *   a0:        a2 (syscall-nr), original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original in a0 and DEPC
 *   a3:        a3
 *   a4..a15:   unchanged
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave_1: dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Note: we don't have to save a2; a2 holds the return value
 *
 * We use the two macros TRY and CATCH:
 *
 * TRY   adds an entry to the __ex_table fixup table for the immediately
 *       following instruction.
 *
 * CATCH catches any exception that occurred at one of the preceding TRY
 *       statements and continues execution from there
 *
 * Usage  TRY    l32i a0, a1, 0
 *               <other code>
 *        done:  rfe
 *        CATCH  <set return code>
 *               j done
 */

#ifdef CONFIG_FAST_SYSCALL_XTENSA

#define TRY                             \
        .section __ex_table, "a";       \
        .word   66f, 67f;               \
        .text;                          \
66:

#define CATCH                           \
67:

ENTRY(fast_syscall_xtensa)

        s32i    a7, a2, PT_AREG7        # we need an additional register
        movi    a7, 4                   # sizeof(unsigned int)
        access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp

        _bgeui  a6, SYS_XTENSA_COUNT, .Lill
        _bnei   a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp

        /* Fall through for ATOMIC_CMP_SWP. */

.Lswp:  /* Atomic compare and swap */

TRY     l32i    a0, a3, 0               # read old value
        bne     a0, a4, 1f              # same as old value? jump
TRY     s32i    a5, a3, 0               # different, modify value
        l32i    a7, a2, PT_AREG7        # restore a7
        l32i    a0, a2, PT_AREG0        # restore a0
        movi    a2, 1                   # and return 1
        rfe

1:      l32i    a7, a2, PT_AREG7        # restore a7
        l32i    a0, a2, PT_AREG0        # restore a0
        movi    a2, 0                   # return 0
        rfe

.Lnswp: /* Atomic set, add, and exg_add. */

TRY     l32i    a7, a3, 0               # orig
        addi    a6, a6, -SYS_XTENSA_ATOMIC_SET
        add     a0, a4, a7              # + arg
        moveqz  a0, a4, a6              # set
        addi    a6, a6, SYS_XTENSA_ATOMIC_SET
TRY     s32i    a0, a3, 0               # write new value

        mov     a0, a2
        mov     a2, a7
        l32i    a7, a0, PT_AREG7        # restore a7
        l32i    a0, a0, PT_AREG0        # restore a0
        rfe

CATCH
.Leac:  l32i    a7, a2, PT_AREG7        # restore a7
        l32i    a0, a2, PT_AREG0        # restore a0
        movi    a2, -EFAULT
        rfe

.Lill:  l32i    a7, a2, PT_AREG7        # restore a7
        l32i    a0, a2, PT_AREG0        # restore a0
        movi    a2, -EINVAL
        rfe

ENDPROC(fast_syscall_xtensa)

#else /* CONFIG_FAST_SYSCALL_XTENSA */

ENTRY(fast_syscall_xtensa)

        l32i    a0, a2, PT_AREG0        # restore a0
        movi    a2, -ENOSYS
        rfe

ENDPROC(fast_syscall_xtensa)

#endif /* CONFIG_FAST_SYSCALL_XTENSA */
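
/*
 * Hedged C model of the four sysxtensa operations as implemented above
 * (reference only; the TRY/CATCH -EFAULT path is omitted, and op/argument
 * names follow the table in the comment):
 *
 *	int sysxtensa(int op, int *p, int v1, int v2)
 *	{
 *		int old = *p;				// TRY l32i
 *		switch (op) {
 *		case SYS_XTENSA_ATOMIC_SET:	*p = v1;       return old;
 *		case SYS_XTENSA_ATOMIC_ADD:
 *		case SYS_XTENSA_ATOMIC_EXG_ADD:	*p = old + v1; return old;
 *		case SYS_XTENSA_ATOMIC_CMP_SWP:
 *			if (old == v1) { *p = v2; return 1; }
 *			return 0;
 *		}
 *		return -EINVAL;				// .Lill
 *	}
 */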


/* fast_syscall_spill_registers.
 *
 * Entry condition:
 *
 *   a0:        trashed, original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original in DEPC
 *   a3:        a3
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave_1: dispatch table
 *
 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
 */

#ifdef CONFIG_FAST_SYSCALL_SPILL_REGISTERS

ENTRY(fast_syscall_spill_registers)

        /* Register a FIXUP handler (pass the current wb as a parameter) */

        xsr     a3, excsave1
        movi    a0, fast_syscall_spill_registers_fixup
        s32i    a0, a3, EXC_TABLE_FIXUP
        rsr     a0, windowbase
        s32i    a0, a3, EXC_TABLE_PARAM
        xsr     a3, excsave1            # restore a3 and excsave_1

        /* Save a3, a4 and SAR on stack. */

        rsr     a0, sar
        s32i    a3, a2, PT_AREG3
        s32i    a0, a2, PT_SAR

        /* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */

        s32i    a4, a2, PT_AREG4
        s32i    a7, a2, PT_AREG7
        s32i    a8, a2, PT_AREG8
        s32i    a11, a2, PT_AREG11
        s32i    a12, a2, PT_AREG12
        s32i    a15, a2, PT_AREG15

        /*
         * Rotate ws so that the current windowbase is at bit 0.
         * Assume ws = xxxwww1yy (www1 current window frame).
         * Rotate ws right so that a4 = yyxxxwww1.
         */

        rsr     a0, windowbase
        rsr     a3, windowstart         # a3 = xxxwww1yy
        ssr     a0                      # holds WB
        slli    a0, a3, WSBITS
        or      a3, a3, a0              # a3 = xxxwww1yyxxxwww1yy
        srl     a3, a3                  # a3 = 00xxxwww1yyxxxwww1

        /* We are done if there is no more than the current register frame. */

        extui   a3, a3, 1, WSBITS-1     # a3 = 0yyxxxwww
        movi    a0, (1 << (WSBITS-1))
        _beqz   a3, .Lnospill           # only one active frame? jump

        /* We want 1 at the top, so that we return to the current windowbase */

        or      a3, a3, a0              # 1yyxxxwww

        /* Skip empty frames - get the 'oldest' WINDOWSTART-bit. */

        wsr     a3, windowstart         # save shifted windowstart
        neg     a0, a3
        and     a3, a0, a3              # first bit set from right: 000010000

        ffs_ws  a0, a3                  # a0: shifts to skip empty frames
        movi    a3, WSBITS
        sub     a0, a3, a0              # WSBITS-a0: number of 0-bits from right
        ssr     a0                      # save in SAR for later.

        rsr     a3, windowbase
        add     a3, a3, a0
        wsr     a3, windowbase
        rsync

        rsr     a3, windowstart
        srl     a3, a3                  # shift windowstart

        /* WB is now just one frame below the oldest frame in the register
           window. WS is shifted so the oldest frame is in bit 0, thus, WB
           and WS differ by one 4-register frame. */

        /* Save frames. Depending on what call was used (call4, call8,
         * call12), we have to save 4, 8, or 12 registers.
         */
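
/*
 * Hedged C model of how the loop below picks the callee's frame size from
 * the WINDOWSTART bits just above the current frame (reference only):
 *
 *	if (ws & 2)		frame = 4;	// call4  -> .Lc4
 *	else if (ws & 4)	frame = 8;	// call8  -> .Lc8
 *	else if (ws & 8)	frame = 12;	// call12 -> .Lc12
 *	else			invalid_mask();	// corrupt WINDOWSTART
 */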

.Lloop: _bbsi.l a3, 1, .Lc4
        _bbci.l a3, 2, .Lc12

.Lc8:   s32e    a4, a13, -16
        l32e    a4, a5, -12
        s32e    a8, a4, -32
        s32e    a5, a13, -12
        s32e    a6, a13, -8
        s32e    a7, a13, -4
        s32e    a9, a4, -28
        s32e    a10, a4, -24
        s32e    a11, a4, -20
        srli    a11, a3, 2              # shift windowstart by 2
        rotw    2
        _bnei   a3, 1, .Lloop
        j       .Lexit

.Lc4:   s32e    a4, a9, -16
        s32e    a5, a9, -12
        s32e    a6, a9, -8
        s32e    a7, a9, -4

        srli    a7, a3, 1
        rotw    1
        _bnei   a3, 1, .Lloop
        j       .Lexit

.Lc12:  _bbci.l a3, 3, .Linvalid_mask   # bit 3 shouldn't be zero!

        /* 12-register frame (call12) */

        l32e    a0, a5, -12
        s32e    a8, a0, -48
        mov     a8, a0

        s32e    a9, a8, -44
        s32e    a10, a8, -40
        s32e    a11, a8, -36
        s32e    a12, a8, -32
        s32e    a13, a8, -28
        s32e    a14, a8, -24
        s32e    a15, a8, -20
        srli    a15, a3, 3

        /* The stack pointer for a4..a7 is out of reach, so we rotate the
         * window, grab the stack pointer, and rotate back.
         * Alternatively, we could also use the following approach, but that
         * makes the fixup routine much more complicated:
         * rotw 1
         * s32e a0, a13, -16
         * ...
         * rotw 2
         */

        rotw    1
        mov     a4, a13
        rotw    -1

        s32e    a4, a8, -16
        s32e    a5, a8, -12
        s32e    a6, a8, -8
        s32e    a7, a8, -4

        rotw    3

        _beqi   a3, 1, .Lexit
        j       .Lloop

.Lexit:

        /* Done. Do the final rotation and set WS */

        rotw    1
        rsr     a3, windowbase
        ssl     a3
        movi    a3, 1
        sll     a3, a3
        wsr     a3, windowstart
.Lnospill:

        /* Advance PC, restore registers and SAR, and return from exception. */

        l32i    a3, a2, PT_SAR
        l32i    a0, a2, PT_AREG0
        wsr     a3, sar
        l32i    a3, a2, PT_AREG3

        /* Restore clobbered registers. */

        l32i    a4, a2, PT_AREG4
        l32i    a7, a2, PT_AREG7
        l32i    a8, a2, PT_AREG8
        l32i    a11, a2, PT_AREG11
        l32i    a12, a2, PT_AREG12
        l32i    a15, a2, PT_AREG15

        movi    a2, 0
        rfe

.Linvalid_mask:

        /* We get here because of an unrecoverable error in the window
         * registers, so set up a dummy frame and kill the user application.
         * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
         */

        movi    a0, 1
        movi    a1, 0

        wsr     a0, windowstart
        wsr     a1, windowbase
        rsync

        movi    a0, 0

        rsr     a3, excsave1
        l32i    a1, a3, EXC_TABLE_KSTK

        movi    a4, (1 << PS_WOE_BIT) | LOCKLEVEL
        wsr     a4, ps
        rsync

        movi    a6, SIGSEGV
        movi    a4, do_exit
        callx4  a4

        /* shouldn't return, so panic */

        wsr     a0, excsave1
        movi    a0, unrecoverable_exception
        callx0  a0                      # should not return
1:      j       1b


ENDPROC(fast_syscall_spill_registers)

/* Fixup handler.
 *
 * We get here if the spill routine causes an exception, e.g. tlb miss.
 * We basically restore WINDOWBASE and WINDOWSTART to the condition when
 * we entered the spill routine and jump to the user exception handler.
 *
 * Note that we only need to restore the bits in windowstart that have not
 * been spilled yet by the _spill_registers routine. Luckily, a3 contains a
 * rotated windowstart with only those bits set for frames that haven't been
 * spilled yet. Because a3 is rotated such that bit 0 represents the register
 * frame for the current windowbase - 1, we need to rotate a3 left by the
 * value of the current windowbase + 1 and move it to windowstart.
 *
 * a0: value of depc, original value in depc
 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
 * a3: exctable, original value in excsave1
 */
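
/*
 * Hedged C model of that windowstart reconstruction (reference only;
 * rotl() is an illustrative WSBITS-wide left rotation):
 *
 *	// re-insert the bit for the current frame, then rotate the
 *	// pending (not-yet-spilled) mask back to its WB-relative place
 *	unsigned ws = rotl((pending << 1) | 1, wb);
 */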

ENTRY(fast_syscall_spill_registers_fixup)

        rsr     a2, windowbase          # get current windowbase (a2 is saved)
        xsr     a0, depc                # restore depc and a0
        ssl     a2                      # set shift (32 - WB)

        /* We need to make sure the current registers (a0-a3) are preserved.
         * To do this, we simply set the bit for the current window frame
         * in WS, so that the exception handlers save them to the task stack.
         *
         * Note: we use a3 to set the windowbase, so we take special care
         * of it, saving it in the original _spill_registers frame across
         * the exception handler call.
         */

        xsr     a3, excsave1            # get spill-mask
        slli    a3, a3, 1               # shift left by one
        addi    a3, a3, 1               # set the bit for the current window frame

        slli    a2, a3, 32-WSBITS
        src     a2, a3, a2              # a2 = xxwww1yyxxxwww1yy......
        wsr     a2, windowstart         # set corrected windowstart

        srli    a3, a3, 1
        rsr     a2, excsave1
        l32i    a2, a2, EXC_TABLE_DOUBLE_SAVE   # restore a2
        xsr     a2, excsave1
        s32i    a3, a2, EXC_TABLE_DOUBLE_SAVE   # save a3
        l32i    a3, a2, EXC_TABLE_PARAM # original WB (in user task)
        xsr     a2, excsave1

        /* Return to the original (user task) WINDOWBASE.
         * We leave the following frame behind:
         * a0, a1, a2   same
         * a3:          trashed (saved in EXC_TABLE_DOUBLE_SAVE)
         * depc:        depc (we have to return to that address)
         * excsave_1:   exctable
         */

        wsr     a3, windowbase
        rsync

        /* We are now in the original frame when we entered _spill_registers:
         *  a0: return address
         *  a1: used, stack pointer
         *  a2: kernel stack pointer
         *  a3: available
         *  depc: exception address
         *  excsave: exctable
         * Note: This frame might be the same as above.
         */

        /* Setup stack pointer. */

        addi    a2, a2, -PT_USER_SIZE
        s32i    a0, a2, PT_AREG0

        /* Make sure we return to this fixup handler. */

        movi    a3, fast_syscall_spill_registers_fixup_return
        s32i    a3, a2, PT_DEPC         # setup depc

        /* Jump to the exception handler. */

        rsr     a3, excsave1
        rsr     a0, exccause
        addx4   a0, a0, a3              # find entry in table
        l32i    a0, a0, EXC_TABLE_FAST_USER     # load handler
        l32i    a3, a3, EXC_TABLE_DOUBLE_SAVE
        jx      a0

ENDPROC(fast_syscall_spill_registers_fixup)

ENTRY(fast_syscall_spill_registers_fixup_return)

        /* When we return here, all registers have been restored (a2: DEPC) */

        wsr     a2, depc                # exception address

        /* Restore fixup handler. */

        rsr     a2, excsave1
        s32i    a3, a2, EXC_TABLE_DOUBLE_SAVE
        movi    a3, fast_syscall_spill_registers_fixup
        s32i    a3, a2, EXC_TABLE_FIXUP
        rsr     a3, windowbase
        s32i    a3, a2, EXC_TABLE_PARAM
        l32i    a2, a2, EXC_TABLE_KSTK

        /* Load WB at the time the exception occurred. */

        rsr     a3, sar                 # WB is still in SAR
        neg     a3, a3
        wsr     a3, windowbase
        rsync

        rsr     a3, excsave1
        l32i    a3, a3, EXC_TABLE_DOUBLE_SAVE

        rfde

ENDPROC(fast_syscall_spill_registers_fixup_return)

#else /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */

ENTRY(fast_syscall_spill_registers)

        l32i    a0, a2, PT_AREG0        # restore a0
        movi    a2, -ENOSYS
        rfe

ENDPROC(fast_syscall_spill_registers)

#endif /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */

#ifdef CONFIG_MMU
/*
 * We should never get here. Bail out!
 */

ENTRY(fast_second_level_miss_double_kernel)

1:      movi    a0, unrecoverable_exception
        callx0  a0                      # should not return
1:      j       1b

ENDPROC(fast_second_level_miss_double_kernel)

/* First-level entry handler for user, kernel, and double 2nd-level
 * TLB miss exceptions. Note that for now, user and kernel miss
 * exceptions share the same entry point and are handled identically.
 *
 * An old, less-efficient C version of this function used to exist.
 * We include it below, interleaved as comments, for reference.
 *
 * Entry condition:
 *
 *   a0:        trashed, original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original in DEPC
 *   a3:        a3
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave_1: dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_second_level_miss)

        /* Save a1 and a3. Note: we don't expect a double exception. */

        s32i    a1, a2, PT_AREG1
        s32i    a3, a2, PT_AREG3

        /* We need to map the page of PTEs for the user task.  Find
         * the pointer to that page.  Also, it's possible for tsk->mm
         * to be NULL while tsk->active_mm is nonzero if we faulted on
         * a vmalloc address.  In that rare case, we must use
         * active_mm instead to avoid a fault in this handler.  See
         *
         * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
         *   (or search Internet on "mm vs. active_mm")
         *
         *      if (!mm)
         *              mm = tsk->active_mm;
         *      pgd = pgd_offset (mm, regs->excvaddr);
         *      pmd = pmd_offset (pgd, regs->excvaddr);
         *      pmdval = *pmd;
         */

        GET_CURRENT(a1,a2)
        l32i    a0, a1, TASK_MM         # tsk->mm
        beqz    a0, 9f

8:      rsr     a3, excvaddr            # fault address
        _PGD_OFFSET(a0, a3, a1)
        l32i    a0, a0, 0               # read pmdval
        beqz    a0, 2f

        /* Read ptevaddr and convert to top of page-table page.
         *
         *      vpnval = read_ptevaddr_register() & PAGE_MASK;
         *      vpnval += DTLB_WAY_PGTABLE;
         *      pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
         *      write_dtlb_entry (pteval, vpnval);
         *
         * The messy computation for 'pteval' above really simplifies
         * into the following:
         *
         * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_DIRECTORY
         */

        movi    a1, (-PAGE_OFFSET) & 0xffffffff
        add     a0, a0, a1              # pmdval - PAGE_OFFSET
        extui   a1, a0, 0, PAGE_SHIFT   # ... & PAGE_MASK
        xor     a0, a0, a1

        movi    a1, _PAGE_DIRECTORY
        or      a0, a0, a1              # ... | PAGE_DIRECTORY

        /*
         * We utilize all three wired ways (7-9) to hold pmd translations.
         * Memory regions are mapped to the DTLBs according to bits 28 and 29.
         * This allows mapping the three most common regions to three different
         * DTLBs:
         *  0,1 -> way 7        program (0040.0000) and virtual (c000.0000)
         *  2   -> way 8        shared libraries (2000.0000)
         *  3   -> way 9        stack (3000.0000)
         */

        extui   a3, a3, 28, 2           # address bits 28 and 29       0,1,2,3
        rsr     a1, ptevaddr
        addx2   a3, a3, a3              # -> 0,3,6,9
        srli    a1, a1, PAGE_SHIFT
        extui   a3, a3, 2, 2            # -> 0,0,1,2
        slli    a1, a1, PAGE_SHIFT      # ptevaddr & PAGE_MASK
        addi    a3, a3, DTLB_WAY_PGD
        add     a1, a1, a3              # ... + way_number

3:      wdtlb   a0, a1
        dsync
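
/*
 * Hedged C model of the pteval and way-selection arithmetic above
 * (reference only; write_dtlb_entry() is the helper already named in the
 * pseudo-code comment):
 *
 *	unsigned long pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK)
 *			       | _PAGE_DIRECTORY;
 *	unsigned t   = (excvaddr >> 28) & 3;		// address bits 28..29
 *	unsigned way = DTLB_WAY_PGD + ((3 * t) >> 2);	// t: 0,1,2,3 -> 0,0,1,2
 *	unsigned long vpn = (ptevaddr & PAGE_MASK) + way;
 *	write_dtlb_entry(pteval, vpn);			// wdtlb
 */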

        /* Exit critical section. */

4:      rsr     a3, excsave1
        movi    a0, 0
        s32i    a0, a3, EXC_TABLE_FIXUP

        /* Restore the working registers, and return. */

        l32i    a0, a2, PT_AREG0
        l32i    a1, a2, PT_AREG1
        l32i    a3, a2, PT_AREG3
        l32i    a2, a2, PT_DEPC

        bgeui   a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

        /* Restore excsave1 and return. */

        rsr     a2, depc
        rfe

        /* Return from double exception. */

1:      xsr     a2, depc
        esync
        rfde

9:      l32i    a0, a1, TASK_ACTIVE_MM  # unlikely case mm == 0
        bnez    a0, 8b

        /* Even more unlikely case active_mm == 0.
         * We can get here with an NMI in the middle of a context_switch that
         * touches the vmalloc area.
         */
        movi    a0, init_mm
        j       8b

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

2:      /* Special case for cache aliasing.
         * We (should) only get here if a clear_user_page, copy_user_page
         * or the aliased cache flush functions got preempted by another
         * task. Re-establish a temporary mapping to the TLBTEMP_BASE areas.
         */

        /* We shouldn't be in a double exception */

        l32i    a0, a2, PT_DEPC
        bgeui   a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f

        /* Make sure the exception originated in the special functions */

        movi    a0, __tlbtemp_mapping_start
        rsr     a3, epc1
        bltu    a3, a0, 2f
        movi    a0, __tlbtemp_mapping_end
        bgeu    a3, a0, 2f

        /* Check if excvaddr was in one of the TLBTEMP_BASE areas. */

        movi    a3, TLBTEMP_BASE_1
        rsr     a0, excvaddr
        bltu    a0, a3, 2f

        addi    a1, a0, -TLBTEMP_SIZE
        bgeu    a1, a3, 2f

        /* Check if we have to restore an ITLB mapping. */

        movi    a1, __tlbtemp_mapping_itlb
        rsr     a3, epc1
        sub     a3, a3, a1

        /* Calculate VPN */

        movi    a1, PAGE_MASK
        and     a1, a1, a0

        /* Jump for ITLB entry */

        bgez    a3, 1f

        /* We can use up to two TLBTEMP areas, one for src and one for dst. */

        extui   a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1
        add     a1, a3, a1

        /* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */

        mov     a0, a6
        movnez  a0, a7, a3
        j       3b

        /* ITLB entry. We only use dst in a6. */

1:      witlb   a6, a1
        isync
        j       4b


#endif  // DCACHE_WAY_SIZE > PAGE_SIZE


2:      /* Invalid PGD, default exception handling */

        rsr     a1, depc
        s32i    a1, a2, PT_AREG2
        mov     a1, a2

        rsr     a2, ps
        bbsi.l  a2, PS_UM_BIT, 1f
        j       _kernel_exception
1:      j       _user_exception

ENDPROC(fast_second_level_miss)

/*
 * StoreProhibitedException
 *
 * Update the pte and invalidate the dtlb mapping for this pte.
 *
 * Entry condition:
 *
 *   a0:        trashed, original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original in DEPC
 *   a3:        a3
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave_1: dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_store_prohibited)

        /* Save a1 and a3. */

        s32i    a1, a2, PT_AREG1
        s32i    a3, a2, PT_AREG3

        GET_CURRENT(a1,a2)
        l32i    a0, a1, TASK_MM         # tsk->mm
        beqz    a0, 9f

8:      rsr     a1, excvaddr            # fault address
        _PGD_OFFSET(a0, a1, a3)
        l32i    a0, a0, 0
        beqz    a0, 2f

        /*
         * Note that we test _PAGE_WRITABLE_BIT only if PTE is present
         * and is not PAGE_NONE. See pgtable.h for possible PTE layouts.
         */
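
/*
 * Hedged C model of the fast write-fault path below (reference only;
 * handle_in_c() and update_dtlb() are stand-ins for the '2:' slow path
 * and the pdtlb/wdtlb probe-and-write pair):
 *
 *	unsigned long pte = *ptep;
 *	if ((pte & _PAGE_CA_INVALID) == _PAGE_CA_INVALID	// ball
 *	    || !(pte & (1 << _PAGE_WRITABLE_BIT)))		// bbci.l
 *		return handle_in_c();
 *	pte |= _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE;
 *	*ptep = pte;
 *	update_dtlb(excvaddr, pte);
 */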

        _PTE_OFFSET(a0, a1, a3)
        l32i    a3, a0, 0               # read pteval
        movi    a1, _PAGE_CA_INVALID
        ball    a3, a1, 2f
        bbci.l  a3, _PAGE_WRITABLE_BIT, 2f

        movi    a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
        or      a3, a3, a1
        rsr     a1, excvaddr
        s32i    a3, a0, 0

        /* We need to flush the cache if we have page coloring. */
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
        dhwb    a0, 0
#endif
        pdtlb   a0, a1
        wdtlb   a3, a0

        /* Exit critical section. */

        movi    a0, 0
        rsr     a3, excsave1
        s32i    a0, a3, EXC_TABLE_FIXUP

        /* Restore the working registers, and return. */

        l32i    a3, a2, PT_AREG3
        l32i    a1, a2, PT_AREG1
        l32i    a0, a2, PT_AREG0
        l32i    a2, a2, PT_DEPC

        bgeui   a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

        rsr     a2, depc
        rfe

        /* Double exception. Restore FIXUP handler and return. */

1:      xsr     a2, depc
        esync
        rfde

9:      l32i    a0, a1, TASK_ACTIVE_MM  # unlikely case mm == 0
        j       8b

2:      /* If there was a problem, handle the fault in C */

        rsr     a3, depc                # still holds a2
        s32i    a3, a2, PT_AREG2
        mov     a1, a2

        rsr     a2, ps
        bbsi.l  a2, PS_UM_BIT, 1f
        j       _kernel_exception
1:      j       _user_exception

ENDPROC(fast_store_prohibited)

#endif /* CONFIG_MMU */

/*
 * System Calls.
 *
 * void system_call (struct pt_regs* regs, int exccause)
 *                            a2                 a3
 */

ENTRY(system_call)

        entry   a1, 32

        /* regs->syscall = regs->areg[2] */

        l32i    a3, a2, PT_AREG2
        mov     a6, a2
        movi    a4, do_syscall_trace_enter
        s32i    a3, a2, PT_SYSCALL
        callx4  a4

        /* syscall = sys_call_table[syscall_nr] */

        movi    a4, sys_call_table
        movi    a5, __NR_syscall_count
        movi    a6, -ENOSYS
        bgeu    a3, a5, 1f

        addx4   a4, a3, a4
        l32i    a4, a4, 0
        movi    a5, sys_ni_syscall
        beq     a4, a5, 1f

        /* Load args: arg0 - arg5 are passed via regs. */

        l32i    a6, a2, PT_AREG6
        l32i    a7, a2, PT_AREG3
        l32i    a8, a2, PT_AREG4
        l32i    a9, a2, PT_AREG5
        l32i    a10, a2, PT_AREG8
        l32i    a11, a2, PT_AREG9

        /* Pass one additional argument to the syscall: pt_regs (on stack) */
        s32i    a2, a1, 0

        callx4  a4

1:      /* regs->areg[2] = return_value */

        s32i    a6, a2, PT_AREG2
        movi    a4, do_syscall_trace_leave
        mov     a6, a2
        callx4  a4
        retw

ENDPROC(system_call)
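
/*
 * Hedged C model of the dispatch core in system_call above (reference
 * only; the trace hooks are omitted and syscall_t is assumed to match
 * the kernel's convention):
 *
 *	typedef long (*syscall_t)(long, long, long, long, long, long);
 *	long ret = -ENOSYS;
 *	if (nr < __NR_syscall_count) {
 *		syscall_t fn = sys_call_table[nr];
 *		if (fn != sys_ni_syscall)
 *			ret = fn(regs->areg[6], regs->areg[3], regs->areg[4],
 *				 regs->areg[5], regs->areg[8], regs->areg[9]);
 *	}
 *	regs->areg[2] = ret;
 */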

/*
 * Macro to spill live registers on the kernel stack.
 *
 * Entry condition: ps.woe is set, ps.excm is cleared
 * Exit condition: windowstart has a single bit set
 * May clobber: a12, a13
 */
        .macro  spill_registers_kernel

#if XCHAL_NUM_AREGS > 16
        call12  1f
        _j      2f
        retw
        .align  4
1:
        _entry  a1, 48
        addi    a12, a0, 3
#if XCHAL_NUM_AREGS > 32
        .rept   (XCHAL_NUM_AREGS - 32) / 12
        _entry  a1, 48
        mov     a12, a0
        .endr
#endif
        _entry  a1, 16
#if XCHAL_NUM_AREGS % 12 == 0
        mov     a8, a8
#elif XCHAL_NUM_AREGS % 12 == 4
        mov     a12, a12
#elif XCHAL_NUM_AREGS % 12 == 8
        mov     a4, a4
#endif
        retw
2:
#else
        mov     a12, a12
#endif
        .endm

/*
 * Task switch.
 *
 * struct task*  _switch_to (struct task* prev, struct task* next)
 *         a2                         a2                 a3
 */

ENTRY(_switch_to)

        entry   a1, 48

        mov     a11, a3                 # preserve 'next' (a3)

        l32i    a4, a2, TASK_THREAD_INFO
        l32i    a5, a3, TASK_THREAD_INFO

        save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER

#if THREAD_RA > 1020 || THREAD_SP > 1020
        addi    a10, a2, TASK_THREAD
        s32i    a0, a10, THREAD_RA - TASK_THREAD        # save return address
        s32i    a1, a10, THREAD_SP - TASK_THREAD        # save stack pointer
#else
        s32i    a0, a2, THREAD_RA       # save return address
        s32i    a1, a2, THREAD_SP       # save stack pointer
#endif

        /* Disable interrupts while we manipulate the stack pointer. */

        irq_save a14, a3
        rsync

        /* Switch CPENABLE */

#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
        l32i    a3, a5, THREAD_CPENABLE
        xsr     a3, cpenable
        s32i    a3, a4, THREAD_CPENABLE
#endif

        /* Flush register file. */

        spill_registers_kernel

        /* Set kernel stack (and leave critical section)
         * Note: It's safe to set it here. The stack will not be overwritten
         *       because the kernel stack will only be loaded again after
         *       we return from kernel space.
         */

        rsr     a3, excsave1            # exc_table
        addi    a7, a5, PT_REGS_OFFSET
        s32i    a7, a3, EXC_TABLE_KSTK

        /* Restore context of the task 'next' */

        l32i    a0, a11, THREAD_RA      # restore return address
        l32i    a1, a11, THREAD_SP      # restore stack pointer

        load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER

        wsr     a14, ps
        rsync

        retw

ENDPROC(_switch_to)

ENTRY(ret_from_fork)

        /* void schedule_tail (struct task_struct *prev)
         * Note: prev is still in a6 (return value from fake call4 frame)
         */
        movi    a4, schedule_tail
        callx4  a4

        movi    a4, do_syscall_trace_leave
        mov     a6, a1
        callx4  a4

        j       common_exception_return

ENDPROC(ret_from_fork)

/*
 * Kernel thread creation helper
 * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg
 * left from _switch_to: a6 = prev
 */
ENTRY(ret_from_kernel_thread)

        call4   schedule_tail
        mov     a6, a3
        callx4  a2
        j       common_exception_return

ENDPROC(ret_from_kernel_thread)