/*
 * arch/xtensa/kernel/entry.S
 *
 * Low-level exception handling
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004 - 2008 by Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 *
 */

#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/coprocessor.h>
#include <asm/thread_info.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/signal.h>
#include <asm/tlbflush.h>
#include <variant/tie-asm.h>

/* Unimplemented features. */

#undef KERNEL_STACK_OVERFLOW_CHECK
#undef PREEMPTIBLE_KERNEL
#undef ALLOCA_EXCEPTION_IN_IRAM

/* Not well tested.
 *
 * - fast_coprocessor
 */

/*
 * Macro to find first bit set in WINDOWBASE from the left + 1
 *
 * 100....0 -> 1
 * 010....0 -> 2
 * 000....1 -> WSBITS
 */

	.macro	ffs_ws bit mask

#if XCHAL_HAVE_NSA
	nsau	\bit, \mask			# 32-WSBITS ... 31 (32 iff 0)
	addi	\bit, \bit, WSBITS - 32 + 1	# topmost bit set -> return 1
#else
	movi	\bit, WSBITS
#if WSBITS > 16
	_bltui	\mask, 0x10000, 99f
	addi	\bit, \bit, -16
	extui	\mask, \mask, 16, 16
#endif
#if WSBITS > 8
99:	_bltui	\mask, 0x100, 99f
	addi	\bit, \bit, -8
	srli	\mask, \mask, 8
#endif
99:	_bltui	\mask, 0x10, 99f
	addi	\bit, \bit, -4
	srli	\mask, \mask, 4
99:	_bltui	\mask, 0x4, 99f
	addi	\bit, \bit, -2
	srli	\mask, \mask, 2
99:	_bltui	\mask, 0x2, 99f
	addi	\bit, \bit, -1
99:

#endif
	.endm

/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */

/*
 * First-level exception handler for user exceptions.
 * Save some special registers, extra states and all registers in the AR
 * register file that were in use in the user task, and jump to the common
 * exception code.
 * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
 * save them for kernel exceptions).
 *
 * Entry condition for user_exception:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original value in depc
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _user_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _user_exception might be at an odd address. Don't use call0..call12.
 */

ENTRY(user_exception)

	/* Save a2, a3, and depc, restore excsave_1 and set SP. */

	xsr	a3, excsave1
	rsr	a0, depc
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	.globl _user_exception
_user_exception:

	/* Save SAR and turn off single stepping */

	movi	a2, 0
	rsr	a3, sar
	xsr	a2, icountlevel
	s32i	a3, a1, PT_SAR
	s32i	a2, a1, PT_ICOUNTLEVEL

#if XCHAL_HAVE_THREADPTR
	rur	a2, threadptr
	s32i	a2, a1, PT_THREADPTR
#endif

	/* Rotate ws so that the current windowbase is at bit0. */
	/* Assume ws = xxwww1yyyy. Rotate ws right so that a2 = yyyyxxwww1. */
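	/* Illustrative C sketch of the rotation below (editorially added, not
	 * generated code): with SAR = WINDOWBASE, the slli/src/srli triple
	 * computes roughly
	 *
	 *	wmask = ror(windowstart, windowbase) & ((1 << WSBITS) - 1);
	 *
	 * i.e. a rotate-right of WINDOWSTART by WINDOWBASE bits, so that the
	 * current frame's WINDOWSTART bit lands at bit 0.
	 */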
	rsr	a2, windowbase
	rsr	a3, windowstart
	ssr	a2
	s32i	a2, a1, PT_WINDOWBASE
	s32i	a3, a1, PT_WINDOWSTART
	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2
	srli	a2, a2, 32-WSBITS
	s32i	a2, a1, PT_WMASK	# needed for restoring registers

	/* Save only live registers. */

	_bbsi.l	a2, 1, 1f
	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15
	_bnei	a2, 1, 1f		# only one valid frame?

	/* Only one valid frame, skip saving regs. */

	j	2f

	/* Save the remaining registers.
	 * We have to save all registers up to the first '1' from
	 * the right, except the current frame (bit 0).
	 * Assume a2 is:  001001000110001
	 * All register frames starting from the top field to the marked '1'
	 * must be saved.
	 */

1:	addi	a3, a2, -1		# eliminate '1' in bit 0: yyyyxxww0
	neg	a3, a3			# yyyyxxww0 -> YYYYXXWW1+1
	and	a3, a3, a2		# max. only one bit is set

	/* Find number of frames to save */

	ffs_ws	a0, a3			# number of frames to the '1' from left

	/* Store information into WMASK:
	 * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
	 * bits 4...: number of valid 4-register frames
	 */

	slli	a3, a0, 4		# number of frames to save in bits 8..4
	extui	a2, a2, 0, 4		# mask for the first 16 registers
	or	a2, a3, a2
	s32i	a2, a1, PT_WMASK	# needed when we restore the reg-file

	/* Save 4 registers at a time */

1:	rotw	-1
	s32i	a0, a5, PT_AREG_END - 16
	s32i	a1, a5, PT_AREG_END - 12
	s32i	a2, a5, PT_AREG_END - 8
	s32i	a3, a5, PT_AREG_END - 4
	addi	a0, a4, -1
	addi	a1, a5, -16
	_bnez	a0, 1b

	/* WINDOWBASE still in SAR! */

	rsr	a2, sar			# original WINDOWBASE
	movi	a3, 1
	ssl	a2
	sll	a3, a3
	wsr	a3, windowstart		# set corresponding WINDOWSTART bit
	wsr	a2, windowbase		# and WINDOWBASE
	rsync

	/* We are back to the original stack pointer (a1) */

2:	/* Now, jump to the common exception handler. */

	j	common_exception

ENDPROC(user_exception)

/*
 * First-level exception handler for kernel exceptions
 * Save special registers and the live window frame.
 * Note: Even though we change the stack pointer, we don't have to do a
 *	 MOVSP here, as we do that when we return from the exception.
 *	 (See comment in the kernel exception exit code.)
 *
 * Entry condition for kernel_exception:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _kernel_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _kernel_exception might be at an odd address. Don't use call0..call12.
 */

ENTRY(kernel_exception)

	/* Save a0, a2, a3, DEPC and set SP. */
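	/* Illustrative C sketch of the entry sequence below (editorial; field
	 * names assumed from asm-offsets):
	 *
	 *	regs->areg[1] = a1;		// PT_AREG1
	 *	regs->areg[2] = <old a2>;	// PT_AREG2, taken from DEPC
	 *	regs->areg[3] = <old a3>;	// PT_AREG3, taken from EXCSAVE_1
	 *	a1 = regs;			// switch to the exception frame
	 */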
*/ 260 261 xsr a3, excsave1 # restore a3, excsave_1 262 rsr a0, depc # get a2 263 s32i a1, a2, PT_AREG1 264 s32i a0, a2, PT_AREG2 265 s32i a3, a2, PT_AREG3 266 mov a1, a2 267 268 .globl _kernel_exception 269_kernel_exception: 270 271 /* Save SAR and turn off single stepping */ 272 273 movi a2, 0 274 rsr a3, sar 275 xsr a2, icountlevel 276 s32i a3, a1, PT_SAR 277 s32i a2, a1, PT_ICOUNTLEVEL 278 279 /* Rotate ws so that the current windowbase is at bit0. */ 280 /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */ 281 282 rsr a2, windowbase # don't need to save these, we only 283 rsr a3, windowstart # need shifted windowstart: windowmask 284 ssr a2 285 slli a2, a3, 32-WSBITS 286 src a2, a3, a2 287 srli a2, a2, 32-WSBITS 288 s32i a2, a1, PT_WMASK # needed for kernel_exception_exit 289 290 /* Save only the live window-frame */ 291 292 _bbsi.l a2, 1, 1f 293 s32i a4, a1, PT_AREG4 294 s32i a5, a1, PT_AREG5 295 s32i a6, a1, PT_AREG6 296 s32i a7, a1, PT_AREG7 297 _bbsi.l a2, 2, 1f 298 s32i a8, a1, PT_AREG8 299 s32i a9, a1, PT_AREG9 300 s32i a10, a1, PT_AREG10 301 s32i a11, a1, PT_AREG11 302 _bbsi.l a2, 3, 1f 303 s32i a12, a1, PT_AREG12 304 s32i a13, a1, PT_AREG13 305 s32i a14, a1, PT_AREG14 306 s32i a15, a1, PT_AREG15 307 3081: 309 310#ifdef KERNEL_STACK_OVERFLOW_CHECK 311 312 /* Stack overflow check, for debugging */ 313 extui a2, a1, TASK_SIZE_BITS,XX 314 movi a3, SIZE?? 315 _bge a2, a3, out_of_stack_panic 316 317#endif 318 319/* 320 * This is the common exception handler. 321 * We get here from the user exception handler or simply by falling through 322 * from the kernel exception handler. 323 * Save the remaining special registers, switch to kernel mode, and jump 324 * to the second-level exception handler. 325 * 326 */ 327 328common_exception: 329 330 /* Save some registers, disable loops and clear the syscall flag. */ 331 332 rsr a2, debugcause 333 rsr a3, epc1 334 s32i a2, a1, PT_DEBUGCAUSE 335 s32i a3, a1, PT_PC 336 337 movi a2, -1 338 rsr a3, excvaddr 339 s32i a2, a1, PT_SYSCALL 340 movi a2, 0 341 s32i a3, a1, PT_EXCVADDR 342 xsr a2, lcount 343 s32i a2, a1, PT_LCOUNT 344 345 /* It is now save to restore the EXC_TABLE_FIXUP variable. */ 346 347 rsr a0, exccause 348 movi a3, 0 349 rsr a2, excsave1 350 s32i a0, a1, PT_EXCCAUSE 351 s32i a3, a2, EXC_TABLE_FIXUP 352 353 /* All unrecoverable states are saved on stack, now, and a1 is valid, 354 * so we can allow exceptions and interrupts (*) again. 355 * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X) 356 * 357 * (*) We only allow interrupts if they were previously enabled and 358 * we're not handling an IRQ 359 */ 360 361 rsr a3, ps 362 addi a0, a0, -EXCCAUSE_LEVEL1_INTERRUPT 363 movi a2, LOCKLEVEL 364 extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH 365 # a3 = PS.INTLEVEL 366 moveqz a3, a2, a0 # a3 = LOCKLEVEL iff interrupt 367 movi a2, 1 << PS_WOE_BIT 368 or a3, a3, a2 369 rsr a0, exccause 370 xsr a3, ps 371 372 s32i a3, a1, PT_PS # save ps 373 374 /* Save lbeg, lend */ 375 376 rsr a2, lbeg 377 rsr a3, lend 378 s32i a2, a1, PT_LBEG 379 s32i a3, a1, PT_LEND 380 381 /* Save SCOMPARE1 */ 382 383#if XCHAL_HAVE_S32C1I 384 rsr a2, scompare1 385 s32i a2, a1, PT_SCOMPARE1 386#endif 387 388 /* Save optional registers. */ 389 390 save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT 391 392#ifdef CONFIG_TRACE_IRQFLAGS 393 l32i a4, a1, PT_DEPC 394 /* Double exception means we came here with an exception 395 * while PS.EXCM was set, i.e. interrupts disabled. 
396 */ 397 bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f 398 l32i a4, a1, PT_EXCCAUSE 399 bnei a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f 400 /* We came here with an interrupt means interrupts were enabled 401 * and we've just disabled them. 402 */ 403 movi a4, trace_hardirqs_off 404 callx4 a4 4051: 406#endif 407 408 /* Go to second-level dispatcher. Set up parameters to pass to the 409 * exception handler and call the exception handler. 410 */ 411 412 movi a4, exc_table 413 mov a6, a1 # pass stack frame 414 mov a7, a0 # pass EXCCAUSE 415 addx4 a4, a0, a4 416 l32i a4, a4, EXC_TABLE_DEFAULT # load handler 417 418 /* Call the second-level handler */ 419 420 callx4 a4 421 422 /* Jump here for exception exit */ 423 .global common_exception_return 424common_exception_return: 425 426#ifdef CONFIG_TRACE_IRQFLAGS 427 l32i a4, a1, PT_DEPC 428 /* Double exception means we came here with an exception 429 * while PS.EXCM was set, i.e. interrupts disabled. 430 */ 431 bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f 432 l32i a4, a1, PT_EXCCAUSE 433 bnei a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f 434 /* We came here with an interrupt means interrupts were enabled 435 * and we'll reenable them on return. 436 */ 437 movi a4, trace_hardirqs_on 438 callx4 a4 4391: 440#endif 441 442 /* Jump if we are returning from kernel exceptions. */ 443 4441: l32i a3, a1, PT_PS 445 _bbci.l a3, PS_UM_BIT, 4f 446 447 rsil a2, 0 448 449 /* Specific to a user exception exit: 450 * We need to check some flags for signal handling and rescheduling, 451 * and have to restore WB and WS, extra states, and all registers 452 * in the register file that were in use in the user task. 453 * Note that we don't disable interrupts here. 454 */ 455 456 GET_THREAD_INFO(a2,a1) 457 l32i a4, a2, TI_FLAGS 458 459 _bbsi.l a4, TIF_NEED_RESCHED, 3f 460 _bbsi.l a4, TIF_NOTIFY_RESUME, 2f 461 _bbci.l a4, TIF_SIGPENDING, 4f 462 4632: l32i a4, a1, PT_DEPC 464 bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f 465 466 /* Call do_signal() */ 467 468 movi a4, do_notify_resume # int do_notify_resume(struct pt_regs*) 469 mov a6, a1 470 callx4 a4 471 j 1b 472 4733: /* Reschedule */ 474 475 movi a4, schedule # void schedule (void) 476 callx4 a4 477 j 1b 478 4794: /* Restore optional registers. */ 480 481 load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT 482 483 /* Restore SCOMPARE1 */ 484 485#if XCHAL_HAVE_S32C1I 486 l32i a2, a1, PT_SCOMPARE1 487 wsr a2, scompare1 488#endif 489 wsr a3, ps /* disable interrupts */ 490 491 _bbci.l a3, PS_UM_BIT, kernel_exception_exit 492 493user_exception_exit: 494 495 /* Restore the state of the task and return from the exception. */ 496 497 /* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */ 498 499 l32i a2, a1, PT_WINDOWBASE 500 l32i a3, a1, PT_WINDOWSTART 501 wsr a1, depc # use DEPC as temp storage 502 wsr a3, windowstart # restore WINDOWSTART 503 ssr a2 # preserve user's WB in the SAR 504 wsr a2, windowbase # switch to user's saved WB 505 rsync 506 rsr a1, depc # restore stack pointer 507 l32i a2, a1, PT_WMASK # register frames saved (in bits 4...9) 508 rotw -1 # we restore a4..a7 509 _bltui a6, 16, 1f # only have to restore current window? 510 511 /* The working registers are a0 and a3. We are restoring to 512 * a4..a7. Be careful not to destroy what we have just restored. 
	mov	a2, a6
	mov	a3, a5

2:	rotw	-1			# a0..a3 become a4..a7
	addi	a3, a7, -4*4		# next iteration
	addi	a2, a6, -16		# decrementing Y in WMASK
	l32i	a4, a3, PT_AREG_END + 0
	l32i	a5, a3, PT_AREG_END + 4
	l32i	a6, a3, PT_AREG_END + 8
	l32i	a7, a3, PT_AREG_END + 12
	_bgeui	a2, 16, 2b

	/* Clear unrestored registers (don't leak anything to user-land). */

1:	rsr	a0, windowbase
	rsr	a3, sar
	sub	a3, a0, a3
	beqz	a3, 2f
	extui	a3, a3, 0, WBBITS

1:	rotw	-1
	addi	a3, a7, -1
	movi	a4, 0
	movi	a5, 0
	movi	a6, 0
	movi	a7, 0
	bgei	a3, 1, 1b

	/* We are back where we were when we started.
	 * Note: a2 still contains WMASK (if we've returned to the original
	 *       frame where we had loaded a2), or at least the lower 4 bits
	 *       (if we have restored WSBITS-1 frames).
	 */

#if XCHAL_HAVE_THREADPTR
	l32i	a3, a1, PT_THREADPTR
	wur	a3, threadptr
#endif

2:	j	common_exception_exit

	/* This is the kernel exception exit.
	 * We avoided doing a MOVSP when we entered the exception, but we
	 * have to do it here.
	 */

kernel_exception_exit:

#ifdef PREEMPTIBLE_KERNEL

#ifdef CONFIG_PREEMPT

	/*
	 * Note: We've just returned from a call4, so we have
	 * at least 4 additional registers.
	 */

	/* Check current_thread_info->preempt_count */

	GET_THREAD_INFO(a2)
	l32i	a3, a2, TI_PREEMPT
	bnez	a3, 1f

	l32i	a2, a2, TI_FLAGS

1:

#endif

#endif

	/* Check if we have to do a movsp.
	 *
	 * We only have to do a movsp if the previous window-frame has
	 * been spilled to the *temporary* exception stack instead of the
	 * task's stack. This is the case if the corresponding bit in
	 * WINDOWSTART for the previous window-frame was set before
	 * (not spilled) but is zero now (spilled).
	 * If this bit is zero, all other bits except the one for the
	 * current window frame are also zero. So, we can use a simple test:
	 * 'and' WINDOWSTART and WINDOWSTART-1:
	 *
	 *  (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
	 *
	 * The result is zero only if one bit was set.
	 *
	 * (Note: We might have gone through several task switches before
	 *        we come back to the current task, so WINDOWBASE might be
	 *        different from the time the exception occurred.)
	 */

	/* Test WINDOWSTART before and after the exception.
	 * We actually have WMASK, so we only have to test if it is 1 or not.
	 */

	l32i	a2, a1, PT_WMASK
	_beqi	a2, 1, common_exception_exit	# Spilled before exception, jump

	/* Test WINDOWSTART now. If spilled, do the movsp */

	rsr	a3, windowstart
	addi	a0, a3, -1
	and	a3, a3, a0
	_bnez	a3, common_exception_exit

	/* Do a movsp (we returned from a call4, so we have at least a0..a7) */

	addi	a0, a1, -16
	l32i	a3, a0, 0
	l32i	a4, a0, 4
	s32i	a3, a1, PT_SIZE+0
	s32i	a4, a1, PT_SIZE+4
	l32i	a3, a0, 8
	l32i	a4, a0, 12
	s32i	a3, a1, PT_SIZE+8
	s32i	a4, a1, PT_SIZE+12

	/* Common exception exit.
	 * We restore the special register and the current window frame, and
	 * return from the exception.
	 *
	 * Note: We expect a2 to hold PT_WMASK
	 */

common_exception_exit:

	/* Restore address registers. */
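	/* Hedged editorial reading of the bit chain below: in the low nibble
	 * of WMASK, a set bit k means the k-th 4-register group up from a0
	 * still belongs to a live caller frame, so everything from that group
	 * on was not saved individually at entry and is skipped here; the
	 * branches mirror the save path exactly.
	 */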
	_bbsi.l	a2, 1, 1f
	l32i	a4, a1, PT_AREG4
	l32i	a5, a1, PT_AREG5
	l32i	a6, a1, PT_AREG6
	l32i	a7, a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	l32i	a8, a1, PT_AREG8
	l32i	a9, a1, PT_AREG9
	l32i	a10, a1, PT_AREG10
	l32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	l32i	a12, a1, PT_AREG12
	l32i	a13, a1, PT_AREG13
	l32i	a14, a1, PT_AREG14
	l32i	a15, a1, PT_AREG15

	/* Restore PC, SAR */

1:	l32i	a2, a1, PT_PC
	l32i	a3, a1, PT_SAR
	wsr	a2, epc1
	wsr	a3, sar

	/* Restore LBEG, LEND, LCOUNT */

	l32i	a2, a1, PT_LBEG
	l32i	a3, a1, PT_LEND
	wsr	a2, lbeg
	l32i	a2, a1, PT_LCOUNT
	wsr	a3, lend
	wsr	a2, lcount

	/* We control single stepping through the ICOUNTLEVEL register. */

	l32i	a2, a1, PT_ICOUNTLEVEL
	movi	a3, -2
	wsr	a2, icountlevel
	wsr	a3, icount

	/* Check if it was a double exception. */

	l32i	a0, a1, PT_DEPC
	l32i	a3, a1, PT_AREG3
	l32i	a2, a1, PT_AREG2
	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	/* Restore a0...a3 and return */

	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfe

1:	wsr	a0, depc
	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfde

ENDPROC(kernel_exception)

/*
 * Debug exception handler.
 *
 * Currently, we don't support KGDB, so only user applications can be debugged.
 *
 * When we get here, a0 is trashed and saved to excsave[debuglevel]
 */

ENTRY(debug_exception)

	rsr	a0, SREG_EPS + XCHAL_DEBUGLEVEL
	bbsi.l	a0, PS_EXCM_BIT, 1f	# exception mode

	/* Set EPC1 and EXCCAUSE */

	wsr	a2, depc		# save a2 temporarily
	rsr	a2, SREG_EPC + XCHAL_DEBUGLEVEL
	wsr	a2, epc1

	movi	a2, EXCCAUSE_MAPPED_DEBUG
	wsr	a2, exccause

	/* Restore PS to the value before the debug exc but with PS.EXCM set.*/

	movi	a2, 1 << PS_EXCM_BIT
	or	a2, a0, a2
	movi	a0, debug_exception	# restore a0, debug jump vector
	wsr	a2, ps
	xsr	a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL

	/* Switch to kernel/user stack, restore jump vector, and save a0 */

	bbsi.l	a2, PS_UM_BIT, 2f	# jump if user mode

	addi	a2, a1, -16-PT_SIZE	# assume kernel stack
	s32i	a0, a2, PT_AREG0
	movi	a0, 0
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
	xsr	a0, depc
	s32i	a3, a2, PT_AREG3
	s32i	a0, a2, PT_AREG2
	mov	a1, a2
	j	_kernel_exception

2:	rsr	a2, excsave1
	l32i	a2, a2, EXC_TABLE_KSTK	# load kernel stack pointer
	s32i	a0, a2, PT_AREG0
	movi	a0, 0
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_DEPC
	xsr	a0, depc
	s32i	a3, a2, PT_AREG3
	s32i	a0, a2, PT_AREG2
	mov	a1, a2
	j	_user_exception

	/* Debug exception while in exception mode. */
1:	j	1b	// FIXME!!

ENDPROC(debug_exception)

/*
 * We get here in case of an unrecoverable exception.
 * The only thing we can do is to be nice and print a panic message.
 * We only produce a single stack frame for panic, so ???
 *
 *
 * Entry conditions:
 *
 *   - a0 contains the caller address; original value saved in excsave1.
 *   - the original a0 contains a valid return address (backtrace) or 0.
 *   - a2 contains a valid stack pointer
 *
 * Notes:
 *
 *   - If the stack pointer could be invalid, the caller has to set up a
 *     dummy stack pointer (e.g. the stack of the init_task)
 *
 *   - If the return address could be invalid, the caller has to set it
 *     to 0, so the backtrace would stop.
 *
 */
	.align 4
unrecoverable_text:
	.ascii "Unrecoverable error in exception handler\0"

ENTRY(unrecoverable_exception)

	movi	a0, 1
	movi	a1, 0

	wsr	a0, windowstart
	wsr	a1, windowbase
	rsync

	movi	a1, (1 << PS_WOE_BIT) | LOCKLEVEL
	wsr	a1, ps
	rsync

	movi	a1, init_task
	movi	a0, 0
	addi	a1, a1, PT_REGS_OFFSET

	movi	a4, panic
	movi	a6, unrecoverable_text

	callx4	a4

1:	j	1b

ENDPROC(unrecoverable_exception)

/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */

/*
 * Fast-handler for alloca exceptions
 *
 *  The ALLOCA handler is entered when user code executes the MOVSP
 *  instruction and the caller's frame is not in the register file.
 *  In this case, the caller frame's a0..a3 are on the stack just
 *  below sp (a1), and this handler moves them.
 *
 *  For "MOVSP <ar>,<as>" without destination register a1, this routine
 *  simply moves the value from <as> to <ar> without moving the save area.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

#if XCHAL_HAVE_BE
#define _EXTUI_MOVSP_SRC(ar)	extui ar, ar, 4, 4
#define _EXTUI_MOVSP_DST(ar)	extui ar, ar, 0, 4
#else
#define _EXTUI_MOVSP_SRC(ar)	extui ar, ar, 0, 4
#define _EXTUI_MOVSP_DST(ar)	extui ar, ar, 4, 4
#endif

ENTRY(fast_alloca)

	/* We shouldn't be in a double exception. */

	l32i	a0, a2, PT_DEPC
	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lunhandled_double

	rsr	a0, depc		# get a2
	s32i	a4, a2, PT_AREG4	# save a4 and
	s32i	a0, a2, PT_AREG2	# a2 to stack

	/* Exit critical section. */

	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore a3, excsave_1 */

	xsr	a3, excsave1		# make sure excsave_1 is valid for dbl.
	rsr	a4, epc1		# get exception address
	s32i	a3, a2, PT_AREG3	# save a3 to stack

#ifdef ALLOCA_EXCEPTION_IN_IRAM
#error	iram not supported
#else
	/* Note: l8ui not allowed in IRAM/IROM!! */
	l8ui	a0, a4, 1		# read as(src) from MOVSP instruction
#endif
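	/* Hedged editorial note on the decoding: MOVSP carries its <ar>/<as>
	 * operands in two nibbles of the 24-bit instruction word; which
	 * nibble of which byte holds source vs. destination depends on the
	 * core's byte order, hence the paired _EXTUI_MOVSP_SRC and
	 * _EXTUI_MOVSP_DST macros above and the two byte reads at offsets
	 * 1 and 0.
	 */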
	movi	a3, .Lmovsp_src
	_EXTUI_MOVSP_SRC(a0)		# extract source register number
	addx8	a3, a0, a3
	jx	a3

.Lunhandled_double:
	wsr	a0, excsave1
	movi	a0, unrecoverable_exception
	callx0	a0

	.align 8
.Lmovsp_src:
	l32i	a3, a2, PT_AREG0;	_j 1f;	.align 8
	mov	a3, a1;			_j 1f;	.align 8
	l32i	a3, a2, PT_AREG2;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG3;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG4;	_j 1f;	.align 8
	mov	a3, a5;			_j 1f;	.align 8
	mov	a3, a6;			_j 1f;	.align 8
	mov	a3, a7;			_j 1f;	.align 8
	mov	a3, a8;			_j 1f;	.align 8
	mov	a3, a9;			_j 1f;	.align 8
	mov	a3, a10;		_j 1f;	.align 8
	mov	a3, a11;		_j 1f;	.align 8
	mov	a3, a12;		_j 1f;	.align 8
	mov	a3, a13;		_j 1f;	.align 8
	mov	a3, a14;		_j 1f;	.align 8
	mov	a3, a15;		_j 1f;	.align 8

1:

#ifdef ALLOCA_EXCEPTION_IN_IRAM
#error	iram not supported
#else
	l8ui	a0, a4, 0		# read ar(dst) from MOVSP instruction
#endif
	addi	a4, a4, 3		# step over movsp
	_EXTUI_MOVSP_DST(a0)		# extract destination register
	wsr	a4, epc1		# save new epc_1

	_bnei	a0, 1, 1f		# no 'movsp a1, ax': jump

	/* Move the save area. This implies the use of the L32E
	 * and S32E instructions, because this move must be done with
	 * the user's PS.RING privilege levels, not with ring 0
	 * (kernel's) privileges currently active with PS.EXCM
	 * set. Note that we have still registered a fixup routine with the
	 * double exception vector in case a double exception occurs.
	 */

	/* a0,a4:avail a1:old user stack a2:exc. stack a3:new user stack. */

	l32e	a0, a1, -16
	l32e	a4, a1, -12
	s32e	a0, a3, -16
	s32e	a4, a3, -12
	l32e	a0, a1, -8
	l32e	a4, a1, -4
	s32e	a0, a3, -8
	s32e	a4, a3, -4

	/* Restore stack-pointer and all the other saved registers. */

	mov	a1, a3

	l32i	a4, a2, PT_AREG4
	l32i	a3, a2, PT_AREG3
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_AREG2
	rfe

	/* MOVSP <at>,<as> was invoked with <at> != a1.
	 * Because the stack pointer is not being modified,
	 * we should be able to just modify the pointer
	 * without moving any save area.
	 * The processor only traps these occurrences if the
	 * caller window isn't live, so unfortunately we can't
	 * use this as an alternate trap mechanism.
	 * So we just do the move.  This requires that we
	 * resolve the destination register, not just the source,
	 * so there's some extra work.
	 * (PERHAPS NOT REALLY NEEDED, BUT CLEANER...)
	 */

	/* a0 dst-reg, a1 user-stack, a2 stack, a3 value of src reg. */

1:	movi	a4, .Lmovsp_dst
	addx8	a4, a0, a4
	jx	a4

	.align 8
.Lmovsp_dst:
	s32i	a3, a2, PT_AREG0;	_j 1f;	.align 8
	mov	a1, a3;			_j 1f;	.align 8
	s32i	a3, a2, PT_AREG2;	_j 1f;	.align 8
	s32i	a3, a2, PT_AREG3;	_j 1f;	.align 8
	s32i	a3, a2, PT_AREG4;	_j 1f;	.align 8
	mov	a5, a3;			_j 1f;	.align 8
	mov	a6, a3;			_j 1f;	.align 8
	mov	a7, a3;			_j 1f;	.align 8
	mov	a8, a3;			_j 1f;	.align 8
	mov	a9, a3;			_j 1f;	.align 8
	mov	a10, a3;		_j 1f;	.align 8
	mov	a11, a3;		_j 1f;	.align 8
	mov	a12, a3;		_j 1f;	.align 8
	mov	a13, a3;		_j 1f;	.align 8
	mov	a14, a3;		_j 1f;	.align 8
	mov	a15, a3;		_j 1f;	.align 8

1:	l32i	a4, a2, PT_AREG4
	l32i	a3, a2, PT_AREG3
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_AREG2
	rfe

ENDPROC(fast_alloca)
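	/* Illustrative editorial note: .Lmovsp_src/.Lmovsp_dst above are jump
	 * tables of 8-byte slots ('addx8' scales the register number by 8),
	 * one slot per possible MOVSP operand.  In rough C terms:
	 *
	 *	a3 = read_reg(src);	// first table
	 *	write_reg(dst, a3);	// second table
	 *
	 * where read_reg/write_reg merely stand in for the per-register slots.
	 */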
/*
 * Fast system calls.
 *
 * WARNING: The kernel doesn't save the entire user context before
 * handling a fast system call. These functions are small and short,
 * usually offering some functionality not available to user tasks.
 *
 * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 */

ENTRY(fast_syscall_kernel)

	/* Skip syscall. */

	rsr	a0, epc1
	addi	a0, a0, 3
	wsr	a0, epc1

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

	rsr	a0, depc		# get syscall-nr
	_beqz	a0, fast_syscall_spill_registers
	_beqi	a0, __NR_xtensa, fast_syscall_xtensa

	j	kernel_exception

ENDPROC(fast_syscall_kernel)

ENTRY(fast_syscall_user)

	/* Skip syscall. */

	rsr	a0, epc1
	addi	a0, a0, 3
	wsr	a0, epc1

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

	rsr	a0, depc		# get syscall-nr
	_beqz	a0, fast_syscall_spill_registers
	_beqi	a0, __NR_xtensa, fast_syscall_xtensa

	j	user_exception

ENDPROC(fast_syscall_user)

ENTRY(fast_syscall_unrecoverable)

	/* Restore all states. */

	l32i	a0, a2, PT_AREG0	# restore a0
	xsr	a2, depc		# restore a2, depc
	rsr	a3, excsave1

	wsr	a0, excsave1
	movi	a0, unrecoverable_exception
	callx0	a0

ENDPROC(fast_syscall_unrecoverable)

/*
 * sysxtensa syscall handler
 *
 *     int sysxtensa (SYS_XTENSA_ATOMIC_SET,     ptr, val,    unused);
 *     int sysxtensa (SYS_XTENSA_ATOMIC_ADD,     ptr, val,    unused);
 *     int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val,    unused);
 *     int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
 *         a2 (ret)   a6                         a3   a4      a5
 *
 * Entry condition:
 *
 *   a0:	a2 (syscall-nr), original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in a0 and DEPC
 *   a3:	dispatch table, original in excsave_1
 *   a4..a15:	unchanged
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Note: we don't have to save a2; a2 holds the return value
 *
 * We use the two macros TRY and CATCH:
 *
 * TRY	 adds an entry to the __ex_table fixup table for the immediately
 *	 following instruction.
 *
 * CATCH catches any exception that occurred at one of the preceding TRY
 *	 statements and continues from there.
 *
 * Usage:
 *	TRY	l32i	a0, a1, 0
 *		<other code>
 *	done:	rfe
 *	CATCH	<set return code>
 *		j done
 */
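	/* Editorial note: TRY/CATCH below ride on the kernel's __ex_table
	 * mechanism; each entry pairs the address of a possibly faulting
	 * instruction (66:) with a fixup address (67:), so a fault at a
	 * TRY'd load/store resumes at the CATCH label instead of killing
	 * the task.
	 */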
#define TRY				\
	.section __ex_table, "a";	\
	.word	66f, 67f;		\
	.text;				\
66:

#define CATCH				\
67:

ENTRY(fast_syscall_xtensa)

	xsr	a3, excsave1		# restore a3, excsave1

	s32i	a7, a2, PT_AREG7	# we need an additional register
	movi	a7, 4			# sizeof(unsigned int)
	access_ok a3, a7, a0, a2, .Leac	# a0: scratch reg, a2: sp

	addi	a6, a6, -1		# assuming SYS_XTENSA_ATOMIC_SET = 1
	_bgeui	a6, SYS_XTENSA_COUNT - 1, .Lill
	_bnei	a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp

	/* Fall through for ATOMIC_CMP_SWP. */

.Lswp:	/* Atomic compare and swap */

TRY	l32i	a0, a3, 0		# read old value
	bne	a0, a4, 1f		# same as old value? jump
TRY	s32i	a5, a3, 0		# different, modify value
	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, 1			# and return 1
	addi	a6, a6, 1		# restore a6 (really necessary?)
	rfe

1:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, 0			# return 0 (note that we cannot set
	addi	a6, a6, 1		# restore a6 (really necessary?)
	rfe

.Lnswp:	/* Atomic set, add, and exg_add. */

TRY	l32i	a7, a3, 0		# orig
	add	a0, a4, a7		# + arg
	moveqz	a0, a4, a6		# set
TRY	s32i	a0, a3, 0		# write new value

	mov	a0, a2
	mov	a2, a7
	l32i	a7, a0, PT_AREG7	# restore a7
	l32i	a0, a0, PT_AREG0	# restore a0
	addi	a6, a6, 1		# restore a6 (really necessary?)
	rfe

CATCH
.Leac:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -EFAULT
	rfe

.Lill:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -EINVAL
	rfe

ENDPROC(fast_syscall_xtensa)


/* fast_syscall_spill_registers.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
 */

ENTRY(fast_syscall_spill_registers)

	/* Register a FIXUP handler (pass current wb as a parameter) */

	movi	a0, fast_syscall_spill_registers_fixup
	s32i	a0, a3, EXC_TABLE_FIXUP
	rsr	a0, windowbase
	s32i	a0, a3, EXC_TABLE_PARAM

	/* Save a3 and SAR on stack. */

	rsr	a0, sar
	xsr	a3, excsave1		# restore a3 and excsave_1
	s32i	a3, a2, PT_AREG3
	s32i	a4, a2, PT_AREG4
	s32i	a0, a2, PT_AREG5	# store SAR to PT_AREG5

	/* The spill routine might clobber a7, a11, and a15. */

	s32i	a7, a2, PT_AREG7
	s32i	a11, a2, PT_AREG11
	s32i	a15, a2, PT_AREG15

	call0	_spill_registers	# destroys a3, a4, and SAR

	/* Advance PC, restore registers and SAR, and return from exception. */

	l32i	a3, a2, PT_AREG5
	l32i	a4, a2, PT_AREG4
	l32i	a0, a2, PT_AREG0
	wsr	a3, sar
	l32i	a3, a2, PT_AREG3

	/* Restore clobbered registers. */
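	/* Hedged editorial note: a7, a11 and a15 are the 'last valid'
	 * registers of call4/call8/call12 frames, which _spill_registers
	 * uses as scratch (see the srli a7/a11/a15 instructions there);
	 * that is why they were saved before the call0 above and are
	 * reloaded here.
	 */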
	l32i	a7, a2, PT_AREG7
	l32i	a11, a2, PT_AREG11
	l32i	a15, a2, PT_AREG15

	movi	a2, 0
	rfe

ENDPROC(fast_syscall_spill_registers)

/* Fixup handler.
 *
 * We get here if the spill routine causes an exception, e.g. tlb miss.
 * We basically restore WINDOWBASE and WINDOWSTART to the condition when
 * we entered the spill routine and jump to the user exception handler.
 *
 * a0: value of depc, original value in depc
 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
 * a3: exctable, original value in excsave1
 */

fast_syscall_spill_registers_fixup:

	rsr	a2, windowbase	# get current windowbase (a2 is saved)
	xsr	a0, depc	# restore depc and a0
	ssl	a2		# set shift (32 - WB)

	/* We need to make sure the current registers (a0-a3) are preserved.
	 * To do this, we simply set the bit for the current window frame
	 * in WS, so that the exception handlers save them to the task stack.
	 */

	rsr	a3, excsave1	# get spill-mask
	slli	a2, a3, 1	# shift left by one

	slli	a3, a2, 32-WSBITS
	src	a2, a2, a3	# a2 = xxwww1yyxxxwww1yy......
	wsr	a2, windowstart	# set corrected windowstart

	movi	a3, exc_table
	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE	# restore a2
	l32i	a3, a3, EXC_TABLE_PARAM	# original WB (in user task)

	/* Return to the original (user task) WINDOWBASE.
	 * We leave the following frame behind:
	 *	a0, a1, a2	same
	 *	a3:		trashed (saved in excsave_1)
	 *	depc:		depc (we have to return to that address)
	 *	excsave_1:	a3
	 */

	wsr	a3, windowbase
	rsync

	/* We are now in the original frame when we entered _spill_registers:
	 *  a0: return address
	 *  a1: used, stack pointer
	 *  a2: kernel stack pointer
	 *  a3: available, saved in EXCSAVE_1
	 *  depc: exception address
	 *  excsave: a3
	 * Note: This frame might be the same as above.
	 */

	/* Setup stack pointer. */

	addi	a2, a2, -PT_USER_SIZE
	s32i	a0, a2, PT_AREG0

	/* Make sure we return to this fixup handler. */

	movi	a3, fast_syscall_spill_registers_fixup_return
	s32i	a3, a2, PT_DEPC		# setup depc

	/* Jump to the exception handler. */

	movi	a3, exc_table
	rsr	a0, exccause
	addx4	a0, a0, a3		# find entry in table
	l32i	a0, a0, EXC_TABLE_FAST_USER	# load handler
	jx	a0

fast_syscall_spill_registers_fixup_return:

	/* When we return here, all registers have been restored (a2: DEPC) */

	wsr	a2, depc		# exception address

	/* Restore fixup handler. */

	xsr	a3, excsave1
	movi	a2, fast_syscall_spill_registers_fixup
	s32i	a2, a3, EXC_TABLE_FIXUP
	rsr	a2, windowbase
	s32i	a2, a3, EXC_TABLE_PARAM
	l32i	a2, a3, EXC_TABLE_KSTK

	/* Load WB at the time the exception occurred. */

	rsr	a3, sar			# WB is still in SAR
	neg	a3, a3
	wsr	a3, windowbase
	rsync

	/* Restore a3 and return. */

	movi	a3, exc_table
	xsr	a3, excsave1

	rfde


/*
 * spill all registers.
 *
 * This is not a real function. The following conditions must be met:
 *
 *  - must be called with call0.
 *  - uses a3, a4 and SAR.
 *  - the last 'valid' register of each frame is clobbered.
 *  - the caller must have registered a fixup handler
 *    (or be inside a critical section)
 *  - PS_EXCM must be set (PS_WOE cleared?)
 */
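	/* Hedged editorial sketch of the algorithm below:
	 *
	 *	rotate WINDOWSTART so the current frame sits at bit 0;
	 *	skip empty frames up to the oldest live frame;
	 *	for each live frame, spill 4, 8 or 12 registers with s32e,
	 *	    depending on its call type (call4/call8/call12);
	 *	finally leave WINDOWSTART with only the current frame set.
	 */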
1346 */ 1347 1348ENTRY(_spill_registers) 1349 1350 /* 1351 * Rotate ws so that the current windowbase is at bit 0. 1352 * Assume ws = xxxwww1yy (www1 current window frame). 1353 * Rotate ws right so that a4 = yyxxxwww1. 1354 */ 1355 1356 rsr a4, windowbase 1357 rsr a3, windowstart # a3 = xxxwww1yy 1358 ssr a4 # holds WB 1359 slli a4, a3, WSBITS 1360 or a3, a3, a4 # a3 = xxxwww1yyxxxwww1yy 1361 srl a3, a3 # a3 = 00xxxwww1yyxxxwww1 1362 1363 /* We are done if there are no more than the current register frame. */ 1364 1365 extui a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww 1366 movi a4, (1 << (WSBITS-1)) 1367 _beqz a3, .Lnospill # only one active frame? jump 1368 1369 /* We want 1 at the top, so that we return to the current windowbase */ 1370 1371 or a3, a3, a4 # 1yyxxxwww 1372 1373 /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */ 1374 1375 wsr a3, windowstart # save shifted windowstart 1376 neg a4, a3 1377 and a3, a4, a3 # first bit set from right: 000010000 1378 1379 ffs_ws a4, a3 # a4: shifts to skip empty frames 1380 movi a3, WSBITS 1381 sub a4, a3, a4 # WSBITS-a4:number of 0-bits from right 1382 ssr a4 # save in SAR for later. 1383 1384 rsr a3, windowbase 1385 add a3, a3, a4 1386 wsr a3, windowbase 1387 rsync 1388 1389 rsr a3, windowstart 1390 srl a3, a3 # shift windowstart 1391 1392 /* WB is now just one frame below the oldest frame in the register 1393 window. WS is shifted so the oldest frame is in bit 0, thus, WB 1394 and WS differ by one 4-register frame. */ 1395 1396 /* Save frames. Depending what call was used (call4, call8, call12), 1397 * we have to save 4,8. or 12 registers. 1398 */ 1399 1400 _bbsi.l a3, 1, .Lc4 1401 _bbsi.l a3, 2, .Lc8 1402 1403 /* Special case: we have a call12-frame starting at a4. */ 1404 1405 _bbci.l a3, 3, .Lc12 # bit 3 shouldn't be zero! (Jump to Lc12 first) 1406 1407 s32e a4, a1, -16 # a1 is valid with an empty spill area 1408 l32e a4, a5, -12 1409 s32e a8, a4, -48 1410 mov a8, a4 1411 l32e a4, a1, -16 1412 j .Lc12c 1413 1414.Lnospill: 1415 ret 1416 1417.Lloop: _bbsi.l a3, 1, .Lc4 1418 _bbci.l a3, 2, .Lc12 1419 1420.Lc8: s32e a4, a13, -16 1421 l32e a4, a5, -12 1422 s32e a8, a4, -32 1423 s32e a5, a13, -12 1424 s32e a6, a13, -8 1425 s32e a7, a13, -4 1426 s32e a9, a4, -28 1427 s32e a10, a4, -24 1428 s32e a11, a4, -20 1429 1430 srli a11, a3, 2 # shift windowbase by 2 1431 rotw 2 1432 _bnei a3, 1, .Lloop 1433 1434.Lexit: /* Done. Do the final rotation, set WS, and return. */ 1435 1436 rotw 1 1437 rsr a3, windowbase 1438 ssl a3 1439 movi a3, 1 1440 sll a3, a3 1441 wsr a3, windowstart 1442 ret 1443 1444.Lc4: s32e a4, a9, -16 1445 s32e a5, a9, -12 1446 s32e a6, a9, -8 1447 s32e a7, a9, -4 1448 1449 srli a7, a3, 1 1450 rotw 1 1451 _bnei a3, 1, .Lloop 1452 j .Lexit 1453 1454.Lc12: _bbci.l a3, 3, .Linvalid_mask # bit 2 shouldn't be zero! 1455 1456 /* 12-register frame (call12) */ 1457 1458 l32e a2, a5, -12 1459 s32e a8, a2, -48 1460 mov a8, a2 1461 1462.Lc12c: s32e a9, a8, -44 1463 s32e a10, a8, -40 1464 s32e a11, a8, -36 1465 s32e a12, a8, -32 1466 s32e a13, a8, -28 1467 s32e a14, a8, -24 1468 s32e a15, a8, -20 1469 srli a15, a3, 3 1470 1471 /* The stack pointer for a4..a7 is out of reach, so we rotate the 1472 * window, grab the stackpointer, and rotate back. 1473 * Alternatively, we could also use the following approach, but that 1474 * makes the fixup routine much more complicated: 1475 * rotw 1 1476 * s32e a0, a13, -16 1477 * ... 
	rotw	1
	mov	a5, a13
	rotw	-1

	s32e	a4, a9, -16
	s32e	a5, a9, -12
	s32e	a6, a9, -8
	s32e	a7, a9, -4

	rotw	3

	_beqi	a3, 1, .Lexit
	j	.Lloop

.Linvalid_mask:

	/* We get here because of an unrecoverable error in the window
	 * registers. If we are in user space, we kill the application;
	 * however, this condition is unrecoverable in kernel space.
	 */

	rsr	a0, ps
	_bbci.l	a0, PS_UM_BIT, 1f

	/* User space: Set up a dummy frame and kill application.
	 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
	 */

	movi	a0, 1
	movi	a1, 0

	wsr	a0, windowstart
	wsr	a1, windowbase
	rsync

	movi	a0, 0

	movi	a3, exc_table
	l32i	a1, a3, EXC_TABLE_KSTK
	wsr	a3, excsave1

	movi	a4, (1 << PS_WOE_BIT) | LOCKLEVEL
	wsr	a4, ps
	rsync

	movi	a6, SIGSEGV
	movi	a4, do_exit
	callx4	a4

1:	/* Kernel space: PANIC! */

	wsr	a0, excsave1
	movi	a0, unrecoverable_exception
	callx0	a0		# should not return
1:	j	1b

ENDPROC(_spill_registers)

#ifdef CONFIG_MMU
/*
 * We should never get here. Bail out!
 */

ENTRY(fast_second_level_miss_double_kernel)

1:	movi	a0, unrecoverable_exception
	callx0	a0		# should not return
1:	j	1b

ENDPROC(fast_second_level_miss_double_kernel)

/* First-level entry handler for user, kernel, and double 2nd-level
 * TLB miss exceptions.  Note that for now, user and kernel miss
 * exceptions share the same entry point and are handled identically.
 *
 * An old, less-efficient C version of this function used to exist.
 * We include it below, interleaved as comments, for reference.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_second_level_miss)

	/* Save a1. Note: we don't expect a double exception. */

	s32i	a1, a2, PT_AREG1

	/* We need to map the page of PTEs for the user task.  Find
	 * the pointer to that page.  Also, it's possible for tsk->mm
	 * to be NULL while tsk->active_mm is nonzero if we faulted on
	 * a vmalloc address.  In that rare case, we must use
	 * active_mm instead to avoid a fault in this handler.  See
	 *
	 * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
	 *   (or search Internet on "mm vs. active_mm")
	 *
	 *	if (!mm)
	 *		mm = tsk->active_mm;
	 *	pgd = pgd_offset (mm, regs->excvaddr);
	 *	pmd = pmd_offset (pgd, regs->excvaddr);
	 *	pmdval = *pmd;
	 */

	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
	beqz	a0, 9f


	/* We deliberately destroy a3 that holds the exception table. */

8:	rsr	a3, excvaddr		# fault address
	_PGD_OFFSET(a0, a3, a1)
	l32i	a0, a0, 0		# read pmdval
	beqz	a0, 2f

	/* Read ptevaddr and convert to top of page-table page.
	 *
	 *	vpnval = read_ptevaddr_register() & PAGE_MASK;
	 *	vpnval += DTLB_WAY_PGTABLE;
	 *	pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
	 *	write_dtlb_entry (pteval, vpnval);
	 *
	 * The messy computation for 'pteval' above really simplifies
	 * into the following:
	 *
	 * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_DIRECTORY
	 */
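	/* Hedged editorial note: the simplification works because pmdval is
	 * a kernel virtual address in the linear map, so 'pmdval -
	 * PAGE_OFFSET' is its physical address, and _PAGE_DIRECTORY supplies
	 * the attribute bits that mk_pte(..., PAGE_KERNEL) would have added.
	 */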
1607 * 1608 * vpnval = read_ptevaddr_register() & PAGE_MASK; 1609 * vpnval += DTLB_WAY_PGTABLE; 1610 * pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL); 1611 * write_dtlb_entry (pteval, vpnval); 1612 * 1613 * The messy computation for 'pteval' above really simplifies 1614 * into the following: 1615 * 1616 * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_DIRECTORY 1617 */ 1618 1619 movi a1, (-PAGE_OFFSET) & 0xffffffff 1620 add a0, a0, a1 # pmdval - PAGE_OFFSET 1621 extui a1, a0, 0, PAGE_SHIFT # ... & PAGE_MASK 1622 xor a0, a0, a1 1623 1624 movi a1, _PAGE_DIRECTORY 1625 or a0, a0, a1 # ... | PAGE_DIRECTORY 1626 1627 /* 1628 * We utilize all three wired-ways (7-9) to hold pmd translations. 1629 * Memory regions are mapped to the DTLBs according to bits 28 and 29. 1630 * This allows to map the three most common regions to three different 1631 * DTLBs: 1632 * 0,1 -> way 7 program (0040.0000) and virtual (c000.0000) 1633 * 2 -> way 8 shared libaries (2000.0000) 1634 * 3 -> way 0 stack (3000.0000) 1635 */ 1636 1637 extui a3, a3, 28, 2 # addr. bit 28 and 29 0,1,2,3 1638 rsr a1, ptevaddr 1639 addx2 a3, a3, a3 # -> 0,3,6,9 1640 srli a1, a1, PAGE_SHIFT 1641 extui a3, a3, 2, 2 # -> 0,0,1,2 1642 slli a1, a1, PAGE_SHIFT # ptevaddr & PAGE_MASK 1643 addi a3, a3, DTLB_WAY_PGD 1644 add a1, a1, a3 # ... + way_number 1645 16463: wdtlb a0, a1 1647 dsync 1648 1649 /* Exit critical section. */ 1650 16514: movi a3, exc_table # restore a3 1652 movi a0, 0 1653 s32i a0, a3, EXC_TABLE_FIXUP 1654 1655 /* Restore the working registers, and return. */ 1656 1657 l32i a0, a2, PT_AREG0 1658 l32i a1, a2, PT_AREG1 1659 l32i a2, a2, PT_DEPC 1660 xsr a3, excsave1 1661 1662 bgeui a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f 1663 1664 /* Restore excsave1 and return. */ 1665 1666 rsr a2, depc 1667 rfe 1668 1669 /* Return from double exception. */ 1670 16711: xsr a2, depc 1672 esync 1673 rfde 1674 16759: l32i a0, a1, TASK_ACTIVE_MM # unlikely case mm == 0 1676 j 8b 1677 1678#if (DCACHE_WAY_SIZE > PAGE_SIZE) 1679 16802: /* Special case for cache aliasing. 1681 * We (should) only get here if a clear_user_page, copy_user_page 1682 * or the aliased cache flush functions got preemptively interrupted 1683 * by another task. Re-establish temporary mapping to the 1684 * TLBTEMP_BASE areas. 1685 */ 1686 1687 /* We shouldn't be in a double exception */ 1688 1689 l32i a0, a2, PT_DEPC 1690 bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 2f 1691 1692 /* Make sure the exception originated in the special functions */ 1693 1694 movi a0, __tlbtemp_mapping_start 1695 rsr a3, epc1 1696 bltu a3, a0, 2f 1697 movi a0, __tlbtemp_mapping_end 1698 bgeu a3, a0, 2f 1699 1700 /* Check if excvaddr was in one of the TLBTEMP_BASE areas. */ 1701 1702 movi a3, TLBTEMP_BASE_1 1703 rsr a0, excvaddr 1704 bltu a0, a3, 2f 1705 1706 addi a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT)) 1707 bgeu a1, a3, 2f 1708 1709 /* Check if we have to restore an ITLB mapping. */ 1710 1711 movi a1, __tlbtemp_mapping_itlb 1712 rsr a3, epc1 1713 sub a3, a3, a1 1714 1715 /* Calculate VPN */ 1716 1717 movi a1, PAGE_MASK 1718 and a1, a1, a0 1719 1720 /* Jump for ITLB entry */ 1721 1722 bgez a3, 1f 1723 1724 /* We can use up to two TLBTEMP areas, one for src and one for dst. */ 1725 1726 extui a3, a0, PAGE_SHIFT + DCACHE_ALIAS_ORDER, 1 1727 add a1, a3, a1 1728 1729 /* PPN is in a6 for the first TLBTEMP area and in a7 for the second. */ 1730 1731 mov a0, a6 1732 movnez a0, a7, a3 1733 j 3b 1734 1735 /* ITLB entry. We only use dst in a6. 
1:	witlb	a6, a1
	isync
	j	4b


#endif	// DCACHE_WAY_SIZE > PAGE_SIZE


2:	/* Invalid PGD, default exception handling */

	movi	a3, exc_table
	rsr	a1, depc
	xsr	a3, excsave1
	s32i	a1, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	rsr	a2, ps
	bbsi.l	a2, PS_UM_BIT, 1f
	j	_kernel_exception
1:	j	_user_exception

ENDPROC(fast_second_level_miss)

/*
 * StoreProhibitedException
 *
 * Update the pte and the corresponding dtlb entry for this pte.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_store_prohibited)

	/* Save a1 and a4. */

	s32i	a1, a2, PT_AREG1
	s32i	a4, a2, PT_AREG4

	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
	beqz	a0, 9f

8:	rsr	a1, excvaddr		# fault address
	_PGD_OFFSET(a0, a1, a4)
	l32i	a0, a0, 0
	beqz	a0, 2f

	/* Note that we assume _PAGE_WRITABLE_BIT is only set if pte is valid.*/

	_PTE_OFFSET(a0, a1, a4)
	l32i	a4, a0, 0		# read pteval
	bbci.l	a4, _PAGE_WRITABLE_BIT, 2f

	movi	a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
	or	a4, a4, a1
	rsr	a1, excvaddr
	s32i	a4, a0, 0

	/* We need to flush the cache if we have page coloring. */
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
	dhwb	a0, 0
#endif
	pdtlb	a0, a1
	wdtlb	a4, a0

	/* Exit critical section. */

	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore the working registers, and return. */

	l32i	a4, a2, PT_AREG4
	l32i	a1, a2, PT_AREG1
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_DEPC

	/* Restore excsave1 and a3. */

	xsr	a3, excsave1
	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	rsr	a2, depc
	rfe

	/* Double exception. Restore FIXUP handler and return. */

1:	xsr	a2, depc
	esync
	rfde

9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
	j	8b

2:	/* If there was a problem, handle fault in C */

	rsr	a4, depc		# still holds a2
	xsr	a3, excsave1
	s32i	a4, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	l32i	a4, a2, PT_AREG4
	mov	a1, a2

	rsr	a2, ps
	bbsi.l	a2, PS_UM_BIT, 1f
	j	_kernel_exception
1:	j	_user_exception

ENDPROC(fast_store_prohibited)

#endif /* CONFIG_MMU */

/*
 * System Calls.
 *
 * void system_call (struct pt_regs* regs, int exccause)
 *                          a2                  a3
 */

ENTRY(system_call)

	entry	a1, 32

	/* regs->syscall = regs->areg[2] */

	l32i	a3, a2, PT_AREG2
	mov	a6, a2
	movi	a4, do_syscall_trace_enter
	s32i	a3, a2, PT_SYSCALL
	callx4	a4

	/* syscall = sys_call_table[syscall_nr] */

	movi	a4, sys_call_table
	movi	a5, __NR_syscall_count
	movi	a6, -ENOSYS
	bgeu	a3, a5, 1f

	addx4	a4, a3, a4
	l32i	a4, a4, 0
	movi	a5, sys_ni_syscall
	beq	a4, a5, 1f

	/* Load args: arg0 - arg5 are passed via regs. */
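	/* Editorial note (hedged): with the windowed ABI, the callx4 below
	 * makes our a6..a11 appear as the callee's a2..a7, so these six
	 * loads marshal syscall arguments 0..5 from the saved user
	 * registers; the non-contiguous PT_AREG slots follow the syscall
	 * calling convention.
	 */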
	l32i	a6, a2, PT_AREG6
	l32i	a7, a2, PT_AREG3
	l32i	a8, a2, PT_AREG4
	l32i	a9, a2, PT_AREG5
	l32i	a10, a2, PT_AREG8
	l32i	a11, a2, PT_AREG9

	/* Pass one additional argument to the syscall: pt_regs (on stack) */
	s32i	a2, a1, 0

	callx4	a4

1:	/* regs->areg[2] = return_value */

	s32i	a6, a2, PT_AREG2
	movi	a4, do_syscall_trace_leave
	mov	a6, a2
	callx4	a4
	retw

ENDPROC(system_call)


/*
 * Task switch.
 *
 * struct task*  _switch_to (struct task* prev, struct task* next)
 *         a2                            a2                   a3
 */

ENTRY(_switch_to)

	entry	a1, 16

	mov	a12, a2			# preserve 'prev' (a2)
	mov	a13, a3			# and 'next' (a3)

	l32i	a4, a2, TASK_THREAD_INFO
	l32i	a5, a3, TASK_THREAD_INFO

	save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER

	s32i	a0, a12, THREAD_RA	# save return address
	s32i	a1, a12, THREAD_SP	# save stack pointer

	/* Disable ints while we manipulate the stack pointer. */

	movi	a14, (1 << PS_EXCM_BIT) | LOCKLEVEL
	xsr	a14, ps
	rsr	a3, excsave1
	rsync
	s32i	a3, a3, EXC_TABLE_FIXUP	/* enter critical section */

	/* Switch CPENABLE */

#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
	l32i	a3, a5, THREAD_CPENABLE
	xsr	a3, cpenable
	s32i	a3, a4, THREAD_CPENABLE
#endif

	/* Flush register file. */

	call0	_spill_registers	# destroys a3, a4, and SAR

	/* Set kernel stack (and leave critical section)
	 * Note: It's safe to set it here. The stack will not be overwritten
	 *       because the kernel stack will only be loaded again after
	 *       we return from kernel space.
	 */

	rsr	a3, excsave1		# exc_table
	movi	a6, 0
	addi	a7, a5, PT_REGS_OFFSET
	s32i	a6, a3, EXC_TABLE_FIXUP
	s32i	a7, a3, EXC_TABLE_KSTK

	/* restore context of the task 'next' */

	l32i	a0, a13, THREAD_RA	# restore return address
	l32i	a1, a13, THREAD_SP	# restore stack pointer

	load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER

	wsr	a14, ps
	mov	a2, a12			# return 'prev'
	rsync

	retw

ENDPROC(_switch_to)

ENTRY(ret_from_fork)

	/* void schedule_tail (struct task_struct *prev)
	 * Note: prev is still in a6 (return value from fake call4 frame)
	 */
	movi	a4, schedule_tail
	callx4	a4

	movi	a4, do_syscall_trace_leave
	mov	a6, a1
	callx4	a4

	j	common_exception_return

ENDPROC(ret_from_fork)

/*
 * Kernel thread creation helper
 * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg
 * left from _switch_to: a6 = prev
 */
ENTRY(ret_from_kernel_thread)

	call4	schedule_tail
	mov	a6, a3
	callx4	a2
	j	common_exception_return

ENDPROC(ret_from_kernel_thread)