/*
 * arch/xtensa/kernel/entry.S
 *
 * Low-level exception handling
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2005 by Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 *
 */

#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/signal.h>
#include <asm/tlbflush.h>

/* Unimplemented features. */

#undef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
#undef KERNEL_STACK_OVERFLOW_CHECK
#undef PREEMPTIBLE_KERNEL
#undef ALLOCA_EXCEPTION_IN_IRAM

/* Not well tested.
 *
 * - fast_coprocessor
 */

/*
 * Macro to find the first bit set in a window mask from the left + 1
 *
 * 100....0 -> 1
 * 010....0 -> 2
 * 000....1 -> WSBITS
 */

	.macro	ffs_ws bit mask

#if XCHAL_HAVE_NSA
	nsau	\bit, \mask			# 32-WSBITS ... 31 (32 iff 0)
	addi	\bit, \bit, WSBITS - 32 + 1	# leftmost bit set -> return 1
#else
	movi	\bit, WSBITS
#if WSBITS > 16
	_bltui	\mask, 0x10000, 99f
	addi	\bit, \bit, -16
	extui	\mask, \mask, 16, 16
#endif
#if WSBITS > 8
99:	_bltui	\mask, 0x100, 99f
	addi	\bit, \bit, -8
	srli	\mask, \mask, 8
#endif
99:	_bltui	\mask, 0x10, 99f
	addi	\bit, \bit, -4
	srli	\mask, \mask, 4
99:	_bltui	\mask, 0x4, 99f
	addi	\bit, \bit, -2
	srli	\mask, \mask, 2
99:	_bltui	\mask, 0x2, 99f
	addi	\bit, \bit, -1
99:

#endif
	.endm

/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */

/*
 * First-level exception handler for user exceptions.
 * Save some special registers, extra states and all registers in the AR
 * register file that were in use in the user task, and jump to the common
 * exception code.
 * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
 * save them for kernel exceptions).
 *
 * Entry condition for user_exception:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original value in depc
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	      < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _user_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _user_exception might be at an odd address.  Don't use call0..call12.
 */

ENTRY(user_exception)

	/* Save a2, a3, and depc, restore excsave_1 and set SP. */

	xsr	a3, EXCSAVE_1
	rsr	a0, DEPC
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	.globl _user_exception
_user_exception:

	/* Save SAR and turn off single stepping */

	movi	a2, 0
	rsr	a3, SAR
	xsr	a2, ICOUNTLEVEL
	s32i	a3, a1, PT_SAR
	s32i	a2, a1, PT_ICOUNTLEVEL
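	/* The window-mask rotation performed next can be modeled in C as
	 * follows (an illustrative sketch only; rotws() is not a kernel
	 * helper), with WB = WINDOWBASE and WS = WINDOWSTART:
	 *
	 *	unsigned rotws(unsigned ws, unsigned wb)
	 *	{
	 *		ws &= (1U << WSBITS) - 1;
	 *		return ((ws >> wb) | (ws << (WSBITS - wb)))
	 *			& ((1U << WSBITS) - 1);
	 *	}
	 *
	 * Bit 0 of the result then corresponds to the current window frame.
	 */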
	/* Rotate ws so that the current windowbase is at bit 0. */
	/* Assume ws = xxwww1yyyy.  Rotate ws right, so that a2 = yyyyxxwww1 */

	rsr	a2, WINDOWBASE
	rsr	a3, WINDOWSTART
	ssr	a2
	s32i	a2, a1, PT_WINDOWBASE
	s32i	a3, a1, PT_WINDOWSTART
	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2
	srli	a2, a2, 32-WSBITS
	s32i	a2, a1, PT_WMASK	# needed for restoring registers

	/* Save only live registers. */

	_bbsi.l	a2, 1, 1f
	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15
	_bnei	a2, 1, 1f		# only one valid frame?

	/* Only one valid frame, skip saving regs. */

	j	2f

	/* Save the remaining registers.
	 * We have to save all registers up to the first '1' from
	 * the right, except the current frame (bit 0).
	 * Assume a2 is:  001001000110001
	 * All register frames starting from the top field down to the
	 * marked '1' must be saved.
	 */

1:	addi	a3, a2, -1		# eliminate '1' in bit 0: yyyyxxww0
	neg	a3, a3			# yyyyxxww0 -> YYYYXXWW1+1
	and	a3, a3, a2		# max. only one bit is set

	/* Find number of frames to save */

	ffs_ws	a0, a3			# number of frames to the '1' from left

	/* Store information into WMASK:
	 * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
	 * bits 4...: number of valid 4-register frames
	 */

	slli	a3, a0, 4		# number of frames to save in bits 8..4
	extui	a2, a2, 0, 4		# mask for the first 16 registers
	or	a2, a3, a2
	s32i	a2, a1, PT_WMASK	# needed when we restore the reg-file

	/* Save 4 registers at a time */

1:	rotw	-1
	s32i	a0, a5, PT_AREG_END - 16
	s32i	a1, a5, PT_AREG_END - 12
	s32i	a2, a5, PT_AREG_END - 8
	s32i	a3, a5, PT_AREG_END - 4
	addi	a0, a4, -1
	addi	a1, a5, -16
	_bnez	a0, 1b

	/* WINDOWBASE still in SAR! */

	rsr	a2, SAR			# original WINDOWBASE
	movi	a3, 1
	ssl	a2
	sll	a3, a3
	wsr	a3, WINDOWSTART		# set corresponding WINDOWSTART bit
	wsr	a2, WINDOWBASE		# and WINDOWBASE
	rsync

	/* We are back to the original stack pointer (a1) */

2:
#if XCHAL_EXTRA_SA_SIZE

	/* For user exceptions, save the extra state into the user's TCB.
	 * Note: We must assume that xchal_extra_store_funcbody destroys a2..a15
	 */

	GET_CURRENT(a2,a1)
	addi	a2, a2, THREAD_CP_SAVE
	xchal_extra_store_funcbody
#endif

	/* Now, jump to the common exception handler. */

	j	common_exception
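/* For reference, the WMASK value built by the exception entry above can
 * be read as the following illustrative C expression (not an actual
 * kernel macro):
 *
 *	wmask = (frames_to_save << 4) | (rotated_ws & 0xf);
 *
 * Bit 0 is always set (the current frame); the a4..a7, a8..a11, and
 * a12..a15 quads are saved to pt_regs up to the first set bit among
 * bits 1..3; and wmask >> 4 counts the 4-register frames spilled by
 * the rotw loop.
 */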
/*
 * First-level exception handler for kernel exceptions
 * Save special registers and the live window frame.
 * Note: Even though we change the stack pointer, we don't have to do a
 *	 MOVSP here, as we do that when we return from the exception.
 *	 (See comment in the kernel exception exit code)
 *
 * Entry condition for kernel_exception:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	      < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _kernel_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _kernel_exception might be at an odd address.  Don't use call0..call12.
 */

ENTRY(kernel_exception)

	/* Save a0, a2, a3, DEPC and set SP. */

	xsr	a3, EXCSAVE_1		# restore a3, excsave_1
	rsr	a0, DEPC		# get a2
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	.globl _kernel_exception
_kernel_exception:

	/* Save SAR and turn off single stepping */

	movi	a2, 0
	rsr	a3, SAR
	xsr	a2, ICOUNTLEVEL
	s32i	a3, a1, PT_SAR
	s32i	a2, a1, PT_ICOUNTLEVEL

	/* Rotate ws so that the current windowbase is at bit 0. */
	/* Assume ws = xxwww1yyyy.  Rotate ws right, so that a2 = yyyyxxwww1 */

	rsr	a2, WINDOWBASE		# don't need to save these, we only
	rsr	a3, WINDOWSTART		# need shifted windowstart: windowmask
	ssr	a2
	slli	a2, a3, 32-WSBITS
	src	a2, a3, a2
	srli	a2, a2, 32-WSBITS
	s32i	a2, a1, PT_WMASK	# needed for kernel_exception_exit

	/* Save only the live window-frame */

	_bbsi.l	a2, 1, 1f
	s32i	a4, a1, PT_AREG4
	s32i	a5, a1, PT_AREG5
	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15

1:

#ifdef KERNEL_STACK_OVERFLOW_CHECK

	/*  Stack overflow check, for debugging  */
	extui	a2, a1, TASK_SIZE_BITS,XX
	movi	a3, SIZE??
	_bge	a2, a3, out_of_stack_panic

#endif

/*
 * This is the common exception handler.
 * We get here from the user exception handler or simply by falling through
 * from the kernel exception handler.
 * Save the remaining special registers, switch to kernel mode, and jump
 * to the second-level exception handler.
 *
 */

common_exception:

	/* Save some registers, disable loops and clear the syscall flag. */

	rsr	a2, DEBUGCAUSE
	rsr	a3, EPC_1
	s32i	a2, a1, PT_DEBUGCAUSE
	s32i	a3, a1, PT_PC

	movi	a2, -1
	rsr	a3, EXCVADDR
	s32i	a2, a1, PT_SYSCALL
	movi	a2, 0
	s32i	a3, a1, PT_EXCVADDR
	xsr	a2, LCOUNT
	s32i	a2, a1, PT_LCOUNT

	/* It is now safe to restore the EXC_TABLE_FIXUP variable. */

	rsr	a0, EXCCAUSE
	movi	a3, 0
	rsr	a2, EXCSAVE_1
	s32i	a0, a1, PT_EXCCAUSE
	s32i	a3, a2, EXC_TABLE_FIXUP
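	/* The PS manipulation in the next block can be sketched in C as
	 * (illustrative pseudocode; read_ps/write_ps stand in for the
	 * rsr/xsr pair, and the level-1 interrupt EXCCAUSE is 4 here):
	 *
	 *	intlevel0 = read_ps() & 1;
	 *	if (exccause == EXCCAUSE_LEVEL1_INTERRUPT)
	 *		intlevel0 = 1;		// keep interrupts masked
	 *	write_ps((1 << PS_WOE_BIT) | intlevel0);
	 */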
	/* All unrecoverable states are saved on stack now, and a1 is valid,
	 * so we can allow exceptions and interrupts (*) again.
	 * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
	 *
	 * (*) We only allow interrupts if PS.INTLEVEL was not set to 1 before
	 *     (interrupts disabled) and if this exception is not an interrupt.
	 */

	rsr	a3, PS
	addi	a0, a0, -4
	movi	a2, 1
	extui	a3, a3, 0, 1		# a3 = PS.INTLEVEL[0]
	moveqz	a3, a2, a0		# a3 = 1 iff interrupt exception
	movi	a2, 1 << PS_WOE_BIT
	or	a3, a3, a2
	rsr	a0, EXCCAUSE
	xsr	a3, PS

	s32i	a3, a1, PT_PS		# save ps

	/* Save LBEG, LEND */

	rsr	a2, LBEG
	rsr	a3, LEND
	s32i	a2, a1, PT_LBEG
	s32i	a3, a1, PT_LEND

	/* Go to second-level dispatcher. Set up parameters to pass to the
	 * exception handler and call the exception handler.
	 */

	movi	a4, exc_table
	mov	a6, a1			# pass stack frame
	mov	a7, a0			# pass EXCCAUSE
	addx4	a4, a0, a4
	l32i	a4, a4, EXC_TABLE_DEFAULT	# load handler

	/* Call the second-level handler */

	callx4	a4

	/* Jump here for exception exit */

common_exception_return:

	/* Jump if we are returning from kernel exceptions. */

1:	l32i	a3, a1, PT_PS
	_bbsi.l	a3, PS_UM_BIT, 2f
	j	kernel_exception_exit

	/* Specific to a user exception exit:
	 * We need to check some flags for signal handling and rescheduling,
	 * and have to restore WB and WS, extra states, and all registers
	 * in the register file that were in use in the user task.
	 */

2:	wsr	a3, PS		/* disable interrupts */

	/* Check for signals (keep interrupts disabled while we read TI_FLAGS)
	 * Note: PS.INTLEVEL = 0, PS.EXCM = 1
	 */

	GET_THREAD_INFO(a2,a1)
	l32i	a4, a2, TI_FLAGS

	/* Enable interrupts again.
	 * Note: When we get here, we certainly have handled any interrupts.
	 * (Hint: There is only one user exception frame on stack)
	 */

	movi	a3, 1 << PS_WOE_BIT

	_bbsi.l	a4, TIF_NEED_RESCHED, 3f
	_bbci.l	a4, TIF_SIGPENDING, 4f

#ifndef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
	l32i	a4, a1, PT_DEPC
	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
#endif

	/* Reenable interrupts and call do_signal() */

	wsr	a3, PS
	movi	a4, do_signal	# int do_signal(struct pt_regs*, sigset_t*)
	mov	a6, a1
	movi	a7, 0
	callx4	a4
	j	1b

3:	/* Reenable interrupts and reschedule */

	wsr	a3, PS
	movi	a4, schedule	# void schedule (void)
	callx4	a4
	j	1b

	/* Restore the state of the task and return from the exception. */

4:	/* a2 holds GET_CURRENT(a2,a1)  */

#if XCHAL_EXTRA_SA_SIZE

	/* For user exceptions, restore the extra state from the user's TCB. */

	/* Note: a2 still contains GET_CURRENT(a2,a1) */
	addi	a2, a2, THREAD_CP_SAVE
	xchal_extra_load_funcbody

	/* We must assume that xchal_extra_store_funcbody destroys
	 * registers a2..a15. FIXME, this list can eventually be
	 * reduced once real register requirements of the macro are
	 * finalized. */

#endif /* XCHAL_EXTRA_SA_SIZE */


	/* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */

	l32i	a2, a1, PT_WINDOWBASE
	l32i	a3, a1, PT_WINDOWSTART
	wsr	a1, DEPC		# use DEPC as temp storage
	wsr	a3, WINDOWSTART		# restore WINDOWSTART
	ssr	a2			# preserve user's WB in the SAR
	wsr	a2, WINDOWBASE		# switch to user's saved WB
	rsync
	rsr	a1, DEPC		# restore stack pointer
	l32i	a2, a1, PT_WMASK	# register frames saved (in bits 4...9)
	rotw	-1			# we restore a4..a7
	_bltui	a6, 16, 1f		# only have to restore current window?
	/* The working registers are a2 and a3.  We are restoring to
	 * a4..a7.  Be careful not to destroy what we have just restored.
	 * Note: wmask has the format YYYYM:
	 *       Y: number of registers saved in groups of 4
	 *       M: 4 bit mask of first 16 registers
	 */

	mov	a2, a6
	mov	a3, a5

2:	rotw	-1		# a0..a3 become a4..a7
	addi	a3, a7, -4*4	# next iteration
	addi	a2, a6, -16	# decrementing Y in WMASK
	l32i	a4, a3, PT_AREG_END + 0
	l32i	a5, a3, PT_AREG_END + 4
	l32i	a6, a3, PT_AREG_END + 8
	l32i	a7, a3, PT_AREG_END + 12
	_bgeui	a2, 16, 2b

	/* Clear unrestored registers (don't leak anything to user-land) */

1:	rsr	a0, WINDOWBASE
	rsr	a3, SAR
	sub	a3, a0, a3
	beqz	a3, 2f
	extui	a3, a3, 0, WBBITS

1:	rotw	-1
	addi	a3, a7, -1
	movi	a4, 0
	movi	a5, 0
	movi	a6, 0
	movi	a7, 0
	bgei	a3, 1, 1b

	/* We are back where we were when we started.
	 * Note: a2 still contains WMASK (if we've returned to the original
	 *	 frame where we had loaded a2), or at least the lower 4 bits
	 *	 (if we have restored WSBITS-1 frames).
	 */

2:	j	common_exception_exit

	/* This is the kernel exception exit.
	 * We avoided doing a MOVSP when we entered the exception, but we
	 * have to do it here.
	 */

kernel_exception_exit:

	/* Disable interrupts (a3 holds PT_PS) */

	wsr	a3, PS

#ifdef PREEMPTIBLE_KERNEL

#ifdef CONFIG_PREEMPT

	/*
	 * Note: We've just returned from a call4, so we have
	 * at least 4 additional regs.
	 */

	/* Check current_thread_info->preempt_count */

	GET_THREAD_INFO(a2)
	l32i	a3, a2, TI_PREEMPT
	bnez	a3, 1f

	l32i	a2, a2, TI_FLAGS

1:

#endif

#endif

	/* Check if we have to do a movsp.
	 *
	 * We only have to do a movsp if the previous window-frame has
	 * been spilled to the *temporary* exception stack instead of the
	 * task's stack. This is the case if the corresponding bit in
	 * WINDOWSTART for the previous window-frame was set before
	 * (not spilled) but is zero now (spilled).
	 * If this bit is zero, all other bits except the one for the
	 * current window frame are also zero. So, we can use a simple test:
	 *
	 *	'and' WINDOWSTART and WINDOWSTART-1:
	 *
	 *	(XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
	 *
	 * The result is zero only if one bit was set.
	 *
	 * (Note: We might have gone through several task switches before
	 *        we come back to the current task, so WINDOWBASE might be
	 *        different from the time the exception occurred.)
	 */

	/* Test WINDOWSTART before and after the exception.
	 * We actually have WMASK, so we only have to test if it is 1 or not.
	 */

	l32i	a2, a1, PT_WMASK
	_beqi	a2, 1, common_exception_exit	# Spilled before exception, jump

	/* Test WINDOWSTART now. If spilled, do the movsp */

	rsr	a3, WINDOWSTART
	addi	a0, a3, -1
	and	a3, a3, a0
	_bnez	a3, common_exception_exit

	/* Do a movsp (we returned from a call4, so we have at least a0..a7) */

	addi	a0, a1, -16
	l32i	a3, a0, 0
	l32i	a4, a0, 4
	s32i	a3, a1, PT_SIZE+0
	s32i	a4, a1, PT_SIZE+4
	l32i	a3, a0, 8
	l32i	a4, a0, 12
	s32i	a3, a1, PT_SIZE+8
	s32i	a4, a1, PT_SIZE+12
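	/* In C terms, the copy above amounts to (an illustrative sketch):
	 *
	 *	memcpy((char *)regs + PT_SIZE, (char *)regs - 16, 16);
	 *
	 * i.e. the four registers the hardware spilled to the temporary
	 * exception stack are moved to the save area above the pt_regs
	 * frame -- the MOVSP we skipped on entry, done by hand.
	 */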
	/* Common exception exit.
	 * We restore the special registers and the current window frame,
	 * and return from the exception.
	 *
	 * Note: We expect a2 to hold PT_WMASK
	 */

common_exception_exit:

	_bbsi.l	a2, 1, 1f
	l32i	a4, a1, PT_AREG4
	l32i	a5, a1, PT_AREG5
	l32i	a6, a1, PT_AREG6
	l32i	a7, a1, PT_AREG7
	_bbsi.l	a2, 2, 1f
	l32i	a8, a1, PT_AREG8
	l32i	a9, a1, PT_AREG9
	l32i	a10, a1, PT_AREG10
	l32i	a11, a1, PT_AREG11
	_bbsi.l	a2, 3, 1f
	l32i	a12, a1, PT_AREG12
	l32i	a13, a1, PT_AREG13
	l32i	a14, a1, PT_AREG14
	l32i	a15, a1, PT_AREG15

	/* Restore PC, SAR */

1:	l32i	a2, a1, PT_PC
	l32i	a3, a1, PT_SAR
	wsr	a2, EPC_1
	wsr	a3, SAR

	/* Restore LBEG, LEND, LCOUNT */

	l32i	a2, a1, PT_LBEG
	l32i	a3, a1, PT_LEND
	wsr	a2, LBEG
	l32i	a2, a1, PT_LCOUNT
	wsr	a3, LEND
	wsr	a2, LCOUNT

	/* We control single stepping through the ICOUNTLEVEL register. */

	l32i	a2, a1, PT_ICOUNTLEVEL
	movi	a3, -2
	wsr	a2, ICOUNTLEVEL
	wsr	a3, ICOUNT

	/* Check if it was double exception. */

	l32i	a0, a1, PT_DEPC
	l32i	a3, a1, PT_AREG3
	l32i	a2, a1, PT_AREG2
	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	/* Restore a0...a3 and return */

	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfe

1:	wsr	a0, DEPC
	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1
	rfde

/*
 * Debug exception handler.
 *
 * Currently, we don't support KGDB, so only user applications can be debugged.
 *
 * When we get here, a0 is trashed and saved to excsave[debuglevel]
 */

ENTRY(debug_exception)

	rsr	a0, EPS + XCHAL_DEBUGLEVEL
	bbsi.l	a0, PS_EXCM_BIT, 1f	# exception mode

	/* Set EPC_1 and EXCCAUSE */

	wsr	a2, DEPC		# save a2 temporarily
	rsr	a2, EPC + XCHAL_DEBUGLEVEL
	wsr	a2, EPC_1

	movi	a2, EXCCAUSE_MAPPED_DEBUG
	wsr	a2, EXCCAUSE

	/* Restore PS to the value before the debug exc but with PS.EXCM set.*/

	movi	a2, 1 << PS_EXCM_BIT
	or	a2, a0, a2
	movi	a0, debug_exception	# restore a0, debug jump vector
	wsr	a2, PS
	xsr	a0, EXCSAVE + XCHAL_DEBUGLEVEL

	/* Switch to kernel/user stack, restore jump vector, and save a0 */

	bbsi.l	a2, PS_UM_BIT, 2f	# jump if user mode

	addi	a2, a1, -16-PT_SIZE	# assume kernel stack
	s32i	a0, a2, PT_AREG0
	movi	a0, 0
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_DEPC		# mark it as a regular exception
	xsr	a0, DEPC
	s32i	a3, a2, PT_AREG3
	s32i	a0, a2, PT_AREG2
	mov	a1, a2
	j	_kernel_exception

2:	rsr	a2, EXCSAVE_1
	l32i	a2, a2, EXC_TABLE_KSTK	# load kernel stack pointer
	s32i	a0, a2, PT_AREG0
	movi	a0, 0
	s32i	a1, a2, PT_AREG1
	s32i	a0, a2, PT_DEPC
	xsr	a0, DEPC
	s32i	a3, a2, PT_AREG3
	s32i	a0, a2, PT_AREG2
	mov	a1, a2
	j	_user_exception

	/* Debug exception while in exception mode. */
1:	j	1b	// FIXME!!
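/* In effect, the handler above demotes a level-XCHAL_DEBUGLEVEL debug
 * exception to an ordinary level-1 exception (illustrative pseudocode,
 * not kernel helpers):
 *
 *	epc1     = epc[XCHAL_DEBUGLEVEL];
 *	exccause = EXCCAUSE_MAPPED_DEBUG;
 *	ps       = eps[XCHAL_DEBUGLEVEL] | (1 << PS_EXCM_BIT);
 *	... then save a0..a3 and depc, and enter _kernel_exception or
 *	_user_exception as for any other exception.
 */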
/*
 * We get here in case of an unrecoverable exception.
 * The only thing we can do is to be nice and print a panic message.
 * We only produce a single stack frame for panic, so ???
 *
 *
 * Entry conditions:
 *
 *   - a0 contains the caller address; original value saved in excsave1.
 *   - the original a0 contains a valid return address (backtrace) or 0.
 *   - a2 contains a valid stack pointer
 *
 * Notes:
 *
 *   - If the stack pointer could be invalid, the caller has to set up a
 *     dummy stack pointer (e.g. the stack of the init_task)
 *
 *   - If the return address could be invalid, the caller has to set it
 *     to 0, so the backtrace would stop.
 *
 */
	.align 4
unrecoverable_text:
	.ascii "Unrecoverable error in exception handler\0"

ENTRY(unrecoverable_exception)

	movi	a0, 1
	movi	a1, 0

	wsr	a0, WINDOWSTART
	wsr	a1, WINDOWBASE
	rsync

	movi	a1, (1 << PS_WOE_BIT) | 1
	wsr	a1, PS
	rsync

	movi	a1, init_task
	movi	a0, 0
	addi	a1, a1, PT_REGS_OFFSET

	movi	a4, panic
	movi	a6, unrecoverable_text

	callx4	a4

1:	j	1b


/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */

/*
 * Fast-handler for alloca exceptions
 *
 *  The ALLOCA handler is entered when user code executes the MOVSP
 *  instruction and the caller's frame is not in the register file.
 *  In this case, the caller frame's a0..a3 are on the stack just
 *  below sp (a1), and this handler moves them.
 *
 *  For "MOVSP <ar>,<as>" without destination register a1, this routine
 *  simply moves the value from <as> to <ar> without moving the save area.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	      < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

#if XCHAL_HAVE_BE
#define _EXTUI_MOVSP_SRC(ar)	extui ar, ar, 4, 4
#define _EXTUI_MOVSP_DST(ar)	extui ar, ar, 0, 4
#else
#define _EXTUI_MOVSP_SRC(ar)	extui ar, ar, 0, 4
#define _EXTUI_MOVSP_DST(ar)	extui ar, ar, 4, 4
#endif

ENTRY(fast_alloca)

	/* We shouldn't be in a double exception. */

	l32i	a0, a2, PT_DEPC
	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lunhandled_double

	rsr	a0, DEPC		# get a2
	s32i	a4, a2, PT_AREG4	# save a4 and
	s32i	a0, a2, PT_AREG2	# a2 to stack

	/* Exit critical section. */

	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore a3, excsave_1 */

	xsr	a3, EXCSAVE_1		# make sure excsave_1 is valid for dbl.
	rsr	a4, EPC_1		# get exception address
	s32i	a3, a2, PT_AREG3	# save a3 to stack
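	/* Decoding refresher for the instruction reads below (descriptive
	 * only): in the 24-bit MOVSP <at>,<as> encoding, the s field names
	 * the source register and the t field the destination.  On
	 * little-endian cores, the low nibble of byte 1 holds s and the
	 * high nibble of byte 0 holds t; big-endian cores swap the nibble
	 * positions, which is what the _EXTUI_MOVSP_SRC/_DST macros above
	 * account for.
	 */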
#ifdef ALLOCA_EXCEPTION_IN_IRAM
#error	iram not supported
#else
	/* Note: l8ui not allowed in IRAM/IROM!! */
	l8ui	a0, a4, 1		# read as(src) from MOVSP instruction
#endif
	movi	a3, .Lmovsp_src
	_EXTUI_MOVSP_SRC(a0)		# extract source register number
	addx8	a3, a0, a3
	jx	a3

.Lunhandled_double:
	wsr	a0, EXCSAVE_1
	movi	a0, unrecoverable_exception
	callx0	a0

	.align 8
.Lmovsp_src:
	l32i	a3, a2, PT_AREG0;	_j 1f;	.align 8
	mov	a3, a1;			_j 1f;	.align 8
	l32i	a3, a2, PT_AREG2;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG3;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG4;	_j 1f;	.align 8
	mov	a3, a5;			_j 1f;	.align 8
	mov	a3, a6;			_j 1f;	.align 8
	mov	a3, a7;			_j 1f;	.align 8
	mov	a3, a8;			_j 1f;	.align 8
	mov	a3, a9;			_j 1f;	.align 8
	mov	a3, a10;		_j 1f;	.align 8
	mov	a3, a11;		_j 1f;	.align 8
	mov	a3, a12;		_j 1f;	.align 8
	mov	a3, a13;		_j 1f;	.align 8
	mov	a3, a14;		_j 1f;	.align 8
	mov	a3, a15;		_j 1f;	.align 8

1:

#ifdef ALLOCA_EXCEPTION_IN_IRAM
#error	iram not supported
#else
	l8ui	a0, a4, 0		# read ar(dst) from MOVSP instruction
#endif
	addi	a4, a4, 3		# step over movsp
	_EXTUI_MOVSP_DST(a0)		# extract destination register
	wsr	a4, EPC_1		# save new epc_1

	_bnei	a0, 1, 1f		# no 'movsp a1, ax': jump

	/* Move the save area. This implies the use of the L32E
	 * and S32E instructions, because this move must be done with
	 * the user's PS.RING privilege levels, not with ring 0
	 * (kernel's) privileges currently active with PS.EXCM
	 * set. Note that we have still registered a fixup routine with the
	 * double exception vector in case a double exception occurs.
	 */

	/* a0,a4:avail a1:old user stack a2:exc. stack a3:new user stack. */

	l32e	a0, a1, -16
	l32e	a4, a1, -12
	s32e	a0, a3, -16
	s32e	a4, a3, -12
	l32e	a0, a1, -8
	l32e	a4, a1, -4
	s32e	a0, a3, -8
	s32e	a4, a3, -4

	/* Restore stack-pointer and all the other saved registers. */

	mov	a1, a3

	l32i	a4, a2, PT_AREG4
	l32i	a3, a2, PT_AREG3
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_AREG2
	rfe

	/* MOVSP <at>,<as> was invoked with <at> != a1.
	 * Because the stack pointer is not being modified,
	 * we should be able to just modify the pointer
	 * without moving any save area.
	 * The processor only traps these occurrences if the
	 * caller window isn't live, so unfortunately we can't
	 * use this as an alternate trap mechanism.
	 * So we just do the move.  This requires that we
	 * resolve the destination register, not just the source,
	 * so there's some extra work.
	 * (PERHAPS NOT REALLY NEEDED, BUT CLEANER...)
	 */

	/* a0 dst-reg, a1 user-stack, a2 stack, a3 value of src reg. */

1:	movi	a4, .Lmovsp_dst
	addx8	a4, a0, a4
	jx	a4

	.align 8
.Lmovsp_dst:
	s32i	a3, a2, PT_AREG0;	_j 1f;	.align 8
	mov	a1, a3;			_j 1f;	.align 8
	s32i	a3, a2, PT_AREG2;	_j 1f;	.align 8
	s32i	a3, a2, PT_AREG3;	_j 1f;	.align 8
	s32i	a3, a2, PT_AREG4;	_j 1f;	.align 8
	mov	a5, a3;			_j 1f;	.align 8
	mov	a6, a3;			_j 1f;	.align 8
	mov	a7, a3;			_j 1f;	.align 8
	mov	a8, a3;			_j 1f;	.align 8
	mov	a9, a3;			_j 1f;	.align 8
	mov	a10, a3;		_j 1f;	.align 8
	mov	a11, a3;		_j 1f;	.align 8
	mov	a12, a3;		_j 1f;	.align 8
	mov	a13, a3;		_j 1f;	.align 8
	mov	a14, a3;		_j 1f;	.align 8
	mov	a15, a3;		_j 1f;	.align 8

1:	l32i	a4, a2, PT_AREG4
	l32i	a3, a2, PT_AREG3
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_AREG2
	rfe
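/* Summing up the 'movsp a1, <as>' path above in C terms (an
 * illustrative sketch; the copy is really done with L32E/S32E so it
 * runs with the user's ring privileges rather than the kernel's):
 *
 *	memcpy(new_sp - 16, old_sp - 16, 16);	// move a0..a3 save area
 *	sp = new_sp;
 */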
/*
 * fast system calls.
 *
 * WARNING:  The kernel doesn't save the entire user context before
 * handling a fast system call.  These functions are small and short,
 * usually offering some functionality not available to user tasks.
 *
 * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 */

ENTRY(fast_syscall_kernel)

	/* Skip syscall. */

	rsr	a0, EPC_1
	addi	a0, a0, 3
	wsr	a0, EPC_1

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

	rsr	a0, DEPC		# get syscall-nr
	_beqz	a0, fast_syscall_spill_registers
	_beqi	a0, __NR_xtensa, fast_syscall_xtensa

	j	kernel_exception

ENTRY(fast_syscall_user)

	/* Skip syscall. */

	rsr	a0, EPC_1
	addi	a0, a0, 3
	wsr	a0, EPC_1

	l32i	a0, a2, PT_DEPC
	bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

	rsr	a0, DEPC		# get syscall-nr
	_beqz	a0, fast_syscall_spill_registers
	_beqi	a0, __NR_xtensa, fast_syscall_xtensa

	j	user_exception

ENTRY(fast_syscall_unrecoverable)

	/* Restore all states. */

	l32i	a0, a2, PT_AREG0	# restore a0
	xsr	a2, DEPC		# restore a2, depc
	rsr	a3, EXCSAVE_1

	wsr	a0, EXCSAVE_1
	movi	a0, unrecoverable_exception
	callx0	a0



/*
 * sysxtensa syscall handler
 *
 *     int sysxtensa (SYS_XTENSA_ATOMIC_SET,     ptr, val,    unused);
 *     int sysxtensa (SYS_XTENSA_ATOMIC_ADD,     ptr, val,    unused);
 *     int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val,    unused);
 *     int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
 *         a2            a6                      a3   a4      a5
 *
 * Entry condition:
 *
 *   a0:	a2 (syscall-nr), original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in a0 and DEPC
 *   a3:	dispatch table, original in excsave_1
 *   a4..a15:	unchanged
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	      < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Note: we don't have to save a2; a2 holds the return value
 *
 * We use the two macros TRY and CATCH:
 *
 * TRY	 adds an entry to the __ex_table fixup table for the immediately
 *	 following instruction.
 *
 * CATCH catches any exception that occurred at one of the preceding TRY
 *	 statements and continues from there
 *
 * Usage	TRY	l32i	a0, a1, 0
 *		<other code>
 *	 done:	rfe
 *	 CATCH	<set return code>
 *		j done
 */

#define TRY				\
	.section __ex_table, "a";	\
	.word	66f, 67f;		\
	.text;				\
66:

#define CATCH				\
67:

ENTRY(fast_syscall_xtensa)

	xsr	a3, EXCSAVE_1		# restore a3, excsave1

	s32i	a7, a2, PT_AREG7	# we need an additional register
	movi	a7, 4			# sizeof(unsigned int)
	access_ok a3, a7, a0, a2, .Leac	# a0: scratch reg, a2: sp

	addi	a6, a6, -1		# assuming SYS_XTENSA_ATOMIC_SET = 1
	_bgeui	a6, SYS_XTENSA_COUNT - 1, .Lill
	_bnei	a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp
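	/* C-level semantics of the paths below (an illustrative sketch;
	 * the real user-pointer checks go through access_ok and the
	 * TRY/__ex_table fixups):
	 *
	 *	switch (op) {
	 *	case SYS_XTENSA_ATOMIC_CMP_SWP:
	 *		if (*ptr != oldval)
	 *			return 0;
	 *		*ptr = newval;
	 *		return 1;
	 *	case SYS_XTENSA_ATOMIC_SET:
	 *		old = *ptr; *ptr = val;       return old;
	 *	case SYS_XTENSA_ATOMIC_ADD:
	 *	case SYS_XTENSA_ATOMIC_EXG_ADD:
	 *		old = *ptr; *ptr = old + val; return old;
	 *	default:
	 *		return -EINVAL;		// -EFAULT on a bad pointer
	 *	}
	 */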
	/* Fall through for ATOMIC_CMP_SWP. */

.Lswp:	/* Atomic compare and swap */

TRY	l32i	a0, a3, 0		# read old value
	bne	a0, a4, 1f		# different from old value? jump
TRY	s32i	a5, a3, 0		# same, modify value
	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, 1			# and return 1
	addi	a6, a6, 1		# restore a6 (really necessary?)
	rfe

1:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, 0			# return 0
	addi	a6, a6, 1		# restore a6 (really necessary?)
	rfe

.Lnswp:	/* Atomic set, add, and exg_add. */

TRY	l32i	a7, a3, 0		# orig
	add	a0, a4, a7		# + arg
	moveqz	a0, a4, a6		# set
TRY	s32i	a0, a3, 0		# write new value

	mov	a0, a2
	mov	a2, a7
	l32i	a7, a0, PT_AREG7	# restore a7
	l32i	a0, a0, PT_AREG0	# restore a0
	addi	a6, a6, 1		# restore a6 (really necessary?)
	rfe

CATCH
.Leac:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -EFAULT
	rfe

.Lill:	l32i	a7, a2, PT_AREG7	# restore a7
	l32i	a0, a2, PT_AREG0	# restore a0
	movi	a2, -EINVAL
	rfe




/* fast_syscall_spill_registers.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
 * Note: We don't need to save a2 in depc (return value)
 */

ENTRY(fast_syscall_spill_registers)

	/* Register a FIXUP handler (pass current wb as a parameter) */

	movi	a0, fast_syscall_spill_registers_fixup
	s32i	a0, a3, EXC_TABLE_FIXUP
	rsr	a0, WINDOWBASE
	s32i	a0, a3, EXC_TABLE_PARAM

	/* Save a3 and SAR on stack. */

	rsr	a0, SAR
	xsr	a3, EXCSAVE_1		# restore a3 and excsave_1
	s32i	a0, a2, PT_AREG4	# store SAR to PT_AREG4
	s32i	a3, a2, PT_AREG3

	/* The spill routine might clobber a7, a11, and a15. */

	s32i	a7, a2, PT_AREG5
	s32i	a11, a2, PT_AREG6
	s32i	a15, a2, PT_AREG7

	call0	_spill_registers	# destroys a3, DEPC, and SAR

	/* Advance PC, restore registers and SAR, and return from exception. */

	l32i	a3, a2, PT_AREG4
	l32i	a0, a2, PT_AREG0
	wsr	a3, SAR
	l32i	a3, a2, PT_AREG3

	/* Restore clobbered registers. */

	l32i	a7, a2, PT_AREG5
	l32i	a11, a2, PT_AREG6
	l32i	a15, a2, PT_AREG7

	movi	a2, 0
	rfe
/* Fixup handler.
 *
 * We get here if the spill routine causes an exception, e.g. tlb miss.
 * We basically restore WINDOWBASE and WINDOWSTART to the condition when
 * we entered the spill routine and jump to the user exception handler.
 *
 * a0: value of depc, original value in depc
 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
 * a3: exctable, original value in excsave1
 */

fast_syscall_spill_registers_fixup:

	rsr	a2, WINDOWBASE	# get current windowbase (a2 is saved)
	xsr	a0, DEPC	# restore depc and a0
	ssl	a2		# set shift (32 - WB)

	/* We need to make sure the current registers (a0-a3) are preserved.
	 * To do this, we simply set the bit for the current window frame
	 * in WS, so that the exception handlers save them to the task stack.
	 */

	rsr	a3, EXCSAVE_1	# get spill-mask
	slli	a2, a3, 1	# shift left by one

	slli	a3, a2, 32-WSBITS
	src	a2, a2, a3	# a2 = xxwww1yyxxxwww1yy......
	wsr	a2, WINDOWSTART	# set corrected windowstart

	movi	a3, exc_table
	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE	# restore a2
	l32i	a3, a3, EXC_TABLE_PARAM	# original WB (in user task)

	/* Return to the original (user task) WINDOWBASE.
	 * We leave the following frame behind:
	 * a0, a1, a2	same
	 * a3:		trashed (saved in excsave_1)
	 * depc:	depc (we have to return to that address)
	 * excsave_1:	a3
	 */

	wsr	a3, WINDOWBASE
	rsync

	/* We are now in the original frame when we entered _spill_registers:
	 *  a0: return address
	 *  a1: used, stack pointer
	 *  a2: kernel stack pointer
	 *  a3: available, saved in EXCSAVE_1
	 *  depc: exception address
	 *  excsave: a3
	 * Note: This frame might be the same as above.
	 */

#ifdef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
	/* Restore registers we saved as a precaution.
	 * We have the value of the 'right' a3
	 */

	l32i	a7, a2, PT_AREG5
	l32i	a11, a2, PT_AREG6
	l32i	a15, a2, PT_AREG7
#endif

	/* Setup stack pointer. */

	addi	a2, a2, -PT_USER_SIZE
	s32i	a0, a2, PT_AREG0

	/* Make sure we return to this fixup handler. */

	movi	a3, fast_syscall_spill_registers_fixup_return
	s32i	a3, a2, PT_DEPC		# setup depc

	/* Jump to the exception handler. */

	movi	a3, exc_table
	rsr	a0, EXCCAUSE
	addx4	a0, a0, a3		# find entry in table
	l32i	a0, a0, EXC_TABLE_FAST_USER	# load handler
	jx	a0

fast_syscall_spill_registers_fixup_return:

	/* When we return here, all registers have been restored (a2: DEPC) */

	wsr	a2, DEPC		# exception address

	/* Restore fixup handler. */

	xsr	a3, EXCSAVE_1
	movi	a2, fast_syscall_spill_registers_fixup
	s32i	a2, a3, EXC_TABLE_FIXUP
	rsr	a2, WINDOWBASE
	s32i	a2, a3, EXC_TABLE_PARAM
	l32i	a2, a3, EXC_TABLE_KSTK

#ifdef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
	/* Save registers again that might be clobbered. */

	s32i	a7, a2, PT_AREG5
	s32i	a11, a2, PT_AREG6
	s32i	a15, a2, PT_AREG7
#endif

	/* Load WB at the time the exception occurred. */

	rsr	a3, SAR		# WB is still in SAR
	neg	a3, a3
	wsr	a3, WINDOWBASE
	rsync

	/* Restore a3 and return. */

	movi	a3, exc_table
	xsr	a3, EXCSAVE_1

	rfde
/*
 * spill all registers.
 *
 * This is not a real function. The following conditions must be met:
 *
 *  - must be called with call0.
 *  - uses DEPC, a3 and SAR.
 *  - the last 'valid' register of each frame is clobbered.
 *  - the caller must have registered a fixup handler
 *    (or be inside a critical section)
 *  - PS_EXCM must be set (PS_WOE cleared?)
 */

ENTRY(_spill_registers)

	/*
	 * Rotate ws so that the current windowbase is at bit 0.
	 * Assume ws = xxxwww1yy (www1 current window frame).
	 * Rotate ws right so that a3 = yyxxxwww1.
	 */

	wsr	a2, DEPC		# preserve a2
	rsr	a2, WINDOWBASE
	rsr	a3, WINDOWSTART
	ssr	a2			# holds WB
	slli	a2, a3, WSBITS
	or	a3, a3, a2		# a3 = xxxwww1yyxxxwww1yy
	srl	a3, a3

	/* We are done if there are no frames other than the current one. */

	extui	a3, a3, 1, WSBITS-2	# a3 = 0yyxxxwww
	movi	a2, (1 << (WSBITS-1))
	_beqz	a3, .Lnospill		# only one active frame? jump

	/* We want 1 at the top, so that we return to the current windowbase */

	or	a3, a3, a2		# 1yyxxxwww

	/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */

	wsr	a3, WINDOWSTART		# save shifted windowstart
	neg	a2, a3
	and	a3, a2, a3		# first bit set from right: 000010000

	ffs_ws	a2, a3			# a2: shifts to skip empty frames
	movi	a3, WSBITS
	sub	a2, a3, a2		# WSBITS-a2:number of 0-bits from right
	ssr	a2			# save in SAR for later.

	rsr	a3, WINDOWBASE
	add	a3, a3, a2
	rsr	a2, DEPC		# restore a2
	wsr	a3, WINDOWBASE
	rsync

	rsr	a3, WINDOWSTART
	srl	a3, a3			# shift windowstart

	/* WB is now just one frame below the oldest frame in the register
	   window. WS is shifted so the oldest frame is in bit 0, thus, WB
	   and WS differ by one 4-register frame. */

	/* Save frames. Depending on what call was used (call4, call8,
	 * call12), we have to save 4, 8, or 12 registers.
	 */

	_bbsi.l	a3, 1, .Lc4
	_bbsi.l	a3, 2, .Lc8

	/* Special case: we have a call12-frame starting at a4. */

	_bbci.l	a3, 3, .Lc12	# bit 3 shouldn't be zero! (Jump to Lc12 first)

	s32e	a4, a1, -16	# a1 is valid with an empty spill area
	l32e	a4, a5, -12
	s32e	a8, a4, -48
	mov	a8, a4
	l32e	a4, a1, -16
	j	.Lc12c

.Lloop: _bbsi.l	a3, 1, .Lc4
	_bbci.l	a3, 2, .Lc12

.Lc8:	s32e	a4, a13, -16
	l32e	a4, a5, -12
	s32e	a8, a4, -32
	s32e	a5, a13, -12
	s32e	a6, a13, -8
	s32e	a7, a13, -4
	s32e	a9, a4, -28
	s32e	a10, a4, -24
	s32e	a11, a4, -20

	srli	a11, a3, 2	# shift windowstart by 2
	rotw	2
	_bnei	a3, 1, .Lloop

.Lexit: /* Done. Do the final rotation, set WS, and return. */

	rotw	1
	rsr	a3, WINDOWBASE
	ssl	a3
	movi	a3, 1
	sll	a3, a3
	wsr	a3, WINDOWSTART

.Lnospill:
	jx	a0

.Lc4:	s32e	a4, a9, -16
	s32e	a5, a9, -12
	s32e	a6, a9, -8
	s32e	a7, a9, -4

	srli	a7, a3, 1
	rotw	1
	_bnei	a3, 1, .Lloop
	j	.Lexit

.Lc12:	_bbci.l	a3, 3, .Linvalid_mask	# bit 3 shouldn't be zero!

	/* 12-register frame (call12) */

	l32e	a2, a5, -12
	s32e	a8, a2, -48
	mov	a8, a2

.Lc12c: s32e	a9, a8, -44
	s32e	a10, a8, -40
	s32e	a11, a8, -36
	s32e	a12, a8, -32
	s32e	a13, a8, -28
	s32e	a14, a8, -24
	s32e	a15, a8, -20
	srli	a15, a3, 3

	/* The stack pointer for a4..a7 is out of reach, so we rotate the
	 * window, grab the stackpointer, and rotate back.
	 * Alternatively, we could also use the following approach, but that
	 * makes the fixup routine much more complicated:
	 * rotw	1
	 * s32e	a0, a13, -16
	 * ...
	 * rotw 2
	 */

	rotw	1
	mov	a5, a13
	rotw	-1

	s32e	a4, a9, -16
	s32e	a5, a9, -12
	s32e	a6, a9, -8
	s32e	a7, a9, -4

	rotw	3

	_beqi	a3, 1, .Lexit
	j	.Lloop
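	/* For reference (an illustrative summary of the loop above): after
	 * the rotation, the low bits of the shifted WINDOWSTART tell how
	 * the frame being spilled was created, and hence how many of its
	 * registers go to the stack:
	 *
	 *	bit 1 set -> call4:  spill a4..a7
	 *	bit 2 set -> call8:  spill a4..a11
	 *	bit 3 set -> call12: spill a4..a15
	 */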
.Linvalid_mask:

	/* We get here because of an unrecoverable error in the window
	 * registers. If we are in user space, we kill the application;
	 * in kernel space, this condition is unrecoverable.
	 */

	rsr	a0, PS
	_bbci.l	a0, PS_UM_BIT, 1f

	/* User space: Set up a dummy frame and kill the application.
	 * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
	 */

	movi	a0, 1
	movi	a1, 0

	wsr	a0, WINDOWSTART
	wsr	a1, WINDOWBASE
	rsync

	movi	a0, 0

	movi	a3, exc_table
	l32i	a1, a3, EXC_TABLE_KSTK
	wsr	a3, EXCSAVE_1

	movi	a4, (1 << PS_WOE_BIT) | 1
	wsr	a4, PS
	rsync

	movi	a6, SIGSEGV
	movi	a4, do_exit
	callx4	a4

1:	/* Kernel space: PANIC! */

	wsr	a0, EXCSAVE_1
	movi	a0, unrecoverable_exception
	callx0	a0		# should not return
1:	j	1b

/*
 * We should never get here. Bail out!
 */

ENTRY(fast_second_level_miss_double_kernel)

1:	movi	a0, unrecoverable_exception
	callx0	a0		# should not return
1:	j	1b

/* First-level entry handler for user, kernel, and double 2nd-level
 * TLB miss exceptions.  Note that for now, user and kernel miss
 * exceptions share the same entry point and are handled identically.
 *
 * An old, less-efficient C version of this function used to exist.
 * We include it below, interleaved as comments, for reference.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	      < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_second_level_miss)

	/* Save a1. Note: we don't expect a double exception. */

	s32i	a1, a2, PT_AREG1

	/* We need to map the page of PTEs for the user task.  Find
	 * the pointer to that page.  Also, it's possible for tsk->mm
	 * to be NULL while tsk->active_mm is nonzero if we faulted on
	 * a vmalloc address.  In that rare case, we must use
	 * active_mm instead to avoid a fault in this handler.  See
	 *
	 * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
	 *   (or search Internet on "mm vs. active_mm")
	 *
	 *	if (!mm)
	 *		mm = tsk->active_mm;
	 *	pgd = pgd_offset (mm, regs->excvaddr);
	 *	pmd = pmd_offset (pgd, regs->excvaddr);
	 *	pmdval = *pmd;
	 */

	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
	beqz	a0, 9f

8:	rsr	a1, EXCVADDR		# fault address
	_PGD_OFFSET(a0, a1, a1)
	l32i	a0, a0, 0		# read pmdval
	//beqi	a0, _PAGE_USER, 2f
	beqz	a0, 2f

	/* Read ptevaddr and convert to top of page-table page.
	 *
	 *	vpnval = read_ptevaddr_register() & PAGE_MASK;
	 *	vpnval += DTLB_WAY_PGTABLE;
	 *	pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
	 *	write_dtlb_entry (pteval, vpnval);
	 *
	 * The messy computation for 'pteval' above really simplifies
	 * into the following:
	 *
	 * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_KERNEL
	 */

	movi	a1, -PAGE_OFFSET
	add	a0, a0, a1		# pmdval - PAGE_OFFSET
	extui	a1, a0, 0, PAGE_SHIFT	# ... & PAGE_MASK
	xor	a0, a0, a1


	movi	a1, PAGE_DIRECTORY
	or	a0, a0, a1		# ... | PAGE_DIRECTORY

	rsr	a1, PTEVADDR
	srli	a1, a1, PAGE_SHIFT
	slli	a1, a1, PAGE_SHIFT	# ptevaddr & PAGE_MASK
	addi	a1, a1, DTLB_WAY_PGD	# ... + way_number

	wdtlb	a0, a1
	dsync
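	/* The two instructions above install the mapping for the page-table
	 * page; in C-like terms (an illustrative sketch mirroring the code,
	 * which uses PAGE_DIRECTORY rather than the PAGE_KERNEL of the old
	 * C version):
	 *
	 *	pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_DIRECTORY;
	 *	vpn    = (read_ptevaddr() & PAGE_MASK) + DTLB_WAY_PGD;
	 *	write_dtlb_entry(pteval, vpn);
	 */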
	/* Exit critical section. */

	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore the working registers, and return. */

	l32i	a0, a2, PT_AREG0
	l32i	a1, a2, PT_AREG1
	l32i	a2, a2, PT_DEPC
	xsr	a3, EXCSAVE_1

	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	/* Restore excsave1 and return. */

	rsr	a2, DEPC
	rfe

	/* Return from double exception. */

1:	xsr	a2, DEPC
	esync
	rfde

9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
	j	8b

2:	/* Invalid PGD, default exception handling */

	rsr	a1, DEPC
	xsr	a3, EXCSAVE_1
	s32i	a1, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	mov	a1, a2

	rsr	a2, PS
	bbsi.l	a2, PS_UM_BIT, 1f
	j	_kernel_exception
1:	j	_user_exception


/*
 * StoreProhibitedException
 *
 * Update the pte and invalidate the dtlb mapping for this pte.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	      < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_store_prohibited)

	/* Save a1 and a4. */

	s32i	a1, a2, PT_AREG1
	s32i	a4, a2, PT_AREG4

	GET_CURRENT(a1,a2)
	l32i	a0, a1, TASK_MM		# tsk->mm
	beqz	a0, 9f

8:	rsr	a1, EXCVADDR		# fault address
	_PGD_OFFSET(a0, a1, a4)
	l32i	a0, a0, 0
	//beqi	a0, _PAGE_USER, 2f	# FIXME use _PAGE_INVALID
	beqz	a0, 2f

	_PTE_OFFSET(a0, a1, a4)
	l32i	a4, a0, 0		# read pteval
	movi	a1, _PAGE_VALID | _PAGE_RW
	bnall	a4, a1, 2f

	movi	a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_WRENABLE
	or	a4, a4, a1
	rsr	a1, EXCVADDR
	s32i	a4, a0, 0

	/* We need to flush the cache if we have page coloring. */
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
	dhwb	a0, 0
#endif
	pdtlb	a0, a1
	beqz	a0, 1f
	idtlb	a0		// FIXME do we need this?
	wdtlb	a4, a0
1:
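	/* The fast path just taken, as a C-level sketch (illustrative):
	 *
	 *	pte = *ptep;
	 *	if ((pte & (_PAGE_VALID | _PAGE_RW)) != (_PAGE_VALID | _PAGE_RW))
	 *		goto slow_path;		// let the C fault handler decide
	 *	*ptep = pte | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_WRENABLE;
	 *	// then refresh the DTLB entry for excvaddr with the new pte
	 */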
	/* Exit critical section. */

	movi	a0, 0
	s32i	a0, a3, EXC_TABLE_FIXUP

	/* Restore the working registers, and return. */

	l32i	a4, a2, PT_AREG4
	l32i	a1, a2, PT_AREG1
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_DEPC

	/* Restore excsave1 and a3. */

	xsr	a3, EXCSAVE_1
	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

	rsr	a2, DEPC
	rfe

	/* Double exception. Restore FIXUP handler and return. */

1:	xsr	a2, DEPC
	esync
	rfde

9:	l32i	a0, a1, TASK_ACTIVE_MM	# unlikely case mm == 0
	j	8b

2:	/* If there was a problem, handle fault in C */

	rsr	a4, DEPC	# still holds a2
	xsr	a3, EXCSAVE_1
	s32i	a4, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3
	l32i	a4, a2, PT_AREG4
	mov	a1, a2

	rsr	a2, PS
	bbsi.l	a2, PS_UM_BIT, 1f
	j	_kernel_exception
1:	j	_user_exception


#if XCHAL_EXTRA_SA_SIZE

#warning fast_coprocessor untested

/*
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	      < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_coprocessor_double)
	wsr	a0, EXCSAVE_1
	movi	a0, unrecoverable_exception
	callx0	a0

ENTRY(fast_coprocessor)

	/* Fatal if we are in a double exception. */

	l32i	a0, a2, PT_DEPC
	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_coprocessor_double

	/* Save registers a1, a3, a4, a5, and SAR */

	xsr	a3, EXCSAVE_1
	s32i	a3, a2, PT_AREG3
	rsr	a3, SAR
	s32i	a4, a2, PT_AREG4
	s32i	a1, a2, PT_AREG1
	s32i	a5, a2, PT_AREG5
	s32i	a3, a2, PT_SAR
	mov	a1, a2

	/* Currently, the HAL macros only guarantee saving a0 and a1.
	 * These can and will be refined in the future, but for now,
	 * just save the remaining registers of a2...a15.
	 */
	s32i	a6, a1, PT_AREG6
	s32i	a7, a1, PT_AREG7
	s32i	a8, a1, PT_AREG8
	s32i	a9, a1, PT_AREG9
	s32i	a10, a1, PT_AREG10
	s32i	a11, a1, PT_AREG11
	s32i	a12, a1, PT_AREG12
	s32i	a13, a1, PT_AREG13
	s32i	a14, a1, PT_AREG14
	s32i	a15, a1, PT_AREG15

	/* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */

	rsr	a0, EXCCAUSE
	addi	a3, a0, -XCHAL_EXCCAUSE_COPROCESSOR0_DISABLED

	/* Set corresponding CPENABLE bit */

	movi	a4, 1
	ssl	a3			# SAR: 32 - coprocessor_number
	rsr	a5, CPENABLE
	sll	a4, a4
	or	a4, a5, a4
	wsr	a4, CPENABLE
	rsync
	movi	a5, coprocessor_info	# list of owner and offset into cp_save
	addx8	a0, a4, a5		# entry for CP

	bne	a4, a5, .Lload		# bit wasn't set before, cp not in use

	/* Now compare the current task with the owner of the coprocessor.
	 * If they are the same, there is no reason to save or restore any
	 * coprocessor state.  Having already enabled the coprocessor,
	 * branch ahead to return.
	 */
	GET_CURRENT(a5,a1)
	l32i	a4, a0, COPROCESSOR_INFO_OWNER	# a4: current owner for this CP
	beq	a4, a5, .Ldone

	/* Find location to dump current coprocessor state:
	 *  task_struct->task_cp_save_offset + coprocessor_offset[coprocessor]
	 *
	 * Note: a0 pointer to the entry in the coprocessor owner table,
	 *	 a3 coprocessor number,
	 *	 a4 current owner of coprocessor.
	 */
	l32i	a5, a0, COPROCESSOR_INFO_OFFSET
	addi	a2, a4, THREAD_CP_SAVE
	add	a2, a2, a5

	/* Store current coprocessor states. (a5 still has CP number) */

	xchal_cpi_store_funcbody

	/* The macro might have destroyed a3 (coprocessor number), but
	 * SAR still has 32 - coprocessor_number!
	 */
	movi	a3, 32
	rsr	a4, SAR
	sub	a3, a3, a4

.Lload:	/* A new task now owns the coprocessor. Save its TCB pointer into
	 * the coprocessor owner table.
	 *
	 * Note: a0 pointer to the entry in the coprocessor owner table,
	 *	 a3 coprocessor number.
	 */
	GET_CURRENT(a4,a1)
	s32i	a4, a0, 0

	/* Find location from where to restore the current coprocessor state.*/

	l32i	a5, a0, COPROCESSOR_INFO_OFFSET
	addi	a2, a4, THREAD_CP_SAVE
	add	a2, a2, a5

	xchal_cpi_load_funcbody

	/* We must assume that the xchal_cpi_load_funcbody macro destroyed
	 * registers a2..a15.
	 */

.Ldone:	l32i	a15, a1, PT_AREG15
	l32i	a14, a1, PT_AREG14
	l32i	a13, a1, PT_AREG13
	l32i	a12, a1, PT_AREG12
	l32i	a11, a1, PT_AREG11
	l32i	a10, a1, PT_AREG10
	l32i	a9, a1, PT_AREG9
	l32i	a8, a1, PT_AREG8
	l32i	a7, a1, PT_AREG7
	l32i	a6, a1, PT_AREG6
	l32i	a5, a1, PT_AREG5
	l32i	a4, a1, PT_AREG4
	l32i	a3, a1, PT_AREG3
	l32i	a2, a1, PT_AREG2
	l32i	a0, a1, PT_AREG0
	l32i	a1, a1, PT_AREG1

	rfe

#endif /* XCHAL_EXTRA_SA_SIZE */

/*
 * System Calls.
 *
 * void system_call (struct pt_regs* regs, int exccause)
 *                                   a2        a3
 */

ENTRY(system_call)
	entry	a1, 32

	/* regs->syscall = regs->areg[2] */

	l32i	a3, a2, PT_AREG2
	mov	a6, a2
	movi	a4, do_syscall_trace_enter
	s32i	a3, a2, PT_SYSCALL
	callx4	a4

	/* syscall = sys_call_table[syscall_nr] */

	movi	a4, sys_call_table;
	movi	a5, __NR_syscall_count
	movi	a6, -ENOSYS
	bgeu	a3, a5, 1f

	addx4	a4, a3, a4
	l32i	a4, a4, 0
	movi	a5, sys_ni_syscall;
	beq	a4, a5, 1f

	/* Load args: arg0 - arg5 are passed via regs. */

	l32i	a6, a2, PT_AREG6
	l32i	a7, a2, PT_AREG3
	l32i	a8, a2, PT_AREG4
	l32i	a9, a2, PT_AREG5
	l32i	a10, a2, PT_AREG8
	l32i	a11, a2, PT_AREG9

	/* Pass one additional argument to the syscall: pt_regs (on stack) */
	s32i	a2, a1, 0

	callx4	a4

1:	/* regs->areg[2] = return_value */

	s32i	a6, a2, PT_AREG2
	movi	a4, do_syscall_trace_leave
	mov	a6, a2
	callx4	a4
	retw


/*
 * Create a kernel thread
 *
 * int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 *           a2                  a2          a3             a4
 */

ENTRY(kernel_thread)
	entry	a1, 16

	mov	a5, a2			# preserve fn over syscall
	mov	a7, a3			# preserve args over syscall

	movi	a3, _CLONE_VM | _CLONE_UNTRACED
	movi	a2, __NR_clone
	or	a6, a4, a3		# arg0: flags
	mov	a3, a1			# arg1: sp
	syscall

	beq	a3, a1, 1f		# branch if parent
	mov	a6, a7			# args
	callx4	a5			# fn(args)

	movi	a2, __NR_exit
	syscall				# return value of fn(args) still in a6

1:	retw

/*
 * Do a system call from kernel instead of calling sys_execve, so we end up
 * with proper pt_regs.
 *
 * int kernel_execve(const char *fname, char *const argv[], char *const envp[])
 *            a2                   a2                  a3                  a4
 */

ENTRY(kernel_execve)
	entry	a1, 16
	mov	a6, a2			# arg0 is in a6
	movi	a2, __NR_execve
	syscall

	retw
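/* Both helpers above issue a real syscall from kernel mode so that a
 * proper pt_regs frame exists.  An illustrative C-level reading (the
 * in_child test stands in for the parent/child distinction made on a3
 * above):
 *
 *	int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 *	{
 *		int pid = clone(flags | CLONE_VM | CLONE_UNTRACED, sp);
 *		if (in_child)
 *			exit(fn(arg));		// never returns
 *		return pid;
 *	}
 *
 *	int kernel_execve(const char *fname, char *const argv[],
 *			  char *const envp[])
 *	{
 *		return execve(fname, argv, envp);	// via 'syscall'
 *	}
 */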
/*
 * Task switch.
 *
 * struct task*  _switch_to (struct task* prev, struct task* next)
 *         a2                          a2                    a3
 */

ENTRY(_switch_to)

	entry	a1, 16

	mov	a4, a3			# preserve a3

	s32i	a0, a2, THREAD_RA	# save return address
	s32i	a1, a2, THREAD_SP	# save stack pointer

	/* Disable ints while we manipulate the stack pointer; spill regs. */

	movi	a5, (1 << PS_EXCM_BIT) | LOCKLEVEL
	xsr	a5, PS
	rsr	a3, EXCSAVE_1
	rsync
	s32i	a3, a3, EXC_TABLE_FIXUP	/* enter critical section */

	call0	_spill_registers

	/* Set kernel stack (and leave critical section)
	 * Note: It's safe to set it here. The stack will not be overwritten
	 *	 because the kernel stack will only be loaded again after
	 *	 we return from kernel space.
	 */

	l32i	a0, a4, TASK_THREAD_INFO
	rsr	a3, EXCSAVE_1		# exc_table
	movi	a1, 0
	addi	a0, a0, PT_REGS_OFFSET
	s32i	a1, a3, EXC_TABLE_FIXUP
	s32i	a0, a3, EXC_TABLE_KSTK

	/* restore context of the task that 'next' addresses */

	l32i	a0, a4, THREAD_RA	/* restore return address */
	l32i	a1, a4, THREAD_SP	/* restore stack pointer */

	wsr	a5, PS
	rsync

	retw


ENTRY(ret_from_fork)

	/* void schedule_tail (struct task_struct *prev)
	 * Note: prev is still in a6 (return value from fake call4 frame)
	 */
	movi	a4, schedule_tail
	callx4	a4

	movi	a4, do_syscall_trace_leave
	mov	a6, a1
	callx4	a4

	j	common_exception_return
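/* For reference, the C-level contract of _switch_to above (an
 * illustrative outline, not a literal implementation):
 *
 *	struct task_struct *_switch_to(struct task_struct *prev,
 *				       struct task_struct *next)
 *	{
 *		prev->thread.ra = return_address;
 *		prev->thread.sp = stack_pointer;
 *		spill_all_register_windows();	// call0 _spill_registers
 *		exc_table[KSTK] = next's kernel stack (pt_regs);
 *		return_address  = next->thread.ra;
 *		stack_pointer   = next->thread.sp;
 *		return prev;			// still in a2
 *	}
 */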