/*
 * unaligned.c: Unaligned load/store trap handling with special
 *              cases for the kernel to do them more quickly.
 *
 * Copyright (C) 1996,2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/asi.h>
#include <asm/ptrace.h>
#include <asm/pstate.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <asm/fpumacro.h>
#include <asm/cacheflush.h>
#include <asm/setup.h>

#include "entry.h"
#include "kernel.h"

enum direction {
        load,    /* ld, ldd, lduh, ldsh */
        store,   /* st, std, sth, stb */
        both,    /* Swap, ldstub, cas, ... */
        fpld,
        fpst,
        invalid,
};

static inline enum direction decode_direction(unsigned int insn)
{
        unsigned long tmp = (insn >> 21) & 1;

        if (!tmp)
                return load;
        else {
                switch ((insn>>19)&0xf) {
                case 15: /* swap* */
                        return both;
                default:
                        return store;
                }
        }
}

/* 16 = double-word, 8 = extended-word (ldx/stx), 4 = word, 2 = half-word */
static inline int decode_access_size(struct pt_regs *regs, unsigned int insn)
{
        unsigned int tmp;

        tmp = ((insn >> 19) & 0xf);
        if (tmp == 11 || tmp == 14) /* ldx/stx */
                return 8;
        tmp &= 3;
        if (!tmp)
                return 4;
        else if (tmp == 3)
                return 16;      /* ldd/std - Although it is actually 8 */
        else if (tmp == 2)
                return 2;
        else {
                printk("Impossible unaligned trap. insn=%08x\n", insn);
                die_if_kernel("Byte sized unaligned access?!?!", regs);

                /* GCC should never warn that control reaches the end
                 * of this function without returning a value because
                 * die_if_kernel() is marked with attribute 'noreturn'.
                 * Alas, some versions do...
                 */
                return 0;
        }
}

static inline int decode_asi(unsigned int insn, struct pt_regs *regs)
{
        if (insn & 0x800000) {
                if (insn & 0x2000)
                        return (unsigned char)(regs->tstate >> 24);     /* %asi */
                else
                        return (unsigned char)(insn >> 5);      /* imm_asi */
        } else
                return ASI_P;
}

/* 0x400000 = signed, 0 = unsigned */
static inline int decode_signedness(unsigned int insn)
{
        return (insn & 0x400000);
}

static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
                                       unsigned int rd, int from_kernel)
{
        if (rs2 >= 16 || rs1 >= 16 || rd >= 16) {
                if (from_kernel != 0)
                        __asm__ __volatile__("flushw");
                else
                        flushw_user();
        }
}

static inline long sign_extend_imm13(long imm)
{
        return imm << 51 >> 51;
}

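/* A worked example of the shift trick above (illustrative only; it
 * assumes the 64-bit long of sparc64 and gcc's arithmetic right shift
 * of signed values): simm13 occupies bits 12:0, so shifting left by
 * 51 moves bit 12 into the sign position and the shift back
 * replicates it:
 *
 *      sign_extend_imm13(0x0fff) ==  4095
 *      sign_extend_imm13(0x1000) == -4096
 *      sign_extend_imm13(0x1fff) ==    -1
 *
 * Callers pass the whole instruction word; everything above bit 12 is
 * shifted out, so no prior masking is needed.
 */
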
static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
        unsigned long value, fp;

        if (reg < 16)
                return (!reg ? 0 : regs->u_regs[reg]);

        fp = regs->u_regs[UREG_FP];

        if (regs->tstate & TSTATE_PRIV) {
                struct reg_window *win;
                win = (struct reg_window *)(fp + STACK_BIAS);
                value = win->locals[reg - 16];
        } else if (!test_thread_64bit_stack(fp)) {
                struct reg_window32 __user *win32;
                win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
                get_user(value, &win32->locals[reg - 16]);
        } else {
                struct reg_window __user *win;
                win = (struct reg_window __user *)(fp + STACK_BIAS);
                get_user(value, &win->locals[reg - 16]);
        }
        return value;
}

static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
        unsigned long fp;

        if (reg < 16)
                return &regs->u_regs[reg];

        fp = regs->u_regs[UREG_FP];

        if (regs->tstate & TSTATE_PRIV) {
                struct reg_window *win;
                win = (struct reg_window *)(fp + STACK_BIAS);
                return &win->locals[reg - 16];
        } else if (!test_thread_64bit_stack(fp)) {
                struct reg_window32 *win32;
                win32 = (struct reg_window32 *)((unsigned long)((u32)fp));
                return (unsigned long *)&win32->locals[reg - 16];
        } else {
                struct reg_window *win;
                win = (struct reg_window *)(fp + STACK_BIAS);
                return &win->locals[reg - 16];
        }
}

unsigned long compute_effective_address(struct pt_regs *regs,
                                        unsigned int insn, unsigned int rd)
{
        int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
        unsigned int rs1 = (insn >> 14) & 0x1f;
        unsigned int rs2 = insn & 0x1f;
        unsigned long addr;

        if (insn & 0x2000) {
                /* Immediate. */
                maybe_flush_windows(rs1, 0, rd, from_kernel);
                addr = (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
        } else {
                /* Register. */
                maybe_flush_windows(rs1, rs2, rd, from_kernel);
                addr = (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
        }

        if (!from_kernel && test_thread_flag(TIF_32BIT))
                addr &= 0xffffffff;

        return addr;
}

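/* Field layout assumed by the shifts above (SPARC V9 format 3,
 * summarized here for reference):
 *
 *      bits 29:25      rd
 *      bits 24:19      op3
 *      bits 18:14      rs1
 *      bit  13         i (1 = immediate, 0 = register)
 *      bits 12:0       simm13 (when i = 1)
 *      bits  4:0       rs2 (when i = 0)
 *
 * So for "ldx [%g1 + 8], %g2" the handler computes R[%g1] + 8, and
 * for "ldx [%g1 + %g2], %g3" it computes R[%g1] + R[%g2], truncated
 * to 32 bits for a 32-bit user task.
 */
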
/* This is just to make gcc think die_if_kernel does return... */
static void __used unaligned_panic(char *str, struct pt_regs *regs)
{
        die_if_kernel(str, regs);
}

extern int do_int_load(unsigned long *dest_reg, int size,
                       unsigned long *saddr, int is_signed, int asi);

extern int __do_int_store(unsigned long *dst_addr, int size,
                          unsigned long src_val, int asi);

static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
                               struct pt_regs *regs, int asi, int orig_asi)
{
        unsigned long zero = 0;
        unsigned long *src_val_p = &zero;
        unsigned long src_val;

        if (size == 16) {
                size = 8;
                zero = (((long)(reg_num ?
                        (unsigned int)fetch_reg(reg_num, regs) : 0)) << 32) |
                        (unsigned int)fetch_reg(reg_num + 1, regs);
        } else if (reg_num) {
                src_val_p = fetch_reg_addr(reg_num, regs);
        }
        src_val = *src_val_p;
        if (unlikely(asi != orig_asi)) {
                switch (size) {
                case 2:
                        src_val = swab16(src_val);
                        break;
                case 4:
                        src_val = swab32(src_val);
                        break;
                case 8:
                        src_val = swab64(src_val);
                        break;
                case 16:
                default:
                        BUG();
                        break;
                }
        }
        return __do_int_store(dst_addr, size, src_val, asi);
}

static inline void advance(struct pt_regs *regs)
{
        regs->tpc = regs->tnpc;
        regs->tnpc += 4;
        if (test_thread_flag(TIF_32BIT)) {
                regs->tpc &= 0xffffffff;
                regs->tnpc &= 0xffffffff;
        }
}

static inline int floating_point_load_or_store_p(unsigned int insn)
{
        return (insn >> 24) & 1;
}

static inline int ok_for_kernel(unsigned int insn)
{
        return !floating_point_load_or_store_p(insn);
}

static void kernel_mna_trap_fault(int fixup_tstate_asi)
{
        struct pt_regs *regs = current_thread_info()->kern_una_regs;
        unsigned int insn = current_thread_info()->kern_una_insn;
        const struct exception_table_entry *entry;

        entry = search_exception_tables(regs->tpc);
        if (!entry) {
                unsigned long address;

                address = compute_effective_address(regs, insn,
                                                    ((insn >> 25) & 0x1f));
                if (address < PAGE_SIZE) {
                        printk(KERN_ALERT "Unable to handle kernel NULL "
                               "pointer dereference in mna handler");
                } else
                        printk(KERN_ALERT "Unable to handle kernel paging "
                               "request in mna handler");
                printk(KERN_ALERT " at virtual address %016lx\n", address);
                printk(KERN_ALERT "current->{active_,}mm->context = %016lx\n",
                       (current->mm ? CTX_HWBITS(current->mm->context) :
                        CTX_HWBITS(current->active_mm->context)));
                printk(KERN_ALERT "current->{active_,}mm->pgd = %016lx\n",
                       (current->mm ? (unsigned long) current->mm->pgd :
                        (unsigned long) current->active_mm->pgd));
                die_if_kernel("Oops", regs);
                /* Not reached */
        }
        regs->tpc = entry->fixup;
        regs->tnpc = regs->tpc + 4;

        if (fixup_tstate_asi) {
                regs->tstate &= ~TSTATE_ASI;
                regs->tstate |= (ASI_AIUS << 24UL);
        }
}

static void log_unaligned(struct pt_regs *regs)
{
        static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

        if (__ratelimit(&ratelimit)) {
                printk("Kernel unaligned access at TPC[%lx] %pS\n",
                       regs->tpc, (void *) regs->tpc);
        }
}

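/* A summary of the kernel MNA path below (descriptive, not
 * normative): decode the direction, access size and ASI of the
 * trapping instruction.  A fault tagged ASI_AIUS came from a
 * get_user()/put_user() on an unaligned user pointer and is simply
 * failed through the exception table.  Anything else is emulated,
 * with the little-endian ASIs handled by performing a big-endian
 * access and byte-swapping the value afterwards.
 */
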
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
        enum direction dir = decode_direction(insn);
        int size = decode_access_size(regs, insn);
        int orig_asi, asi;

        current_thread_info()->kern_una_regs = regs;
        current_thread_info()->kern_una_insn = insn;

        orig_asi = asi = decode_asi(insn, regs);

        /* If this is a {get,put}_user() on an unaligned userspace pointer,
         * just signal a fault and do not log the event.
         */
        if (asi == ASI_AIUS) {
                kernel_mna_trap_fault(0);
                return;
        }

        log_unaligned(regs);

        if (!ok_for_kernel(insn) || dir == both) {
                printk("Unsupported unaligned load/store trap for kernel "
                       "at <%016lx>.\n", regs->tpc);
                unaligned_panic("Kernel does fpu/atomic "
                                "unaligned load/store.", regs);

                kernel_mna_trap_fault(0);
        } else {
                unsigned long addr, *reg_addr;
                int err;

                addr = compute_effective_address(regs, insn,
                                                 ((insn >> 25) & 0x1f));
                perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
                switch (asi) {
                case ASI_NL:
                case ASI_AIUPL:
                case ASI_AIUSL:
                case ASI_PL:
                case ASI_SL:
                case ASI_PNFL:
                case ASI_SNFL:
                        /* Strip the little-endian bit; the access is
                         * done big-endian and byte-swapped below.
                         */
                        asi &= ~0x08;
                        break;
                }
                switch (dir) {
                case load:
                        reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs);
                        err = do_int_load(reg_addr, size,
                                          (unsigned long *) addr,
                                          decode_signedness(insn), asi);
                        if (likely(!err) && unlikely(asi != orig_asi)) {
                                unsigned long val_in = *reg_addr;
                                switch (size) {
                                case 2:
                                        val_in = swab16(val_in);
                                        break;
                                case 4:
                                        val_in = swab32(val_in);
                                        break;
                                case 8:
                                        val_in = swab64(val_in);
                                        break;
                                case 16:
                                default:
                                        BUG();
                                        break;
                                }
                                *reg_addr = val_in;
                        }
                        break;

                case store:
                        err = do_int_store(((insn>>25)&0x1f), size,
                                           (unsigned long *) addr, regs,
                                           asi, orig_asi);
                        break;

                default:
                        panic("Impossible kernel unaligned trap.");
                        /* Not reached... */
                }
                if (unlikely(err))
                        kernel_mna_trap_fault(1);
                else
                        advance(regs);
        }
}

int handle_popc(u32 insn, struct pt_regs *regs)
{
        int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
        int ret, rd = ((insn >> 25) & 0x1f);
        u64 value;

        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
        if (insn & 0x2000) {
                maybe_flush_windows(0, 0, rd, from_kernel);
                value = sign_extend_imm13(insn);
        } else {
                maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
                value = fetch_reg(insn & 0x1f, regs);
        }
        ret = hweight64(value);
        if (rd < 16) {
                if (rd)
                        regs->u_regs[rd] = ret;
        } else {
                unsigned long fp = regs->u_regs[UREG_FP];

                if (!test_thread_64bit_stack(fp)) {
                        struct reg_window32 __user *win32;
                        win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
                        put_user(ret, &win32->locals[rd - 16]);
                } else {
                        struct reg_window __user *win;
                        win = (struct reg_window __user *)(fp + STACK_BIAS);
                        put_user(ret, &win->locals[rd - 16]);
                }
        }
        advance(regs);
        return 1;
}

extern void do_fpother(struct pt_regs *regs);
extern void do_privact(struct pt_regs *regs);
extern void sun4v_data_access_exception(struct pt_regs *regs,
                                        unsigned long addr,
                                        unsigned long type_ctx);

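/* Note on the freg computation used by the FP handlers below: for
 * double and quad FP registers, SPARC V9 encodes the high bit of the
 * register number in the low bit of the rd field, which is what
 *
 *      freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20)
 *
 * undoes: rd = 0b00001 names %f32, rd = 0b00011 names %f34, and so
 * on, yielding the even register numbers 0-62.
 */
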
int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{
        unsigned long addr = compute_effective_address(regs, insn, 0);
        int freg;
        struct fpustate *f = FPUSTATE;
        int asi = decode_asi(insn, regs);
        int flag;

        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

        save_and_clear_fpu();
        current_thread_info()->xfsr[0] &= ~0x1c000;
        if (insn & 0x200000) {
                /* STQ */
                u64 first = 0, second = 0;

                freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
                flag = (freg < 32) ? FPRS_DL : FPRS_DU;
                if (freg & 3) {
                        current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
                        do_fpother(regs);
                        return 0;
                }
                if (current_thread_info()->fpsaved[0] & flag) {
                        first = *(u64 *)&f->regs[freg];
                        second = *(u64 *)&f->regs[freg+2];
                }
                if (asi < 0x80) {
                        do_privact(regs);
                        return 1;
                }
                switch (asi) {
                case ASI_P:
                case ASI_S: break;
                case ASI_PL:
                case ASI_SL:
                {
                        /* Need to convert endians */
                        u64 tmp = __swab64p(&first);

                        first = __swab64p(&second);
                        second = tmp;
                        break;
                }
                default:
                        if (tlb_type == hypervisor)
                                sun4v_data_access_exception(regs, addr, 0);
                        else
                                spitfire_data_access_exception(regs, 0, addr);
                        return 1;
                }
                if (put_user (first >> 32, (u32 __user *)addr) ||
                    __put_user ((u32)first, (u32 __user *)(addr + 4)) ||
                    __put_user (second >> 32, (u32 __user *)(addr + 8)) ||
                    __put_user ((u32)second, (u32 __user *)(addr + 12))) {
                        if (tlb_type == hypervisor)
                                sun4v_data_access_exception(regs, addr, 0);
                        else
                                spitfire_data_access_exception(regs, 0, addr);
                        return 1;
                }
        } else {
                /* LDF, LDDF, LDQF */
                u32 data[4] __attribute__ ((aligned(8)));
                int size, i;
                int err;

                if (asi < 0x80) {
                        do_privact(regs);
                        return 1;
                } else if (asi > ASI_SNFL) {
                        if (tlb_type == hypervisor)
                                sun4v_data_access_exception(regs, addr, 0);
                        else
                                spitfire_data_access_exception(regs, 0, addr);
                        return 1;
                }
                switch (insn & 0x180000) {
                case 0x000000: size = 1; break;
                case 0x100000: size = 4; break;
                default: size = 2; break;
                }
                if (size == 1)
                        freg = (insn >> 25) & 0x1f;
                else
                        freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
                flag = (freg < 32) ? FPRS_DL : FPRS_DU;

                for (i = 0; i < size; i++)
                        data[i] = 0;

                err = get_user (data[0], (u32 __user *) addr);
                if (!err) {
                        for (i = 1; i < size; i++)
                                err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
                }
                if (err && !(asi & 0x2 /* NF */)) {
                        if (tlb_type == hypervisor)
                                sun4v_data_access_exception(regs, addr, 0);
                        else
                                spitfire_data_access_exception(regs, 0, addr);
                        return 1;
                }
                if (asi & 0x8) /* Little */ {
                        u64 tmp;

                        switch (size) {
                        case 1:
                                data[0] = le32_to_cpup(data + 0);
                                break;
                        case 4:
                                tmp = le64_to_cpup((u64 *)(data + 0));
                                *(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 2));
                                *(u64 *)(data + 2) = tmp;
                                break;
                        default:
                                *(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 0));
                                break;
                        }
                }
                if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
                        current_thread_info()->fpsaved[0] = FPRS_FEF;
                        current_thread_info()->gsr[0] = 0;
                }
                if (!(current_thread_info()->fpsaved[0] & flag)) {
                        if (freg < 32)
                                memset(f->regs, 0, 32*sizeof(u32));
                        else
                                memset(f->regs+32, 0, 32*sizeof(u32));
                }
                memcpy(f->regs + freg, data, size * 4);
                current_thread_info()->fpsaved[0] |= flag;
        }
        advance(regs);
        return 1;
}

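/* handle_ld_nf() emulates a non-faulting load (ASI_*NF) whose access
 * would have faulted: the destination register (and the following one
 * for the double-word forms, which the 0x180000 op3 test picks out)
 * is simply zeroed and the instruction skipped.  Writing zero to the
 * %g0 slot is harmless.
 */
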
void handle_ld_nf(u32 insn, struct pt_regs *regs)
{
        int rd = ((insn >> 25) & 0x1f);
        int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
        unsigned long *reg;

        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

        maybe_flush_windows(0, 0, rd, from_kernel);
        reg = fetch_reg_addr(rd, regs);
        if (from_kernel || rd < 16) {
                reg[0] = 0;
                if ((insn & 0x780000) == 0x180000)
                        reg[1] = 0;
        } else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) {
                put_user(0, (int __user *) reg);
                if ((insn & 0x780000) == 0x180000)
                        put_user(0, ((int __user *) reg) + 1);
        } else {
                put_user(0, (unsigned long __user *) reg);
                if ((insn & 0x780000) == 0x180000)
                        put_user(0, (unsigned long __user *) reg + 1);
        }
        advance(regs);
}

void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
        enum ctx_state prev_state = exception_enter();
        unsigned long pc = regs->tpc;
        unsigned long tstate = regs->tstate;
        u32 insn;
        u64 value;
        u8 freg;
        int flag;
        struct fpustate *f = FPUSTATE;

        if (tstate & TSTATE_PRIV)
                die_if_kernel("lddfmna from kernel", regs);
        perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
        if (test_thread_flag(TIF_32BIT))
                pc = (u32)pc;
        if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
                int asi = decode_asi(insn, regs);
                u32 first, second;
                int err;

                if ((asi > ASI_SNFL) ||
                    (asi < ASI_P))
                        goto daex;
                first = second = 0;
                err = get_user(first, (u32 __user *)sfar);
                if (!err)
                        err = get_user(second, (u32 __user *)(sfar + 4));
                if (err) {
                        if (!(asi & 0x2))
                                goto daex;
                        first = second = 0;
                }
                save_and_clear_fpu();
                freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
                value = (((u64)first) << 32) | second;
                if (asi & 0x8) /* Little */
                        value = __swab64p(&value);
                flag = (freg < 32) ? FPRS_DL : FPRS_DU;
                if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
                        current_thread_info()->fpsaved[0] = FPRS_FEF;
                        current_thread_info()->gsr[0] = 0;
                }
                if (!(current_thread_info()->fpsaved[0] & flag)) {
                        if (freg < 32)
                                memset(f->regs, 0, 32*sizeof(u32));
                        else
                                memset(f->regs+32, 0, 32*sizeof(u32));
                }
                *(u64 *)(f->regs + freg) = value;
                current_thread_info()->fpsaved[0] |= flag;
        } else {
daex:
                if (tlb_type == hypervisor)
                        sun4v_data_access_exception(regs, sfar, sfsr);
                else
                        spitfire_data_access_exception(regs, sfsr, sfar);
                goto out;
        }
        advance(regs);
out:
        exception_exit(prev_state);
}

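/* handle_stdfmna() is the mirror image of handle_lddfmna() above:
 * the misaligned 8-byte FP store is emulated as two 4-byte user
 * stores of the register halves, byte-swapped first for the
 * little-endian ASIs.
 */
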
void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
        enum ctx_state prev_state = exception_enter();
        unsigned long pc = regs->tpc;
        unsigned long tstate = regs->tstate;
        u32 insn;
        u64 value;
        u8 freg;
        int flag;
        struct fpustate *f = FPUSTATE;

        if (tstate & TSTATE_PRIV)
                die_if_kernel("stdfmna from kernel", regs);
        perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
        if (test_thread_flag(TIF_32BIT))
                pc = (u32)pc;
        if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
                int asi = decode_asi(insn, regs);
                freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
                value = 0;
                flag = (freg < 32) ? FPRS_DL : FPRS_DU;
                if ((asi > ASI_SNFL) ||
                    (asi < ASI_P))
                        goto daex;
                save_and_clear_fpu();
                if (current_thread_info()->fpsaved[0] & flag)
                        value = *(u64 *)&f->regs[freg];
                switch (asi) {
                case ASI_P:
                case ASI_S: break;
                case ASI_PL:
                case ASI_SL:
                        value = __swab64p(&value); break;
                default: goto daex;
                }
                if (put_user (value >> 32, (u32 __user *) sfar) ||
                    __put_user ((u32)value, (u32 __user *)(sfar + 4)))
                        goto daex;
        } else {
daex:
                if (tlb_type == hypervisor)
                        sun4v_data_access_exception(regs, sfar, sfsr);
                else
                        spitfire_data_access_exception(regs, sfsr, sfar);
                goto out;
        }
        advance(regs);
out:
        exception_exit(prev_state);
}