/*
 * Handle unaligned accesses by emulation.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 *
 * This file contains the exception handler for the address error exception
 * with the special capability to execute faulting instructions in software.
 * The handler does not try to handle the case when the program counter
 * points to an address not aligned to a word boundary.
 *
 * Accessing data at unaligned addresses is bad practice even on Intel,
 * where only performance suffers.  Much worse is that such code is
 * non-portable.  Since several programs die on MIPS because of alignment
 * problems, I decided to implement this handler anyway, though I originally
 * didn't intend to do this at all for user code.
 *
 * For now I enable fixing of address errors by default to make life easier.
 * However, I intend to disable this at some point in the future, once the
 * alignment problems in user programs have been fixed.  For programmers
 * this is the right way to go.
 *
 * Fixing address errors is a per-process option.  The option is inherited
 * across fork(2) and execve(2) calls.  If you really want to use the
 * option in your user programs - I strongly discourage relying on the
 * software emulation - use the following code in your userland stuff:
 *
 * #include <sys/sysmips.h>
 *
 * ...
 * sysmips(MIPS_FIXADE, x);
 * ...
 *
 * An argument x of 0 disables the software emulation; any non-zero value
 * enables it.
 *
 * Below is a little program to play around with this feature.
 *
 * #include <stdio.h>
 * #include <stdlib.h>
 * #include <sys/sysmips.h>
 *
 * struct foo {
 *         unsigned char bar[8];
 * };
 *
 * int main(int argc, char *argv[])
 * {
 *         struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
 *         unsigned int *p = (unsigned int *) (x.bar + 3);
 *         int i;
 *
 *         if (argc > 1)
 *                 sysmips(MIPS_FIXADE, atoi(argv[1]));
 *
 *         printf("*p = %08x\n", *p);
 *
 *         *p = 0xdeadface;
 *
 *         for (i = 0; i <= 7; i++)
 *                 printf("%02x ", x.bar[i]);
 *         printf("\n");
 *
 *         return 0;
 * }
 *
 * Coprocessor loads are not supported; I think this case is unimportant
 * in practice.
 *
 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
 *       exception for the R6000.
 *       A store crossing a page boundary might be executed only partially.
 *       Undo the partial store in this case.
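 *
 * In outline, the handler below fetches the faulting instruction from the
 * EPC, decodes its opcode, redoes the access with byte or partial-word
 * instructions wrapped in exception-table fixups, and then advances the
 * EPC past the faulting instruction (taking branch delay slots into
 * account via compute_return_epc()).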
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/byteorder.h>
#include <asm/inst.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#define STR(x)	__STR(x)
#define __STR(x)	#x

enum {
	UNALIGNED_ACTION_QUIET,
	UNALIGNED_ACTION_SIGNAL,
	UNALIGNED_ACTION_SHOW,
};
#ifdef CONFIG_DEBUG_FS
static u32 unaligned_instructions;
static u32 unaligned_action;
#else
#define unaligned_action UNALIGNED_ACTION_QUIET
#endif
extern void show_registers(struct pt_regs *regs);

static void emulate_load_store_insn(struct pt_regs *regs,
	void __user *addr, unsigned int __user *pc)
{
	union mips_instruction insn;
	unsigned long value;
	unsigned int res;

	regs->regs[0] = 0;

	/*
	 * This load never faults.
	 */
	__get_user(insn.word, pc);

	switch (insn.i_format.opcode) {
	/*
	 * These are instructions that a compiler doesn't generate.  We can
	 * therefore assume that the code is MIPS-aware and really buggy.
	 * Emulating these instructions would break their semantics anyway.
	 */
	case ll_op:
	case lld_op:
	case sc_op:
	case scd_op:

	/*
	 * For these instructions the only way to create an address
	 * error is an attempted access to kernel/supervisor address
	 * space.
	 */
	case ldl_op:
	case ldr_op:
	case lwl_op:
	case lwr_op:
	case sdl_op:
	case sdr_op:
	case swl_op:
	case swr_op:
	case lb_op:
	case lbu_op:
	case sb_op:
		goto sigbus;

	/*
	 * The remaining opcodes are the ones that are really of interest.
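	 *
	 * Each of them follows the same pattern: check the access with
	 * access_ok(), perform the transfer with endian-aware byte or
	 * partial-word instructions in inline assembly, and wire the
	 * accesses into __ex_table so that a fault lands in a .fixup stub
	 * that sets res to -EFAULT instead of oopsing.  On success,
	 * compute_return_epc() advances the EPC (honouring branch delay
	 * slots) before the result is written back to regs->regs[rt].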
	 */
	case lh_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		__asm__ __volatile__ (".set\tnoat\n"
#ifdef __BIG_ENDIAN
			"1:\tlb\t%0, 0(%2)\n"
			"2:\tlbu\t$1, 1(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tlb\t%0, 1(%2)\n"
			"2:\tlbu\t$1, 0(%2)\n\t"
#endif
			"sll\t%0, 0x8\n\t"
			"or\t%0, $1\n\t"
			"li\t%1, 0\n"
			"3:\t.set\tat\n\t"
			".section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%1, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=&r" (value), "=r" (res)
			: "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lw_op:
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tlwl\t%0, (%2)\n"
			"2:\tlwr\t%0, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tlwl\t%0, 3(%2)\n"
			"2:\tlwr\t%0, (%2)\n\t"
#endif
			"li\t%1, 0\n"
			"3:\t.section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%1, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=&r" (value), "=r" (res)
			: "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lhu_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		__asm__ __volatile__ (
			".set\tnoat\n"
#ifdef __BIG_ENDIAN
			"1:\tlbu\t%0, 0(%2)\n"
			"2:\tlbu\t$1, 1(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tlbu\t%0, 1(%2)\n"
			"2:\tlbu\t$1, 0(%2)\n\t"
#endif
			"sll\t%0, 0x8\n\t"
			"or\t%0, $1\n\t"
			"li\t%1, 0\n"
			"3:\t.set\tat\n\t"
			".section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%1, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=&r" (value), "=r" (res)
			: "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
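		 *
		 * lwu loads a 32-bit value with zero extension, so after the
		 * lwl/lwr pair below, the dsll/dsrl by 32 clears the upper
		 * half of the 64-bit destination register.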
		 */
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tlwl\t%0, (%2)\n"
			"2:\tlwr\t%0, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tlwl\t%0, 3(%2)\n"
			"2:\tlwr\t%0, (%2)\n\t"
#endif
			"dsll\t%0, %0, 32\n\t"
			"dsrl\t%0, %0, 32\n\t"
			"li\t%1, 0\n"
			"3:\t.section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%1, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=&r" (value), "=r" (res)
			: "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case ld_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 8))
			goto sigbus;

		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tldl\t%0, (%2)\n"
			"2:\tldr\t%0, 7(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tldl\t%0, 7(%2)\n"
			"2:\tldr\t%0, (%2)\n\t"
#endif
			"li\t%1, 0\n"
			"3:\t.section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%1, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=&r" (value), "=r" (res)
			: "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case sh_op:
		if (!access_ok(VERIFY_WRITE, addr, 2))
			goto sigbus;

		value = regs->regs[insn.i_format.rt];
		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			".set\tnoat\n"
			"1:\tsb\t%1, 1(%2)\n\t"
			"srl\t$1, %1, 0x8\n"
			"2:\tsb\t$1, 0(%2)\n\t"
			".set\tat\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			".set\tnoat\n"
			"1:\tsb\t%1, 0(%2)\n\t"
			"srl\t$1, %1, 0x8\n"
			"2:\tsb\t$1, 1(%2)\n\t"
			".set\tat\n\t"
#endif
			"li\t%0, 0\n"
			"3:\n\t"
			".section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%0, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=r" (res)
			: "r" (value), "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		break;

	case sw_op:
		if (!access_ok(VERIFY_WRITE, addr, 4))
			goto sigbus;

		value = regs->regs[insn.i_format.rt];
		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tswl\t%1, (%2)\n"
			"2:\tswr\t%1, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tswl\t%1, 3(%2)\n"
			"2:\tswr\t%1, (%2)\n\t"
#endif
			"li\t%0, 0\n"
			"3:\n\t"
			".section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%0, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=r" (res)
			: "r" (value), "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		break;

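	/*
	 * Note: as flagged in the TODO at the top of this file, an emulated
	 * store that crosses a page boundary may fault after part of it has
	 * already been written; the partial store is not undone here.
	 */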
	case sd_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_WRITE, addr, 8))
			goto sigbus;

		value = regs->regs[insn.i_format.rt];
		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tsdl\t%1, (%2)\n"
			"2:\tsdr\t%1, 7(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tsdl\t%1, 7(%2)\n"
			"2:\tsdr\t%1, (%2)\n\t"
#endif
			"li\t%0, 0\n"
			"3:\n\t"
			".section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%0, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=r" (res)
			: "r" (value), "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
		/*
		 * I herewith declare: this does not happen.  So send SIGBUS.
		 */
		goto sigbus;

	case lwc2_op:
	case ldc2_op:
	case swc2_op:
	case sdc2_op:
		/*
		 * These are the coprocessor 2 load/stores.  The current
		 * implementations don't use cp2 and cp2 should always be
		 * disabled in c0_status.  So send SIGILL.
		 * (No longer true: the Sony PlayStation uses cp2 for
		 * 3D matrix operations.  Dunno if that thingy has an MMU ...)
		 */
	default:
		/*
		 * Pheeee...  We encountered a yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}

#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif

	return;

fault:
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	send_sig(SIGSEGV, current, 1);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	send_sig(SIGBUS, current, 1);

	return;

sigill:
	die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs);
	send_sig(SIGILL, current, 1);
}

asmlinkage void do_ade(struct pt_regs *regs)
{
	extern int do_dsemulret(struct pt_regs *);
	unsigned int __user *pc;
	mm_segment_t seg;

	/*
	 * Address errors may be deliberately induced by the FPU emulator to
	 * retake control of the CPU after executing the instruction in the
	 * delay slot of an emulated branch.
	 */
	/* Terminate if exception was recognized as a delay slot return */
	if (do_dsemulret(regs))
		return;

	/* Otherwise handle as normal */

	/*
	 * Did we catch a fault trying to load an instruction?
	 * Or are we running in MIPS16 mode?
	 */
	if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1))
		goto sigbus;

	pc = (unsigned int __user *) exception_epc(regs);
	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
		goto sigbus;
	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
		goto sigbus;
	else if (unaligned_action == UNALIGNED_ACTION_SHOW)
		show_registers(regs);

	/*
	 * Do branch emulation only if we didn't forward the exception.
	 * This is all rather ugly ...
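	 *
	 * emulate_load_store_insn() accesses the faulting address with
	 * access_ok()/__get_user(), so for a fault taken in kernel mode the
	 * address limit is temporarily raised to KERNEL_DS and restored
	 * once the emulation is done.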
	 */
	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);
	emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
	set_fs(seg);

	return;

sigbus:
	die_if_kernel("Kernel unaligned instruction access", regs);
	force_sig(SIGBUS, current);

	/*
	 * XXX On return from the signal handler we should advance the epc
	 */
}

#ifdef CONFIG_DEBUG_FS
extern struct dentry *mips_debugfs_dir;
static int __init debugfs_unaligned(void)
{
	struct dentry *d;

	if (!mips_debugfs_dir)
		return -ENODEV;
	d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
			       mips_debugfs_dir, &unaligned_instructions);
	if (IS_ERR(d))
		return PTR_ERR(d);
	d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
			       mips_debugfs_dir, &unaligned_action);
	if (IS_ERR(d))
		return PTR_ERR(d);
	return 0;
}
__initcall(debugfs_unaligned);
#endif
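
/*
 * A minimal usage sketch for the knobs above, assuming debugfs is mounted
 * at /sys/kernel/debug and mips_debugfs_dir is the "mips" directory there:
 *
 *   cat /sys/kernel/debug/mips/unaligned_instructions
 *   echo 0 > /sys/kernel/debug/mips/unaligned_action   # UNALIGNED_ACTION_QUIET
 *   echo 1 > /sys/kernel/debug/mips/unaligned_action   # UNALIGNED_ACTION_SIGNAL
 *   echo 2 > /sys/kernel/debug/mips/unaligned_action   # UNALIGNED_ACTION_SHOW
 */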