/*
 * Handle unaligned accesses by emulation.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 *
 * This file contains the exception handler for address error exceptions with
 * the special capability to execute faulting instructions in software.  The
 * handler does not try to handle the case when the program counter points
 * to an address not aligned to a word boundary.
 *
 * Putting data at unaligned addresses is bad practice even on Intel where
 * only the performance is affected.  Much worse is that such code is non-
 * portable.  Because several programs die on MIPS due to alignment problems,
 * I decided to implement this handler anyway, though I originally didn't
 * intend to do this at all for user code.
 *
 * For now I enable fixing of address errors by default to make life easier.
 * I however intend to disable this at some point in the future when the
 * alignment problems with user programs have been fixed.  For programmers
 * this is the right way to go.
 *
 * Fixing address errors is a per-process option.  The option is inherited
 * across fork(2) and execve(2) calls.  If you really want to use the
 * option in your user programs - I strongly discourage the use of the
 * software emulation - use the following code in your userland stuff:
 *
 * #include <sys/sysmips.h>
 *
 * ...
 * sysmips(MIPS_FIXADE, x);
 * ...
 *
 * The argument x is 0 to disable software emulation; any other value
 * enables it.
 *
 * Below is a little program to play around with this feature.
 *
 * #include <stdio.h>
 * #include <stdlib.h>
 * #include <sys/sysmips.h>
 *
 * struct foo {
 *         unsigned char bar[8];
 * };
 *
 * int main(int argc, char *argv[])
 * {
 *         struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
 *         unsigned int *p = (unsigned int *) (x.bar + 3);
 *         int i;
 *
 *         if (argc > 1)
 *                 sysmips(MIPS_FIXADE, atoi(argv[1]));
 *
 *         printf("*p = %08x\n", *p);
 *
 *         *p = 0xdeadface;
 *
 *         for (i = 0; i <= 7; i++)
 *                 printf("%02x ", x.bar[i]);
 *         printf("\n");
 *
 *         return 0;
 * }
 *
 * Coprocessor loads are not supported; I think this case is unimportant
 * in practice.
 *
 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
 *       exception for the R6000.
 *       A store crossing a page boundary might be executed only partially.
 *       Undo the partial store in this case.
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>

#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/byteorder.h>
#include <asm/inst.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#define STR(x)	__STR(x)
#define __STR(x)	#x

#ifdef CONFIG_PROC_FS
unsigned long unaligned_instructions;
#endif

static inline int emulate_load_store_insn(struct pt_regs *regs,
	void __user *addr, unsigned int __user *pc,
	unsigned long **regptr, unsigned long *newvalue)
{
	union mips_instruction insn;
	unsigned long value;
	unsigned int res;

	regs->regs[0] = 0;
	*regptr = NULL;

	/*
	 * This load never faults.
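	 *
	 * The instruction word at the faulting epc is fetched with
	 * __get_user() and decoded through union mips_instruction; only the
	 * opcode field is needed to select the emulation path below.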
	 */
	__get_user(insn.word, pc);

	switch (insn.i_format.opcode) {
	/*
	 * These are instructions that a compiler doesn't generate.  We
	 * can assume therefore that the code is MIPS-aware and
	 * really buggy.  Emulating these instructions would break the
	 * semantics anyway.
	 */
	case ll_op:
	case lld_op:
	case sc_op:
	case scd_op:

	/*
	 * For these instructions the only way to create an address
	 * error is an attempted access to kernel/supervisor address
	 * space.
	 */
	case ldl_op:
	case ldr_op:
	case lwl_op:
	case lwr_op:
	case sdl_op:
	case sdr_op:
	case swl_op:
	case swr_op:
	case lb_op:
	case lbu_op:
	case sb_op:
		goto sigbus;

	/*
	 * The remaining opcodes are the ones that are really of interest.
	 */
	case lh_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		__asm__ __volatile__ (".set\tnoat\n"
#ifdef __BIG_ENDIAN
			"1:\tlb\t%0, 0(%2)\n"
			"2:\tlbu\t$1, 1(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tlb\t%0, 1(%2)\n"
			"2:\tlbu\t$1, 0(%2)\n\t"
#endif
			"sll\t%0, 0x8\n\t"
			"or\t%0, $1\n\t"
			"li\t%1, 0\n"
			"3:\t.set\tat\n\t"
			".section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%1, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=&r" (value), "=r" (res)
			: "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		*newvalue = value;
		*regptr = &regs->regs[insn.i_format.rt];
		break;

	case lw_op:
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tlwl\t%0, (%2)\n"
			"2:\tlwr\t%0, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tlwl\t%0, 3(%2)\n"
			"2:\tlwr\t%0, (%2)\n\t"
#endif
			"li\t%1, 0\n"
			"3:\t.section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%1, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=&r" (value), "=r" (res)
			: "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		*newvalue = value;
		*regptr = &regs->regs[insn.i_format.rt];
		break;

	case lhu_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		__asm__ __volatile__ (
			".set\tnoat\n"
#ifdef __BIG_ENDIAN
			"1:\tlbu\t%0, 0(%2)\n"
			"2:\tlbu\t$1, 1(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tlbu\t%0, 1(%2)\n"
			"2:\tlbu\t$1, 0(%2)\n\t"
#endif
			"sll\t%0, 0x8\n\t"
			"or\t%0, $1\n\t"
			"li\t%1, 0\n"
			"3:\t.set\tat\n\t"
			".section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%1, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=&r" (value), "=r" (res)
			: "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		*newvalue = value;
		*regptr = &regs->regs[insn.i_format.rt];
		break;

	case lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
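		 *
		 * On CONFIG_64BIT kernels the word assembled by lwl/lwr below
		 * is zero-extended with the dsll/dsrl pair to match lwu
		 * semantics.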
		 */
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tlwl\t%0, (%2)\n"
			"2:\tlwr\t%0, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tlwl\t%0, 3(%2)\n"
			"2:\tlwr\t%0, (%2)\n\t"
#endif
			"dsll\t%0, %0, 32\n\t"
			"dsrl\t%0, %0, 32\n\t"
			"li\t%1, 0\n"
			"3:\t.section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%1, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=&r" (value), "=r" (res)
			: "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		*newvalue = value;
		*regptr = &regs->regs[insn.i_format.rt];
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case ld_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 8))
			goto sigbus;

		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tldl\t%0, (%2)\n"
			"2:\tldr\t%0, 7(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tldl\t%0, 7(%2)\n"
			"2:\tldr\t%0, (%2)\n\t"
#endif
			"li\t%1, 0\n"
			"3:\t.section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%1, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=&r" (value), "=r" (res)
			: "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		*newvalue = value;
		*regptr = &regs->regs[insn.i_format.rt];
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

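	/*
	 * The store cases below mirror the loads: the value to be stored is
	 * taken from the rt register and written out with sb or the swl/swr
	 * (sdl/sdr) pairs; a fault in either access is caught by the
	 * __ex_table fixup, which sets res to -EFAULT.
	 */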
	case sh_op:
		if (!access_ok(VERIFY_WRITE, addr, 2))
			goto sigbus;

		value = regs->regs[insn.i_format.rt];
		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			".set\tnoat\n"
			"1:\tsb\t%1, 1(%2)\n\t"
			"srl\t$1, %1, 0x8\n"
			"2:\tsb\t$1, 0(%2)\n\t"
			".set\tat\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			".set\tnoat\n"
			"1:\tsb\t%1, 0(%2)\n\t"
			"srl\t$1, %1, 0x8\n"
			"2:\tsb\t$1, 1(%2)\n\t"
			".set\tat\n\t"
#endif
			"li\t%0, 0\n"
			"3:\n\t"
			".section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%0, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=r" (res)
			: "r" (value), "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		break;

	case sw_op:
		if (!access_ok(VERIFY_WRITE, addr, 4))
			goto sigbus;

		value = regs->regs[insn.i_format.rt];
		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tswl\t%1, (%2)\n"
			"2:\tswr\t%1, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tswl\t%1, 3(%2)\n"
			"2:\tswr\t%1, (%2)\n\t"
#endif
			"li\t%0, 0\n"
			"3:\n\t"
			".section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%0, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=r" (res)
			: "r" (value), "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		break;

	case sd_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_WRITE, addr, 8))
			goto sigbus;

		value = regs->regs[insn.i_format.rt];
		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tsdl\t%1, (%2)\n"
			"2:\tsdr\t%1, 7(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tsdl\t%1, 7(%2)\n"
			"2:\tsdr\t%1, (%2)\n\t"
#endif
			"li\t%0, 0\n"
			"3:\n\t"
			".section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%0, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=r" (res)
			: "r" (value), "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
		/*
		 * I herewith declare: this does not happen.  So send SIGBUS.
		 */
		goto sigbus;

	case lwc2_op:
	case ldc2_op:
	case swc2_op:
	case sdc2_op:
		/*
		 * These are the coprocessor 2 load/stores.  The current
		 * implementations don't use cp2 and cp2 should always be
		 * disabled in c0_status.  So send SIGILL.
		 * (No longer true: the Sony PlayStation uses cp2 for
		 * 3D matrix operations.  Dunno if that thingy has an MMU ...)
		 */
	default:
		/*
		 * Pheeee...  We encountered a yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}

#ifdef CONFIG_PROC_FS
	unaligned_instructions++;
#endif

	return 0;

fault:
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return 1;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	send_sig(SIGSEGV, current, 1);

	return 0;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	send_sig(SIGBUS, current, 1);

	return 0;

sigill:
	die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs);
	send_sig(SIGILL, current, 1);

	return 0;
}

asmlinkage void do_ade(struct pt_regs *regs)
{
	unsigned long *regptr, newval;
	extern int do_dsemulret(struct pt_regs *);
	unsigned int __user *pc;
	mm_segment_t seg;

	/*
	 * Address errors may be deliberately induced by the FPU emulator to
	 * retake control of the CPU after executing the instruction in the
	 * delay slot of an emulated branch.
	 *
	 * Terminate if the exception was recognized as a delay slot return.
	 */
	if (do_dsemulret(regs))
		return;

	/* Otherwise handle as normal */

	/*
	 * Did we catch a fault trying to load an instruction?
	 * Or are we running in MIPS16 mode?
	 */
	if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1))
		goto sigbus;

	pc = (unsigned int __user *) exception_epc(regs);
	if ((current->thread.mflags & MF_FIXADE) == 0)
		goto sigbus;

	/*
	 * Do branch emulation only if we didn't forward the exception.
	 * This is all so very ugly ...
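	 *
	 * If the fault happened in kernel mode, switch to KERNEL_DS so that
	 * the user-space accessors used by the emulation can reach kernel
	 * addresses; the original segment is restored afterwards.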
	 */
	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);
	if (!emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc,
	                             &regptr, &newval)) {
		compute_return_epc(regs);
		/*
		 * Now that the branch is evaluated, update the dest
		 * register if necessary.
		 */
		if (regptr)
			*regptr = newval;
	}
	set_fs(seg);

	return;

sigbus:
	die_if_kernel("Kernel unaligned instruction access", regs);
	force_sig(SIGBUS, current);

	/*
	 * XXX On return from the signal handler we should advance the epc
	 */
}