1 /* 2 * Handle unaligned accesses by emulation. 3 * 4 * This file is subject to the terms and conditions of the GNU General Public 5 * License. See the file "COPYING" in the main directory of this archive 6 * for more details. 7 * 8 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle 9 * Copyright (C) 1999 Silicon Graphics, Inc. 10 * Copyright (C) 2014 Imagination Technologies Ltd. 11 * 12 * This file contains exception handler for address error exception with the 13 * special capability to execute faulting instructions in software. The 14 * handler does not try to handle the case when the program counter points 15 * to an address not aligned to a word boundary. 16 * 17 * Putting data to unaligned addresses is a bad practice even on Intel where 18 * only the performance is affected. Much worse is that such code is non- 19 * portable. Due to several programs that die on MIPS due to alignment 20 * problems I decided to implement this handler anyway though I originally 21 * didn't intend to do this at all for user code. 22 * 23 * For now I enable fixing of address errors by default to make life easier. 24 * I however intend to disable this somewhen in the future when the alignment 25 * problems with user programs have been fixed. For programmers this is the 26 * right way to go. 27 * 28 * Fixing address errors is a per process option. The option is inherited 29 * across fork(2) and execve(2) calls. If you really want to use the 30 * option in your user programs - I discourage the use of the software 31 * emulation strongly - use the following code in your userland stuff: 32 * 33 * #include <sys/sysmips.h> 34 * 35 * ... 36 * sysmips(MIPS_FIXADE, x); 37 * ... 38 * 39 * The argument x is 0 for disabling software emulation, enabled otherwise. 40 * 41 * Below a little program to play around with this feature. 42 * 43 * #include <stdio.h> 44 * #include <sys/sysmips.h> 45 * 46 * struct foo { 47 * unsigned char bar[8]; 48 * }; 49 * 50 * main(int argc, char *argv[]) 51 * { 52 * struct foo x = {0, 1, 2, 3, 4, 5, 6, 7}; 53 * unsigned int *p = (unsigned int *) (x.bar + 3); 54 * int i; 55 * 56 * if (argc > 1) 57 * sysmips(MIPS_FIXADE, atoi(argv[1])); 58 * 59 * printf("*p = %08lx\n", *p); 60 * 61 * *p = 0xdeadface; 62 * 63 * for(i = 0; i <= 7; i++) 64 * printf("%02x ", x.bar[i]); 65 * printf("\n"); 66 * } 67 * 68 * Coprocessor loads are not supported; I think this case is unimportant 69 * in the practice. 70 * 71 * TODO: Handle ndc (attempted store to doubleword in uncached memory) 72 * exception for the R6000. 73 * A store crossing a page boundary might be executed only partially. 74 * Undo the partial store in this case. 75 */ 76 #include <linux/context_tracking.h> 77 #include <linux/mm.h> 78 #include <linux/signal.h> 79 #include <linux/smp.h> 80 #include <linux/sched.h> 81 #include <linux/debugfs.h> 82 #include <linux/perf_event.h> 83 84 #include <asm/asm.h> 85 #include <asm/branch.h> 86 #include <asm/byteorder.h> 87 #include <asm/cop2.h> 88 #include <asm/debug.h> 89 #include <asm/fpu.h> 90 #include <asm/fpu_emulator.h> 91 #include <asm/inst.h> 92 #include <linux/uaccess.h> 93 94 #define STR(x) __STR(x) 95 #define __STR(x) #x 96 97 enum { 98 UNALIGNED_ACTION_QUIET, 99 UNALIGNED_ACTION_SIGNAL, 100 UNALIGNED_ACTION_SHOW, 101 }; 102 #ifdef CONFIG_DEBUG_FS 103 static u32 unaligned_instructions; 104 static u32 unaligned_action; 105 #else 106 #define unaligned_action UNALIGNED_ACTION_QUIET 107 #endif 108 extern void show_registers(struct pt_regs *regs); 109 110 #ifdef __BIG_ENDIAN 111 #define _LoadHW(addr, value, res, type) \ 112 do { \ 113 __asm__ __volatile__ (".set\tnoat\n" \ 114 "1:\t"type##_lb("%0", "0(%2)")"\n" \ 115 "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\ 116 "sll\t%0, 0x8\n\t" \ 117 "or\t%0, $1\n\t" \ 118 "li\t%1, 0\n" \ 119 "3:\t.set\tat\n\t" \ 120 ".insn\n\t" \ 121 ".section\t.fixup,\"ax\"\n\t" \ 122 "4:\tli\t%1, %3\n\t" \ 123 "j\t3b\n\t" \ 124 ".previous\n\t" \ 125 ".section\t__ex_table,\"a\"\n\t" \ 126 STR(PTR)"\t1b, 4b\n\t" \ 127 STR(PTR)"\t2b, 4b\n\t" \ 128 ".previous" \ 129 : "=&r" (value), "=r" (res) \ 130 : "r" (addr), "i" (-EFAULT)); \ 131 } while(0) 132 133 #ifndef CONFIG_CPU_MIPSR6 134 #define _LoadW(addr, value, res, type) \ 135 do { \ 136 __asm__ __volatile__ ( \ 137 "1:\t"type##_lwl("%0", "(%2)")"\n" \ 138 "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\ 139 "li\t%1, 0\n" \ 140 "3:\n\t" \ 141 ".insn\n\t" \ 142 ".section\t.fixup,\"ax\"\n\t" \ 143 "4:\tli\t%1, %3\n\t" \ 144 "j\t3b\n\t" \ 145 ".previous\n\t" \ 146 ".section\t__ex_table,\"a\"\n\t" \ 147 STR(PTR)"\t1b, 4b\n\t" \ 148 STR(PTR)"\t2b, 4b\n\t" \ 149 ".previous" \ 150 : "=&r" (value), "=r" (res) \ 151 : "r" (addr), "i" (-EFAULT)); \ 152 } while(0) 153 154 #else 155 /* MIPSR6 has no lwl instruction */ 156 #define _LoadW(addr, value, res, type) \ 157 do { \ 158 __asm__ __volatile__ ( \ 159 ".set\tpush\n" \ 160 ".set\tnoat\n\t" \ 161 "1:"type##_lb("%0", "0(%2)")"\n\t" \ 162 "2:"type##_lbu("$1", "1(%2)")"\n\t" \ 163 "sll\t%0, 0x8\n\t" \ 164 "or\t%0, $1\n\t" \ 165 "3:"type##_lbu("$1", "2(%2)")"\n\t" \ 166 "sll\t%0, 0x8\n\t" \ 167 "or\t%0, $1\n\t" \ 168 "4:"type##_lbu("$1", "3(%2)")"\n\t" \ 169 "sll\t%0, 0x8\n\t" \ 170 "or\t%0, $1\n\t" \ 171 "li\t%1, 0\n" \ 172 ".set\tpop\n" \ 173 "10:\n\t" \ 174 ".insn\n\t" \ 175 ".section\t.fixup,\"ax\"\n\t" \ 176 "11:\tli\t%1, %3\n\t" \ 177 "j\t10b\n\t" \ 178 ".previous\n\t" \ 179 ".section\t__ex_table,\"a\"\n\t" \ 180 STR(PTR)"\t1b, 11b\n\t" \ 181 STR(PTR)"\t2b, 11b\n\t" \ 182 STR(PTR)"\t3b, 11b\n\t" \ 183 STR(PTR)"\t4b, 11b\n\t" \ 184 ".previous" \ 185 : "=&r" (value), "=r" (res) \ 186 : "r" (addr), "i" (-EFAULT)); \ 187 } while(0) 188 189 #endif /* CONFIG_CPU_MIPSR6 */ 190 191 #define _LoadHWU(addr, value, res, type) \ 192 do { \ 193 __asm__ __volatile__ ( \ 194 ".set\tnoat\n" \ 195 "1:\t"type##_lbu("%0", "0(%2)")"\n" \ 196 "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\ 197 "sll\t%0, 0x8\n\t" \ 198 "or\t%0, $1\n\t" \ 199 "li\t%1, 0\n" \ 200 "3:\n\t" \ 201 ".insn\n\t" \ 202 ".set\tat\n\t" \ 203 ".section\t.fixup,\"ax\"\n\t" \ 204 "4:\tli\t%1, %3\n\t" \ 205 "j\t3b\n\t" \ 206 ".previous\n\t" \ 207 ".section\t__ex_table,\"a\"\n\t" \ 208 STR(PTR)"\t1b, 4b\n\t" \ 209 STR(PTR)"\t2b, 4b\n\t" \ 210 ".previous" \ 211 : "=&r" (value), "=r" (res) \ 212 : "r" (addr), "i" (-EFAULT)); \ 213 } while(0) 214 215 #ifndef CONFIG_CPU_MIPSR6 216 #define _LoadWU(addr, value, res, type) \ 217 do { \ 218 __asm__ __volatile__ ( \ 219 "1:\t"type##_lwl("%0", "(%2)")"\n" \ 220 "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\ 221 "dsll\t%0, %0, 32\n\t" \ 222 "dsrl\t%0, %0, 32\n\t" \ 223 "li\t%1, 0\n" \ 224 "3:\n\t" \ 225 ".insn\n\t" \ 226 "\t.section\t.fixup,\"ax\"\n\t" \ 227 "4:\tli\t%1, %3\n\t" \ 228 "j\t3b\n\t" \ 229 ".previous\n\t" \ 230 ".section\t__ex_table,\"a\"\n\t" \ 231 STR(PTR)"\t1b, 4b\n\t" \ 232 STR(PTR)"\t2b, 4b\n\t" \ 233 ".previous" \ 234 : "=&r" (value), "=r" (res) \ 235 : "r" (addr), "i" (-EFAULT)); \ 236 } while(0) 237 238 #define _LoadDW(addr, value, res) \ 239 do { \ 240 __asm__ __volatile__ ( \ 241 "1:\tldl\t%0, (%2)\n" \ 242 "2:\tldr\t%0, 7(%2)\n\t" \ 243 "li\t%1, 0\n" \ 244 "3:\n\t" \ 245 ".insn\n\t" \ 246 "\t.section\t.fixup,\"ax\"\n\t" \ 247 "4:\tli\t%1, %3\n\t" \ 248 "j\t3b\n\t" \ 249 ".previous\n\t" \ 250 ".section\t__ex_table,\"a\"\n\t" \ 251 STR(PTR)"\t1b, 4b\n\t" \ 252 STR(PTR)"\t2b, 4b\n\t" \ 253 ".previous" \ 254 : "=&r" (value), "=r" (res) \ 255 : "r" (addr), "i" (-EFAULT)); \ 256 } while(0) 257 258 #else 259 /* MIPSR6 has not lwl and ldl instructions */ 260 #define _LoadWU(addr, value, res, type) \ 261 do { \ 262 __asm__ __volatile__ ( \ 263 ".set\tpush\n\t" \ 264 ".set\tnoat\n\t" \ 265 "1:"type##_lbu("%0", "0(%2)")"\n\t" \ 266 "2:"type##_lbu("$1", "1(%2)")"\n\t" \ 267 "sll\t%0, 0x8\n\t" \ 268 "or\t%0, $1\n\t" \ 269 "3:"type##_lbu("$1", "2(%2)")"\n\t" \ 270 "sll\t%0, 0x8\n\t" \ 271 "or\t%0, $1\n\t" \ 272 "4:"type##_lbu("$1", "3(%2)")"\n\t" \ 273 "sll\t%0, 0x8\n\t" \ 274 "or\t%0, $1\n\t" \ 275 "li\t%1, 0\n" \ 276 ".set\tpop\n" \ 277 "10:\n\t" \ 278 ".insn\n\t" \ 279 ".section\t.fixup,\"ax\"\n\t" \ 280 "11:\tli\t%1, %3\n\t" \ 281 "j\t10b\n\t" \ 282 ".previous\n\t" \ 283 ".section\t__ex_table,\"a\"\n\t" \ 284 STR(PTR)"\t1b, 11b\n\t" \ 285 STR(PTR)"\t2b, 11b\n\t" \ 286 STR(PTR)"\t3b, 11b\n\t" \ 287 STR(PTR)"\t4b, 11b\n\t" \ 288 ".previous" \ 289 : "=&r" (value), "=r" (res) \ 290 : "r" (addr), "i" (-EFAULT)); \ 291 } while(0) 292 293 #define _LoadDW(addr, value, res) \ 294 do { \ 295 __asm__ __volatile__ ( \ 296 ".set\tpush\n\t" \ 297 ".set\tnoat\n\t" \ 298 "1:lb\t%0, 0(%2)\n\t" \ 299 "2:lbu\t $1, 1(%2)\n\t" \ 300 "dsll\t%0, 0x8\n\t" \ 301 "or\t%0, $1\n\t" \ 302 "3:lbu\t$1, 2(%2)\n\t" \ 303 "dsll\t%0, 0x8\n\t" \ 304 "or\t%0, $1\n\t" \ 305 "4:lbu\t$1, 3(%2)\n\t" \ 306 "dsll\t%0, 0x8\n\t" \ 307 "or\t%0, $1\n\t" \ 308 "5:lbu\t$1, 4(%2)\n\t" \ 309 "dsll\t%0, 0x8\n\t" \ 310 "or\t%0, $1\n\t" \ 311 "6:lbu\t$1, 5(%2)\n\t" \ 312 "dsll\t%0, 0x8\n\t" \ 313 "or\t%0, $1\n\t" \ 314 "7:lbu\t$1, 6(%2)\n\t" \ 315 "dsll\t%0, 0x8\n\t" \ 316 "or\t%0, $1\n\t" \ 317 "8:lbu\t$1, 7(%2)\n\t" \ 318 "dsll\t%0, 0x8\n\t" \ 319 "or\t%0, $1\n\t" \ 320 "li\t%1, 0\n" \ 321 ".set\tpop\n\t" \ 322 "10:\n\t" \ 323 ".insn\n\t" \ 324 ".section\t.fixup,\"ax\"\n\t" \ 325 "11:\tli\t%1, %3\n\t" \ 326 "j\t10b\n\t" \ 327 ".previous\n\t" \ 328 ".section\t__ex_table,\"a\"\n\t" \ 329 STR(PTR)"\t1b, 11b\n\t" \ 330 STR(PTR)"\t2b, 11b\n\t" \ 331 STR(PTR)"\t3b, 11b\n\t" \ 332 STR(PTR)"\t4b, 11b\n\t" \ 333 STR(PTR)"\t5b, 11b\n\t" \ 334 STR(PTR)"\t6b, 11b\n\t" \ 335 STR(PTR)"\t7b, 11b\n\t" \ 336 STR(PTR)"\t8b, 11b\n\t" \ 337 ".previous" \ 338 : "=&r" (value), "=r" (res) \ 339 : "r" (addr), "i" (-EFAULT)); \ 340 } while(0) 341 342 #endif /* CONFIG_CPU_MIPSR6 */ 343 344 345 #define _StoreHW(addr, value, res, type) \ 346 do { \ 347 __asm__ __volatile__ ( \ 348 ".set\tnoat\n" \ 349 "1:\t"type##_sb("%1", "1(%2)")"\n" \ 350 "srl\t$1, %1, 0x8\n" \ 351 "2:\t"type##_sb("$1", "0(%2)")"\n" \ 352 ".set\tat\n\t" \ 353 "li\t%0, 0\n" \ 354 "3:\n\t" \ 355 ".insn\n\t" \ 356 ".section\t.fixup,\"ax\"\n\t" \ 357 "4:\tli\t%0, %3\n\t" \ 358 "j\t3b\n\t" \ 359 ".previous\n\t" \ 360 ".section\t__ex_table,\"a\"\n\t" \ 361 STR(PTR)"\t1b, 4b\n\t" \ 362 STR(PTR)"\t2b, 4b\n\t" \ 363 ".previous" \ 364 : "=r" (res) \ 365 : "r" (value), "r" (addr), "i" (-EFAULT));\ 366 } while(0) 367 368 #ifndef CONFIG_CPU_MIPSR6 369 #define _StoreW(addr, value, res, type) \ 370 do { \ 371 __asm__ __volatile__ ( \ 372 "1:\t"type##_swl("%1", "(%2)")"\n" \ 373 "2:\t"type##_swr("%1", "3(%2)")"\n\t"\ 374 "li\t%0, 0\n" \ 375 "3:\n\t" \ 376 ".insn\n\t" \ 377 ".section\t.fixup,\"ax\"\n\t" \ 378 "4:\tli\t%0, %3\n\t" \ 379 "j\t3b\n\t" \ 380 ".previous\n\t" \ 381 ".section\t__ex_table,\"a\"\n\t" \ 382 STR(PTR)"\t1b, 4b\n\t" \ 383 STR(PTR)"\t2b, 4b\n\t" \ 384 ".previous" \ 385 : "=r" (res) \ 386 : "r" (value), "r" (addr), "i" (-EFAULT)); \ 387 } while(0) 388 389 #define _StoreDW(addr, value, res) \ 390 do { \ 391 __asm__ __volatile__ ( \ 392 "1:\tsdl\t%1,(%2)\n" \ 393 "2:\tsdr\t%1, 7(%2)\n\t" \ 394 "li\t%0, 0\n" \ 395 "3:\n\t" \ 396 ".insn\n\t" \ 397 ".section\t.fixup,\"ax\"\n\t" \ 398 "4:\tli\t%0, %3\n\t" \ 399 "j\t3b\n\t" \ 400 ".previous\n\t" \ 401 ".section\t__ex_table,\"a\"\n\t" \ 402 STR(PTR)"\t1b, 4b\n\t" \ 403 STR(PTR)"\t2b, 4b\n\t" \ 404 ".previous" \ 405 : "=r" (res) \ 406 : "r" (value), "r" (addr), "i" (-EFAULT)); \ 407 } while(0) 408 409 #else 410 /* MIPSR6 has no swl and sdl instructions */ 411 #define _StoreW(addr, value, res, type) \ 412 do { \ 413 __asm__ __volatile__ ( \ 414 ".set\tpush\n\t" \ 415 ".set\tnoat\n\t" \ 416 "1:"type##_sb("%1", "3(%2)")"\n\t" \ 417 "srl\t$1, %1, 0x8\n\t" \ 418 "2:"type##_sb("$1", "2(%2)")"\n\t" \ 419 "srl\t$1, $1, 0x8\n\t" \ 420 "3:"type##_sb("$1", "1(%2)")"\n\t" \ 421 "srl\t$1, $1, 0x8\n\t" \ 422 "4:"type##_sb("$1", "0(%2)")"\n\t" \ 423 ".set\tpop\n\t" \ 424 "li\t%0, 0\n" \ 425 "10:\n\t" \ 426 ".insn\n\t" \ 427 ".section\t.fixup,\"ax\"\n\t" \ 428 "11:\tli\t%0, %3\n\t" \ 429 "j\t10b\n\t" \ 430 ".previous\n\t" \ 431 ".section\t__ex_table,\"a\"\n\t" \ 432 STR(PTR)"\t1b, 11b\n\t" \ 433 STR(PTR)"\t2b, 11b\n\t" \ 434 STR(PTR)"\t3b, 11b\n\t" \ 435 STR(PTR)"\t4b, 11b\n\t" \ 436 ".previous" \ 437 : "=&r" (res) \ 438 : "r" (value), "r" (addr), "i" (-EFAULT) \ 439 : "memory"); \ 440 } while(0) 441 442 #define _StoreDW(addr, value, res) \ 443 do { \ 444 __asm__ __volatile__ ( \ 445 ".set\tpush\n\t" \ 446 ".set\tnoat\n\t" \ 447 "1:sb\t%1, 7(%2)\n\t" \ 448 "dsrl\t$1, %1, 0x8\n\t" \ 449 "2:sb\t$1, 6(%2)\n\t" \ 450 "dsrl\t$1, $1, 0x8\n\t" \ 451 "3:sb\t$1, 5(%2)\n\t" \ 452 "dsrl\t$1, $1, 0x8\n\t" \ 453 "4:sb\t$1, 4(%2)\n\t" \ 454 "dsrl\t$1, $1, 0x8\n\t" \ 455 "5:sb\t$1, 3(%2)\n\t" \ 456 "dsrl\t$1, $1, 0x8\n\t" \ 457 "6:sb\t$1, 2(%2)\n\t" \ 458 "dsrl\t$1, $1, 0x8\n\t" \ 459 "7:sb\t$1, 1(%2)\n\t" \ 460 "dsrl\t$1, $1, 0x8\n\t" \ 461 "8:sb\t$1, 0(%2)\n\t" \ 462 "dsrl\t$1, $1, 0x8\n\t" \ 463 ".set\tpop\n\t" \ 464 "li\t%0, 0\n" \ 465 "10:\n\t" \ 466 ".insn\n\t" \ 467 ".section\t.fixup,\"ax\"\n\t" \ 468 "11:\tli\t%0, %3\n\t" \ 469 "j\t10b\n\t" \ 470 ".previous\n\t" \ 471 ".section\t__ex_table,\"a\"\n\t" \ 472 STR(PTR)"\t1b, 11b\n\t" \ 473 STR(PTR)"\t2b, 11b\n\t" \ 474 STR(PTR)"\t3b, 11b\n\t" \ 475 STR(PTR)"\t4b, 11b\n\t" \ 476 STR(PTR)"\t5b, 11b\n\t" \ 477 STR(PTR)"\t6b, 11b\n\t" \ 478 STR(PTR)"\t7b, 11b\n\t" \ 479 STR(PTR)"\t8b, 11b\n\t" \ 480 ".previous" \ 481 : "=&r" (res) \ 482 : "r" (value), "r" (addr), "i" (-EFAULT) \ 483 : "memory"); \ 484 } while(0) 485 486 #endif /* CONFIG_CPU_MIPSR6 */ 487 488 #else /* __BIG_ENDIAN */ 489 490 #define _LoadHW(addr, value, res, type) \ 491 do { \ 492 __asm__ __volatile__ (".set\tnoat\n" \ 493 "1:\t"type##_lb("%0", "1(%2)")"\n" \ 494 "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\ 495 "sll\t%0, 0x8\n\t" \ 496 "or\t%0, $1\n\t" \ 497 "li\t%1, 0\n" \ 498 "3:\t.set\tat\n\t" \ 499 ".insn\n\t" \ 500 ".section\t.fixup,\"ax\"\n\t" \ 501 "4:\tli\t%1, %3\n\t" \ 502 "j\t3b\n\t" \ 503 ".previous\n\t" \ 504 ".section\t__ex_table,\"a\"\n\t" \ 505 STR(PTR)"\t1b, 4b\n\t" \ 506 STR(PTR)"\t2b, 4b\n\t" \ 507 ".previous" \ 508 : "=&r" (value), "=r" (res) \ 509 : "r" (addr), "i" (-EFAULT)); \ 510 } while(0) 511 512 #ifndef CONFIG_CPU_MIPSR6 513 #define _LoadW(addr, value, res, type) \ 514 do { \ 515 __asm__ __volatile__ ( \ 516 "1:\t"type##_lwl("%0", "3(%2)")"\n" \ 517 "2:\t"type##_lwr("%0", "(%2)")"\n\t"\ 518 "li\t%1, 0\n" \ 519 "3:\n\t" \ 520 ".insn\n\t" \ 521 ".section\t.fixup,\"ax\"\n\t" \ 522 "4:\tli\t%1, %3\n\t" \ 523 "j\t3b\n\t" \ 524 ".previous\n\t" \ 525 ".section\t__ex_table,\"a\"\n\t" \ 526 STR(PTR)"\t1b, 4b\n\t" \ 527 STR(PTR)"\t2b, 4b\n\t" \ 528 ".previous" \ 529 : "=&r" (value), "=r" (res) \ 530 : "r" (addr), "i" (-EFAULT)); \ 531 } while(0) 532 533 #else 534 /* MIPSR6 has no lwl instruction */ 535 #define _LoadW(addr, value, res, type) \ 536 do { \ 537 __asm__ __volatile__ ( \ 538 ".set\tpush\n" \ 539 ".set\tnoat\n\t" \ 540 "1:"type##_lb("%0", "3(%2)")"\n\t" \ 541 "2:"type##_lbu("$1", "2(%2)")"\n\t" \ 542 "sll\t%0, 0x8\n\t" \ 543 "or\t%0, $1\n\t" \ 544 "3:"type##_lbu("$1", "1(%2)")"\n\t" \ 545 "sll\t%0, 0x8\n\t" \ 546 "or\t%0, $1\n\t" \ 547 "4:"type##_lbu("$1", "0(%2)")"\n\t" \ 548 "sll\t%0, 0x8\n\t" \ 549 "or\t%0, $1\n\t" \ 550 "li\t%1, 0\n" \ 551 ".set\tpop\n" \ 552 "10:\n\t" \ 553 ".insn\n\t" \ 554 ".section\t.fixup,\"ax\"\n\t" \ 555 "11:\tli\t%1, %3\n\t" \ 556 "j\t10b\n\t" \ 557 ".previous\n\t" \ 558 ".section\t__ex_table,\"a\"\n\t" \ 559 STR(PTR)"\t1b, 11b\n\t" \ 560 STR(PTR)"\t2b, 11b\n\t" \ 561 STR(PTR)"\t3b, 11b\n\t" \ 562 STR(PTR)"\t4b, 11b\n\t" \ 563 ".previous" \ 564 : "=&r" (value), "=r" (res) \ 565 : "r" (addr), "i" (-EFAULT)); \ 566 } while(0) 567 568 #endif /* CONFIG_CPU_MIPSR6 */ 569 570 571 #define _LoadHWU(addr, value, res, type) \ 572 do { \ 573 __asm__ __volatile__ ( \ 574 ".set\tnoat\n" \ 575 "1:\t"type##_lbu("%0", "1(%2)")"\n" \ 576 "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\ 577 "sll\t%0, 0x8\n\t" \ 578 "or\t%0, $1\n\t" \ 579 "li\t%1, 0\n" \ 580 "3:\n\t" \ 581 ".insn\n\t" \ 582 ".set\tat\n\t" \ 583 ".section\t.fixup,\"ax\"\n\t" \ 584 "4:\tli\t%1, %3\n\t" \ 585 "j\t3b\n\t" \ 586 ".previous\n\t" \ 587 ".section\t__ex_table,\"a\"\n\t" \ 588 STR(PTR)"\t1b, 4b\n\t" \ 589 STR(PTR)"\t2b, 4b\n\t" \ 590 ".previous" \ 591 : "=&r" (value), "=r" (res) \ 592 : "r" (addr), "i" (-EFAULT)); \ 593 } while(0) 594 595 #ifndef CONFIG_CPU_MIPSR6 596 #define _LoadWU(addr, value, res, type) \ 597 do { \ 598 __asm__ __volatile__ ( \ 599 "1:\t"type##_lwl("%0", "3(%2)")"\n" \ 600 "2:\t"type##_lwr("%0", "(%2)")"\n\t"\ 601 "dsll\t%0, %0, 32\n\t" \ 602 "dsrl\t%0, %0, 32\n\t" \ 603 "li\t%1, 0\n" \ 604 "3:\n\t" \ 605 ".insn\n\t" \ 606 "\t.section\t.fixup,\"ax\"\n\t" \ 607 "4:\tli\t%1, %3\n\t" \ 608 "j\t3b\n\t" \ 609 ".previous\n\t" \ 610 ".section\t__ex_table,\"a\"\n\t" \ 611 STR(PTR)"\t1b, 4b\n\t" \ 612 STR(PTR)"\t2b, 4b\n\t" \ 613 ".previous" \ 614 : "=&r" (value), "=r" (res) \ 615 : "r" (addr), "i" (-EFAULT)); \ 616 } while(0) 617 618 #define _LoadDW(addr, value, res) \ 619 do { \ 620 __asm__ __volatile__ ( \ 621 "1:\tldl\t%0, 7(%2)\n" \ 622 "2:\tldr\t%0, (%2)\n\t" \ 623 "li\t%1, 0\n" \ 624 "3:\n\t" \ 625 ".insn\n\t" \ 626 "\t.section\t.fixup,\"ax\"\n\t" \ 627 "4:\tli\t%1, %3\n\t" \ 628 "j\t3b\n\t" \ 629 ".previous\n\t" \ 630 ".section\t__ex_table,\"a\"\n\t" \ 631 STR(PTR)"\t1b, 4b\n\t" \ 632 STR(PTR)"\t2b, 4b\n\t" \ 633 ".previous" \ 634 : "=&r" (value), "=r" (res) \ 635 : "r" (addr), "i" (-EFAULT)); \ 636 } while(0) 637 638 #else 639 /* MIPSR6 has not lwl and ldl instructions */ 640 #define _LoadWU(addr, value, res, type) \ 641 do { \ 642 __asm__ __volatile__ ( \ 643 ".set\tpush\n\t" \ 644 ".set\tnoat\n\t" \ 645 "1:"type##_lbu("%0", "3(%2)")"\n\t" \ 646 "2:"type##_lbu("$1", "2(%2)")"\n\t" \ 647 "sll\t%0, 0x8\n\t" \ 648 "or\t%0, $1\n\t" \ 649 "3:"type##_lbu("$1", "1(%2)")"\n\t" \ 650 "sll\t%0, 0x8\n\t" \ 651 "or\t%0, $1\n\t" \ 652 "4:"type##_lbu("$1", "0(%2)")"\n\t" \ 653 "sll\t%0, 0x8\n\t" \ 654 "or\t%0, $1\n\t" \ 655 "li\t%1, 0\n" \ 656 ".set\tpop\n" \ 657 "10:\n\t" \ 658 ".insn\n\t" \ 659 ".section\t.fixup,\"ax\"\n\t" \ 660 "11:\tli\t%1, %3\n\t" \ 661 "j\t10b\n\t" \ 662 ".previous\n\t" \ 663 ".section\t__ex_table,\"a\"\n\t" \ 664 STR(PTR)"\t1b, 11b\n\t" \ 665 STR(PTR)"\t2b, 11b\n\t" \ 666 STR(PTR)"\t3b, 11b\n\t" \ 667 STR(PTR)"\t4b, 11b\n\t" \ 668 ".previous" \ 669 : "=&r" (value), "=r" (res) \ 670 : "r" (addr), "i" (-EFAULT)); \ 671 } while(0) 672 673 #define _LoadDW(addr, value, res) \ 674 do { \ 675 __asm__ __volatile__ ( \ 676 ".set\tpush\n\t" \ 677 ".set\tnoat\n\t" \ 678 "1:lb\t%0, 7(%2)\n\t" \ 679 "2:lbu\t$1, 6(%2)\n\t" \ 680 "dsll\t%0, 0x8\n\t" \ 681 "or\t%0, $1\n\t" \ 682 "3:lbu\t$1, 5(%2)\n\t" \ 683 "dsll\t%0, 0x8\n\t" \ 684 "or\t%0, $1\n\t" \ 685 "4:lbu\t$1, 4(%2)\n\t" \ 686 "dsll\t%0, 0x8\n\t" \ 687 "or\t%0, $1\n\t" \ 688 "5:lbu\t$1, 3(%2)\n\t" \ 689 "dsll\t%0, 0x8\n\t" \ 690 "or\t%0, $1\n\t" \ 691 "6:lbu\t$1, 2(%2)\n\t" \ 692 "dsll\t%0, 0x8\n\t" \ 693 "or\t%0, $1\n\t" \ 694 "7:lbu\t$1, 1(%2)\n\t" \ 695 "dsll\t%0, 0x8\n\t" \ 696 "or\t%0, $1\n\t" \ 697 "8:lbu\t$1, 0(%2)\n\t" \ 698 "dsll\t%0, 0x8\n\t" \ 699 "or\t%0, $1\n\t" \ 700 "li\t%1, 0\n" \ 701 ".set\tpop\n\t" \ 702 "10:\n\t" \ 703 ".insn\n\t" \ 704 ".section\t.fixup,\"ax\"\n\t" \ 705 "11:\tli\t%1, %3\n\t" \ 706 "j\t10b\n\t" \ 707 ".previous\n\t" \ 708 ".section\t__ex_table,\"a\"\n\t" \ 709 STR(PTR)"\t1b, 11b\n\t" \ 710 STR(PTR)"\t2b, 11b\n\t" \ 711 STR(PTR)"\t3b, 11b\n\t" \ 712 STR(PTR)"\t4b, 11b\n\t" \ 713 STR(PTR)"\t5b, 11b\n\t" \ 714 STR(PTR)"\t6b, 11b\n\t" \ 715 STR(PTR)"\t7b, 11b\n\t" \ 716 STR(PTR)"\t8b, 11b\n\t" \ 717 ".previous" \ 718 : "=&r" (value), "=r" (res) \ 719 : "r" (addr), "i" (-EFAULT)); \ 720 } while(0) 721 #endif /* CONFIG_CPU_MIPSR6 */ 722 723 #define _StoreHW(addr, value, res, type) \ 724 do { \ 725 __asm__ __volatile__ ( \ 726 ".set\tnoat\n" \ 727 "1:\t"type##_sb("%1", "0(%2)")"\n" \ 728 "srl\t$1,%1, 0x8\n" \ 729 "2:\t"type##_sb("$1", "1(%2)")"\n" \ 730 ".set\tat\n\t" \ 731 "li\t%0, 0\n" \ 732 "3:\n\t" \ 733 ".insn\n\t" \ 734 ".section\t.fixup,\"ax\"\n\t" \ 735 "4:\tli\t%0, %3\n\t" \ 736 "j\t3b\n\t" \ 737 ".previous\n\t" \ 738 ".section\t__ex_table,\"a\"\n\t" \ 739 STR(PTR)"\t1b, 4b\n\t" \ 740 STR(PTR)"\t2b, 4b\n\t" \ 741 ".previous" \ 742 : "=r" (res) \ 743 : "r" (value), "r" (addr), "i" (-EFAULT));\ 744 } while(0) 745 746 #ifndef CONFIG_CPU_MIPSR6 747 #define _StoreW(addr, value, res, type) \ 748 do { \ 749 __asm__ __volatile__ ( \ 750 "1:\t"type##_swl("%1", "3(%2)")"\n" \ 751 "2:\t"type##_swr("%1", "(%2)")"\n\t"\ 752 "li\t%0, 0\n" \ 753 "3:\n\t" \ 754 ".insn\n\t" \ 755 ".section\t.fixup,\"ax\"\n\t" \ 756 "4:\tli\t%0, %3\n\t" \ 757 "j\t3b\n\t" \ 758 ".previous\n\t" \ 759 ".section\t__ex_table,\"a\"\n\t" \ 760 STR(PTR)"\t1b, 4b\n\t" \ 761 STR(PTR)"\t2b, 4b\n\t" \ 762 ".previous" \ 763 : "=r" (res) \ 764 : "r" (value), "r" (addr), "i" (-EFAULT)); \ 765 } while(0) 766 767 #define _StoreDW(addr, value, res) \ 768 do { \ 769 __asm__ __volatile__ ( \ 770 "1:\tsdl\t%1, 7(%2)\n" \ 771 "2:\tsdr\t%1, (%2)\n\t" \ 772 "li\t%0, 0\n" \ 773 "3:\n\t" \ 774 ".insn\n\t" \ 775 ".section\t.fixup,\"ax\"\n\t" \ 776 "4:\tli\t%0, %3\n\t" \ 777 "j\t3b\n\t" \ 778 ".previous\n\t" \ 779 ".section\t__ex_table,\"a\"\n\t" \ 780 STR(PTR)"\t1b, 4b\n\t" \ 781 STR(PTR)"\t2b, 4b\n\t" \ 782 ".previous" \ 783 : "=r" (res) \ 784 : "r" (value), "r" (addr), "i" (-EFAULT)); \ 785 } while(0) 786 787 #else 788 /* MIPSR6 has no swl and sdl instructions */ 789 #define _StoreW(addr, value, res, type) \ 790 do { \ 791 __asm__ __volatile__ ( \ 792 ".set\tpush\n\t" \ 793 ".set\tnoat\n\t" \ 794 "1:"type##_sb("%1", "0(%2)")"\n\t" \ 795 "srl\t$1, %1, 0x8\n\t" \ 796 "2:"type##_sb("$1", "1(%2)")"\n\t" \ 797 "srl\t$1, $1, 0x8\n\t" \ 798 "3:"type##_sb("$1", "2(%2)")"\n\t" \ 799 "srl\t$1, $1, 0x8\n\t" \ 800 "4:"type##_sb("$1", "3(%2)")"\n\t" \ 801 ".set\tpop\n\t" \ 802 "li\t%0, 0\n" \ 803 "10:\n\t" \ 804 ".insn\n\t" \ 805 ".section\t.fixup,\"ax\"\n\t" \ 806 "11:\tli\t%0, %3\n\t" \ 807 "j\t10b\n\t" \ 808 ".previous\n\t" \ 809 ".section\t__ex_table,\"a\"\n\t" \ 810 STR(PTR)"\t1b, 11b\n\t" \ 811 STR(PTR)"\t2b, 11b\n\t" \ 812 STR(PTR)"\t3b, 11b\n\t" \ 813 STR(PTR)"\t4b, 11b\n\t" \ 814 ".previous" \ 815 : "=&r" (res) \ 816 : "r" (value), "r" (addr), "i" (-EFAULT) \ 817 : "memory"); \ 818 } while(0) 819 820 #define _StoreDW(addr, value, res) \ 821 do { \ 822 __asm__ __volatile__ ( \ 823 ".set\tpush\n\t" \ 824 ".set\tnoat\n\t" \ 825 "1:sb\t%1, 0(%2)\n\t" \ 826 "dsrl\t$1, %1, 0x8\n\t" \ 827 "2:sb\t$1, 1(%2)\n\t" \ 828 "dsrl\t$1, $1, 0x8\n\t" \ 829 "3:sb\t$1, 2(%2)\n\t" \ 830 "dsrl\t$1, $1, 0x8\n\t" \ 831 "4:sb\t$1, 3(%2)\n\t" \ 832 "dsrl\t$1, $1, 0x8\n\t" \ 833 "5:sb\t$1, 4(%2)\n\t" \ 834 "dsrl\t$1, $1, 0x8\n\t" \ 835 "6:sb\t$1, 5(%2)\n\t" \ 836 "dsrl\t$1, $1, 0x8\n\t" \ 837 "7:sb\t$1, 6(%2)\n\t" \ 838 "dsrl\t$1, $1, 0x8\n\t" \ 839 "8:sb\t$1, 7(%2)\n\t" \ 840 "dsrl\t$1, $1, 0x8\n\t" \ 841 ".set\tpop\n\t" \ 842 "li\t%0, 0\n" \ 843 "10:\n\t" \ 844 ".insn\n\t" \ 845 ".section\t.fixup,\"ax\"\n\t" \ 846 "11:\tli\t%0, %3\n\t" \ 847 "j\t10b\n\t" \ 848 ".previous\n\t" \ 849 ".section\t__ex_table,\"a\"\n\t" \ 850 STR(PTR)"\t1b, 11b\n\t" \ 851 STR(PTR)"\t2b, 11b\n\t" \ 852 STR(PTR)"\t3b, 11b\n\t" \ 853 STR(PTR)"\t4b, 11b\n\t" \ 854 STR(PTR)"\t5b, 11b\n\t" \ 855 STR(PTR)"\t6b, 11b\n\t" \ 856 STR(PTR)"\t7b, 11b\n\t" \ 857 STR(PTR)"\t8b, 11b\n\t" \ 858 ".previous" \ 859 : "=&r" (res) \ 860 : "r" (value), "r" (addr), "i" (-EFAULT) \ 861 : "memory"); \ 862 } while(0) 863 864 #endif /* CONFIG_CPU_MIPSR6 */ 865 #endif 866 867 #define LoadHWU(addr, value, res) _LoadHWU(addr, value, res, kernel) 868 #define LoadHWUE(addr, value, res) _LoadHWU(addr, value, res, user) 869 #define LoadWU(addr, value, res) _LoadWU(addr, value, res, kernel) 870 #define LoadWUE(addr, value, res) _LoadWU(addr, value, res, user) 871 #define LoadHW(addr, value, res) _LoadHW(addr, value, res, kernel) 872 #define LoadHWE(addr, value, res) _LoadHW(addr, value, res, user) 873 #define LoadW(addr, value, res) _LoadW(addr, value, res, kernel) 874 #define LoadWE(addr, value, res) _LoadW(addr, value, res, user) 875 #define LoadDW(addr, value, res) _LoadDW(addr, value, res) 876 877 #define StoreHW(addr, value, res) _StoreHW(addr, value, res, kernel) 878 #define StoreHWE(addr, value, res) _StoreHW(addr, value, res, user) 879 #define StoreW(addr, value, res) _StoreW(addr, value, res, kernel) 880 #define StoreWE(addr, value, res) _StoreW(addr, value, res, user) 881 #define StoreDW(addr, value, res) _StoreDW(addr, value, res) 882 883 static void emulate_load_store_insn(struct pt_regs *regs, 884 void __user *addr, unsigned int __user *pc) 885 { 886 union mips_instruction insn; 887 unsigned long value; 888 unsigned int res, preempted; 889 unsigned long origpc; 890 unsigned long orig31; 891 void __user *fault_addr = NULL; 892 #ifdef CONFIG_EVA 893 mm_segment_t seg; 894 #endif 895 union fpureg *fpr; 896 enum msa_2b_fmt df; 897 unsigned int wd; 898 origpc = (unsigned long)pc; 899 orig31 = regs->regs[31]; 900 901 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); 902 903 /* 904 * This load never faults. 905 */ 906 __get_user(insn.word, pc); 907 908 switch (insn.i_format.opcode) { 909 /* 910 * These are instructions that a compiler doesn't generate. We 911 * can assume therefore that the code is MIPS-aware and 912 * really buggy. Emulating these instructions would break the 913 * semantics anyway. 914 */ 915 case ll_op: 916 case lld_op: 917 case sc_op: 918 case scd_op: 919 920 /* 921 * For these instructions the only way to create an address 922 * error is an attempted access to kernel/supervisor address 923 * space. 924 */ 925 case ldl_op: 926 case ldr_op: 927 case lwl_op: 928 case lwr_op: 929 case sdl_op: 930 case sdr_op: 931 case swl_op: 932 case swr_op: 933 case lb_op: 934 case lbu_op: 935 case sb_op: 936 goto sigbus; 937 938 /* 939 * The remaining opcodes are the ones that are really of 940 * interest. 941 */ 942 case spec3_op: 943 if (insn.dsp_format.func == lx_op) { 944 switch (insn.dsp_format.op) { 945 case lwx_op: 946 if (!access_ok(VERIFY_READ, addr, 4)) 947 goto sigbus; 948 LoadW(addr, value, res); 949 if (res) 950 goto fault; 951 compute_return_epc(regs); 952 regs->regs[insn.dsp_format.rd] = value; 953 break; 954 case lhx_op: 955 if (!access_ok(VERIFY_READ, addr, 2)) 956 goto sigbus; 957 LoadHW(addr, value, res); 958 if (res) 959 goto fault; 960 compute_return_epc(regs); 961 regs->regs[insn.dsp_format.rd] = value; 962 break; 963 default: 964 goto sigill; 965 } 966 } 967 #ifdef CONFIG_EVA 968 else { 969 /* 970 * we can land here only from kernel accessing user 971 * memory, so we need to "switch" the address limit to 972 * user space, so that address check can work properly. 973 */ 974 seg = get_fs(); 975 set_fs(USER_DS); 976 switch (insn.spec3_format.func) { 977 case lhe_op: 978 if (!access_ok(VERIFY_READ, addr, 2)) { 979 set_fs(seg); 980 goto sigbus; 981 } 982 LoadHWE(addr, value, res); 983 if (res) { 984 set_fs(seg); 985 goto fault; 986 } 987 compute_return_epc(regs); 988 regs->regs[insn.spec3_format.rt] = value; 989 break; 990 case lwe_op: 991 if (!access_ok(VERIFY_READ, addr, 4)) { 992 set_fs(seg); 993 goto sigbus; 994 } 995 LoadWE(addr, value, res); 996 if (res) { 997 set_fs(seg); 998 goto fault; 999 } 1000 compute_return_epc(regs); 1001 regs->regs[insn.spec3_format.rt] = value; 1002 break; 1003 case lhue_op: 1004 if (!access_ok(VERIFY_READ, addr, 2)) { 1005 set_fs(seg); 1006 goto sigbus; 1007 } 1008 LoadHWUE(addr, value, res); 1009 if (res) { 1010 set_fs(seg); 1011 goto fault; 1012 } 1013 compute_return_epc(regs); 1014 regs->regs[insn.spec3_format.rt] = value; 1015 break; 1016 case she_op: 1017 if (!access_ok(VERIFY_WRITE, addr, 2)) { 1018 set_fs(seg); 1019 goto sigbus; 1020 } 1021 compute_return_epc(regs); 1022 value = regs->regs[insn.spec3_format.rt]; 1023 StoreHWE(addr, value, res); 1024 if (res) { 1025 set_fs(seg); 1026 goto fault; 1027 } 1028 break; 1029 case swe_op: 1030 if (!access_ok(VERIFY_WRITE, addr, 4)) { 1031 set_fs(seg); 1032 goto sigbus; 1033 } 1034 compute_return_epc(regs); 1035 value = regs->regs[insn.spec3_format.rt]; 1036 StoreWE(addr, value, res); 1037 if (res) { 1038 set_fs(seg); 1039 goto fault; 1040 } 1041 break; 1042 default: 1043 set_fs(seg); 1044 goto sigill; 1045 } 1046 set_fs(seg); 1047 } 1048 #endif 1049 break; 1050 case lh_op: 1051 if (!access_ok(VERIFY_READ, addr, 2)) 1052 goto sigbus; 1053 1054 if (IS_ENABLED(CONFIG_EVA)) { 1055 if (uaccess_kernel()) 1056 LoadHW(addr, value, res); 1057 else 1058 LoadHWE(addr, value, res); 1059 } else { 1060 LoadHW(addr, value, res); 1061 } 1062 1063 if (res) 1064 goto fault; 1065 compute_return_epc(regs); 1066 regs->regs[insn.i_format.rt] = value; 1067 break; 1068 1069 case lw_op: 1070 if (!access_ok(VERIFY_READ, addr, 4)) 1071 goto sigbus; 1072 1073 if (IS_ENABLED(CONFIG_EVA)) { 1074 if (uaccess_kernel()) 1075 LoadW(addr, value, res); 1076 else 1077 LoadWE(addr, value, res); 1078 } else { 1079 LoadW(addr, value, res); 1080 } 1081 1082 if (res) 1083 goto fault; 1084 compute_return_epc(regs); 1085 regs->regs[insn.i_format.rt] = value; 1086 break; 1087 1088 case lhu_op: 1089 if (!access_ok(VERIFY_READ, addr, 2)) 1090 goto sigbus; 1091 1092 if (IS_ENABLED(CONFIG_EVA)) { 1093 if (uaccess_kernel()) 1094 LoadHWU(addr, value, res); 1095 else 1096 LoadHWUE(addr, value, res); 1097 } else { 1098 LoadHWU(addr, value, res); 1099 } 1100 1101 if (res) 1102 goto fault; 1103 compute_return_epc(regs); 1104 regs->regs[insn.i_format.rt] = value; 1105 break; 1106 1107 case lwu_op: 1108 #ifdef CONFIG_64BIT 1109 /* 1110 * A 32-bit kernel might be running on a 64-bit processor. But 1111 * if we're on a 32-bit processor and an i-cache incoherency 1112 * or race makes us see a 64-bit instruction here the sdl/sdr 1113 * would blow up, so for now we don't handle unaligned 64-bit 1114 * instructions on 32-bit kernels. 1115 */ 1116 if (!access_ok(VERIFY_READ, addr, 4)) 1117 goto sigbus; 1118 1119 LoadWU(addr, value, res); 1120 if (res) 1121 goto fault; 1122 compute_return_epc(regs); 1123 regs->regs[insn.i_format.rt] = value; 1124 break; 1125 #endif /* CONFIG_64BIT */ 1126 1127 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1128 goto sigill; 1129 1130 case ld_op: 1131 #ifdef CONFIG_64BIT 1132 /* 1133 * A 32-bit kernel might be running on a 64-bit processor. But 1134 * if we're on a 32-bit processor and an i-cache incoherency 1135 * or race makes us see a 64-bit instruction here the sdl/sdr 1136 * would blow up, so for now we don't handle unaligned 64-bit 1137 * instructions on 32-bit kernels. 1138 */ 1139 if (!access_ok(VERIFY_READ, addr, 8)) 1140 goto sigbus; 1141 1142 LoadDW(addr, value, res); 1143 if (res) 1144 goto fault; 1145 compute_return_epc(regs); 1146 regs->regs[insn.i_format.rt] = value; 1147 break; 1148 #endif /* CONFIG_64BIT */ 1149 1150 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1151 goto sigill; 1152 1153 case sh_op: 1154 if (!access_ok(VERIFY_WRITE, addr, 2)) 1155 goto sigbus; 1156 1157 compute_return_epc(regs); 1158 value = regs->regs[insn.i_format.rt]; 1159 1160 if (IS_ENABLED(CONFIG_EVA)) { 1161 if (uaccess_kernel()) 1162 StoreHW(addr, value, res); 1163 else 1164 StoreHWE(addr, value, res); 1165 } else { 1166 StoreHW(addr, value, res); 1167 } 1168 1169 if (res) 1170 goto fault; 1171 break; 1172 1173 case sw_op: 1174 if (!access_ok(VERIFY_WRITE, addr, 4)) 1175 goto sigbus; 1176 1177 compute_return_epc(regs); 1178 value = regs->regs[insn.i_format.rt]; 1179 1180 if (IS_ENABLED(CONFIG_EVA)) { 1181 if (uaccess_kernel()) 1182 StoreW(addr, value, res); 1183 else 1184 StoreWE(addr, value, res); 1185 } else { 1186 StoreW(addr, value, res); 1187 } 1188 1189 if (res) 1190 goto fault; 1191 break; 1192 1193 case sd_op: 1194 #ifdef CONFIG_64BIT 1195 /* 1196 * A 32-bit kernel might be running on a 64-bit processor. But 1197 * if we're on a 32-bit processor and an i-cache incoherency 1198 * or race makes us see a 64-bit instruction here the sdl/sdr 1199 * would blow up, so for now we don't handle unaligned 64-bit 1200 * instructions on 32-bit kernels. 1201 */ 1202 if (!access_ok(VERIFY_WRITE, addr, 8)) 1203 goto sigbus; 1204 1205 compute_return_epc(regs); 1206 value = regs->regs[insn.i_format.rt]; 1207 StoreDW(addr, value, res); 1208 if (res) 1209 goto fault; 1210 break; 1211 #endif /* CONFIG_64BIT */ 1212 1213 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1214 goto sigill; 1215 1216 case lwc1_op: 1217 case ldc1_op: 1218 case swc1_op: 1219 case sdc1_op: 1220 case cop1x_op: 1221 die_if_kernel("Unaligned FP access in kernel code", regs); 1222 BUG_ON(!used_math()); 1223 1224 lose_fpu(1); /* Save FPU state for the emulator. */ 1225 res = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, 1226 &fault_addr); 1227 own_fpu(1); /* Restore FPU state. */ 1228 1229 /* Signal if something went wrong. */ 1230 process_fpemu_return(res, fault_addr, 0); 1231 1232 if (res == 0) 1233 break; 1234 return; 1235 1236 case msa_op: 1237 if (!cpu_has_msa) 1238 goto sigill; 1239 1240 /* 1241 * If we've reached this point then userland should have taken 1242 * the MSA disabled exception & initialised vector context at 1243 * some point in the past. 1244 */ 1245 BUG_ON(!thread_msa_context_live()); 1246 1247 df = insn.msa_mi10_format.df; 1248 wd = insn.msa_mi10_format.wd; 1249 fpr = ¤t->thread.fpu.fpr[wd]; 1250 1251 switch (insn.msa_mi10_format.func) { 1252 case msa_ld_op: 1253 if (!access_ok(VERIFY_READ, addr, sizeof(*fpr))) 1254 goto sigbus; 1255 1256 do { 1257 /* 1258 * If we have live MSA context keep track of 1259 * whether we get preempted in order to avoid 1260 * the register context we load being clobbered 1261 * by the live context as it's saved during 1262 * preemption. If we don't have live context 1263 * then it can't be saved to clobber the value 1264 * we load. 1265 */ 1266 preempted = test_thread_flag(TIF_USEDMSA); 1267 1268 res = __copy_from_user_inatomic(fpr, addr, 1269 sizeof(*fpr)); 1270 if (res) 1271 goto fault; 1272 1273 /* 1274 * Update the hardware register if it is in use 1275 * by the task in this quantum, in order to 1276 * avoid having to save & restore the whole 1277 * vector context. 1278 */ 1279 preempt_disable(); 1280 if (test_thread_flag(TIF_USEDMSA)) { 1281 write_msa_wr(wd, fpr, df); 1282 preempted = 0; 1283 } 1284 preempt_enable(); 1285 } while (preempted); 1286 break; 1287 1288 case msa_st_op: 1289 if (!access_ok(VERIFY_WRITE, addr, sizeof(*fpr))) 1290 goto sigbus; 1291 1292 /* 1293 * Update from the hardware register if it is in use by 1294 * the task in this quantum, in order to avoid having to 1295 * save & restore the whole vector context. 1296 */ 1297 preempt_disable(); 1298 if (test_thread_flag(TIF_USEDMSA)) 1299 read_msa_wr(wd, fpr, df); 1300 preempt_enable(); 1301 1302 res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr)); 1303 if (res) 1304 goto fault; 1305 break; 1306 1307 default: 1308 goto sigbus; 1309 } 1310 1311 compute_return_epc(regs); 1312 break; 1313 1314 #ifndef CONFIG_CPU_MIPSR6 1315 /* 1316 * COP2 is available to implementor for application specific use. 1317 * It's up to applications to register a notifier chain and do 1318 * whatever they have to do, including possible sending of signals. 1319 * 1320 * This instruction has been reallocated in Release 6 1321 */ 1322 case lwc2_op: 1323 cu2_notifier_call_chain(CU2_LWC2_OP, regs); 1324 break; 1325 1326 case ldc2_op: 1327 cu2_notifier_call_chain(CU2_LDC2_OP, regs); 1328 break; 1329 1330 case swc2_op: 1331 cu2_notifier_call_chain(CU2_SWC2_OP, regs); 1332 break; 1333 1334 case sdc2_op: 1335 cu2_notifier_call_chain(CU2_SDC2_OP, regs); 1336 break; 1337 #endif 1338 default: 1339 /* 1340 * Pheeee... We encountered an yet unknown instruction or 1341 * cache coherence problem. Die sucker, die ... 1342 */ 1343 goto sigill; 1344 } 1345 1346 #ifdef CONFIG_DEBUG_FS 1347 unaligned_instructions++; 1348 #endif 1349 1350 return; 1351 1352 fault: 1353 /* roll back jump/branch */ 1354 regs->cp0_epc = origpc; 1355 regs->regs[31] = orig31; 1356 /* Did we have an exception handler installed? */ 1357 if (fixup_exception(regs)) 1358 return; 1359 1360 die_if_kernel("Unhandled kernel unaligned access", regs); 1361 force_sig(SIGSEGV, current); 1362 1363 return; 1364 1365 sigbus: 1366 die_if_kernel("Unhandled kernel unaligned access", regs); 1367 force_sig(SIGBUS, current); 1368 1369 return; 1370 1371 sigill: 1372 die_if_kernel 1373 ("Unhandled kernel unaligned access or invalid instruction", regs); 1374 force_sig(SIGILL, current); 1375 } 1376 1377 /* Recode table from 16-bit register notation to 32-bit GPR. */ 1378 const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 }; 1379 1380 /* Recode table from 16-bit STORE register notation to 32-bit GPR. */ 1381 static const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 }; 1382 1383 static void emulate_load_store_microMIPS(struct pt_regs *regs, 1384 void __user *addr) 1385 { 1386 unsigned long value; 1387 unsigned int res; 1388 int i; 1389 unsigned int reg = 0, rvar; 1390 unsigned long orig31; 1391 u16 __user *pc16; 1392 u16 halfword; 1393 unsigned int word; 1394 unsigned long origpc, contpc; 1395 union mips_instruction insn; 1396 struct mm_decoded_insn mminsn; 1397 void __user *fault_addr = NULL; 1398 1399 origpc = regs->cp0_epc; 1400 orig31 = regs->regs[31]; 1401 1402 mminsn.micro_mips_mode = 1; 1403 1404 /* 1405 * This load never faults. 1406 */ 1407 pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc); 1408 __get_user(halfword, pc16); 1409 pc16++; 1410 contpc = regs->cp0_epc + 2; 1411 word = ((unsigned int)halfword << 16); 1412 mminsn.pc_inc = 2; 1413 1414 if (!mm_insn_16bit(halfword)) { 1415 __get_user(halfword, pc16); 1416 pc16++; 1417 contpc = regs->cp0_epc + 4; 1418 mminsn.pc_inc = 4; 1419 word |= halfword; 1420 } 1421 mminsn.insn = word; 1422 1423 if (get_user(halfword, pc16)) 1424 goto fault; 1425 mminsn.next_pc_inc = 2; 1426 word = ((unsigned int)halfword << 16); 1427 1428 if (!mm_insn_16bit(halfword)) { 1429 pc16++; 1430 if (get_user(halfword, pc16)) 1431 goto fault; 1432 mminsn.next_pc_inc = 4; 1433 word |= halfword; 1434 } 1435 mminsn.next_insn = word; 1436 1437 insn = (union mips_instruction)(mminsn.insn); 1438 if (mm_isBranchInstr(regs, mminsn, &contpc)) 1439 insn = (union mips_instruction)(mminsn.next_insn); 1440 1441 /* Parse instruction to find what to do */ 1442 1443 switch (insn.mm_i_format.opcode) { 1444 1445 case mm_pool32a_op: 1446 switch (insn.mm_x_format.func) { 1447 case mm_lwxs_op: 1448 reg = insn.mm_x_format.rd; 1449 goto loadW; 1450 } 1451 1452 goto sigbus; 1453 1454 case mm_pool32b_op: 1455 switch (insn.mm_m_format.func) { 1456 case mm_lwp_func: 1457 reg = insn.mm_m_format.rd; 1458 if (reg == 31) 1459 goto sigbus; 1460 1461 if (!access_ok(VERIFY_READ, addr, 8)) 1462 goto sigbus; 1463 1464 LoadW(addr, value, res); 1465 if (res) 1466 goto fault; 1467 regs->regs[reg] = value; 1468 addr += 4; 1469 LoadW(addr, value, res); 1470 if (res) 1471 goto fault; 1472 regs->regs[reg + 1] = value; 1473 goto success; 1474 1475 case mm_swp_func: 1476 reg = insn.mm_m_format.rd; 1477 if (reg == 31) 1478 goto sigbus; 1479 1480 if (!access_ok(VERIFY_WRITE, addr, 8)) 1481 goto sigbus; 1482 1483 value = regs->regs[reg]; 1484 StoreW(addr, value, res); 1485 if (res) 1486 goto fault; 1487 addr += 4; 1488 value = regs->regs[reg + 1]; 1489 StoreW(addr, value, res); 1490 if (res) 1491 goto fault; 1492 goto success; 1493 1494 case mm_ldp_func: 1495 #ifdef CONFIG_64BIT 1496 reg = insn.mm_m_format.rd; 1497 if (reg == 31) 1498 goto sigbus; 1499 1500 if (!access_ok(VERIFY_READ, addr, 16)) 1501 goto sigbus; 1502 1503 LoadDW(addr, value, res); 1504 if (res) 1505 goto fault; 1506 regs->regs[reg] = value; 1507 addr += 8; 1508 LoadDW(addr, value, res); 1509 if (res) 1510 goto fault; 1511 regs->regs[reg + 1] = value; 1512 goto success; 1513 #endif /* CONFIG_64BIT */ 1514 1515 goto sigill; 1516 1517 case mm_sdp_func: 1518 #ifdef CONFIG_64BIT 1519 reg = insn.mm_m_format.rd; 1520 if (reg == 31) 1521 goto sigbus; 1522 1523 if (!access_ok(VERIFY_WRITE, addr, 16)) 1524 goto sigbus; 1525 1526 value = regs->regs[reg]; 1527 StoreDW(addr, value, res); 1528 if (res) 1529 goto fault; 1530 addr += 8; 1531 value = regs->regs[reg + 1]; 1532 StoreDW(addr, value, res); 1533 if (res) 1534 goto fault; 1535 goto success; 1536 #endif /* CONFIG_64BIT */ 1537 1538 goto sigill; 1539 1540 case mm_lwm32_func: 1541 reg = insn.mm_m_format.rd; 1542 rvar = reg & 0xf; 1543 if ((rvar > 9) || !reg) 1544 goto sigill; 1545 if (reg & 0x10) { 1546 if (!access_ok 1547 (VERIFY_READ, addr, 4 * (rvar + 1))) 1548 goto sigbus; 1549 } else { 1550 if (!access_ok(VERIFY_READ, addr, 4 * rvar)) 1551 goto sigbus; 1552 } 1553 if (rvar == 9) 1554 rvar = 8; 1555 for (i = 16; rvar; rvar--, i++) { 1556 LoadW(addr, value, res); 1557 if (res) 1558 goto fault; 1559 addr += 4; 1560 regs->regs[i] = value; 1561 } 1562 if ((reg & 0xf) == 9) { 1563 LoadW(addr, value, res); 1564 if (res) 1565 goto fault; 1566 addr += 4; 1567 regs->regs[30] = value; 1568 } 1569 if (reg & 0x10) { 1570 LoadW(addr, value, res); 1571 if (res) 1572 goto fault; 1573 regs->regs[31] = value; 1574 } 1575 goto success; 1576 1577 case mm_swm32_func: 1578 reg = insn.mm_m_format.rd; 1579 rvar = reg & 0xf; 1580 if ((rvar > 9) || !reg) 1581 goto sigill; 1582 if (reg & 0x10) { 1583 if (!access_ok 1584 (VERIFY_WRITE, addr, 4 * (rvar + 1))) 1585 goto sigbus; 1586 } else { 1587 if (!access_ok(VERIFY_WRITE, addr, 4 * rvar)) 1588 goto sigbus; 1589 } 1590 if (rvar == 9) 1591 rvar = 8; 1592 for (i = 16; rvar; rvar--, i++) { 1593 value = regs->regs[i]; 1594 StoreW(addr, value, res); 1595 if (res) 1596 goto fault; 1597 addr += 4; 1598 } 1599 if ((reg & 0xf) == 9) { 1600 value = regs->regs[30]; 1601 StoreW(addr, value, res); 1602 if (res) 1603 goto fault; 1604 addr += 4; 1605 } 1606 if (reg & 0x10) { 1607 value = regs->regs[31]; 1608 StoreW(addr, value, res); 1609 if (res) 1610 goto fault; 1611 } 1612 goto success; 1613 1614 case mm_ldm_func: 1615 #ifdef CONFIG_64BIT 1616 reg = insn.mm_m_format.rd; 1617 rvar = reg & 0xf; 1618 if ((rvar > 9) || !reg) 1619 goto sigill; 1620 if (reg & 0x10) { 1621 if (!access_ok 1622 (VERIFY_READ, addr, 8 * (rvar + 1))) 1623 goto sigbus; 1624 } else { 1625 if (!access_ok(VERIFY_READ, addr, 8 * rvar)) 1626 goto sigbus; 1627 } 1628 if (rvar == 9) 1629 rvar = 8; 1630 1631 for (i = 16; rvar; rvar--, i++) { 1632 LoadDW(addr, value, res); 1633 if (res) 1634 goto fault; 1635 addr += 4; 1636 regs->regs[i] = value; 1637 } 1638 if ((reg & 0xf) == 9) { 1639 LoadDW(addr, value, res); 1640 if (res) 1641 goto fault; 1642 addr += 8; 1643 regs->regs[30] = value; 1644 } 1645 if (reg & 0x10) { 1646 LoadDW(addr, value, res); 1647 if (res) 1648 goto fault; 1649 regs->regs[31] = value; 1650 } 1651 goto success; 1652 #endif /* CONFIG_64BIT */ 1653 1654 goto sigill; 1655 1656 case mm_sdm_func: 1657 #ifdef CONFIG_64BIT 1658 reg = insn.mm_m_format.rd; 1659 rvar = reg & 0xf; 1660 if ((rvar > 9) || !reg) 1661 goto sigill; 1662 if (reg & 0x10) { 1663 if (!access_ok 1664 (VERIFY_WRITE, addr, 8 * (rvar + 1))) 1665 goto sigbus; 1666 } else { 1667 if (!access_ok(VERIFY_WRITE, addr, 8 * rvar)) 1668 goto sigbus; 1669 } 1670 if (rvar == 9) 1671 rvar = 8; 1672 1673 for (i = 16; rvar; rvar--, i++) { 1674 value = regs->regs[i]; 1675 StoreDW(addr, value, res); 1676 if (res) 1677 goto fault; 1678 addr += 8; 1679 } 1680 if ((reg & 0xf) == 9) { 1681 value = regs->regs[30]; 1682 StoreDW(addr, value, res); 1683 if (res) 1684 goto fault; 1685 addr += 8; 1686 } 1687 if (reg & 0x10) { 1688 value = regs->regs[31]; 1689 StoreDW(addr, value, res); 1690 if (res) 1691 goto fault; 1692 } 1693 goto success; 1694 #endif /* CONFIG_64BIT */ 1695 1696 goto sigill; 1697 1698 /* LWC2, SWC2, LDC2, SDC2 are not serviced */ 1699 } 1700 1701 goto sigbus; 1702 1703 case mm_pool32c_op: 1704 switch (insn.mm_m_format.func) { 1705 case mm_lwu_func: 1706 reg = insn.mm_m_format.rd; 1707 goto loadWU; 1708 } 1709 1710 /* LL,SC,LLD,SCD are not serviced */ 1711 goto sigbus; 1712 1713 case mm_pool32f_op: 1714 switch (insn.mm_x_format.func) { 1715 case mm_lwxc1_func: 1716 case mm_swxc1_func: 1717 case mm_ldxc1_func: 1718 case mm_sdxc1_func: 1719 goto fpu_emul; 1720 } 1721 1722 goto sigbus; 1723 1724 case mm_ldc132_op: 1725 case mm_sdc132_op: 1726 case mm_lwc132_op: 1727 case mm_swc132_op: 1728 fpu_emul: 1729 /* roll back jump/branch */ 1730 regs->cp0_epc = origpc; 1731 regs->regs[31] = orig31; 1732 1733 die_if_kernel("Unaligned FP access in kernel code", regs); 1734 BUG_ON(!used_math()); 1735 BUG_ON(!is_fpu_owner()); 1736 1737 lose_fpu(1); /* save the FPU state for the emulator */ 1738 res = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, 1739 &fault_addr); 1740 own_fpu(1); /* restore FPU state */ 1741 1742 /* If something went wrong, signal */ 1743 process_fpemu_return(res, fault_addr, 0); 1744 1745 if (res == 0) 1746 goto success; 1747 return; 1748 1749 case mm_lh32_op: 1750 reg = insn.mm_i_format.rt; 1751 goto loadHW; 1752 1753 case mm_lhu32_op: 1754 reg = insn.mm_i_format.rt; 1755 goto loadHWU; 1756 1757 case mm_lw32_op: 1758 reg = insn.mm_i_format.rt; 1759 goto loadW; 1760 1761 case mm_sh32_op: 1762 reg = insn.mm_i_format.rt; 1763 goto storeHW; 1764 1765 case mm_sw32_op: 1766 reg = insn.mm_i_format.rt; 1767 goto storeW; 1768 1769 case mm_ld32_op: 1770 reg = insn.mm_i_format.rt; 1771 goto loadDW; 1772 1773 case mm_sd32_op: 1774 reg = insn.mm_i_format.rt; 1775 goto storeDW; 1776 1777 case mm_pool16c_op: 1778 switch (insn.mm16_m_format.func) { 1779 case mm_lwm16_op: 1780 reg = insn.mm16_m_format.rlist; 1781 rvar = reg + 1; 1782 if (!access_ok(VERIFY_READ, addr, 4 * rvar)) 1783 goto sigbus; 1784 1785 for (i = 16; rvar; rvar--, i++) { 1786 LoadW(addr, value, res); 1787 if (res) 1788 goto fault; 1789 addr += 4; 1790 regs->regs[i] = value; 1791 } 1792 LoadW(addr, value, res); 1793 if (res) 1794 goto fault; 1795 regs->regs[31] = value; 1796 1797 goto success; 1798 1799 case mm_swm16_op: 1800 reg = insn.mm16_m_format.rlist; 1801 rvar = reg + 1; 1802 if (!access_ok(VERIFY_WRITE, addr, 4 * rvar)) 1803 goto sigbus; 1804 1805 for (i = 16; rvar; rvar--, i++) { 1806 value = regs->regs[i]; 1807 StoreW(addr, value, res); 1808 if (res) 1809 goto fault; 1810 addr += 4; 1811 } 1812 value = regs->regs[31]; 1813 StoreW(addr, value, res); 1814 if (res) 1815 goto fault; 1816 1817 goto success; 1818 1819 } 1820 1821 goto sigbus; 1822 1823 case mm_lhu16_op: 1824 reg = reg16to32[insn.mm16_rb_format.rt]; 1825 goto loadHWU; 1826 1827 case mm_lw16_op: 1828 reg = reg16to32[insn.mm16_rb_format.rt]; 1829 goto loadW; 1830 1831 case mm_sh16_op: 1832 reg = reg16to32st[insn.mm16_rb_format.rt]; 1833 goto storeHW; 1834 1835 case mm_sw16_op: 1836 reg = reg16to32st[insn.mm16_rb_format.rt]; 1837 goto storeW; 1838 1839 case mm_lwsp16_op: 1840 reg = insn.mm16_r5_format.rt; 1841 goto loadW; 1842 1843 case mm_swsp16_op: 1844 reg = insn.mm16_r5_format.rt; 1845 goto storeW; 1846 1847 case mm_lwgp16_op: 1848 reg = reg16to32[insn.mm16_r3_format.rt]; 1849 goto loadW; 1850 1851 default: 1852 goto sigill; 1853 } 1854 1855 loadHW: 1856 if (!access_ok(VERIFY_READ, addr, 2)) 1857 goto sigbus; 1858 1859 LoadHW(addr, value, res); 1860 if (res) 1861 goto fault; 1862 regs->regs[reg] = value; 1863 goto success; 1864 1865 loadHWU: 1866 if (!access_ok(VERIFY_READ, addr, 2)) 1867 goto sigbus; 1868 1869 LoadHWU(addr, value, res); 1870 if (res) 1871 goto fault; 1872 regs->regs[reg] = value; 1873 goto success; 1874 1875 loadW: 1876 if (!access_ok(VERIFY_READ, addr, 4)) 1877 goto sigbus; 1878 1879 LoadW(addr, value, res); 1880 if (res) 1881 goto fault; 1882 regs->regs[reg] = value; 1883 goto success; 1884 1885 loadWU: 1886 #ifdef CONFIG_64BIT 1887 /* 1888 * A 32-bit kernel might be running on a 64-bit processor. But 1889 * if we're on a 32-bit processor and an i-cache incoherency 1890 * or race makes us see a 64-bit instruction here the sdl/sdr 1891 * would blow up, so for now we don't handle unaligned 64-bit 1892 * instructions on 32-bit kernels. 1893 */ 1894 if (!access_ok(VERIFY_READ, addr, 4)) 1895 goto sigbus; 1896 1897 LoadWU(addr, value, res); 1898 if (res) 1899 goto fault; 1900 regs->regs[reg] = value; 1901 goto success; 1902 #endif /* CONFIG_64BIT */ 1903 1904 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1905 goto sigill; 1906 1907 loadDW: 1908 #ifdef CONFIG_64BIT 1909 /* 1910 * A 32-bit kernel might be running on a 64-bit processor. But 1911 * if we're on a 32-bit processor and an i-cache incoherency 1912 * or race makes us see a 64-bit instruction here the sdl/sdr 1913 * would blow up, so for now we don't handle unaligned 64-bit 1914 * instructions on 32-bit kernels. 1915 */ 1916 if (!access_ok(VERIFY_READ, addr, 8)) 1917 goto sigbus; 1918 1919 LoadDW(addr, value, res); 1920 if (res) 1921 goto fault; 1922 regs->regs[reg] = value; 1923 goto success; 1924 #endif /* CONFIG_64BIT */ 1925 1926 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1927 goto sigill; 1928 1929 storeHW: 1930 if (!access_ok(VERIFY_WRITE, addr, 2)) 1931 goto sigbus; 1932 1933 value = regs->regs[reg]; 1934 StoreHW(addr, value, res); 1935 if (res) 1936 goto fault; 1937 goto success; 1938 1939 storeW: 1940 if (!access_ok(VERIFY_WRITE, addr, 4)) 1941 goto sigbus; 1942 1943 value = regs->regs[reg]; 1944 StoreW(addr, value, res); 1945 if (res) 1946 goto fault; 1947 goto success; 1948 1949 storeDW: 1950 #ifdef CONFIG_64BIT 1951 /* 1952 * A 32-bit kernel might be running on a 64-bit processor. But 1953 * if we're on a 32-bit processor and an i-cache incoherency 1954 * or race makes us see a 64-bit instruction here the sdl/sdr 1955 * would blow up, so for now we don't handle unaligned 64-bit 1956 * instructions on 32-bit kernels. 1957 */ 1958 if (!access_ok(VERIFY_WRITE, addr, 8)) 1959 goto sigbus; 1960 1961 value = regs->regs[reg]; 1962 StoreDW(addr, value, res); 1963 if (res) 1964 goto fault; 1965 goto success; 1966 #endif /* CONFIG_64BIT */ 1967 1968 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1969 goto sigill; 1970 1971 success: 1972 regs->cp0_epc = contpc; /* advance or branch */ 1973 1974 #ifdef CONFIG_DEBUG_FS 1975 unaligned_instructions++; 1976 #endif 1977 return; 1978 1979 fault: 1980 /* roll back jump/branch */ 1981 regs->cp0_epc = origpc; 1982 regs->regs[31] = orig31; 1983 /* Did we have an exception handler installed? */ 1984 if (fixup_exception(regs)) 1985 return; 1986 1987 die_if_kernel("Unhandled kernel unaligned access", regs); 1988 force_sig(SIGSEGV, current); 1989 1990 return; 1991 1992 sigbus: 1993 die_if_kernel("Unhandled kernel unaligned access", regs); 1994 force_sig(SIGBUS, current); 1995 1996 return; 1997 1998 sigill: 1999 die_if_kernel 2000 ("Unhandled kernel unaligned access or invalid instruction", regs); 2001 force_sig(SIGILL, current); 2002 } 2003 2004 static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) 2005 { 2006 unsigned long value; 2007 unsigned int res; 2008 int reg; 2009 unsigned long orig31; 2010 u16 __user *pc16; 2011 unsigned long origpc; 2012 union mips16e_instruction mips16inst, oldinst; 2013 unsigned int opcode; 2014 int extended = 0; 2015 2016 origpc = regs->cp0_epc; 2017 orig31 = regs->regs[31]; 2018 pc16 = (unsigned short __user *)msk_isa16_mode(origpc); 2019 /* 2020 * This load never faults. 2021 */ 2022 __get_user(mips16inst.full, pc16); 2023 oldinst = mips16inst; 2024 2025 /* skip EXTEND instruction */ 2026 if (mips16inst.ri.opcode == MIPS16e_extend_op) { 2027 extended = 1; 2028 pc16++; 2029 __get_user(mips16inst.full, pc16); 2030 } else if (delay_slot(regs)) { 2031 /* skip jump instructions */ 2032 /* JAL/JALX are 32 bits but have OPCODE in first short int */ 2033 if (mips16inst.ri.opcode == MIPS16e_jal_op) 2034 pc16++; 2035 pc16++; 2036 if (get_user(mips16inst.full, pc16)) 2037 goto sigbus; 2038 } 2039 2040 opcode = mips16inst.ri.opcode; 2041 switch (opcode) { 2042 case MIPS16e_i64_op: /* I64 or RI64 instruction */ 2043 switch (mips16inst.i64.func) { /* I64/RI64 func field check */ 2044 case MIPS16e_ldpc_func: 2045 case MIPS16e_ldsp_func: 2046 reg = reg16to32[mips16inst.ri64.ry]; 2047 goto loadDW; 2048 2049 case MIPS16e_sdsp_func: 2050 reg = reg16to32[mips16inst.ri64.ry]; 2051 goto writeDW; 2052 2053 case MIPS16e_sdrasp_func: 2054 reg = 29; /* GPRSP */ 2055 goto writeDW; 2056 } 2057 2058 goto sigbus; 2059 2060 case MIPS16e_swsp_op: 2061 reg = reg16to32[mips16inst.ri.rx]; 2062 if (extended && cpu_has_mips16e2) 2063 switch (mips16inst.ri.imm >> 5) { 2064 case 0: /* SWSP */ 2065 case 1: /* SWGP */ 2066 break; 2067 case 2: /* SHGP */ 2068 opcode = MIPS16e_sh_op; 2069 break; 2070 default: 2071 goto sigbus; 2072 } 2073 break; 2074 2075 case MIPS16e_lwpc_op: 2076 reg = reg16to32[mips16inst.ri.rx]; 2077 break; 2078 2079 case MIPS16e_lwsp_op: 2080 reg = reg16to32[mips16inst.ri.rx]; 2081 if (extended && cpu_has_mips16e2) 2082 switch (mips16inst.ri.imm >> 5) { 2083 case 0: /* LWSP */ 2084 case 1: /* LWGP */ 2085 break; 2086 case 2: /* LHGP */ 2087 opcode = MIPS16e_lh_op; 2088 break; 2089 case 4: /* LHUGP */ 2090 opcode = MIPS16e_lhu_op; 2091 break; 2092 default: 2093 goto sigbus; 2094 } 2095 break; 2096 2097 case MIPS16e_i8_op: 2098 if (mips16inst.i8.func != MIPS16e_swrasp_func) 2099 goto sigbus; 2100 reg = 29; /* GPRSP */ 2101 break; 2102 2103 default: 2104 reg = reg16to32[mips16inst.rri.ry]; 2105 break; 2106 } 2107 2108 switch (opcode) { 2109 2110 case MIPS16e_lb_op: 2111 case MIPS16e_lbu_op: 2112 case MIPS16e_sb_op: 2113 goto sigbus; 2114 2115 case MIPS16e_lh_op: 2116 if (!access_ok(VERIFY_READ, addr, 2)) 2117 goto sigbus; 2118 2119 LoadHW(addr, value, res); 2120 if (res) 2121 goto fault; 2122 MIPS16e_compute_return_epc(regs, &oldinst); 2123 regs->regs[reg] = value; 2124 break; 2125 2126 case MIPS16e_lhu_op: 2127 if (!access_ok(VERIFY_READ, addr, 2)) 2128 goto sigbus; 2129 2130 LoadHWU(addr, value, res); 2131 if (res) 2132 goto fault; 2133 MIPS16e_compute_return_epc(regs, &oldinst); 2134 regs->regs[reg] = value; 2135 break; 2136 2137 case MIPS16e_lw_op: 2138 case MIPS16e_lwpc_op: 2139 case MIPS16e_lwsp_op: 2140 if (!access_ok(VERIFY_READ, addr, 4)) 2141 goto sigbus; 2142 2143 LoadW(addr, value, res); 2144 if (res) 2145 goto fault; 2146 MIPS16e_compute_return_epc(regs, &oldinst); 2147 regs->regs[reg] = value; 2148 break; 2149 2150 case MIPS16e_lwu_op: 2151 #ifdef CONFIG_64BIT 2152 /* 2153 * A 32-bit kernel might be running on a 64-bit processor. But 2154 * if we're on a 32-bit processor and an i-cache incoherency 2155 * or race makes us see a 64-bit instruction here the sdl/sdr 2156 * would blow up, so for now we don't handle unaligned 64-bit 2157 * instructions on 32-bit kernels. 2158 */ 2159 if (!access_ok(VERIFY_READ, addr, 4)) 2160 goto sigbus; 2161 2162 LoadWU(addr, value, res); 2163 if (res) 2164 goto fault; 2165 MIPS16e_compute_return_epc(regs, &oldinst); 2166 regs->regs[reg] = value; 2167 break; 2168 #endif /* CONFIG_64BIT */ 2169 2170 /* Cannot handle 64-bit instructions in 32-bit kernel */ 2171 goto sigill; 2172 2173 case MIPS16e_ld_op: 2174 loadDW: 2175 #ifdef CONFIG_64BIT 2176 /* 2177 * A 32-bit kernel might be running on a 64-bit processor. But 2178 * if we're on a 32-bit processor and an i-cache incoherency 2179 * or race makes us see a 64-bit instruction here the sdl/sdr 2180 * would blow up, so for now we don't handle unaligned 64-bit 2181 * instructions on 32-bit kernels. 2182 */ 2183 if (!access_ok(VERIFY_READ, addr, 8)) 2184 goto sigbus; 2185 2186 LoadDW(addr, value, res); 2187 if (res) 2188 goto fault; 2189 MIPS16e_compute_return_epc(regs, &oldinst); 2190 regs->regs[reg] = value; 2191 break; 2192 #endif /* CONFIG_64BIT */ 2193 2194 /* Cannot handle 64-bit instructions in 32-bit kernel */ 2195 goto sigill; 2196 2197 case MIPS16e_sh_op: 2198 if (!access_ok(VERIFY_WRITE, addr, 2)) 2199 goto sigbus; 2200 2201 MIPS16e_compute_return_epc(regs, &oldinst); 2202 value = regs->regs[reg]; 2203 StoreHW(addr, value, res); 2204 if (res) 2205 goto fault; 2206 break; 2207 2208 case MIPS16e_sw_op: 2209 case MIPS16e_swsp_op: 2210 case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */ 2211 if (!access_ok(VERIFY_WRITE, addr, 4)) 2212 goto sigbus; 2213 2214 MIPS16e_compute_return_epc(regs, &oldinst); 2215 value = regs->regs[reg]; 2216 StoreW(addr, value, res); 2217 if (res) 2218 goto fault; 2219 break; 2220 2221 case MIPS16e_sd_op: 2222 writeDW: 2223 #ifdef CONFIG_64BIT 2224 /* 2225 * A 32-bit kernel might be running on a 64-bit processor. But 2226 * if we're on a 32-bit processor and an i-cache incoherency 2227 * or race makes us see a 64-bit instruction here the sdl/sdr 2228 * would blow up, so for now we don't handle unaligned 64-bit 2229 * instructions on 32-bit kernels. 2230 */ 2231 if (!access_ok(VERIFY_WRITE, addr, 8)) 2232 goto sigbus; 2233 2234 MIPS16e_compute_return_epc(regs, &oldinst); 2235 value = regs->regs[reg]; 2236 StoreDW(addr, value, res); 2237 if (res) 2238 goto fault; 2239 break; 2240 #endif /* CONFIG_64BIT */ 2241 2242 /* Cannot handle 64-bit instructions in 32-bit kernel */ 2243 goto sigill; 2244 2245 default: 2246 /* 2247 * Pheeee... We encountered an yet unknown instruction or 2248 * cache coherence problem. Die sucker, die ... 2249 */ 2250 goto sigill; 2251 } 2252 2253 #ifdef CONFIG_DEBUG_FS 2254 unaligned_instructions++; 2255 #endif 2256 2257 return; 2258 2259 fault: 2260 /* roll back jump/branch */ 2261 regs->cp0_epc = origpc; 2262 regs->regs[31] = orig31; 2263 /* Did we have an exception handler installed? */ 2264 if (fixup_exception(regs)) 2265 return; 2266 2267 die_if_kernel("Unhandled kernel unaligned access", regs); 2268 force_sig(SIGSEGV, current); 2269 2270 return; 2271 2272 sigbus: 2273 die_if_kernel("Unhandled kernel unaligned access", regs); 2274 force_sig(SIGBUS, current); 2275 2276 return; 2277 2278 sigill: 2279 die_if_kernel 2280 ("Unhandled kernel unaligned access or invalid instruction", regs); 2281 force_sig(SIGILL, current); 2282 } 2283 2284 asmlinkage void do_ade(struct pt_regs *regs) 2285 { 2286 enum ctx_state prev_state; 2287 unsigned int __user *pc; 2288 mm_segment_t seg; 2289 2290 prev_state = exception_enter(); 2291 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 2292 1, regs, regs->cp0_badvaddr); 2293 /* 2294 * Did we catch a fault trying to load an instruction? 2295 */ 2296 if (regs->cp0_badvaddr == regs->cp0_epc) 2297 goto sigbus; 2298 2299 if (user_mode(regs) && !test_thread_flag(TIF_FIXADE)) 2300 goto sigbus; 2301 if (unaligned_action == UNALIGNED_ACTION_SIGNAL) 2302 goto sigbus; 2303 2304 /* 2305 * Do branch emulation only if we didn't forward the exception. 2306 * This is all so but ugly ... 2307 */ 2308 2309 /* 2310 * Are we running in microMIPS mode? 2311 */ 2312 if (get_isa16_mode(regs->cp0_epc)) { 2313 /* 2314 * Did we catch a fault trying to load an instruction in 2315 * 16-bit mode? 2316 */ 2317 if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc)) 2318 goto sigbus; 2319 if (unaligned_action == UNALIGNED_ACTION_SHOW) 2320 show_registers(regs); 2321 2322 if (cpu_has_mmips) { 2323 seg = get_fs(); 2324 if (!user_mode(regs)) 2325 set_fs(KERNEL_DS); 2326 emulate_load_store_microMIPS(regs, 2327 (void __user *)regs->cp0_badvaddr); 2328 set_fs(seg); 2329 2330 return; 2331 } 2332 2333 if (cpu_has_mips16) { 2334 seg = get_fs(); 2335 if (!user_mode(regs)) 2336 set_fs(KERNEL_DS); 2337 emulate_load_store_MIPS16e(regs, 2338 (void __user *)regs->cp0_badvaddr); 2339 set_fs(seg); 2340 2341 return; 2342 } 2343 2344 goto sigbus; 2345 } 2346 2347 if (unaligned_action == UNALIGNED_ACTION_SHOW) 2348 show_registers(regs); 2349 pc = (unsigned int __user *)exception_epc(regs); 2350 2351 seg = get_fs(); 2352 if (!user_mode(regs)) 2353 set_fs(KERNEL_DS); 2354 emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc); 2355 set_fs(seg); 2356 2357 return; 2358 2359 sigbus: 2360 die_if_kernel("Kernel unaligned instruction access", regs); 2361 force_sig(SIGBUS, current); 2362 2363 /* 2364 * XXX On return from the signal handler we should advance the epc 2365 */ 2366 exception_exit(prev_state); 2367 } 2368 2369 #ifdef CONFIG_DEBUG_FS 2370 static int __init debugfs_unaligned(void) 2371 { 2372 struct dentry *d; 2373 2374 if (!mips_debugfs_dir) 2375 return -ENODEV; 2376 d = debugfs_create_u32("unaligned_instructions", S_IRUGO, 2377 mips_debugfs_dir, &unaligned_instructions); 2378 if (!d) 2379 return -ENOMEM; 2380 d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR, 2381 mips_debugfs_dir, &unaligned_action); 2382 if (!d) 2383 return -ENOMEM; 2384 return 0; 2385 } 2386 arch_initcall(debugfs_unaligned); 2387 #endif 2388