/*
 * Handle unaligned accesses by emulation.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2014 Imagination Technologies Ltd.
 *
 * This file contains the exception handler for the address error exception,
 * with the special capability to execute faulting instructions in software.
 * The handler does not try to handle the case where the program counter
 * points to an address not aligned to a word boundary.
 *
 * Putting data at unaligned addresses is a bad practice even on Intel, where
 * only performance is affected. Much worse is that such code is non-
 * portable. Because several programs died on MIPS due to alignment
 * problems, I decided to implement this handler anyway, though I originally
 * didn't intend to do this at all for user code.
 *
 * For now I enable fixing of address errors by default to make life easier.
 * However, I intend to disable this at some point in the future, once the
 * alignment problems with user programs have been fixed. For programmers
 * this is the right way to go.
 *
 * Fixing address errors is a per-process option. The option is inherited
 * across fork(2) and execve(2) calls. If you really want to use the
 * option in your user programs - I strongly discourage the use of the
 * software emulation - use the following code in your userland stuff:
 *
 * #include <sys/sysmips.h>
 *
 * ...
 * sysmips(MIPS_FIXADE, x);
 * ...
 *
 * The argument x is 0 to disable software emulation; any other value
 * enables it.
 *
 * Below is a little program to play around with this feature.
42 * 43 * #include <stdio.h> 44 * #include <sys/sysmips.h> 45 * 46 * struct foo { 47 * unsigned char bar[8]; 48 * }; 49 * 50 * main(int argc, char *argv[]) 51 * { 52 * struct foo x = {0, 1, 2, 3, 4, 5, 6, 7}; 53 * unsigned int *p = (unsigned int *) (x.bar + 3); 54 * int i; 55 * 56 * if (argc > 1) 57 * sysmips(MIPS_FIXADE, atoi(argv[1])); 58 * 59 * printf("*p = %08lx\n", *p); 60 * 61 * *p = 0xdeadface; 62 * 63 * for(i = 0; i <= 7; i++) 64 * printf("%02x ", x.bar[i]); 65 * printf("\n"); 66 * } 67 * 68 * Coprocessor loads are not supported; I think this case is unimportant 69 * in the practice. 70 * 71 * TODO: Handle ndc (attempted store to doubleword in uncached memory) 72 * exception for the R6000. 73 * A store crossing a page boundary might be executed only partially. 74 * Undo the partial store in this case. 75 */ 76 #include <linux/context_tracking.h> 77 #include <linux/mm.h> 78 #include <linux/signal.h> 79 #include <linux/smp.h> 80 #include <linux/sched.h> 81 #include <linux/debugfs.h> 82 #include <linux/perf_event.h> 83 84 #include <asm/asm.h> 85 #include <asm/branch.h> 86 #include <asm/byteorder.h> 87 #include <asm/cop2.h> 88 #include <asm/debug.h> 89 #include <asm/fpu.h> 90 #include <asm/fpu_emulator.h> 91 #include <asm/inst.h> 92 #include <linux/uaccess.h> 93 94 #define STR(x) __STR(x) 95 #define __STR(x) #x 96 97 enum { 98 UNALIGNED_ACTION_QUIET, 99 UNALIGNED_ACTION_SIGNAL, 100 UNALIGNED_ACTION_SHOW, 101 }; 102 #ifdef CONFIG_DEBUG_FS 103 static u32 unaligned_instructions; 104 static u32 unaligned_action; 105 #else 106 #define unaligned_action UNALIGNED_ACTION_QUIET 107 #endif 108 extern void show_registers(struct pt_regs *regs); 109 110 #ifdef __BIG_ENDIAN 111 #define _LoadHW(addr, value, res, type) \ 112 do { \ 113 __asm__ __volatile__ (".set\tnoat\n" \ 114 "1:\t"type##_lb("%0", "0(%2)")"\n" \ 115 "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\ 116 "sll\t%0, 0x8\n\t" \ 117 "or\t%0, $1\n\t" \ 118 "li\t%1, 0\n" \ 119 "3:\t.set\tat\n\t" \ 120 ".insn\n\t" \ 121 
".section\t.fixup,\"ax\"\n\t" \ 122 "4:\tli\t%1, %3\n\t" \ 123 "j\t3b\n\t" \ 124 ".previous\n\t" \ 125 ".section\t__ex_table,\"a\"\n\t" \ 126 STR(PTR)"\t1b, 4b\n\t" \ 127 STR(PTR)"\t2b, 4b\n\t" \ 128 ".previous" \ 129 : "=&r" (value), "=r" (res) \ 130 : "r" (addr), "i" (-EFAULT)); \ 131 } while(0) 132 133 #ifdef CONFIG_CPU_HAS_LOAD_STORE_LR 134 #define _LoadW(addr, value, res, type) \ 135 do { \ 136 __asm__ __volatile__ ( \ 137 "1:\t"type##_lwl("%0", "(%2)")"\n" \ 138 "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\ 139 "li\t%1, 0\n" \ 140 "3:\n\t" \ 141 ".insn\n\t" \ 142 ".section\t.fixup,\"ax\"\n\t" \ 143 "4:\tli\t%1, %3\n\t" \ 144 "j\t3b\n\t" \ 145 ".previous\n\t" \ 146 ".section\t__ex_table,\"a\"\n\t" \ 147 STR(PTR)"\t1b, 4b\n\t" \ 148 STR(PTR)"\t2b, 4b\n\t" \ 149 ".previous" \ 150 : "=&r" (value), "=r" (res) \ 151 : "r" (addr), "i" (-EFAULT)); \ 152 } while(0) 153 154 #else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ 155 /* For CPUs without lwl instruction */ 156 #define _LoadW(addr, value, res, type) \ 157 do { \ 158 __asm__ __volatile__ ( \ 159 ".set\tpush\n" \ 160 ".set\tnoat\n\t" \ 161 "1:"type##_lb("%0", "0(%2)")"\n\t" \ 162 "2:"type##_lbu("$1", "1(%2)")"\n\t" \ 163 "sll\t%0, 0x8\n\t" \ 164 "or\t%0, $1\n\t" \ 165 "3:"type##_lbu("$1", "2(%2)")"\n\t" \ 166 "sll\t%0, 0x8\n\t" \ 167 "or\t%0, $1\n\t" \ 168 "4:"type##_lbu("$1", "3(%2)")"\n\t" \ 169 "sll\t%0, 0x8\n\t" \ 170 "or\t%0, $1\n\t" \ 171 "li\t%1, 0\n" \ 172 ".set\tpop\n" \ 173 "10:\n\t" \ 174 ".insn\n\t" \ 175 ".section\t.fixup,\"ax\"\n\t" \ 176 "11:\tli\t%1, %3\n\t" \ 177 "j\t10b\n\t" \ 178 ".previous\n\t" \ 179 ".section\t__ex_table,\"a\"\n\t" \ 180 STR(PTR)"\t1b, 11b\n\t" \ 181 STR(PTR)"\t2b, 11b\n\t" \ 182 STR(PTR)"\t3b, 11b\n\t" \ 183 STR(PTR)"\t4b, 11b\n\t" \ 184 ".previous" \ 185 : "=&r" (value), "=r" (res) \ 186 : "r" (addr), "i" (-EFAULT)); \ 187 } while(0) 188 189 #endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ 190 191 #define _LoadHWU(addr, value, res, type) \ 192 do { \ 193 __asm__ __volatile__ ( \ 194 
".set\tnoat\n" \ 195 "1:\t"type##_lbu("%0", "0(%2)")"\n" \ 196 "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\ 197 "sll\t%0, 0x8\n\t" \ 198 "or\t%0, $1\n\t" \ 199 "li\t%1, 0\n" \ 200 "3:\n\t" \ 201 ".insn\n\t" \ 202 ".set\tat\n\t" \ 203 ".section\t.fixup,\"ax\"\n\t" \ 204 "4:\tli\t%1, %3\n\t" \ 205 "j\t3b\n\t" \ 206 ".previous\n\t" \ 207 ".section\t__ex_table,\"a\"\n\t" \ 208 STR(PTR)"\t1b, 4b\n\t" \ 209 STR(PTR)"\t2b, 4b\n\t" \ 210 ".previous" \ 211 : "=&r" (value), "=r" (res) \ 212 : "r" (addr), "i" (-EFAULT)); \ 213 } while(0) 214 215 #ifdef CONFIG_CPU_HAS_LOAD_STORE_LR 216 #define _LoadWU(addr, value, res, type) \ 217 do { \ 218 __asm__ __volatile__ ( \ 219 "1:\t"type##_lwl("%0", "(%2)")"\n" \ 220 "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\ 221 "dsll\t%0, %0, 32\n\t" \ 222 "dsrl\t%0, %0, 32\n\t" \ 223 "li\t%1, 0\n" \ 224 "3:\n\t" \ 225 ".insn\n\t" \ 226 "\t.section\t.fixup,\"ax\"\n\t" \ 227 "4:\tli\t%1, %3\n\t" \ 228 "j\t3b\n\t" \ 229 ".previous\n\t" \ 230 ".section\t__ex_table,\"a\"\n\t" \ 231 STR(PTR)"\t1b, 4b\n\t" \ 232 STR(PTR)"\t2b, 4b\n\t" \ 233 ".previous" \ 234 : "=&r" (value), "=r" (res) \ 235 : "r" (addr), "i" (-EFAULT)); \ 236 } while(0) 237 238 #define _LoadDW(addr, value, res) \ 239 do { \ 240 __asm__ __volatile__ ( \ 241 "1:\tldl\t%0, (%2)\n" \ 242 "2:\tldr\t%0, 7(%2)\n\t" \ 243 "li\t%1, 0\n" \ 244 "3:\n\t" \ 245 ".insn\n\t" \ 246 "\t.section\t.fixup,\"ax\"\n\t" \ 247 "4:\tli\t%1, %3\n\t" \ 248 "j\t3b\n\t" \ 249 ".previous\n\t" \ 250 ".section\t__ex_table,\"a\"\n\t" \ 251 STR(PTR)"\t1b, 4b\n\t" \ 252 STR(PTR)"\t2b, 4b\n\t" \ 253 ".previous" \ 254 : "=&r" (value), "=r" (res) \ 255 : "r" (addr), "i" (-EFAULT)); \ 256 } while(0) 257 258 #else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ 259 /* For CPUs without lwl and ldl instructions */ 260 #define _LoadWU(addr, value, res, type) \ 261 do { \ 262 __asm__ __volatile__ ( \ 263 ".set\tpush\n\t" \ 264 ".set\tnoat\n\t" \ 265 "1:"type##_lbu("%0", "0(%2)")"\n\t" \ 266 "2:"type##_lbu("$1", "1(%2)")"\n\t" \ 267 "sll\t%0, 0x8\n\t" 
\ 268 "or\t%0, $1\n\t" \ 269 "3:"type##_lbu("$1", "2(%2)")"\n\t" \ 270 "sll\t%0, 0x8\n\t" \ 271 "or\t%0, $1\n\t" \ 272 "4:"type##_lbu("$1", "3(%2)")"\n\t" \ 273 "sll\t%0, 0x8\n\t" \ 274 "or\t%0, $1\n\t" \ 275 "li\t%1, 0\n" \ 276 ".set\tpop\n" \ 277 "10:\n\t" \ 278 ".insn\n\t" \ 279 ".section\t.fixup,\"ax\"\n\t" \ 280 "11:\tli\t%1, %3\n\t" \ 281 "j\t10b\n\t" \ 282 ".previous\n\t" \ 283 ".section\t__ex_table,\"a\"\n\t" \ 284 STR(PTR)"\t1b, 11b\n\t" \ 285 STR(PTR)"\t2b, 11b\n\t" \ 286 STR(PTR)"\t3b, 11b\n\t" \ 287 STR(PTR)"\t4b, 11b\n\t" \ 288 ".previous" \ 289 : "=&r" (value), "=r" (res) \ 290 : "r" (addr), "i" (-EFAULT)); \ 291 } while(0) 292 293 #define _LoadDW(addr, value, res) \ 294 do { \ 295 __asm__ __volatile__ ( \ 296 ".set\tpush\n\t" \ 297 ".set\tnoat\n\t" \ 298 "1:lb\t%0, 0(%2)\n\t" \ 299 "2:lbu\t $1, 1(%2)\n\t" \ 300 "dsll\t%0, 0x8\n\t" \ 301 "or\t%0, $1\n\t" \ 302 "3:lbu\t$1, 2(%2)\n\t" \ 303 "dsll\t%0, 0x8\n\t" \ 304 "or\t%0, $1\n\t" \ 305 "4:lbu\t$1, 3(%2)\n\t" \ 306 "dsll\t%0, 0x8\n\t" \ 307 "or\t%0, $1\n\t" \ 308 "5:lbu\t$1, 4(%2)\n\t" \ 309 "dsll\t%0, 0x8\n\t" \ 310 "or\t%0, $1\n\t" \ 311 "6:lbu\t$1, 5(%2)\n\t" \ 312 "dsll\t%0, 0x8\n\t" \ 313 "or\t%0, $1\n\t" \ 314 "7:lbu\t$1, 6(%2)\n\t" \ 315 "dsll\t%0, 0x8\n\t" \ 316 "or\t%0, $1\n\t" \ 317 "8:lbu\t$1, 7(%2)\n\t" \ 318 "dsll\t%0, 0x8\n\t" \ 319 "or\t%0, $1\n\t" \ 320 "li\t%1, 0\n" \ 321 ".set\tpop\n\t" \ 322 "10:\n\t" \ 323 ".insn\n\t" \ 324 ".section\t.fixup,\"ax\"\n\t" \ 325 "11:\tli\t%1, %3\n\t" \ 326 "j\t10b\n\t" \ 327 ".previous\n\t" \ 328 ".section\t__ex_table,\"a\"\n\t" \ 329 STR(PTR)"\t1b, 11b\n\t" \ 330 STR(PTR)"\t2b, 11b\n\t" \ 331 STR(PTR)"\t3b, 11b\n\t" \ 332 STR(PTR)"\t4b, 11b\n\t" \ 333 STR(PTR)"\t5b, 11b\n\t" \ 334 STR(PTR)"\t6b, 11b\n\t" \ 335 STR(PTR)"\t7b, 11b\n\t" \ 336 STR(PTR)"\t8b, 11b\n\t" \ 337 ".previous" \ 338 : "=&r" (value), "=r" (res) \ 339 : "r" (addr), "i" (-EFAULT)); \ 340 } while(0) 341 342 #endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ 343 344 345 #define _StoreHW(addr, 
value, res, type) \ 346 do { \ 347 __asm__ __volatile__ ( \ 348 ".set\tnoat\n" \ 349 "1:\t"type##_sb("%1", "1(%2)")"\n" \ 350 "srl\t$1, %1, 0x8\n" \ 351 "2:\t"type##_sb("$1", "0(%2)")"\n" \ 352 ".set\tat\n\t" \ 353 "li\t%0, 0\n" \ 354 "3:\n\t" \ 355 ".insn\n\t" \ 356 ".section\t.fixup,\"ax\"\n\t" \ 357 "4:\tli\t%0, %3\n\t" \ 358 "j\t3b\n\t" \ 359 ".previous\n\t" \ 360 ".section\t__ex_table,\"a\"\n\t" \ 361 STR(PTR)"\t1b, 4b\n\t" \ 362 STR(PTR)"\t2b, 4b\n\t" \ 363 ".previous" \ 364 : "=r" (res) \ 365 : "r" (value), "r" (addr), "i" (-EFAULT));\ 366 } while(0) 367 368 #ifdef CONFIG_CPU_HAS_LOAD_STORE_LR 369 #define _StoreW(addr, value, res, type) \ 370 do { \ 371 __asm__ __volatile__ ( \ 372 "1:\t"type##_swl("%1", "(%2)")"\n" \ 373 "2:\t"type##_swr("%1", "3(%2)")"\n\t"\ 374 "li\t%0, 0\n" \ 375 "3:\n\t" \ 376 ".insn\n\t" \ 377 ".section\t.fixup,\"ax\"\n\t" \ 378 "4:\tli\t%0, %3\n\t" \ 379 "j\t3b\n\t" \ 380 ".previous\n\t" \ 381 ".section\t__ex_table,\"a\"\n\t" \ 382 STR(PTR)"\t1b, 4b\n\t" \ 383 STR(PTR)"\t2b, 4b\n\t" \ 384 ".previous" \ 385 : "=r" (res) \ 386 : "r" (value), "r" (addr), "i" (-EFAULT)); \ 387 } while(0) 388 389 #define _StoreDW(addr, value, res) \ 390 do { \ 391 __asm__ __volatile__ ( \ 392 "1:\tsdl\t%1,(%2)\n" \ 393 "2:\tsdr\t%1, 7(%2)\n\t" \ 394 "li\t%0, 0\n" \ 395 "3:\n\t" \ 396 ".insn\n\t" \ 397 ".section\t.fixup,\"ax\"\n\t" \ 398 "4:\tli\t%0, %3\n\t" \ 399 "j\t3b\n\t" \ 400 ".previous\n\t" \ 401 ".section\t__ex_table,\"a\"\n\t" \ 402 STR(PTR)"\t1b, 4b\n\t" \ 403 STR(PTR)"\t2b, 4b\n\t" \ 404 ".previous" \ 405 : "=r" (res) \ 406 : "r" (value), "r" (addr), "i" (-EFAULT)); \ 407 } while(0) 408 409 #else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ 410 #define _StoreW(addr, value, res, type) \ 411 do { \ 412 __asm__ __volatile__ ( \ 413 ".set\tpush\n\t" \ 414 ".set\tnoat\n\t" \ 415 "1:"type##_sb("%1", "3(%2)")"\n\t" \ 416 "srl\t$1, %1, 0x8\n\t" \ 417 "2:"type##_sb("$1", "2(%2)")"\n\t" \ 418 "srl\t$1, $1, 0x8\n\t" \ 419 "3:"type##_sb("$1", "1(%2)")"\n\t" \ 420 
"srl\t$1, $1, 0x8\n\t" \ 421 "4:"type##_sb("$1", "0(%2)")"\n\t" \ 422 ".set\tpop\n\t" \ 423 "li\t%0, 0\n" \ 424 "10:\n\t" \ 425 ".insn\n\t" \ 426 ".section\t.fixup,\"ax\"\n\t" \ 427 "11:\tli\t%0, %3\n\t" \ 428 "j\t10b\n\t" \ 429 ".previous\n\t" \ 430 ".section\t__ex_table,\"a\"\n\t" \ 431 STR(PTR)"\t1b, 11b\n\t" \ 432 STR(PTR)"\t2b, 11b\n\t" \ 433 STR(PTR)"\t3b, 11b\n\t" \ 434 STR(PTR)"\t4b, 11b\n\t" \ 435 ".previous" \ 436 : "=&r" (res) \ 437 : "r" (value), "r" (addr), "i" (-EFAULT) \ 438 : "memory"); \ 439 } while(0) 440 441 #define _StoreDW(addr, value, res) \ 442 do { \ 443 __asm__ __volatile__ ( \ 444 ".set\tpush\n\t" \ 445 ".set\tnoat\n\t" \ 446 "1:sb\t%1, 7(%2)\n\t" \ 447 "dsrl\t$1, %1, 0x8\n\t" \ 448 "2:sb\t$1, 6(%2)\n\t" \ 449 "dsrl\t$1, $1, 0x8\n\t" \ 450 "3:sb\t$1, 5(%2)\n\t" \ 451 "dsrl\t$1, $1, 0x8\n\t" \ 452 "4:sb\t$1, 4(%2)\n\t" \ 453 "dsrl\t$1, $1, 0x8\n\t" \ 454 "5:sb\t$1, 3(%2)\n\t" \ 455 "dsrl\t$1, $1, 0x8\n\t" \ 456 "6:sb\t$1, 2(%2)\n\t" \ 457 "dsrl\t$1, $1, 0x8\n\t" \ 458 "7:sb\t$1, 1(%2)\n\t" \ 459 "dsrl\t$1, $1, 0x8\n\t" \ 460 "8:sb\t$1, 0(%2)\n\t" \ 461 "dsrl\t$1, $1, 0x8\n\t" \ 462 ".set\tpop\n\t" \ 463 "li\t%0, 0\n" \ 464 "10:\n\t" \ 465 ".insn\n\t" \ 466 ".section\t.fixup,\"ax\"\n\t" \ 467 "11:\tli\t%0, %3\n\t" \ 468 "j\t10b\n\t" \ 469 ".previous\n\t" \ 470 ".section\t__ex_table,\"a\"\n\t" \ 471 STR(PTR)"\t1b, 11b\n\t" \ 472 STR(PTR)"\t2b, 11b\n\t" \ 473 STR(PTR)"\t3b, 11b\n\t" \ 474 STR(PTR)"\t4b, 11b\n\t" \ 475 STR(PTR)"\t5b, 11b\n\t" \ 476 STR(PTR)"\t6b, 11b\n\t" \ 477 STR(PTR)"\t7b, 11b\n\t" \ 478 STR(PTR)"\t8b, 11b\n\t" \ 479 ".previous" \ 480 : "=&r" (res) \ 481 : "r" (value), "r" (addr), "i" (-EFAULT) \ 482 : "memory"); \ 483 } while(0) 484 485 #endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ 486 487 #else /* __BIG_ENDIAN */ 488 489 #define _LoadHW(addr, value, res, type) \ 490 do { \ 491 __asm__ __volatile__ (".set\tnoat\n" \ 492 "1:\t"type##_lb("%0", "1(%2)")"\n" \ 493 "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\ 494 "sll\t%0, 0x8\n\t" \ 495 
"or\t%0, $1\n\t" \ 496 "li\t%1, 0\n" \ 497 "3:\t.set\tat\n\t" \ 498 ".insn\n\t" \ 499 ".section\t.fixup,\"ax\"\n\t" \ 500 "4:\tli\t%1, %3\n\t" \ 501 "j\t3b\n\t" \ 502 ".previous\n\t" \ 503 ".section\t__ex_table,\"a\"\n\t" \ 504 STR(PTR)"\t1b, 4b\n\t" \ 505 STR(PTR)"\t2b, 4b\n\t" \ 506 ".previous" \ 507 : "=&r" (value), "=r" (res) \ 508 : "r" (addr), "i" (-EFAULT)); \ 509 } while(0) 510 511 #ifdef CONFIG_CPU_HAS_LOAD_STORE_LR 512 #define _LoadW(addr, value, res, type) \ 513 do { \ 514 __asm__ __volatile__ ( \ 515 "1:\t"type##_lwl("%0", "3(%2)")"\n" \ 516 "2:\t"type##_lwr("%0", "(%2)")"\n\t"\ 517 "li\t%1, 0\n" \ 518 "3:\n\t" \ 519 ".insn\n\t" \ 520 ".section\t.fixup,\"ax\"\n\t" \ 521 "4:\tli\t%1, %3\n\t" \ 522 "j\t3b\n\t" \ 523 ".previous\n\t" \ 524 ".section\t__ex_table,\"a\"\n\t" \ 525 STR(PTR)"\t1b, 4b\n\t" \ 526 STR(PTR)"\t2b, 4b\n\t" \ 527 ".previous" \ 528 : "=&r" (value), "=r" (res) \ 529 : "r" (addr), "i" (-EFAULT)); \ 530 } while(0) 531 532 #else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ 533 /* For CPUs without lwl instruction */ 534 #define _LoadW(addr, value, res, type) \ 535 do { \ 536 __asm__ __volatile__ ( \ 537 ".set\tpush\n" \ 538 ".set\tnoat\n\t" \ 539 "1:"type##_lb("%0", "3(%2)")"\n\t" \ 540 "2:"type##_lbu("$1", "2(%2)")"\n\t" \ 541 "sll\t%0, 0x8\n\t" \ 542 "or\t%0, $1\n\t" \ 543 "3:"type##_lbu("$1", "1(%2)")"\n\t" \ 544 "sll\t%0, 0x8\n\t" \ 545 "or\t%0, $1\n\t" \ 546 "4:"type##_lbu("$1", "0(%2)")"\n\t" \ 547 "sll\t%0, 0x8\n\t" \ 548 "or\t%0, $1\n\t" \ 549 "li\t%1, 0\n" \ 550 ".set\tpop\n" \ 551 "10:\n\t" \ 552 ".insn\n\t" \ 553 ".section\t.fixup,\"ax\"\n\t" \ 554 "11:\tli\t%1, %3\n\t" \ 555 "j\t10b\n\t" \ 556 ".previous\n\t" \ 557 ".section\t__ex_table,\"a\"\n\t" \ 558 STR(PTR)"\t1b, 11b\n\t" \ 559 STR(PTR)"\t2b, 11b\n\t" \ 560 STR(PTR)"\t3b, 11b\n\t" \ 561 STR(PTR)"\t4b, 11b\n\t" \ 562 ".previous" \ 563 : "=&r" (value), "=r" (res) \ 564 : "r" (addr), "i" (-EFAULT)); \ 565 } while(0) 566 567 #endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ 568 569 570 #define 
_LoadHWU(addr, value, res, type) \ 571 do { \ 572 __asm__ __volatile__ ( \ 573 ".set\tnoat\n" \ 574 "1:\t"type##_lbu("%0", "1(%2)")"\n" \ 575 "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\ 576 "sll\t%0, 0x8\n\t" \ 577 "or\t%0, $1\n\t" \ 578 "li\t%1, 0\n" \ 579 "3:\n\t" \ 580 ".insn\n\t" \ 581 ".set\tat\n\t" \ 582 ".section\t.fixup,\"ax\"\n\t" \ 583 "4:\tli\t%1, %3\n\t" \ 584 "j\t3b\n\t" \ 585 ".previous\n\t" \ 586 ".section\t__ex_table,\"a\"\n\t" \ 587 STR(PTR)"\t1b, 4b\n\t" \ 588 STR(PTR)"\t2b, 4b\n\t" \ 589 ".previous" \ 590 : "=&r" (value), "=r" (res) \ 591 : "r" (addr), "i" (-EFAULT)); \ 592 } while(0) 593 594 #ifdef CONFIG_CPU_HAS_LOAD_STORE_LR 595 #define _LoadWU(addr, value, res, type) \ 596 do { \ 597 __asm__ __volatile__ ( \ 598 "1:\t"type##_lwl("%0", "3(%2)")"\n" \ 599 "2:\t"type##_lwr("%0", "(%2)")"\n\t"\ 600 "dsll\t%0, %0, 32\n\t" \ 601 "dsrl\t%0, %0, 32\n\t" \ 602 "li\t%1, 0\n" \ 603 "3:\n\t" \ 604 ".insn\n\t" \ 605 "\t.section\t.fixup,\"ax\"\n\t" \ 606 "4:\tli\t%1, %3\n\t" \ 607 "j\t3b\n\t" \ 608 ".previous\n\t" \ 609 ".section\t__ex_table,\"a\"\n\t" \ 610 STR(PTR)"\t1b, 4b\n\t" \ 611 STR(PTR)"\t2b, 4b\n\t" \ 612 ".previous" \ 613 : "=&r" (value), "=r" (res) \ 614 : "r" (addr), "i" (-EFAULT)); \ 615 } while(0) 616 617 #define _LoadDW(addr, value, res) \ 618 do { \ 619 __asm__ __volatile__ ( \ 620 "1:\tldl\t%0, 7(%2)\n" \ 621 "2:\tldr\t%0, (%2)\n\t" \ 622 "li\t%1, 0\n" \ 623 "3:\n\t" \ 624 ".insn\n\t" \ 625 "\t.section\t.fixup,\"ax\"\n\t" \ 626 "4:\tli\t%1, %3\n\t" \ 627 "j\t3b\n\t" \ 628 ".previous\n\t" \ 629 ".section\t__ex_table,\"a\"\n\t" \ 630 STR(PTR)"\t1b, 4b\n\t" \ 631 STR(PTR)"\t2b, 4b\n\t" \ 632 ".previous" \ 633 : "=&r" (value), "=r" (res) \ 634 : "r" (addr), "i" (-EFAULT)); \ 635 } while(0) 636 637 #else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ 638 /* For CPUs without lwl and ldl instructions */ 639 #define _LoadWU(addr, value, res, type) \ 640 do { \ 641 __asm__ __volatile__ ( \ 642 ".set\tpush\n\t" \ 643 ".set\tnoat\n\t" \ 644 "1:"type##_lbu("%0", 
"3(%2)")"\n\t" \ 645 "2:"type##_lbu("$1", "2(%2)")"\n\t" \ 646 "sll\t%0, 0x8\n\t" \ 647 "or\t%0, $1\n\t" \ 648 "3:"type##_lbu("$1", "1(%2)")"\n\t" \ 649 "sll\t%0, 0x8\n\t" \ 650 "or\t%0, $1\n\t" \ 651 "4:"type##_lbu("$1", "0(%2)")"\n\t" \ 652 "sll\t%0, 0x8\n\t" \ 653 "or\t%0, $1\n\t" \ 654 "li\t%1, 0\n" \ 655 ".set\tpop\n" \ 656 "10:\n\t" \ 657 ".insn\n\t" \ 658 ".section\t.fixup,\"ax\"\n\t" \ 659 "11:\tli\t%1, %3\n\t" \ 660 "j\t10b\n\t" \ 661 ".previous\n\t" \ 662 ".section\t__ex_table,\"a\"\n\t" \ 663 STR(PTR)"\t1b, 11b\n\t" \ 664 STR(PTR)"\t2b, 11b\n\t" \ 665 STR(PTR)"\t3b, 11b\n\t" \ 666 STR(PTR)"\t4b, 11b\n\t" \ 667 ".previous" \ 668 : "=&r" (value), "=r" (res) \ 669 : "r" (addr), "i" (-EFAULT)); \ 670 } while(0) 671 672 #define _LoadDW(addr, value, res) \ 673 do { \ 674 __asm__ __volatile__ ( \ 675 ".set\tpush\n\t" \ 676 ".set\tnoat\n\t" \ 677 "1:lb\t%0, 7(%2)\n\t" \ 678 "2:lbu\t$1, 6(%2)\n\t" \ 679 "dsll\t%0, 0x8\n\t" \ 680 "or\t%0, $1\n\t" \ 681 "3:lbu\t$1, 5(%2)\n\t" \ 682 "dsll\t%0, 0x8\n\t" \ 683 "or\t%0, $1\n\t" \ 684 "4:lbu\t$1, 4(%2)\n\t" \ 685 "dsll\t%0, 0x8\n\t" \ 686 "or\t%0, $1\n\t" \ 687 "5:lbu\t$1, 3(%2)\n\t" \ 688 "dsll\t%0, 0x8\n\t" \ 689 "or\t%0, $1\n\t" \ 690 "6:lbu\t$1, 2(%2)\n\t" \ 691 "dsll\t%0, 0x8\n\t" \ 692 "or\t%0, $1\n\t" \ 693 "7:lbu\t$1, 1(%2)\n\t" \ 694 "dsll\t%0, 0x8\n\t" \ 695 "or\t%0, $1\n\t" \ 696 "8:lbu\t$1, 0(%2)\n\t" \ 697 "dsll\t%0, 0x8\n\t" \ 698 "or\t%0, $1\n\t" \ 699 "li\t%1, 0\n" \ 700 ".set\tpop\n\t" \ 701 "10:\n\t" \ 702 ".insn\n\t" \ 703 ".section\t.fixup,\"ax\"\n\t" \ 704 "11:\tli\t%1, %3\n\t" \ 705 "j\t10b\n\t" \ 706 ".previous\n\t" \ 707 ".section\t__ex_table,\"a\"\n\t" \ 708 STR(PTR)"\t1b, 11b\n\t" \ 709 STR(PTR)"\t2b, 11b\n\t" \ 710 STR(PTR)"\t3b, 11b\n\t" \ 711 STR(PTR)"\t4b, 11b\n\t" \ 712 STR(PTR)"\t5b, 11b\n\t" \ 713 STR(PTR)"\t6b, 11b\n\t" \ 714 STR(PTR)"\t7b, 11b\n\t" \ 715 STR(PTR)"\t8b, 11b\n\t" \ 716 ".previous" \ 717 : "=&r" (value), "=r" (res) \ 718 : "r" (addr), "i" (-EFAULT)); \ 719 } while(0) 720 
#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ 721 722 #define _StoreHW(addr, value, res, type) \ 723 do { \ 724 __asm__ __volatile__ ( \ 725 ".set\tnoat\n" \ 726 "1:\t"type##_sb("%1", "0(%2)")"\n" \ 727 "srl\t$1,%1, 0x8\n" \ 728 "2:\t"type##_sb("$1", "1(%2)")"\n" \ 729 ".set\tat\n\t" \ 730 "li\t%0, 0\n" \ 731 "3:\n\t" \ 732 ".insn\n\t" \ 733 ".section\t.fixup,\"ax\"\n\t" \ 734 "4:\tli\t%0, %3\n\t" \ 735 "j\t3b\n\t" \ 736 ".previous\n\t" \ 737 ".section\t__ex_table,\"a\"\n\t" \ 738 STR(PTR)"\t1b, 4b\n\t" \ 739 STR(PTR)"\t2b, 4b\n\t" \ 740 ".previous" \ 741 : "=r" (res) \ 742 : "r" (value), "r" (addr), "i" (-EFAULT));\ 743 } while(0) 744 745 #ifdef CONFIG_CPU_HAS_LOAD_STORE_LR 746 #define _StoreW(addr, value, res, type) \ 747 do { \ 748 __asm__ __volatile__ ( \ 749 "1:\t"type##_swl("%1", "3(%2)")"\n" \ 750 "2:\t"type##_swr("%1", "(%2)")"\n\t"\ 751 "li\t%0, 0\n" \ 752 "3:\n\t" \ 753 ".insn\n\t" \ 754 ".section\t.fixup,\"ax\"\n\t" \ 755 "4:\tli\t%0, %3\n\t" \ 756 "j\t3b\n\t" \ 757 ".previous\n\t" \ 758 ".section\t__ex_table,\"a\"\n\t" \ 759 STR(PTR)"\t1b, 4b\n\t" \ 760 STR(PTR)"\t2b, 4b\n\t" \ 761 ".previous" \ 762 : "=r" (res) \ 763 : "r" (value), "r" (addr), "i" (-EFAULT)); \ 764 } while(0) 765 766 #define _StoreDW(addr, value, res) \ 767 do { \ 768 __asm__ __volatile__ ( \ 769 "1:\tsdl\t%1, 7(%2)\n" \ 770 "2:\tsdr\t%1, (%2)\n\t" \ 771 "li\t%0, 0\n" \ 772 "3:\n\t" \ 773 ".insn\n\t" \ 774 ".section\t.fixup,\"ax\"\n\t" \ 775 "4:\tli\t%0, %3\n\t" \ 776 "j\t3b\n\t" \ 777 ".previous\n\t" \ 778 ".section\t__ex_table,\"a\"\n\t" \ 779 STR(PTR)"\t1b, 4b\n\t" \ 780 STR(PTR)"\t2b, 4b\n\t" \ 781 ".previous" \ 782 : "=r" (res) \ 783 : "r" (value), "r" (addr), "i" (-EFAULT)); \ 784 } while(0) 785 786 #else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ 787 /* For CPUs without swl and sdl instructions */ 788 #define _StoreW(addr, value, res, type) \ 789 do { \ 790 __asm__ __volatile__ ( \ 791 ".set\tpush\n\t" \ 792 ".set\tnoat\n\t" \ 793 "1:"type##_sb("%1", "0(%2)")"\n\t" \ 794 "srl\t$1, %1, 
0x8\n\t" \ 795 "2:"type##_sb("$1", "1(%2)")"\n\t" \ 796 "srl\t$1, $1, 0x8\n\t" \ 797 "3:"type##_sb("$1", "2(%2)")"\n\t" \ 798 "srl\t$1, $1, 0x8\n\t" \ 799 "4:"type##_sb("$1", "3(%2)")"\n\t" \ 800 ".set\tpop\n\t" \ 801 "li\t%0, 0\n" \ 802 "10:\n\t" \ 803 ".insn\n\t" \ 804 ".section\t.fixup,\"ax\"\n\t" \ 805 "11:\tli\t%0, %3\n\t" \ 806 "j\t10b\n\t" \ 807 ".previous\n\t" \ 808 ".section\t__ex_table,\"a\"\n\t" \ 809 STR(PTR)"\t1b, 11b\n\t" \ 810 STR(PTR)"\t2b, 11b\n\t" \ 811 STR(PTR)"\t3b, 11b\n\t" \ 812 STR(PTR)"\t4b, 11b\n\t" \ 813 ".previous" \ 814 : "=&r" (res) \ 815 : "r" (value), "r" (addr), "i" (-EFAULT) \ 816 : "memory"); \ 817 } while(0) 818 819 #define _StoreDW(addr, value, res) \ 820 do { \ 821 __asm__ __volatile__ ( \ 822 ".set\tpush\n\t" \ 823 ".set\tnoat\n\t" \ 824 "1:sb\t%1, 0(%2)\n\t" \ 825 "dsrl\t$1, %1, 0x8\n\t" \ 826 "2:sb\t$1, 1(%2)\n\t" \ 827 "dsrl\t$1, $1, 0x8\n\t" \ 828 "3:sb\t$1, 2(%2)\n\t" \ 829 "dsrl\t$1, $1, 0x8\n\t" \ 830 "4:sb\t$1, 3(%2)\n\t" \ 831 "dsrl\t$1, $1, 0x8\n\t" \ 832 "5:sb\t$1, 4(%2)\n\t" \ 833 "dsrl\t$1, $1, 0x8\n\t" \ 834 "6:sb\t$1, 5(%2)\n\t" \ 835 "dsrl\t$1, $1, 0x8\n\t" \ 836 "7:sb\t$1, 6(%2)\n\t" \ 837 "dsrl\t$1, $1, 0x8\n\t" \ 838 "8:sb\t$1, 7(%2)\n\t" \ 839 "dsrl\t$1, $1, 0x8\n\t" \ 840 ".set\tpop\n\t" \ 841 "li\t%0, 0\n" \ 842 "10:\n\t" \ 843 ".insn\n\t" \ 844 ".section\t.fixup,\"ax\"\n\t" \ 845 "11:\tli\t%0, %3\n\t" \ 846 "j\t10b\n\t" \ 847 ".previous\n\t" \ 848 ".section\t__ex_table,\"a\"\n\t" \ 849 STR(PTR)"\t1b, 11b\n\t" \ 850 STR(PTR)"\t2b, 11b\n\t" \ 851 STR(PTR)"\t3b, 11b\n\t" \ 852 STR(PTR)"\t4b, 11b\n\t" \ 853 STR(PTR)"\t5b, 11b\n\t" \ 854 STR(PTR)"\t6b, 11b\n\t" \ 855 STR(PTR)"\t7b, 11b\n\t" \ 856 STR(PTR)"\t8b, 11b\n\t" \ 857 ".previous" \ 858 : "=&r" (res) \ 859 : "r" (value), "r" (addr), "i" (-EFAULT) \ 860 : "memory"); \ 861 } while(0) 862 863 #endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ 864 #endif 865 866 #define LoadHWU(addr, value, res) _LoadHWU(addr, value, res, kernel) 867 #define LoadHWUE(addr, value, 
res) _LoadHWU(addr, value, res, user) 868 #define LoadWU(addr, value, res) _LoadWU(addr, value, res, kernel) 869 #define LoadWUE(addr, value, res) _LoadWU(addr, value, res, user) 870 #define LoadHW(addr, value, res) _LoadHW(addr, value, res, kernel) 871 #define LoadHWE(addr, value, res) _LoadHW(addr, value, res, user) 872 #define LoadW(addr, value, res) _LoadW(addr, value, res, kernel) 873 #define LoadWE(addr, value, res) _LoadW(addr, value, res, user) 874 #define LoadDW(addr, value, res) _LoadDW(addr, value, res) 875 876 #define StoreHW(addr, value, res) _StoreHW(addr, value, res, kernel) 877 #define StoreHWE(addr, value, res) _StoreHW(addr, value, res, user) 878 #define StoreW(addr, value, res) _StoreW(addr, value, res, kernel) 879 #define StoreWE(addr, value, res) _StoreW(addr, value, res, user) 880 #define StoreDW(addr, value, res) _StoreDW(addr, value, res) 881 882 static void emulate_load_store_insn(struct pt_regs *regs, 883 void __user *addr, unsigned int __user *pc) 884 { 885 unsigned long origpc, orig31, value; 886 union mips_instruction insn; 887 unsigned int res; 888 #ifdef CONFIG_EVA 889 mm_segment_t seg; 890 #endif 891 origpc = (unsigned long)pc; 892 orig31 = regs->regs[31]; 893 894 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); 895 896 /* 897 * This load never faults. 898 */ 899 __get_user(insn.word, pc); 900 901 switch (insn.i_format.opcode) { 902 /* 903 * These are instructions that a compiler doesn't generate. We 904 * can assume therefore that the code is MIPS-aware and 905 * really buggy. Emulating these instructions would break the 906 * semantics anyway. 907 */ 908 case ll_op: 909 case lld_op: 910 case sc_op: 911 case scd_op: 912 913 /* 914 * For these instructions the only way to create an address 915 * error is an attempted access to kernel/supervisor address 916 * space. 
917 */ 918 case ldl_op: 919 case ldr_op: 920 case lwl_op: 921 case lwr_op: 922 case sdl_op: 923 case sdr_op: 924 case swl_op: 925 case swr_op: 926 case lb_op: 927 case lbu_op: 928 case sb_op: 929 goto sigbus; 930 931 /* 932 * The remaining opcodes are the ones that are really of 933 * interest. 934 */ 935 case spec3_op: 936 if (insn.dsp_format.func == lx_op) { 937 switch (insn.dsp_format.op) { 938 case lwx_op: 939 if (!access_ok(VERIFY_READ, addr, 4)) 940 goto sigbus; 941 LoadW(addr, value, res); 942 if (res) 943 goto fault; 944 compute_return_epc(regs); 945 regs->regs[insn.dsp_format.rd] = value; 946 break; 947 case lhx_op: 948 if (!access_ok(VERIFY_READ, addr, 2)) 949 goto sigbus; 950 LoadHW(addr, value, res); 951 if (res) 952 goto fault; 953 compute_return_epc(regs); 954 regs->regs[insn.dsp_format.rd] = value; 955 break; 956 default: 957 goto sigill; 958 } 959 } 960 #ifdef CONFIG_EVA 961 else { 962 /* 963 * we can land here only from kernel accessing user 964 * memory, so we need to "switch" the address limit to 965 * user space, so that address check can work properly. 
966 */ 967 seg = get_fs(); 968 set_fs(USER_DS); 969 switch (insn.spec3_format.func) { 970 case lhe_op: 971 if (!access_ok(VERIFY_READ, addr, 2)) { 972 set_fs(seg); 973 goto sigbus; 974 } 975 LoadHWE(addr, value, res); 976 if (res) { 977 set_fs(seg); 978 goto fault; 979 } 980 compute_return_epc(regs); 981 regs->regs[insn.spec3_format.rt] = value; 982 break; 983 case lwe_op: 984 if (!access_ok(VERIFY_READ, addr, 4)) { 985 set_fs(seg); 986 goto sigbus; 987 } 988 LoadWE(addr, value, res); 989 if (res) { 990 set_fs(seg); 991 goto fault; 992 } 993 compute_return_epc(regs); 994 regs->regs[insn.spec3_format.rt] = value; 995 break; 996 case lhue_op: 997 if (!access_ok(VERIFY_READ, addr, 2)) { 998 set_fs(seg); 999 goto sigbus; 1000 } 1001 LoadHWUE(addr, value, res); 1002 if (res) { 1003 set_fs(seg); 1004 goto fault; 1005 } 1006 compute_return_epc(regs); 1007 regs->regs[insn.spec3_format.rt] = value; 1008 break; 1009 case she_op: 1010 if (!access_ok(VERIFY_WRITE, addr, 2)) { 1011 set_fs(seg); 1012 goto sigbus; 1013 } 1014 compute_return_epc(regs); 1015 value = regs->regs[insn.spec3_format.rt]; 1016 StoreHWE(addr, value, res); 1017 if (res) { 1018 set_fs(seg); 1019 goto fault; 1020 } 1021 break; 1022 case swe_op: 1023 if (!access_ok(VERIFY_WRITE, addr, 4)) { 1024 set_fs(seg); 1025 goto sigbus; 1026 } 1027 compute_return_epc(regs); 1028 value = regs->regs[insn.spec3_format.rt]; 1029 StoreWE(addr, value, res); 1030 if (res) { 1031 set_fs(seg); 1032 goto fault; 1033 } 1034 break; 1035 default: 1036 set_fs(seg); 1037 goto sigill; 1038 } 1039 set_fs(seg); 1040 } 1041 #endif 1042 break; 1043 case lh_op: 1044 if (!access_ok(VERIFY_READ, addr, 2)) 1045 goto sigbus; 1046 1047 if (IS_ENABLED(CONFIG_EVA)) { 1048 if (uaccess_kernel()) 1049 LoadHW(addr, value, res); 1050 else 1051 LoadHWE(addr, value, res); 1052 } else { 1053 LoadHW(addr, value, res); 1054 } 1055 1056 if (res) 1057 goto fault; 1058 compute_return_epc(regs); 1059 regs->regs[insn.i_format.rt] = value; 1060 break; 1061 1062 
case lw_op: 1063 if (!access_ok(VERIFY_READ, addr, 4)) 1064 goto sigbus; 1065 1066 if (IS_ENABLED(CONFIG_EVA)) { 1067 if (uaccess_kernel()) 1068 LoadW(addr, value, res); 1069 else 1070 LoadWE(addr, value, res); 1071 } else { 1072 LoadW(addr, value, res); 1073 } 1074 1075 if (res) 1076 goto fault; 1077 compute_return_epc(regs); 1078 regs->regs[insn.i_format.rt] = value; 1079 break; 1080 1081 case lhu_op: 1082 if (!access_ok(VERIFY_READ, addr, 2)) 1083 goto sigbus; 1084 1085 if (IS_ENABLED(CONFIG_EVA)) { 1086 if (uaccess_kernel()) 1087 LoadHWU(addr, value, res); 1088 else 1089 LoadHWUE(addr, value, res); 1090 } else { 1091 LoadHWU(addr, value, res); 1092 } 1093 1094 if (res) 1095 goto fault; 1096 compute_return_epc(regs); 1097 regs->regs[insn.i_format.rt] = value; 1098 break; 1099 1100 case lwu_op: 1101 #ifdef CONFIG_64BIT 1102 /* 1103 * A 32-bit kernel might be running on a 64-bit processor. But 1104 * if we're on a 32-bit processor and an i-cache incoherency 1105 * or race makes us see a 64-bit instruction here the sdl/sdr 1106 * would blow up, so for now we don't handle unaligned 64-bit 1107 * instructions on 32-bit kernels. 1108 */ 1109 if (!access_ok(VERIFY_READ, addr, 4)) 1110 goto sigbus; 1111 1112 LoadWU(addr, value, res); 1113 if (res) 1114 goto fault; 1115 compute_return_epc(regs); 1116 regs->regs[insn.i_format.rt] = value; 1117 break; 1118 #endif /* CONFIG_64BIT */ 1119 1120 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1121 goto sigill; 1122 1123 case ld_op: 1124 #ifdef CONFIG_64BIT 1125 /* 1126 * A 32-bit kernel might be running on a 64-bit processor. But 1127 * if we're on a 32-bit processor and an i-cache incoherency 1128 * or race makes us see a 64-bit instruction here the sdl/sdr 1129 * would blow up, so for now we don't handle unaligned 64-bit 1130 * instructions on 32-bit kernels. 
1131 */ 1132 if (!access_ok(VERIFY_READ, addr, 8)) 1133 goto sigbus; 1134 1135 LoadDW(addr, value, res); 1136 if (res) 1137 goto fault; 1138 compute_return_epc(regs); 1139 regs->regs[insn.i_format.rt] = value; 1140 break; 1141 #endif /* CONFIG_64BIT */ 1142 1143 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1144 goto sigill; 1145 1146 case sh_op: 1147 if (!access_ok(VERIFY_WRITE, addr, 2)) 1148 goto sigbus; 1149 1150 compute_return_epc(regs); 1151 value = regs->regs[insn.i_format.rt]; 1152 1153 if (IS_ENABLED(CONFIG_EVA)) { 1154 if (uaccess_kernel()) 1155 StoreHW(addr, value, res); 1156 else 1157 StoreHWE(addr, value, res); 1158 } else { 1159 StoreHW(addr, value, res); 1160 } 1161 1162 if (res) 1163 goto fault; 1164 break; 1165 1166 case sw_op: 1167 if (!access_ok(VERIFY_WRITE, addr, 4)) 1168 goto sigbus; 1169 1170 compute_return_epc(regs); 1171 value = regs->regs[insn.i_format.rt]; 1172 1173 if (IS_ENABLED(CONFIG_EVA)) { 1174 if (uaccess_kernel()) 1175 StoreW(addr, value, res); 1176 else 1177 StoreWE(addr, value, res); 1178 } else { 1179 StoreW(addr, value, res); 1180 } 1181 1182 if (res) 1183 goto fault; 1184 break; 1185 1186 case sd_op: 1187 #ifdef CONFIG_64BIT 1188 /* 1189 * A 32-bit kernel might be running on a 64-bit processor. But 1190 * if we're on a 32-bit processor and an i-cache incoherency 1191 * or race makes us see a 64-bit instruction here the sdl/sdr 1192 * would blow up, so for now we don't handle unaligned 64-bit 1193 * instructions on 32-bit kernels. 
1194 */ 1195 if (!access_ok(VERIFY_WRITE, addr, 8)) 1196 goto sigbus; 1197 1198 compute_return_epc(regs); 1199 value = regs->regs[insn.i_format.rt]; 1200 StoreDW(addr, value, res); 1201 if (res) 1202 goto fault; 1203 break; 1204 #endif /* CONFIG_64BIT */ 1205 1206 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1207 goto sigill; 1208 1209 #ifdef CONFIG_MIPS_FP_SUPPORT 1210 1211 case lwc1_op: 1212 case ldc1_op: 1213 case swc1_op: 1214 case sdc1_op: 1215 case cop1x_op: { 1216 void __user *fault_addr = NULL; 1217 1218 die_if_kernel("Unaligned FP access in kernel code", regs); 1219 BUG_ON(!used_math()); 1220 1221 res = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, 1222 &fault_addr); 1223 own_fpu(1); /* Restore FPU state. */ 1224 1225 /* Signal if something went wrong. */ 1226 process_fpemu_return(res, fault_addr, 0); 1227 1228 if (res == 0) 1229 break; 1230 return; 1231 } 1232 #endif /* CONFIG_MIPS_FP_SUPPORT */ 1233 1234 #ifdef CONFIG_CPU_HAS_MSA 1235 1236 case msa_op: { 1237 unsigned int wd, preempted; 1238 enum msa_2b_fmt df; 1239 union fpureg *fpr; 1240 1241 if (!cpu_has_msa) 1242 goto sigill; 1243 1244 /* 1245 * If we've reached this point then userland should have taken 1246 * the MSA disabled exception & initialised vector context at 1247 * some point in the past. 1248 */ 1249 BUG_ON(!thread_msa_context_live()); 1250 1251 df = insn.msa_mi10_format.df; 1252 wd = insn.msa_mi10_format.wd; 1253 fpr = ¤t->thread.fpu.fpr[wd]; 1254 1255 switch (insn.msa_mi10_format.func) { 1256 case msa_ld_op: 1257 if (!access_ok(VERIFY_READ, addr, sizeof(*fpr))) 1258 goto sigbus; 1259 1260 do { 1261 /* 1262 * If we have live MSA context keep track of 1263 * whether we get preempted in order to avoid 1264 * the register context we load being clobbered 1265 * by the live context as it's saved during 1266 * preemption. If we don't have live context 1267 * then it can't be saved to clobber the value 1268 * we load. 
1269 */ 1270 preempted = test_thread_flag(TIF_USEDMSA); 1271 1272 res = __copy_from_user_inatomic(fpr, addr, 1273 sizeof(*fpr)); 1274 if (res) 1275 goto fault; 1276 1277 /* 1278 * Update the hardware register if it is in use 1279 * by the task in this quantum, in order to 1280 * avoid having to save & restore the whole 1281 * vector context. 1282 */ 1283 preempt_disable(); 1284 if (test_thread_flag(TIF_USEDMSA)) { 1285 write_msa_wr(wd, fpr, df); 1286 preempted = 0; 1287 } 1288 preempt_enable(); 1289 } while (preempted); 1290 break; 1291 1292 case msa_st_op: 1293 if (!access_ok(VERIFY_WRITE, addr, sizeof(*fpr))) 1294 goto sigbus; 1295 1296 /* 1297 * Update from the hardware register if it is in use by 1298 * the task in this quantum, in order to avoid having to 1299 * save & restore the whole vector context. 1300 */ 1301 preempt_disable(); 1302 if (test_thread_flag(TIF_USEDMSA)) 1303 read_msa_wr(wd, fpr, df); 1304 preempt_enable(); 1305 1306 res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr)); 1307 if (res) 1308 goto fault; 1309 break; 1310 1311 default: 1312 goto sigbus; 1313 } 1314 1315 compute_return_epc(regs); 1316 break; 1317 } 1318 #endif /* CONFIG_CPU_HAS_MSA */ 1319 1320 #ifndef CONFIG_CPU_MIPSR6 1321 /* 1322 * COP2 is available to implementor for application specific use. 1323 * It's up to applications to register a notifier chain and do 1324 * whatever they have to do, including possible sending of signals. 1325 * 1326 * This instruction has been reallocated in Release 6 1327 */ 1328 case lwc2_op: 1329 cu2_notifier_call_chain(CU2_LWC2_OP, regs); 1330 break; 1331 1332 case ldc2_op: 1333 cu2_notifier_call_chain(CU2_LDC2_OP, regs); 1334 break; 1335 1336 case swc2_op: 1337 cu2_notifier_call_chain(CU2_SWC2_OP, regs); 1338 break; 1339 1340 case sdc2_op: 1341 cu2_notifier_call_chain(CU2_SDC2_OP, regs); 1342 break; 1343 #endif 1344 default: 1345 /* 1346 * Pheeee... We encountered an yet unknown instruction or 1347 * cache coherence problem. Die sucker, die ... 
1348 */ 1349 goto sigill; 1350 } 1351 1352 #ifdef CONFIG_DEBUG_FS 1353 unaligned_instructions++; 1354 #endif 1355 1356 return; 1357 1358 fault: 1359 /* roll back jump/branch */ 1360 regs->cp0_epc = origpc; 1361 regs->regs[31] = orig31; 1362 /* Did we have an exception handler installed? */ 1363 if (fixup_exception(regs)) 1364 return; 1365 1366 die_if_kernel("Unhandled kernel unaligned access", regs); 1367 force_sig(SIGSEGV, current); 1368 1369 return; 1370 1371 sigbus: 1372 die_if_kernel("Unhandled kernel unaligned access", regs); 1373 force_sig(SIGBUS, current); 1374 1375 return; 1376 1377 sigill: 1378 die_if_kernel 1379 ("Unhandled kernel unaligned access or invalid instruction", regs); 1380 force_sig(SIGILL, current); 1381 } 1382 1383 /* Recode table from 16-bit register notation to 32-bit GPR. */ 1384 const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 }; 1385 1386 /* Recode table from 16-bit STORE register notation to 32-bit GPR. */ 1387 static const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 }; 1388 1389 static void emulate_load_store_microMIPS(struct pt_regs *regs, 1390 void __user *addr) 1391 { 1392 unsigned long value; 1393 unsigned int res; 1394 int i; 1395 unsigned int reg = 0, rvar; 1396 unsigned long orig31; 1397 u16 __user *pc16; 1398 u16 halfword; 1399 unsigned int word; 1400 unsigned long origpc, contpc; 1401 union mips_instruction insn; 1402 struct mm_decoded_insn mminsn; 1403 1404 origpc = regs->cp0_epc; 1405 orig31 = regs->regs[31]; 1406 1407 mminsn.micro_mips_mode = 1; 1408 1409 /* 1410 * This load never faults. 
1411 */ 1412 pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc); 1413 __get_user(halfword, pc16); 1414 pc16++; 1415 contpc = regs->cp0_epc + 2; 1416 word = ((unsigned int)halfword << 16); 1417 mminsn.pc_inc = 2; 1418 1419 if (!mm_insn_16bit(halfword)) { 1420 __get_user(halfword, pc16); 1421 pc16++; 1422 contpc = regs->cp0_epc + 4; 1423 mminsn.pc_inc = 4; 1424 word |= halfword; 1425 } 1426 mminsn.insn = word; 1427 1428 if (get_user(halfword, pc16)) 1429 goto fault; 1430 mminsn.next_pc_inc = 2; 1431 word = ((unsigned int)halfword << 16); 1432 1433 if (!mm_insn_16bit(halfword)) { 1434 pc16++; 1435 if (get_user(halfword, pc16)) 1436 goto fault; 1437 mminsn.next_pc_inc = 4; 1438 word |= halfword; 1439 } 1440 mminsn.next_insn = word; 1441 1442 insn = (union mips_instruction)(mminsn.insn); 1443 if (mm_isBranchInstr(regs, mminsn, &contpc)) 1444 insn = (union mips_instruction)(mminsn.next_insn); 1445 1446 /* Parse instruction to find what to do */ 1447 1448 switch (insn.mm_i_format.opcode) { 1449 1450 case mm_pool32a_op: 1451 switch (insn.mm_x_format.func) { 1452 case mm_lwxs_op: 1453 reg = insn.mm_x_format.rd; 1454 goto loadW; 1455 } 1456 1457 goto sigbus; 1458 1459 case mm_pool32b_op: 1460 switch (insn.mm_m_format.func) { 1461 case mm_lwp_func: 1462 reg = insn.mm_m_format.rd; 1463 if (reg == 31) 1464 goto sigbus; 1465 1466 if (!access_ok(VERIFY_READ, addr, 8)) 1467 goto sigbus; 1468 1469 LoadW(addr, value, res); 1470 if (res) 1471 goto fault; 1472 regs->regs[reg] = value; 1473 addr += 4; 1474 LoadW(addr, value, res); 1475 if (res) 1476 goto fault; 1477 regs->regs[reg + 1] = value; 1478 goto success; 1479 1480 case mm_swp_func: 1481 reg = insn.mm_m_format.rd; 1482 if (reg == 31) 1483 goto sigbus; 1484 1485 if (!access_ok(VERIFY_WRITE, addr, 8)) 1486 goto sigbus; 1487 1488 value = regs->regs[reg]; 1489 StoreW(addr, value, res); 1490 if (res) 1491 goto fault; 1492 addr += 4; 1493 value = regs->regs[reg + 1]; 1494 StoreW(addr, value, res); 1495 if (res) 1496 goto 
fault; 1497 goto success; 1498 1499 case mm_ldp_func: 1500 #ifdef CONFIG_64BIT 1501 reg = insn.mm_m_format.rd; 1502 if (reg == 31) 1503 goto sigbus; 1504 1505 if (!access_ok(VERIFY_READ, addr, 16)) 1506 goto sigbus; 1507 1508 LoadDW(addr, value, res); 1509 if (res) 1510 goto fault; 1511 regs->regs[reg] = value; 1512 addr += 8; 1513 LoadDW(addr, value, res); 1514 if (res) 1515 goto fault; 1516 regs->regs[reg + 1] = value; 1517 goto success; 1518 #endif /* CONFIG_64BIT */ 1519 1520 goto sigill; 1521 1522 case mm_sdp_func: 1523 #ifdef CONFIG_64BIT 1524 reg = insn.mm_m_format.rd; 1525 if (reg == 31) 1526 goto sigbus; 1527 1528 if (!access_ok(VERIFY_WRITE, addr, 16)) 1529 goto sigbus; 1530 1531 value = regs->regs[reg]; 1532 StoreDW(addr, value, res); 1533 if (res) 1534 goto fault; 1535 addr += 8; 1536 value = regs->regs[reg + 1]; 1537 StoreDW(addr, value, res); 1538 if (res) 1539 goto fault; 1540 goto success; 1541 #endif /* CONFIG_64BIT */ 1542 1543 goto sigill; 1544 1545 case mm_lwm32_func: 1546 reg = insn.mm_m_format.rd; 1547 rvar = reg & 0xf; 1548 if ((rvar > 9) || !reg) 1549 goto sigill; 1550 if (reg & 0x10) { 1551 if (!access_ok 1552 (VERIFY_READ, addr, 4 * (rvar + 1))) 1553 goto sigbus; 1554 } else { 1555 if (!access_ok(VERIFY_READ, addr, 4 * rvar)) 1556 goto sigbus; 1557 } 1558 if (rvar == 9) 1559 rvar = 8; 1560 for (i = 16; rvar; rvar--, i++) { 1561 LoadW(addr, value, res); 1562 if (res) 1563 goto fault; 1564 addr += 4; 1565 regs->regs[i] = value; 1566 } 1567 if ((reg & 0xf) == 9) { 1568 LoadW(addr, value, res); 1569 if (res) 1570 goto fault; 1571 addr += 4; 1572 regs->regs[30] = value; 1573 } 1574 if (reg & 0x10) { 1575 LoadW(addr, value, res); 1576 if (res) 1577 goto fault; 1578 regs->regs[31] = value; 1579 } 1580 goto success; 1581 1582 case mm_swm32_func: 1583 reg = insn.mm_m_format.rd; 1584 rvar = reg & 0xf; 1585 if ((rvar > 9) || !reg) 1586 goto sigill; 1587 if (reg & 0x10) { 1588 if (!access_ok 1589 (VERIFY_WRITE, addr, 4 * (rvar + 1))) 1590 goto sigbus; 
1591 } else { 1592 if (!access_ok(VERIFY_WRITE, addr, 4 * rvar)) 1593 goto sigbus; 1594 } 1595 if (rvar == 9) 1596 rvar = 8; 1597 for (i = 16; rvar; rvar--, i++) { 1598 value = regs->regs[i]; 1599 StoreW(addr, value, res); 1600 if (res) 1601 goto fault; 1602 addr += 4; 1603 } 1604 if ((reg & 0xf) == 9) { 1605 value = regs->regs[30]; 1606 StoreW(addr, value, res); 1607 if (res) 1608 goto fault; 1609 addr += 4; 1610 } 1611 if (reg & 0x10) { 1612 value = regs->regs[31]; 1613 StoreW(addr, value, res); 1614 if (res) 1615 goto fault; 1616 } 1617 goto success; 1618 1619 case mm_ldm_func: 1620 #ifdef CONFIG_64BIT 1621 reg = insn.mm_m_format.rd; 1622 rvar = reg & 0xf; 1623 if ((rvar > 9) || !reg) 1624 goto sigill; 1625 if (reg & 0x10) { 1626 if (!access_ok 1627 (VERIFY_READ, addr, 8 * (rvar + 1))) 1628 goto sigbus; 1629 } else { 1630 if (!access_ok(VERIFY_READ, addr, 8 * rvar)) 1631 goto sigbus; 1632 } 1633 if (rvar == 9) 1634 rvar = 8; 1635 1636 for (i = 16; rvar; rvar--, i++) { 1637 LoadDW(addr, value, res); 1638 if (res) 1639 goto fault; 1640 addr += 4; 1641 regs->regs[i] = value; 1642 } 1643 if ((reg & 0xf) == 9) { 1644 LoadDW(addr, value, res); 1645 if (res) 1646 goto fault; 1647 addr += 8; 1648 regs->regs[30] = value; 1649 } 1650 if (reg & 0x10) { 1651 LoadDW(addr, value, res); 1652 if (res) 1653 goto fault; 1654 regs->regs[31] = value; 1655 } 1656 goto success; 1657 #endif /* CONFIG_64BIT */ 1658 1659 goto sigill; 1660 1661 case mm_sdm_func: 1662 #ifdef CONFIG_64BIT 1663 reg = insn.mm_m_format.rd; 1664 rvar = reg & 0xf; 1665 if ((rvar > 9) || !reg) 1666 goto sigill; 1667 if (reg & 0x10) { 1668 if (!access_ok 1669 (VERIFY_WRITE, addr, 8 * (rvar + 1))) 1670 goto sigbus; 1671 } else { 1672 if (!access_ok(VERIFY_WRITE, addr, 8 * rvar)) 1673 goto sigbus; 1674 } 1675 if (rvar == 9) 1676 rvar = 8; 1677 1678 for (i = 16; rvar; rvar--, i++) { 1679 value = regs->regs[i]; 1680 StoreDW(addr, value, res); 1681 if (res) 1682 goto fault; 1683 addr += 8; 1684 } 1685 if ((reg & 0xf) 
== 9) { 1686 value = regs->regs[30]; 1687 StoreDW(addr, value, res); 1688 if (res) 1689 goto fault; 1690 addr += 8; 1691 } 1692 if (reg & 0x10) { 1693 value = regs->regs[31]; 1694 StoreDW(addr, value, res); 1695 if (res) 1696 goto fault; 1697 } 1698 goto success; 1699 #endif /* CONFIG_64BIT */ 1700 1701 goto sigill; 1702 1703 /* LWC2, SWC2, LDC2, SDC2 are not serviced */ 1704 } 1705 1706 goto sigbus; 1707 1708 case mm_pool32c_op: 1709 switch (insn.mm_m_format.func) { 1710 case mm_lwu_func: 1711 reg = insn.mm_m_format.rd; 1712 goto loadWU; 1713 } 1714 1715 /* LL,SC,LLD,SCD are not serviced */ 1716 goto sigbus; 1717 1718 #ifdef CONFIG_MIPS_FP_SUPPORT 1719 case mm_pool32f_op: 1720 switch (insn.mm_x_format.func) { 1721 case mm_lwxc1_func: 1722 case mm_swxc1_func: 1723 case mm_ldxc1_func: 1724 case mm_sdxc1_func: 1725 goto fpu_emul; 1726 } 1727 1728 goto sigbus; 1729 1730 case mm_ldc132_op: 1731 case mm_sdc132_op: 1732 case mm_lwc132_op: 1733 case mm_swc132_op: { 1734 void __user *fault_addr = NULL; 1735 1736 fpu_emul: 1737 /* roll back jump/branch */ 1738 regs->cp0_epc = origpc; 1739 regs->regs[31] = orig31; 1740 1741 die_if_kernel("Unaligned FP access in kernel code", regs); 1742 BUG_ON(!used_math()); 1743 BUG_ON(!is_fpu_owner()); 1744 1745 res = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, 1746 &fault_addr); 1747 own_fpu(1); /* restore FPU state */ 1748 1749 /* If something went wrong, signal */ 1750 process_fpemu_return(res, fault_addr, 0); 1751 1752 if (res == 0) 1753 goto success; 1754 return; 1755 } 1756 #endif /* CONFIG_MIPS_FP_SUPPORT */ 1757 1758 case mm_lh32_op: 1759 reg = insn.mm_i_format.rt; 1760 goto loadHW; 1761 1762 case mm_lhu32_op: 1763 reg = insn.mm_i_format.rt; 1764 goto loadHWU; 1765 1766 case mm_lw32_op: 1767 reg = insn.mm_i_format.rt; 1768 goto loadW; 1769 1770 case mm_sh32_op: 1771 reg = insn.mm_i_format.rt; 1772 goto storeHW; 1773 1774 case mm_sw32_op: 1775 reg = insn.mm_i_format.rt; 1776 goto storeW; 1777 1778 case mm_ld32_op: 1779 reg = 
insn.mm_i_format.rt; 1780 goto loadDW; 1781 1782 case mm_sd32_op: 1783 reg = insn.mm_i_format.rt; 1784 goto storeDW; 1785 1786 case mm_pool16c_op: 1787 switch (insn.mm16_m_format.func) { 1788 case mm_lwm16_op: 1789 reg = insn.mm16_m_format.rlist; 1790 rvar = reg + 1; 1791 if (!access_ok(VERIFY_READ, addr, 4 * rvar)) 1792 goto sigbus; 1793 1794 for (i = 16; rvar; rvar--, i++) { 1795 LoadW(addr, value, res); 1796 if (res) 1797 goto fault; 1798 addr += 4; 1799 regs->regs[i] = value; 1800 } 1801 LoadW(addr, value, res); 1802 if (res) 1803 goto fault; 1804 regs->regs[31] = value; 1805 1806 goto success; 1807 1808 case mm_swm16_op: 1809 reg = insn.mm16_m_format.rlist; 1810 rvar = reg + 1; 1811 if (!access_ok(VERIFY_WRITE, addr, 4 * rvar)) 1812 goto sigbus; 1813 1814 for (i = 16; rvar; rvar--, i++) { 1815 value = regs->regs[i]; 1816 StoreW(addr, value, res); 1817 if (res) 1818 goto fault; 1819 addr += 4; 1820 } 1821 value = regs->regs[31]; 1822 StoreW(addr, value, res); 1823 if (res) 1824 goto fault; 1825 1826 goto success; 1827 1828 } 1829 1830 goto sigbus; 1831 1832 case mm_lhu16_op: 1833 reg = reg16to32[insn.mm16_rb_format.rt]; 1834 goto loadHWU; 1835 1836 case mm_lw16_op: 1837 reg = reg16to32[insn.mm16_rb_format.rt]; 1838 goto loadW; 1839 1840 case mm_sh16_op: 1841 reg = reg16to32st[insn.mm16_rb_format.rt]; 1842 goto storeHW; 1843 1844 case mm_sw16_op: 1845 reg = reg16to32st[insn.mm16_rb_format.rt]; 1846 goto storeW; 1847 1848 case mm_lwsp16_op: 1849 reg = insn.mm16_r5_format.rt; 1850 goto loadW; 1851 1852 case mm_swsp16_op: 1853 reg = insn.mm16_r5_format.rt; 1854 goto storeW; 1855 1856 case mm_lwgp16_op: 1857 reg = reg16to32[insn.mm16_r3_format.rt]; 1858 goto loadW; 1859 1860 default: 1861 goto sigill; 1862 } 1863 1864 loadHW: 1865 if (!access_ok(VERIFY_READ, addr, 2)) 1866 goto sigbus; 1867 1868 LoadHW(addr, value, res); 1869 if (res) 1870 goto fault; 1871 regs->regs[reg] = value; 1872 goto success; 1873 1874 loadHWU: 1875 if (!access_ok(VERIFY_READ, addr, 2)) 1876 
goto sigbus; 1877 1878 LoadHWU(addr, value, res); 1879 if (res) 1880 goto fault; 1881 regs->regs[reg] = value; 1882 goto success; 1883 1884 loadW: 1885 if (!access_ok(VERIFY_READ, addr, 4)) 1886 goto sigbus; 1887 1888 LoadW(addr, value, res); 1889 if (res) 1890 goto fault; 1891 regs->regs[reg] = value; 1892 goto success; 1893 1894 loadWU: 1895 #ifdef CONFIG_64BIT 1896 /* 1897 * A 32-bit kernel might be running on a 64-bit processor. But 1898 * if we're on a 32-bit processor and an i-cache incoherency 1899 * or race makes us see a 64-bit instruction here the sdl/sdr 1900 * would blow up, so for now we don't handle unaligned 64-bit 1901 * instructions on 32-bit kernels. 1902 */ 1903 if (!access_ok(VERIFY_READ, addr, 4)) 1904 goto sigbus; 1905 1906 LoadWU(addr, value, res); 1907 if (res) 1908 goto fault; 1909 regs->regs[reg] = value; 1910 goto success; 1911 #endif /* CONFIG_64BIT */ 1912 1913 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1914 goto sigill; 1915 1916 loadDW: 1917 #ifdef CONFIG_64BIT 1918 /* 1919 * A 32-bit kernel might be running on a 64-bit processor. But 1920 * if we're on a 32-bit processor and an i-cache incoherency 1921 * or race makes us see a 64-bit instruction here the sdl/sdr 1922 * would blow up, so for now we don't handle unaligned 64-bit 1923 * instructions on 32-bit kernels. 
1924 */ 1925 if (!access_ok(VERIFY_READ, addr, 8)) 1926 goto sigbus; 1927 1928 LoadDW(addr, value, res); 1929 if (res) 1930 goto fault; 1931 regs->regs[reg] = value; 1932 goto success; 1933 #endif /* CONFIG_64BIT */ 1934 1935 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1936 goto sigill; 1937 1938 storeHW: 1939 if (!access_ok(VERIFY_WRITE, addr, 2)) 1940 goto sigbus; 1941 1942 value = regs->regs[reg]; 1943 StoreHW(addr, value, res); 1944 if (res) 1945 goto fault; 1946 goto success; 1947 1948 storeW: 1949 if (!access_ok(VERIFY_WRITE, addr, 4)) 1950 goto sigbus; 1951 1952 value = regs->regs[reg]; 1953 StoreW(addr, value, res); 1954 if (res) 1955 goto fault; 1956 goto success; 1957 1958 storeDW: 1959 #ifdef CONFIG_64BIT 1960 /* 1961 * A 32-bit kernel might be running on a 64-bit processor. But 1962 * if we're on a 32-bit processor and an i-cache incoherency 1963 * or race makes us see a 64-bit instruction here the sdl/sdr 1964 * would blow up, so for now we don't handle unaligned 64-bit 1965 * instructions on 32-bit kernels. 1966 */ 1967 if (!access_ok(VERIFY_WRITE, addr, 8)) 1968 goto sigbus; 1969 1970 value = regs->regs[reg]; 1971 StoreDW(addr, value, res); 1972 if (res) 1973 goto fault; 1974 goto success; 1975 #endif /* CONFIG_64BIT */ 1976 1977 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1978 goto sigill; 1979 1980 success: 1981 regs->cp0_epc = contpc; /* advance or branch */ 1982 1983 #ifdef CONFIG_DEBUG_FS 1984 unaligned_instructions++; 1985 #endif 1986 return; 1987 1988 fault: 1989 /* roll back jump/branch */ 1990 regs->cp0_epc = origpc; 1991 regs->regs[31] = orig31; 1992 /* Did we have an exception handler installed? 
*/ 1993 if (fixup_exception(regs)) 1994 return; 1995 1996 die_if_kernel("Unhandled kernel unaligned access", regs); 1997 force_sig(SIGSEGV, current); 1998 1999 return; 2000 2001 sigbus: 2002 die_if_kernel("Unhandled kernel unaligned access", regs); 2003 force_sig(SIGBUS, current); 2004 2005 return; 2006 2007 sigill: 2008 die_if_kernel 2009 ("Unhandled kernel unaligned access or invalid instruction", regs); 2010 force_sig(SIGILL, current); 2011 } 2012 2013 static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) 2014 { 2015 unsigned long value; 2016 unsigned int res; 2017 int reg; 2018 unsigned long orig31; 2019 u16 __user *pc16; 2020 unsigned long origpc; 2021 union mips16e_instruction mips16inst, oldinst; 2022 unsigned int opcode; 2023 int extended = 0; 2024 2025 origpc = regs->cp0_epc; 2026 orig31 = regs->regs[31]; 2027 pc16 = (unsigned short __user *)msk_isa16_mode(origpc); 2028 /* 2029 * This load never faults. 2030 */ 2031 __get_user(mips16inst.full, pc16); 2032 oldinst = mips16inst; 2033 2034 /* skip EXTEND instruction */ 2035 if (mips16inst.ri.opcode == MIPS16e_extend_op) { 2036 extended = 1; 2037 pc16++; 2038 __get_user(mips16inst.full, pc16); 2039 } else if (delay_slot(regs)) { 2040 /* skip jump instructions */ 2041 /* JAL/JALX are 32 bits but have OPCODE in first short int */ 2042 if (mips16inst.ri.opcode == MIPS16e_jal_op) 2043 pc16++; 2044 pc16++; 2045 if (get_user(mips16inst.full, pc16)) 2046 goto sigbus; 2047 } 2048 2049 opcode = mips16inst.ri.opcode; 2050 switch (opcode) { 2051 case MIPS16e_i64_op: /* I64 or RI64 instruction */ 2052 switch (mips16inst.i64.func) { /* I64/RI64 func field check */ 2053 case MIPS16e_ldpc_func: 2054 case MIPS16e_ldsp_func: 2055 reg = reg16to32[mips16inst.ri64.ry]; 2056 goto loadDW; 2057 2058 case MIPS16e_sdsp_func: 2059 reg = reg16to32[mips16inst.ri64.ry]; 2060 goto writeDW; 2061 2062 case MIPS16e_sdrasp_func: 2063 reg = 29; /* GPRSP */ 2064 goto writeDW; 2065 } 2066 2067 goto sigbus; 2068 2069 
	case MIPS16e_swsp_op:
		reg = reg16to32[mips16inst.ri.rx];
		/*
		 * With the MIPS16e2 ASE an EXTENDed SWSP encoding may really
		 * be SWSP/SWGP/SHGP depending on imm[7:5]; remap the opcode
		 * so the access switch below uses the right width.
		 */
		if (extended && cpu_has_mips16e2)
			switch (mips16inst.ri.imm >> 5) {
			case 0:		/* SWSP */
			case 1:		/* SWGP */
				break;
			case 2:		/* SHGP */
				opcode = MIPS16e_sh_op;
				break;
			default:
				goto sigbus;
			}
		break;

	case MIPS16e_lwpc_op:
		reg = reg16to32[mips16inst.ri.rx];
		break;

	case MIPS16e_lwsp_op:
		reg = reg16to32[mips16inst.ri.rx];
		/* MIPS16e2: EXTENDed LWSP may be LWSP/LWGP/LHGP/LHUGP. */
		if (extended && cpu_has_mips16e2)
			switch (mips16inst.ri.imm >> 5) {
			case 0:		/* LWSP */
			case 1:		/* LWGP */
				break;
			case 2:		/* LHGP */
				opcode = MIPS16e_lh_op;
				break;
			case 4:		/* LHUGP */
				opcode = MIPS16e_lhu_op;
				break;
			default:
				goto sigbus;
			}
		break;

	case MIPS16e_i8_op:
		/* Of the I8 group only SWRASP (store ra via sp) can get here. */
		if (mips16inst.i8.func != MIPS16e_swrasp_func)
			goto sigbus;
		reg = 29;	/* GPRSP */
		break;

	default:
		reg = reg16to32[mips16inst.rri.ry];
		break;
	}

	/* Second stage: perform the (possibly remapped) access. */
	switch (opcode) {

	case MIPS16e_lb_op:
	case MIPS16e_lbu_op:
	case MIPS16e_sb_op:
		/* Byte accesses cannot be unaligned; an AdE here is bogus. */
		goto sigbus;

	case MIPS16e_lh_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		LoadHW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lhu_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		LoadHWU(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lw_op:
	case MIPS16e_lwpc_op:
	case MIPS16e_lwsp_op:
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		LoadW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		LoadWU(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case MIPS16e_ld_op:
loadDW:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 8))
			goto sigbus;

		LoadDW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case MIPS16e_sh_op:
		if (!access_ok(VERIFY_WRITE, addr, 2))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreHW(addr, value, res);
		if (res)
			goto fault;
		break;

	case MIPS16e_sw_op:
	case MIPS16e_swsp_op:
	case MIPS16e_i8_op:	/* actually - MIPS16e_swrasp_func */
		if (!access_ok(VERIFY_WRITE, addr, 4))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreW(addr, value, res);
		if (res)
			goto fault;
		break;

	case MIPS16e_sd_op:
writeDW:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_WRITE, addr, 8))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreDW(addr, value, res);
		if (res)
			goto fault;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	default:
		/*
		 * Pheeee...  We encountered an yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
2258 */ 2259 goto sigill; 2260 } 2261 2262 #ifdef CONFIG_DEBUG_FS 2263 unaligned_instructions++; 2264 #endif 2265 2266 return; 2267 2268 fault: 2269 /* roll back jump/branch */ 2270 regs->cp0_epc = origpc; 2271 regs->regs[31] = orig31; 2272 /* Did we have an exception handler installed? */ 2273 if (fixup_exception(regs)) 2274 return; 2275 2276 die_if_kernel("Unhandled kernel unaligned access", regs); 2277 force_sig(SIGSEGV, current); 2278 2279 return; 2280 2281 sigbus: 2282 die_if_kernel("Unhandled kernel unaligned access", regs); 2283 force_sig(SIGBUS, current); 2284 2285 return; 2286 2287 sigill: 2288 die_if_kernel 2289 ("Unhandled kernel unaligned access or invalid instruction", regs); 2290 force_sig(SIGILL, current); 2291 } 2292 2293 asmlinkage void do_ade(struct pt_regs *regs) 2294 { 2295 enum ctx_state prev_state; 2296 unsigned int __user *pc; 2297 mm_segment_t seg; 2298 2299 prev_state = exception_enter(); 2300 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 2301 1, regs, regs->cp0_badvaddr); 2302 /* 2303 * Did we catch a fault trying to load an instruction? 2304 */ 2305 if (regs->cp0_badvaddr == regs->cp0_epc) 2306 goto sigbus; 2307 2308 if (user_mode(regs) && !test_thread_flag(TIF_FIXADE)) 2309 goto sigbus; 2310 if (unaligned_action == UNALIGNED_ACTION_SIGNAL) 2311 goto sigbus; 2312 2313 /* 2314 * Do branch emulation only if we didn't forward the exception. 2315 * This is all so but ugly ... 2316 */ 2317 2318 /* 2319 * Are we running in microMIPS mode? 2320 */ 2321 if (get_isa16_mode(regs->cp0_epc)) { 2322 /* 2323 * Did we catch a fault trying to load an instruction in 2324 * 16-bit mode? 
2325 */ 2326 if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc)) 2327 goto sigbus; 2328 if (unaligned_action == UNALIGNED_ACTION_SHOW) 2329 show_registers(regs); 2330 2331 if (cpu_has_mmips) { 2332 seg = get_fs(); 2333 if (!user_mode(regs)) 2334 set_fs(KERNEL_DS); 2335 emulate_load_store_microMIPS(regs, 2336 (void __user *)regs->cp0_badvaddr); 2337 set_fs(seg); 2338 2339 return; 2340 } 2341 2342 if (cpu_has_mips16) { 2343 seg = get_fs(); 2344 if (!user_mode(regs)) 2345 set_fs(KERNEL_DS); 2346 emulate_load_store_MIPS16e(regs, 2347 (void __user *)regs->cp0_badvaddr); 2348 set_fs(seg); 2349 2350 return; 2351 } 2352 2353 goto sigbus; 2354 } 2355 2356 if (unaligned_action == UNALIGNED_ACTION_SHOW) 2357 show_registers(regs); 2358 pc = (unsigned int __user *)exception_epc(regs); 2359 2360 seg = get_fs(); 2361 if (!user_mode(regs)) 2362 set_fs(KERNEL_DS); 2363 emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc); 2364 set_fs(seg); 2365 2366 return; 2367 2368 sigbus: 2369 die_if_kernel("Kernel unaligned instruction access", regs); 2370 force_sig(SIGBUS, current); 2371 2372 /* 2373 * XXX On return from the signal handler we should advance the epc 2374 */ 2375 exception_exit(prev_state); 2376 } 2377 2378 #ifdef CONFIG_DEBUG_FS 2379 static int __init debugfs_unaligned(void) 2380 { 2381 struct dentry *d; 2382 2383 if (!mips_debugfs_dir) 2384 return -ENODEV; 2385 d = debugfs_create_u32("unaligned_instructions", S_IRUGO, 2386 mips_debugfs_dir, &unaligned_instructions); 2387 if (!d) 2388 return -ENOMEM; 2389 d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR, 2390 mips_debugfs_dir, &unaligned_action); 2391 if (!d) 2392 return -ENOMEM; 2393 return 0; 2394 } 2395 arch_initcall(debugfs_unaligned); 2396 #endif 2397