/*
 * Handle unaligned accesses by emulation.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2014 Imagination Technologies Ltd.
 *
 * This file contains the exception handler for the address error exception,
 * with the special capability to execute faulting instructions in software.
 * The handler does not try to handle the case when the program counter
 * points to an address not aligned to a word boundary.
 *
 * Storing data at unaligned addresses is a bad practice even on Intel, where
 * only the performance is affected.  Much worse is that such code is
 * non-portable.  Due to several programs that die on MIPS due to alignment
 * problems I decided to implement this handler anyway, though I originally
 * didn't intend to do this at all for user code.
 *
 * For now I enable fixing of address errors by default to make life easier.
 * I however intend to disable this at some point in the future when the
 * alignment problems with user programs have been fixed.  For programmers
 * this is the right way to go.
 *
 * Fixing address errors is a per-process option.  The option is inherited
 * across fork(2) and execve(2) calls.  If you really want to use the
 * option in your user programs (I strongly discourage relying on the
 * software emulation), use the following code in your userland stuff:
 *
 * #include <sys/sysmips.h>
 *
 * ...
 * sysmips(MIPS_FIXADE, x);
 * ...
 *
 * The argument x is 0 to disable the software emulation; any other value
 * enables it.
 *
 * Below is a little program to play around with this feature.
 *
 * #include <stdio.h>
 * #include <stdlib.h>
 * #include <sys/sysmips.h>
 *
 * struct foo {
 *         unsigned char bar[8];
 * };
 *
 * int main(int argc, char *argv[])
 * {
 *         struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
 *         unsigned int *p = (unsigned int *) (x.bar + 3);
 *         int i;
 *
 *         if (argc > 1)
 *                 sysmips(MIPS_FIXADE, atoi(argv[1]));
 *
 *         printf("*p = %08x\n", *p);
 *
 *         *p = 0xdeadface;
 *
 *         for (i = 0; i <= 7; i++)
 *                 printf("%02x ", x.bar[i]);
 *         printf("\n");
 *
 *         return 0;
 * }
 *
 * Coprocessor loads are not supported; I think this case is unimportant
 * in practice.
 *
 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
 *	 exception for the R6000.
 *	 A store crossing a page boundary might be executed only partially.
 *	 Undo the partial store in this case.
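 *
 * When the kernel is built with CONFIG_DEBUG_FS, this file additionally
 * exposes two debugfs entries (see debugfs_unaligned() at the bottom of
 * this file): "unaligned_instructions", a counter of emulated accesses,
 * and "unaligned_action", which selects between the UNALIGNED_ACTION_QUIET,
 * UNALIGNED_ACTION_SIGNAL and UNALIGNED_ACTION_SHOW behaviours defined
 * below.  Assuming debugfs is mounted at its usual location, a rough
 * sketch of poking at them from the shell:
 *
 *   echo 2 > /sys/kernel/debug/mips/unaligned_action
 *   cat /sys/kernel/debug/mips/unaligned_instructions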
 */
#include <linux/context_tracking.h>
#include <linux/mm.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/perf_event.h>

#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/byteorder.h>
#include <asm/cop2.h>
#include <asm/debug.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/inst.h>
#include <linux/uaccess.h>

#define STR(x)	__STR(x)
#define __STR(x)	#x

enum {
	UNALIGNED_ACTION_QUIET,
	UNALIGNED_ACTION_SIGNAL,
	UNALIGNED_ACTION_SHOW,
};
#ifdef CONFIG_DEBUG_FS
static u32 unaligned_instructions;
static u32 unaligned_action;
#else
#define unaligned_action UNALIGNED_ACTION_QUIET
#endif
extern void show_registers(struct pt_regs *regs);

#ifdef __BIG_ENDIAN
#define _LoadHW(addr, value, res, type)	\
do {	\
	__asm__ __volatile__ (".set\tnoat\n"	\
		"1:\t"type##_lb("%0", "0(%2)")"\n"	\
		"2:\t"type##_lbu("$1", "1(%2)")"\n\t"	\
		"sll\t%0, 0x8\n\t"	\
		"or\t%0, $1\n\t"	\
		"li\t%1, 0\n"	\
		"3:\t.set\tat\n\t"	\
		".insn\n\t"	\
		".section\t.fixup,\"ax\"\n\t"	\
		"4:\tli\t%1, %3\n\t"	\
		"j\t3b\n\t"	\
		".previous\n\t"	\
		".section\t__ex_table,\"a\"\n\t"	\
		STR(PTR)"\t1b, 4b\n\t"	\
		STR(PTR)"\t2b, 4b\n\t"	\
		".previous"	\
		: "=&r" (value), "=r" (res)	\
		: "r" (addr), "i" (-EFAULT));	\
} while(0)

#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#define _LoadW(addr, value, res, type)	\
do {	\
	__asm__ __volatile__ (	\
		"1:\t"type##_lwl("%0", "(%2)")"\n"	\
		"2:\t"type##_lwr("%0", "3(%2)")"\n\t"	\
		"li\t%1, 0\n"	\
		"3:\n\t"	\
		".insn\n\t"	\
		".section\t.fixup,\"ax\"\n\t"	\
		"4:\tli\t%1, %3\n\t"	\
		"j\t3b\n\t"	\
		".previous\n\t"	\
		".section\t__ex_table,\"a\"\n\t"	\
		STR(PTR)"\t1b, 4b\n\t"	\
		STR(PTR)"\t2b, 4b\n\t"	\
		".previous"	\
		: "=&r" (value), "=r" (res)	\
		: "r" (addr), "i" (-EFAULT));	\
} while(0)

#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
/* For CPUs without lwl instruction */
#define _LoadW(addr, value, res, type)	\
do {	\
	__asm__ __volatile__ (	\
		".set\tpush\n"	\
		".set\tnoat\n\t"	\
		"1:"type##_lb("%0", "0(%2)")"\n\t"	\
		"2:"type##_lbu("$1", "1(%2)")"\n\t"	\
		"sll\t%0, 0x8\n\t"	\
		"or\t%0, $1\n\t"	\
		"3:"type##_lbu("$1", "2(%2)")"\n\t"	\
		"sll\t%0, 0x8\n\t"	\
		"or\t%0, $1\n\t"	\
		"4:"type##_lbu("$1", "3(%2)")"\n\t"	\
		"sll\t%0, 0x8\n\t"	\
		"or\t%0, $1\n\t"	\
		"li\t%1, 0\n"	\
		".set\tpop\n"	\
		"10:\n\t"	\
		".insn\n\t"	\
		".section\t.fixup,\"ax\"\n\t"	\
		"11:\tli\t%1, %3\n\t"	\
		"j\t10b\n\t"	\
		".previous\n\t"	\
		".section\t__ex_table,\"a\"\n\t"	\
		STR(PTR)"\t1b, 11b\n\t"	\
		STR(PTR)"\t2b, 11b\n\t"	\
		STR(PTR)"\t3b, 11b\n\t"	\
		STR(PTR)"\t4b, 11b\n\t"	\
		".previous"	\
		: "=&r" (value), "=r" (res)	\
		: "r" (addr), "i" (-EFAULT));	\
} while(0)

#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */

#define _LoadHWU(addr, value, res, type)	\
do {	\
	__asm__ __volatile__ (	\
		".set\tnoat\n"	\
		"1:\t"type##_lbu("%0", "0(%2)")"\n"	\
		"2:\t"type##_lbu("$1", "1(%2)")"\n\t"	\
		"sll\t%0, 0x8\n\t"	\
		"or\t%0, $1\n\t"	\
		"li\t%1, 0\n"	\
		"3:\n\t"	\
		".insn\n\t"	\
		".set\tat\n\t"	\
		".section\t.fixup,\"ax\"\n\t"	\
		"4:\tli\t%1, %3\n\t"	\
		"j\t3b\n\t"	\
		".previous\n\t"	\
		".section\t__ex_table,\"a\"\n\t"	\
		STR(PTR)"\t1b, 4b\n\t"	\
209 STR(PTR)"\t2b, 4b\n\t" \ 210 ".previous" \ 211 : "=&r" (value), "=r" (res) \ 212 : "r" (addr), "i" (-EFAULT)); \ 213 } while(0) 214 215 #ifdef CONFIG_CPU_HAS_LOAD_STORE_LR 216 #define _LoadWU(addr, value, res, type) \ 217 do { \ 218 __asm__ __volatile__ ( \ 219 "1:\t"type##_lwl("%0", "(%2)")"\n" \ 220 "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\ 221 "dsll\t%0, %0, 32\n\t" \ 222 "dsrl\t%0, %0, 32\n\t" \ 223 "li\t%1, 0\n" \ 224 "3:\n\t" \ 225 ".insn\n\t" \ 226 "\t.section\t.fixup,\"ax\"\n\t" \ 227 "4:\tli\t%1, %3\n\t" \ 228 "j\t3b\n\t" \ 229 ".previous\n\t" \ 230 ".section\t__ex_table,\"a\"\n\t" \ 231 STR(PTR)"\t1b, 4b\n\t" \ 232 STR(PTR)"\t2b, 4b\n\t" \ 233 ".previous" \ 234 : "=&r" (value), "=r" (res) \ 235 : "r" (addr), "i" (-EFAULT)); \ 236 } while(0) 237 238 #define _LoadDW(addr, value, res) \ 239 do { \ 240 __asm__ __volatile__ ( \ 241 "1:\tldl\t%0, (%2)\n" \ 242 "2:\tldr\t%0, 7(%2)\n\t" \ 243 "li\t%1, 0\n" \ 244 "3:\n\t" \ 245 ".insn\n\t" \ 246 "\t.section\t.fixup,\"ax\"\n\t" \ 247 "4:\tli\t%1, %3\n\t" \ 248 "j\t3b\n\t" \ 249 ".previous\n\t" \ 250 ".section\t__ex_table,\"a\"\n\t" \ 251 STR(PTR)"\t1b, 4b\n\t" \ 252 STR(PTR)"\t2b, 4b\n\t" \ 253 ".previous" \ 254 : "=&r" (value), "=r" (res) \ 255 : "r" (addr), "i" (-EFAULT)); \ 256 } while(0) 257 258 #else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ 259 /* For CPUs without lwl and ldl instructions */ 260 #define _LoadWU(addr, value, res, type) \ 261 do { \ 262 __asm__ __volatile__ ( \ 263 ".set\tpush\n\t" \ 264 ".set\tnoat\n\t" \ 265 "1:"type##_lbu("%0", "0(%2)")"\n\t" \ 266 "2:"type##_lbu("$1", "1(%2)")"\n\t" \ 267 "sll\t%0, 0x8\n\t" \ 268 "or\t%0, $1\n\t" \ 269 "3:"type##_lbu("$1", "2(%2)")"\n\t" \ 270 "sll\t%0, 0x8\n\t" \ 271 "or\t%0, $1\n\t" \ 272 "4:"type##_lbu("$1", "3(%2)")"\n\t" \ 273 "sll\t%0, 0x8\n\t" \ 274 "or\t%0, $1\n\t" \ 275 "li\t%1, 0\n" \ 276 ".set\tpop\n" \ 277 "10:\n\t" \ 278 ".insn\n\t" \ 279 ".section\t.fixup,\"ax\"\n\t" \ 280 "11:\tli\t%1, %3\n\t" \ 281 "j\t10b\n\t" \ 282 ".previous\n\t" \ 283 ".section\t__ex_table,\"a\"\n\t" \ 284 STR(PTR)"\t1b, 11b\n\t" \ 285 STR(PTR)"\t2b, 11b\n\t" \ 286 STR(PTR)"\t3b, 11b\n\t" \ 287 STR(PTR)"\t4b, 11b\n\t" \ 288 ".previous" \ 289 : "=&r" (value), "=r" (res) \ 290 : "r" (addr), "i" (-EFAULT)); \ 291 } while(0) 292 293 #define _LoadDW(addr, value, res) \ 294 do { \ 295 __asm__ __volatile__ ( \ 296 ".set\tpush\n\t" \ 297 ".set\tnoat\n\t" \ 298 "1:lb\t%0, 0(%2)\n\t" \ 299 "2:lbu\t $1, 1(%2)\n\t" \ 300 "dsll\t%0, 0x8\n\t" \ 301 "or\t%0, $1\n\t" \ 302 "3:lbu\t$1, 2(%2)\n\t" \ 303 "dsll\t%0, 0x8\n\t" \ 304 "or\t%0, $1\n\t" \ 305 "4:lbu\t$1, 3(%2)\n\t" \ 306 "dsll\t%0, 0x8\n\t" \ 307 "or\t%0, $1\n\t" \ 308 "5:lbu\t$1, 4(%2)\n\t" \ 309 "dsll\t%0, 0x8\n\t" \ 310 "or\t%0, $1\n\t" \ 311 "6:lbu\t$1, 5(%2)\n\t" \ 312 "dsll\t%0, 0x8\n\t" \ 313 "or\t%0, $1\n\t" \ 314 "7:lbu\t$1, 6(%2)\n\t" \ 315 "dsll\t%0, 0x8\n\t" \ 316 "or\t%0, $1\n\t" \ 317 "8:lbu\t$1, 7(%2)\n\t" \ 318 "dsll\t%0, 0x8\n\t" \ 319 "or\t%0, $1\n\t" \ 320 "li\t%1, 0\n" \ 321 ".set\tpop\n\t" \ 322 "10:\n\t" \ 323 ".insn\n\t" \ 324 ".section\t.fixup,\"ax\"\n\t" \ 325 "11:\tli\t%1, %3\n\t" \ 326 "j\t10b\n\t" \ 327 ".previous\n\t" \ 328 ".section\t__ex_table,\"a\"\n\t" \ 329 STR(PTR)"\t1b, 11b\n\t" \ 330 STR(PTR)"\t2b, 11b\n\t" \ 331 STR(PTR)"\t3b, 11b\n\t" \ 332 STR(PTR)"\t4b, 11b\n\t" \ 333 STR(PTR)"\t5b, 11b\n\t" \ 334 STR(PTR)"\t6b, 11b\n\t" \ 335 STR(PTR)"\t7b, 11b\n\t" \ 336 STR(PTR)"\t8b, 11b\n\t" \ 337 ".previous" \ 338 : "=&r" (value), "=r" (res) \ 339 : "r" (addr), "i" (-EFAULT)); \ 340 } while(0) 341 342 #endif /* 
!CONFIG_CPU_HAS_LOAD_STORE_LR */ 343 344 345 #define _StoreHW(addr, value, res, type) \ 346 do { \ 347 __asm__ __volatile__ ( \ 348 ".set\tnoat\n" \ 349 "1:\t"type##_sb("%1", "1(%2)")"\n" \ 350 "srl\t$1, %1, 0x8\n" \ 351 "2:\t"type##_sb("$1", "0(%2)")"\n" \ 352 ".set\tat\n\t" \ 353 "li\t%0, 0\n" \ 354 "3:\n\t" \ 355 ".insn\n\t" \ 356 ".section\t.fixup,\"ax\"\n\t" \ 357 "4:\tli\t%0, %3\n\t" \ 358 "j\t3b\n\t" \ 359 ".previous\n\t" \ 360 ".section\t__ex_table,\"a\"\n\t" \ 361 STR(PTR)"\t1b, 4b\n\t" \ 362 STR(PTR)"\t2b, 4b\n\t" \ 363 ".previous" \ 364 : "=r" (res) \ 365 : "r" (value), "r" (addr), "i" (-EFAULT));\ 366 } while(0) 367 368 #ifdef CONFIG_CPU_HAS_LOAD_STORE_LR 369 #define _StoreW(addr, value, res, type) \ 370 do { \ 371 __asm__ __volatile__ ( \ 372 "1:\t"type##_swl("%1", "(%2)")"\n" \ 373 "2:\t"type##_swr("%1", "3(%2)")"\n\t"\ 374 "li\t%0, 0\n" \ 375 "3:\n\t" \ 376 ".insn\n\t" \ 377 ".section\t.fixup,\"ax\"\n\t" \ 378 "4:\tli\t%0, %3\n\t" \ 379 "j\t3b\n\t" \ 380 ".previous\n\t" \ 381 ".section\t__ex_table,\"a\"\n\t" \ 382 STR(PTR)"\t1b, 4b\n\t" \ 383 STR(PTR)"\t2b, 4b\n\t" \ 384 ".previous" \ 385 : "=r" (res) \ 386 : "r" (value), "r" (addr), "i" (-EFAULT)); \ 387 } while(0) 388 389 #define _StoreDW(addr, value, res) \ 390 do { \ 391 __asm__ __volatile__ ( \ 392 "1:\tsdl\t%1,(%2)\n" \ 393 "2:\tsdr\t%1, 7(%2)\n\t" \ 394 "li\t%0, 0\n" \ 395 "3:\n\t" \ 396 ".insn\n\t" \ 397 ".section\t.fixup,\"ax\"\n\t" \ 398 "4:\tli\t%0, %3\n\t" \ 399 "j\t3b\n\t" \ 400 ".previous\n\t" \ 401 ".section\t__ex_table,\"a\"\n\t" \ 402 STR(PTR)"\t1b, 4b\n\t" \ 403 STR(PTR)"\t2b, 4b\n\t" \ 404 ".previous" \ 405 : "=r" (res) \ 406 : "r" (value), "r" (addr), "i" (-EFAULT)); \ 407 } while(0) 408 409 #else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ 410 #define _StoreW(addr, value, res, type) \ 411 do { \ 412 __asm__ __volatile__ ( \ 413 ".set\tpush\n\t" \ 414 ".set\tnoat\n\t" \ 415 "1:"type##_sb("%1", "3(%2)")"\n\t" \ 416 "srl\t$1, %1, 0x8\n\t" \ 417 "2:"type##_sb("$1", "2(%2)")"\n\t" \ 418 "srl\t$1, $1, 0x8\n\t" \ 419 "3:"type##_sb("$1", "1(%2)")"\n\t" \ 420 "srl\t$1, $1, 0x8\n\t" \ 421 "4:"type##_sb("$1", "0(%2)")"\n\t" \ 422 ".set\tpop\n\t" \ 423 "li\t%0, 0\n" \ 424 "10:\n\t" \ 425 ".insn\n\t" \ 426 ".section\t.fixup,\"ax\"\n\t" \ 427 "11:\tli\t%0, %3\n\t" \ 428 "j\t10b\n\t" \ 429 ".previous\n\t" \ 430 ".section\t__ex_table,\"a\"\n\t" \ 431 STR(PTR)"\t1b, 11b\n\t" \ 432 STR(PTR)"\t2b, 11b\n\t" \ 433 STR(PTR)"\t3b, 11b\n\t" \ 434 STR(PTR)"\t4b, 11b\n\t" \ 435 ".previous" \ 436 : "=&r" (res) \ 437 : "r" (value), "r" (addr), "i" (-EFAULT) \ 438 : "memory"); \ 439 } while(0) 440 441 #define _StoreDW(addr, value, res) \ 442 do { \ 443 __asm__ __volatile__ ( \ 444 ".set\tpush\n\t" \ 445 ".set\tnoat\n\t" \ 446 "1:sb\t%1, 7(%2)\n\t" \ 447 "dsrl\t$1, %1, 0x8\n\t" \ 448 "2:sb\t$1, 6(%2)\n\t" \ 449 "dsrl\t$1, $1, 0x8\n\t" \ 450 "3:sb\t$1, 5(%2)\n\t" \ 451 "dsrl\t$1, $1, 0x8\n\t" \ 452 "4:sb\t$1, 4(%2)\n\t" \ 453 "dsrl\t$1, $1, 0x8\n\t" \ 454 "5:sb\t$1, 3(%2)\n\t" \ 455 "dsrl\t$1, $1, 0x8\n\t" \ 456 "6:sb\t$1, 2(%2)\n\t" \ 457 "dsrl\t$1, $1, 0x8\n\t" \ 458 "7:sb\t$1, 1(%2)\n\t" \ 459 "dsrl\t$1, $1, 0x8\n\t" \ 460 "8:sb\t$1, 0(%2)\n\t" \ 461 "dsrl\t$1, $1, 0x8\n\t" \ 462 ".set\tpop\n\t" \ 463 "li\t%0, 0\n" \ 464 "10:\n\t" \ 465 ".insn\n\t" \ 466 ".section\t.fixup,\"ax\"\n\t" \ 467 "11:\tli\t%0, %3\n\t" \ 468 "j\t10b\n\t" \ 469 ".previous\n\t" \ 470 ".section\t__ex_table,\"a\"\n\t" \ 471 STR(PTR)"\t1b, 11b\n\t" \ 472 STR(PTR)"\t2b, 11b\n\t" \ 473 STR(PTR)"\t3b, 11b\n\t" \ 474 STR(PTR)"\t4b, 11b\n\t" \ 475 STR(PTR)"\t5b, 11b\n\t" \ 
476 STR(PTR)"\t6b, 11b\n\t" \ 477 STR(PTR)"\t7b, 11b\n\t" \ 478 STR(PTR)"\t8b, 11b\n\t" \ 479 ".previous" \ 480 : "=&r" (res) \ 481 : "r" (value), "r" (addr), "i" (-EFAULT) \ 482 : "memory"); \ 483 } while(0) 484 485 #endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ 486 487 #else /* __BIG_ENDIAN */ 488 489 #define _LoadHW(addr, value, res, type) \ 490 do { \ 491 __asm__ __volatile__ (".set\tnoat\n" \ 492 "1:\t"type##_lb("%0", "1(%2)")"\n" \ 493 "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\ 494 "sll\t%0, 0x8\n\t" \ 495 "or\t%0, $1\n\t" \ 496 "li\t%1, 0\n" \ 497 "3:\t.set\tat\n\t" \ 498 ".insn\n\t" \ 499 ".section\t.fixup,\"ax\"\n\t" \ 500 "4:\tli\t%1, %3\n\t" \ 501 "j\t3b\n\t" \ 502 ".previous\n\t" \ 503 ".section\t__ex_table,\"a\"\n\t" \ 504 STR(PTR)"\t1b, 4b\n\t" \ 505 STR(PTR)"\t2b, 4b\n\t" \ 506 ".previous" \ 507 : "=&r" (value), "=r" (res) \ 508 : "r" (addr), "i" (-EFAULT)); \ 509 } while(0) 510 511 #ifdef CONFIG_CPU_HAS_LOAD_STORE_LR 512 #define _LoadW(addr, value, res, type) \ 513 do { \ 514 __asm__ __volatile__ ( \ 515 "1:\t"type##_lwl("%0", "3(%2)")"\n" \ 516 "2:\t"type##_lwr("%0", "(%2)")"\n\t"\ 517 "li\t%1, 0\n" \ 518 "3:\n\t" \ 519 ".insn\n\t" \ 520 ".section\t.fixup,\"ax\"\n\t" \ 521 "4:\tli\t%1, %3\n\t" \ 522 "j\t3b\n\t" \ 523 ".previous\n\t" \ 524 ".section\t__ex_table,\"a\"\n\t" \ 525 STR(PTR)"\t1b, 4b\n\t" \ 526 STR(PTR)"\t2b, 4b\n\t" \ 527 ".previous" \ 528 : "=&r" (value), "=r" (res) \ 529 : "r" (addr), "i" (-EFAULT)); \ 530 } while(0) 531 532 #else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ 533 /* For CPUs without lwl instruction */ 534 #define _LoadW(addr, value, res, type) \ 535 do { \ 536 __asm__ __volatile__ ( \ 537 ".set\tpush\n" \ 538 ".set\tnoat\n\t" \ 539 "1:"type##_lb("%0", "3(%2)")"\n\t" \ 540 "2:"type##_lbu("$1", "2(%2)")"\n\t" \ 541 "sll\t%0, 0x8\n\t" \ 542 "or\t%0, $1\n\t" \ 543 "3:"type##_lbu("$1", "1(%2)")"\n\t" \ 544 "sll\t%0, 0x8\n\t" \ 545 "or\t%0, $1\n\t" \ 546 "4:"type##_lbu("$1", "0(%2)")"\n\t" \ 547 "sll\t%0, 0x8\n\t" \ 548 "or\t%0, $1\n\t" \ 549 "li\t%1, 0\n" \ 550 ".set\tpop\n" \ 551 "10:\n\t" \ 552 ".insn\n\t" \ 553 ".section\t.fixup,\"ax\"\n\t" \ 554 "11:\tli\t%1, %3\n\t" \ 555 "j\t10b\n\t" \ 556 ".previous\n\t" \ 557 ".section\t__ex_table,\"a\"\n\t" \ 558 STR(PTR)"\t1b, 11b\n\t" \ 559 STR(PTR)"\t2b, 11b\n\t" \ 560 STR(PTR)"\t3b, 11b\n\t" \ 561 STR(PTR)"\t4b, 11b\n\t" \ 562 ".previous" \ 563 : "=&r" (value), "=r" (res) \ 564 : "r" (addr), "i" (-EFAULT)); \ 565 } while(0) 566 567 #endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ 568 569 570 #define _LoadHWU(addr, value, res, type) \ 571 do { \ 572 __asm__ __volatile__ ( \ 573 ".set\tnoat\n" \ 574 "1:\t"type##_lbu("%0", "1(%2)")"\n" \ 575 "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\ 576 "sll\t%0, 0x8\n\t" \ 577 "or\t%0, $1\n\t" \ 578 "li\t%1, 0\n" \ 579 "3:\n\t" \ 580 ".insn\n\t" \ 581 ".set\tat\n\t" \ 582 ".section\t.fixup,\"ax\"\n\t" \ 583 "4:\tli\t%1, %3\n\t" \ 584 "j\t3b\n\t" \ 585 ".previous\n\t" \ 586 ".section\t__ex_table,\"a\"\n\t" \ 587 STR(PTR)"\t1b, 4b\n\t" \ 588 STR(PTR)"\t2b, 4b\n\t" \ 589 ".previous" \ 590 : "=&r" (value), "=r" (res) \ 591 : "r" (addr), "i" (-EFAULT)); \ 592 } while(0) 593 594 #ifdef CONFIG_CPU_HAS_LOAD_STORE_LR 595 #define _LoadWU(addr, value, res, type) \ 596 do { \ 597 __asm__ __volatile__ ( \ 598 "1:\t"type##_lwl("%0", "3(%2)")"\n" \ 599 "2:\t"type##_lwr("%0", "(%2)")"\n\t"\ 600 "dsll\t%0, %0, 32\n\t" \ 601 "dsrl\t%0, %0, 32\n\t" \ 602 "li\t%1, 0\n" \ 603 "3:\n\t" \ 604 ".insn\n\t" \ 605 "\t.section\t.fixup,\"ax\"\n\t" \ 606 "4:\tli\t%1, %3\n\t" \ 607 "j\t3b\n\t" \ 608 ".previous\n\t" \ 609 
".section\t__ex_table,\"a\"\n\t" \ 610 STR(PTR)"\t1b, 4b\n\t" \ 611 STR(PTR)"\t2b, 4b\n\t" \ 612 ".previous" \ 613 : "=&r" (value), "=r" (res) \ 614 : "r" (addr), "i" (-EFAULT)); \ 615 } while(0) 616 617 #define _LoadDW(addr, value, res) \ 618 do { \ 619 __asm__ __volatile__ ( \ 620 "1:\tldl\t%0, 7(%2)\n" \ 621 "2:\tldr\t%0, (%2)\n\t" \ 622 "li\t%1, 0\n" \ 623 "3:\n\t" \ 624 ".insn\n\t" \ 625 "\t.section\t.fixup,\"ax\"\n\t" \ 626 "4:\tli\t%1, %3\n\t" \ 627 "j\t3b\n\t" \ 628 ".previous\n\t" \ 629 ".section\t__ex_table,\"a\"\n\t" \ 630 STR(PTR)"\t1b, 4b\n\t" \ 631 STR(PTR)"\t2b, 4b\n\t" \ 632 ".previous" \ 633 : "=&r" (value), "=r" (res) \ 634 : "r" (addr), "i" (-EFAULT)); \ 635 } while(0) 636 637 #else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ 638 /* For CPUs without lwl and ldl instructions */ 639 #define _LoadWU(addr, value, res, type) \ 640 do { \ 641 __asm__ __volatile__ ( \ 642 ".set\tpush\n\t" \ 643 ".set\tnoat\n\t" \ 644 "1:"type##_lbu("%0", "3(%2)")"\n\t" \ 645 "2:"type##_lbu("$1", "2(%2)")"\n\t" \ 646 "sll\t%0, 0x8\n\t" \ 647 "or\t%0, $1\n\t" \ 648 "3:"type##_lbu("$1", "1(%2)")"\n\t" \ 649 "sll\t%0, 0x8\n\t" \ 650 "or\t%0, $1\n\t" \ 651 "4:"type##_lbu("$1", "0(%2)")"\n\t" \ 652 "sll\t%0, 0x8\n\t" \ 653 "or\t%0, $1\n\t" \ 654 "li\t%1, 0\n" \ 655 ".set\tpop\n" \ 656 "10:\n\t" \ 657 ".insn\n\t" \ 658 ".section\t.fixup,\"ax\"\n\t" \ 659 "11:\tli\t%1, %3\n\t" \ 660 "j\t10b\n\t" \ 661 ".previous\n\t" \ 662 ".section\t__ex_table,\"a\"\n\t" \ 663 STR(PTR)"\t1b, 11b\n\t" \ 664 STR(PTR)"\t2b, 11b\n\t" \ 665 STR(PTR)"\t3b, 11b\n\t" \ 666 STR(PTR)"\t4b, 11b\n\t" \ 667 ".previous" \ 668 : "=&r" (value), "=r" (res) \ 669 : "r" (addr), "i" (-EFAULT)); \ 670 } while(0) 671 672 #define _LoadDW(addr, value, res) \ 673 do { \ 674 __asm__ __volatile__ ( \ 675 ".set\tpush\n\t" \ 676 ".set\tnoat\n\t" \ 677 "1:lb\t%0, 7(%2)\n\t" \ 678 "2:lbu\t$1, 6(%2)\n\t" \ 679 "dsll\t%0, 0x8\n\t" \ 680 "or\t%0, $1\n\t" \ 681 "3:lbu\t$1, 5(%2)\n\t" \ 682 "dsll\t%0, 0x8\n\t" \ 683 "or\t%0, $1\n\t" \ 684 "4:lbu\t$1, 4(%2)\n\t" \ 685 "dsll\t%0, 0x8\n\t" \ 686 "or\t%0, $1\n\t" \ 687 "5:lbu\t$1, 3(%2)\n\t" \ 688 "dsll\t%0, 0x8\n\t" \ 689 "or\t%0, $1\n\t" \ 690 "6:lbu\t$1, 2(%2)\n\t" \ 691 "dsll\t%0, 0x8\n\t" \ 692 "or\t%0, $1\n\t" \ 693 "7:lbu\t$1, 1(%2)\n\t" \ 694 "dsll\t%0, 0x8\n\t" \ 695 "or\t%0, $1\n\t" \ 696 "8:lbu\t$1, 0(%2)\n\t" \ 697 "dsll\t%0, 0x8\n\t" \ 698 "or\t%0, $1\n\t" \ 699 "li\t%1, 0\n" \ 700 ".set\tpop\n\t" \ 701 "10:\n\t" \ 702 ".insn\n\t" \ 703 ".section\t.fixup,\"ax\"\n\t" \ 704 "11:\tli\t%1, %3\n\t" \ 705 "j\t10b\n\t" \ 706 ".previous\n\t" \ 707 ".section\t__ex_table,\"a\"\n\t" \ 708 STR(PTR)"\t1b, 11b\n\t" \ 709 STR(PTR)"\t2b, 11b\n\t" \ 710 STR(PTR)"\t3b, 11b\n\t" \ 711 STR(PTR)"\t4b, 11b\n\t" \ 712 STR(PTR)"\t5b, 11b\n\t" \ 713 STR(PTR)"\t6b, 11b\n\t" \ 714 STR(PTR)"\t7b, 11b\n\t" \ 715 STR(PTR)"\t8b, 11b\n\t" \ 716 ".previous" \ 717 : "=&r" (value), "=r" (res) \ 718 : "r" (addr), "i" (-EFAULT)); \ 719 } while(0) 720 #endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ 721 722 #define _StoreHW(addr, value, res, type) \ 723 do { \ 724 __asm__ __volatile__ ( \ 725 ".set\tnoat\n" \ 726 "1:\t"type##_sb("%1", "0(%2)")"\n" \ 727 "srl\t$1,%1, 0x8\n" \ 728 "2:\t"type##_sb("$1", "1(%2)")"\n" \ 729 ".set\tat\n\t" \ 730 "li\t%0, 0\n" \ 731 "3:\n\t" \ 732 ".insn\n\t" \ 733 ".section\t.fixup,\"ax\"\n\t" \ 734 "4:\tli\t%0, %3\n\t" \ 735 "j\t3b\n\t" \ 736 ".previous\n\t" \ 737 ".section\t__ex_table,\"a\"\n\t" \ 738 STR(PTR)"\t1b, 4b\n\t" \ 739 STR(PTR)"\t2b, 4b\n\t" \ 740 ".previous" \ 741 : "=r" (res) \ 742 : "r" (value), "r" 
(addr), "i" (-EFAULT));\ 743 } while(0) 744 745 #ifdef CONFIG_CPU_HAS_LOAD_STORE_LR 746 #define _StoreW(addr, value, res, type) \ 747 do { \ 748 __asm__ __volatile__ ( \ 749 "1:\t"type##_swl("%1", "3(%2)")"\n" \ 750 "2:\t"type##_swr("%1", "(%2)")"\n\t"\ 751 "li\t%0, 0\n" \ 752 "3:\n\t" \ 753 ".insn\n\t" \ 754 ".section\t.fixup,\"ax\"\n\t" \ 755 "4:\tli\t%0, %3\n\t" \ 756 "j\t3b\n\t" \ 757 ".previous\n\t" \ 758 ".section\t__ex_table,\"a\"\n\t" \ 759 STR(PTR)"\t1b, 4b\n\t" \ 760 STR(PTR)"\t2b, 4b\n\t" \ 761 ".previous" \ 762 : "=r" (res) \ 763 : "r" (value), "r" (addr), "i" (-EFAULT)); \ 764 } while(0) 765 766 #define _StoreDW(addr, value, res) \ 767 do { \ 768 __asm__ __volatile__ ( \ 769 "1:\tsdl\t%1, 7(%2)\n" \ 770 "2:\tsdr\t%1, (%2)\n\t" \ 771 "li\t%0, 0\n" \ 772 "3:\n\t" \ 773 ".insn\n\t" \ 774 ".section\t.fixup,\"ax\"\n\t" \ 775 "4:\tli\t%0, %3\n\t" \ 776 "j\t3b\n\t" \ 777 ".previous\n\t" \ 778 ".section\t__ex_table,\"a\"\n\t" \ 779 STR(PTR)"\t1b, 4b\n\t" \ 780 STR(PTR)"\t2b, 4b\n\t" \ 781 ".previous" \ 782 : "=r" (res) \ 783 : "r" (value), "r" (addr), "i" (-EFAULT)); \ 784 } while(0) 785 786 #else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ 787 /* For CPUs without swl and sdl instructions */ 788 #define _StoreW(addr, value, res, type) \ 789 do { \ 790 __asm__ __volatile__ ( \ 791 ".set\tpush\n\t" \ 792 ".set\tnoat\n\t" \ 793 "1:"type##_sb("%1", "0(%2)")"\n\t" \ 794 "srl\t$1, %1, 0x8\n\t" \ 795 "2:"type##_sb("$1", "1(%2)")"\n\t" \ 796 "srl\t$1, $1, 0x8\n\t" \ 797 "3:"type##_sb("$1", "2(%2)")"\n\t" \ 798 "srl\t$1, $1, 0x8\n\t" \ 799 "4:"type##_sb("$1", "3(%2)")"\n\t" \ 800 ".set\tpop\n\t" \ 801 "li\t%0, 0\n" \ 802 "10:\n\t" \ 803 ".insn\n\t" \ 804 ".section\t.fixup,\"ax\"\n\t" \ 805 "11:\tli\t%0, %3\n\t" \ 806 "j\t10b\n\t" \ 807 ".previous\n\t" \ 808 ".section\t__ex_table,\"a\"\n\t" \ 809 STR(PTR)"\t1b, 11b\n\t" \ 810 STR(PTR)"\t2b, 11b\n\t" \ 811 STR(PTR)"\t3b, 11b\n\t" \ 812 STR(PTR)"\t4b, 11b\n\t" \ 813 ".previous" \ 814 : "=&r" (res) \ 815 : "r" (value), "r" (addr), "i" (-EFAULT) \ 816 : "memory"); \ 817 } while(0) 818 819 #define _StoreDW(addr, value, res) \ 820 do { \ 821 __asm__ __volatile__ ( \ 822 ".set\tpush\n\t" \ 823 ".set\tnoat\n\t" \ 824 "1:sb\t%1, 0(%2)\n\t" \ 825 "dsrl\t$1, %1, 0x8\n\t" \ 826 "2:sb\t$1, 1(%2)\n\t" \ 827 "dsrl\t$1, $1, 0x8\n\t" \ 828 "3:sb\t$1, 2(%2)\n\t" \ 829 "dsrl\t$1, $1, 0x8\n\t" \ 830 "4:sb\t$1, 3(%2)\n\t" \ 831 "dsrl\t$1, $1, 0x8\n\t" \ 832 "5:sb\t$1, 4(%2)\n\t" \ 833 "dsrl\t$1, $1, 0x8\n\t" \ 834 "6:sb\t$1, 5(%2)\n\t" \ 835 "dsrl\t$1, $1, 0x8\n\t" \ 836 "7:sb\t$1, 6(%2)\n\t" \ 837 "dsrl\t$1, $1, 0x8\n\t" \ 838 "8:sb\t$1, 7(%2)\n\t" \ 839 "dsrl\t$1, $1, 0x8\n\t" \ 840 ".set\tpop\n\t" \ 841 "li\t%0, 0\n" \ 842 "10:\n\t" \ 843 ".insn\n\t" \ 844 ".section\t.fixup,\"ax\"\n\t" \ 845 "11:\tli\t%0, %3\n\t" \ 846 "j\t10b\n\t" \ 847 ".previous\n\t" \ 848 ".section\t__ex_table,\"a\"\n\t" \ 849 STR(PTR)"\t1b, 11b\n\t" \ 850 STR(PTR)"\t2b, 11b\n\t" \ 851 STR(PTR)"\t3b, 11b\n\t" \ 852 STR(PTR)"\t4b, 11b\n\t" \ 853 STR(PTR)"\t5b, 11b\n\t" \ 854 STR(PTR)"\t6b, 11b\n\t" \ 855 STR(PTR)"\t7b, 11b\n\t" \ 856 STR(PTR)"\t8b, 11b\n\t" \ 857 ".previous" \ 858 : "=&r" (res) \ 859 : "r" (value), "r" (addr), "i" (-EFAULT) \ 860 : "memory"); \ 861 } while(0) 862 863 #endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */ 864 #endif 865 866 #define LoadHWU(addr, value, res) _LoadHWU(addr, value, res, kernel) 867 #define LoadHWUE(addr, value, res) _LoadHWU(addr, value, res, user) 868 #define LoadWU(addr, value, res) _LoadWU(addr, value, res, kernel) 869 #define LoadWUE(addr, value, res) 
_LoadWU(addr, value, res, user) 870 #define LoadHW(addr, value, res) _LoadHW(addr, value, res, kernel) 871 #define LoadHWE(addr, value, res) _LoadHW(addr, value, res, user) 872 #define LoadW(addr, value, res) _LoadW(addr, value, res, kernel) 873 #define LoadWE(addr, value, res) _LoadW(addr, value, res, user) 874 #define LoadDW(addr, value, res) _LoadDW(addr, value, res) 875 876 #define StoreHW(addr, value, res) _StoreHW(addr, value, res, kernel) 877 #define StoreHWE(addr, value, res) _StoreHW(addr, value, res, user) 878 #define StoreW(addr, value, res) _StoreW(addr, value, res, kernel) 879 #define StoreWE(addr, value, res) _StoreW(addr, value, res, user) 880 #define StoreDW(addr, value, res) _StoreDW(addr, value, res) 881 882 static void emulate_load_store_insn(struct pt_regs *regs, 883 void __user *addr, unsigned int __user *pc) 884 { 885 unsigned long origpc, orig31, value; 886 union mips_instruction insn; 887 unsigned int res; 888 #ifdef CONFIG_EVA 889 mm_segment_t seg; 890 #endif 891 origpc = (unsigned long)pc; 892 orig31 = regs->regs[31]; 893 894 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); 895 896 /* 897 * This load never faults. 898 */ 899 __get_user(insn.word, pc); 900 901 switch (insn.i_format.opcode) { 902 /* 903 * These are instructions that a compiler doesn't generate. We 904 * can assume therefore that the code is MIPS-aware and 905 * really buggy. Emulating these instructions would break the 906 * semantics anyway. 907 */ 908 case ll_op: 909 case lld_op: 910 case sc_op: 911 case scd_op: 912 913 /* 914 * For these instructions the only way to create an address 915 * error is an attempted access to kernel/supervisor address 916 * space. 917 */ 918 case ldl_op: 919 case ldr_op: 920 case lwl_op: 921 case lwr_op: 922 case sdl_op: 923 case sdr_op: 924 case swl_op: 925 case swr_op: 926 case lb_op: 927 case lbu_op: 928 case sb_op: 929 goto sigbus; 930 931 /* 932 * The remaining opcodes are the ones that are really of 933 * interest. 934 */ 935 case spec3_op: 936 if (insn.dsp_format.func == lx_op) { 937 switch (insn.dsp_format.op) { 938 case lwx_op: 939 if (!access_ok(addr, 4)) 940 goto sigbus; 941 LoadW(addr, value, res); 942 if (res) 943 goto fault; 944 compute_return_epc(regs); 945 regs->regs[insn.dsp_format.rd] = value; 946 break; 947 case lhx_op: 948 if (!access_ok(addr, 2)) 949 goto sigbus; 950 LoadHW(addr, value, res); 951 if (res) 952 goto fault; 953 compute_return_epc(regs); 954 regs->regs[insn.dsp_format.rd] = value; 955 break; 956 default: 957 goto sigill; 958 } 959 } 960 #ifdef CONFIG_EVA 961 else { 962 /* 963 * we can land here only from kernel accessing user 964 * memory, so we need to "switch" the address limit to 965 * user space, so that address check can work properly. 
966 */ 967 seg = get_fs(); 968 set_fs(USER_DS); 969 switch (insn.spec3_format.func) { 970 case lhe_op: 971 if (!access_ok(addr, 2)) { 972 set_fs(seg); 973 goto sigbus; 974 } 975 LoadHWE(addr, value, res); 976 if (res) { 977 set_fs(seg); 978 goto fault; 979 } 980 compute_return_epc(regs); 981 regs->regs[insn.spec3_format.rt] = value; 982 break; 983 case lwe_op: 984 if (!access_ok(addr, 4)) { 985 set_fs(seg); 986 goto sigbus; 987 } 988 LoadWE(addr, value, res); 989 if (res) { 990 set_fs(seg); 991 goto fault; 992 } 993 compute_return_epc(regs); 994 regs->regs[insn.spec3_format.rt] = value; 995 break; 996 case lhue_op: 997 if (!access_ok(addr, 2)) { 998 set_fs(seg); 999 goto sigbus; 1000 } 1001 LoadHWUE(addr, value, res); 1002 if (res) { 1003 set_fs(seg); 1004 goto fault; 1005 } 1006 compute_return_epc(regs); 1007 regs->regs[insn.spec3_format.rt] = value; 1008 break; 1009 case she_op: 1010 if (!access_ok(addr, 2)) { 1011 set_fs(seg); 1012 goto sigbus; 1013 } 1014 compute_return_epc(regs); 1015 value = regs->regs[insn.spec3_format.rt]; 1016 StoreHWE(addr, value, res); 1017 if (res) { 1018 set_fs(seg); 1019 goto fault; 1020 } 1021 break; 1022 case swe_op: 1023 if (!access_ok(addr, 4)) { 1024 set_fs(seg); 1025 goto sigbus; 1026 } 1027 compute_return_epc(regs); 1028 value = regs->regs[insn.spec3_format.rt]; 1029 StoreWE(addr, value, res); 1030 if (res) { 1031 set_fs(seg); 1032 goto fault; 1033 } 1034 break; 1035 default: 1036 set_fs(seg); 1037 goto sigill; 1038 } 1039 set_fs(seg); 1040 } 1041 #endif 1042 break; 1043 case lh_op: 1044 if (!access_ok(addr, 2)) 1045 goto sigbus; 1046 1047 if (IS_ENABLED(CONFIG_EVA)) { 1048 if (uaccess_kernel()) 1049 LoadHW(addr, value, res); 1050 else 1051 LoadHWE(addr, value, res); 1052 } else { 1053 LoadHW(addr, value, res); 1054 } 1055 1056 if (res) 1057 goto fault; 1058 compute_return_epc(regs); 1059 regs->regs[insn.i_format.rt] = value; 1060 break; 1061 1062 case lw_op: 1063 if (!access_ok(addr, 4)) 1064 goto sigbus; 1065 1066 if (IS_ENABLED(CONFIG_EVA)) { 1067 if (uaccess_kernel()) 1068 LoadW(addr, value, res); 1069 else 1070 LoadWE(addr, value, res); 1071 } else { 1072 LoadW(addr, value, res); 1073 } 1074 1075 if (res) 1076 goto fault; 1077 compute_return_epc(regs); 1078 regs->regs[insn.i_format.rt] = value; 1079 break; 1080 1081 case lhu_op: 1082 if (!access_ok(addr, 2)) 1083 goto sigbus; 1084 1085 if (IS_ENABLED(CONFIG_EVA)) { 1086 if (uaccess_kernel()) 1087 LoadHWU(addr, value, res); 1088 else 1089 LoadHWUE(addr, value, res); 1090 } else { 1091 LoadHWU(addr, value, res); 1092 } 1093 1094 if (res) 1095 goto fault; 1096 compute_return_epc(regs); 1097 regs->regs[insn.i_format.rt] = value; 1098 break; 1099 1100 case lwu_op: 1101 #ifdef CONFIG_64BIT 1102 /* 1103 * A 32-bit kernel might be running on a 64-bit processor. But 1104 * if we're on a 32-bit processor and an i-cache incoherency 1105 * or race makes us see a 64-bit instruction here the sdl/sdr 1106 * would blow up, so for now we don't handle unaligned 64-bit 1107 * instructions on 32-bit kernels. 1108 */ 1109 if (!access_ok(addr, 4)) 1110 goto sigbus; 1111 1112 LoadWU(addr, value, res); 1113 if (res) 1114 goto fault; 1115 compute_return_epc(regs); 1116 regs->regs[insn.i_format.rt] = value; 1117 break; 1118 #endif /* CONFIG_64BIT */ 1119 1120 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1121 goto sigill; 1122 1123 case ld_op: 1124 #ifdef CONFIG_64BIT 1125 /* 1126 * A 32-bit kernel might be running on a 64-bit processor. 
But 1127 * if we're on a 32-bit processor and an i-cache incoherency 1128 * or race makes us see a 64-bit instruction here the sdl/sdr 1129 * would blow up, so for now we don't handle unaligned 64-bit 1130 * instructions on 32-bit kernels. 1131 */ 1132 if (!access_ok(addr, 8)) 1133 goto sigbus; 1134 1135 LoadDW(addr, value, res); 1136 if (res) 1137 goto fault; 1138 compute_return_epc(regs); 1139 regs->regs[insn.i_format.rt] = value; 1140 break; 1141 #endif /* CONFIG_64BIT */ 1142 1143 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1144 goto sigill; 1145 1146 case sh_op: 1147 if (!access_ok(addr, 2)) 1148 goto sigbus; 1149 1150 compute_return_epc(regs); 1151 value = regs->regs[insn.i_format.rt]; 1152 1153 if (IS_ENABLED(CONFIG_EVA)) { 1154 if (uaccess_kernel()) 1155 StoreHW(addr, value, res); 1156 else 1157 StoreHWE(addr, value, res); 1158 } else { 1159 StoreHW(addr, value, res); 1160 } 1161 1162 if (res) 1163 goto fault; 1164 break; 1165 1166 case sw_op: 1167 if (!access_ok(addr, 4)) 1168 goto sigbus; 1169 1170 compute_return_epc(regs); 1171 value = regs->regs[insn.i_format.rt]; 1172 1173 if (IS_ENABLED(CONFIG_EVA)) { 1174 if (uaccess_kernel()) 1175 StoreW(addr, value, res); 1176 else 1177 StoreWE(addr, value, res); 1178 } else { 1179 StoreW(addr, value, res); 1180 } 1181 1182 if (res) 1183 goto fault; 1184 break; 1185 1186 case sd_op: 1187 #ifdef CONFIG_64BIT 1188 /* 1189 * A 32-bit kernel might be running on a 64-bit processor. But 1190 * if we're on a 32-bit processor and an i-cache incoherency 1191 * or race makes us see a 64-bit instruction here the sdl/sdr 1192 * would blow up, so for now we don't handle unaligned 64-bit 1193 * instructions on 32-bit kernels. 1194 */ 1195 if (!access_ok(addr, 8)) 1196 goto sigbus; 1197 1198 compute_return_epc(regs); 1199 value = regs->regs[insn.i_format.rt]; 1200 StoreDW(addr, value, res); 1201 if (res) 1202 goto fault; 1203 break; 1204 #endif /* CONFIG_64BIT */ 1205 1206 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1207 goto sigill; 1208 1209 #ifdef CONFIG_MIPS_FP_SUPPORT 1210 1211 case lwc1_op: 1212 case ldc1_op: 1213 case swc1_op: 1214 case sdc1_op: 1215 case cop1x_op: { 1216 void __user *fault_addr = NULL; 1217 1218 die_if_kernel("Unaligned FP access in kernel code", regs); 1219 BUG_ON(!used_math()); 1220 1221 res = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, 1222 &fault_addr); 1223 own_fpu(1); /* Restore FPU state. */ 1224 1225 /* Signal if something went wrong. */ 1226 process_fpemu_return(res, fault_addr, 0); 1227 1228 if (res == 0) 1229 break; 1230 return; 1231 } 1232 #endif /* CONFIG_MIPS_FP_SUPPORT */ 1233 1234 #ifdef CONFIG_CPU_HAS_MSA 1235 1236 case msa_op: { 1237 unsigned int wd, preempted; 1238 enum msa_2b_fmt df; 1239 union fpureg *fpr; 1240 1241 if (!cpu_has_msa) 1242 goto sigill; 1243 1244 /* 1245 * If we've reached this point then userland should have taken 1246 * the MSA disabled exception & initialised vector context at 1247 * some point in the past. 1248 */ 1249 BUG_ON(!thread_msa_context_live()); 1250 1251 df = insn.msa_mi10_format.df; 1252 wd = insn.msa_mi10_format.wd; 1253 fpr = ¤t->thread.fpu.fpr[wd]; 1254 1255 switch (insn.msa_mi10_format.func) { 1256 case msa_ld_op: 1257 if (!access_ok(addr, sizeof(*fpr))) 1258 goto sigbus; 1259 1260 do { 1261 /* 1262 * If we have live MSA context keep track of 1263 * whether we get preempted in order to avoid 1264 * the register context we load being clobbered 1265 * by the live context as it's saved during 1266 * preemption. 
				 * If we don't have live context
				 * then it can't be saved to clobber the value
				 * we load.
				 */
				preempted = test_thread_flag(TIF_USEDMSA);

				res = __copy_from_user_inatomic(fpr, addr,
								sizeof(*fpr));
				if (res)
					goto fault;

				/*
				 * Update the hardware register if it is in use
				 * by the task in this quantum, in order to
				 * avoid having to save & restore the whole
				 * vector context.
				 */
				preempt_disable();
				if (test_thread_flag(TIF_USEDMSA)) {
					write_msa_wr(wd, fpr, df);
					preempted = 0;
				}
				preempt_enable();
			} while (preempted);
			break;

		case msa_st_op:
			if (!access_ok(addr, sizeof(*fpr)))
				goto sigbus;

			/*
			 * Update from the hardware register if it is in use by
			 * the task in this quantum, in order to avoid having to
			 * save & restore the whole vector context.
			 */
			preempt_disable();
			if (test_thread_flag(TIF_USEDMSA))
				read_msa_wr(wd, fpr, df);
			preempt_enable();

			res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
			if (res)
				goto fault;
			break;

		default:
			goto sigbus;
		}

		compute_return_epc(regs);
		break;
	}
#endif /* CONFIG_CPU_HAS_MSA */

#ifndef CONFIG_CPU_MIPSR6
	/*
	 * COP2 is available to the implementor for application specific use.
	 * It's up to applications to register a notifier chain and do
	 * whatever they have to do, including possible sending of signals.
	 *
	 * This instruction has been reallocated in Release 6.
	 */
	case lwc2_op:
		cu2_notifier_call_chain(CU2_LWC2_OP, regs);
		break;

	case ldc2_op:
		cu2_notifier_call_chain(CU2_LDC2_OP, regs);
		break;

	case swc2_op:
		cu2_notifier_call_chain(CU2_SWC2_OP, regs);
		break;

	case sdc2_op:
		cu2_notifier_call_chain(CU2_SDC2_OP, regs);
		break;
#endif
	default:
		/*
		 * Pheeee...  We encountered a yet unknown instruction or
		 * a cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}

#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif

	return;

fault:
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV, current);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS, current);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL, current);
}

/* Recode table from 16-bit register notation to 32-bit GPR. */
const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };

/* Recode table from 16-bit STORE register notation to 32-bit GPR.
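 *
 * These tables decode the 3-bit register fields of the compressed
 * (microMIPS/MIPS16e) encodings into full GPR numbers: $16/$17 (s0/s1),
 * $2/$3 (v0/v1) and $4..$7 (a0..a3).  In the store table, slot 0 selects
 * $0 (zero) rather than $16.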
*/ 1387 static const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 }; 1388 1389 static void emulate_load_store_microMIPS(struct pt_regs *regs, 1390 void __user *addr) 1391 { 1392 unsigned long value; 1393 unsigned int res; 1394 int i; 1395 unsigned int reg = 0, rvar; 1396 unsigned long orig31; 1397 u16 __user *pc16; 1398 u16 halfword; 1399 unsigned int word; 1400 unsigned long origpc, contpc; 1401 union mips_instruction insn; 1402 struct mm_decoded_insn mminsn; 1403 1404 origpc = regs->cp0_epc; 1405 orig31 = regs->regs[31]; 1406 1407 mminsn.micro_mips_mode = 1; 1408 1409 /* 1410 * This load never faults. 1411 */ 1412 pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc); 1413 __get_user(halfword, pc16); 1414 pc16++; 1415 contpc = regs->cp0_epc + 2; 1416 word = ((unsigned int)halfword << 16); 1417 mminsn.pc_inc = 2; 1418 1419 if (!mm_insn_16bit(halfword)) { 1420 __get_user(halfword, pc16); 1421 pc16++; 1422 contpc = regs->cp0_epc + 4; 1423 mminsn.pc_inc = 4; 1424 word |= halfword; 1425 } 1426 mminsn.insn = word; 1427 1428 if (get_user(halfword, pc16)) 1429 goto fault; 1430 mminsn.next_pc_inc = 2; 1431 word = ((unsigned int)halfword << 16); 1432 1433 if (!mm_insn_16bit(halfword)) { 1434 pc16++; 1435 if (get_user(halfword, pc16)) 1436 goto fault; 1437 mminsn.next_pc_inc = 4; 1438 word |= halfword; 1439 } 1440 mminsn.next_insn = word; 1441 1442 insn = (union mips_instruction)(mminsn.insn); 1443 if (mm_isBranchInstr(regs, mminsn, &contpc)) 1444 insn = (union mips_instruction)(mminsn.next_insn); 1445 1446 /* Parse instruction to find what to do */ 1447 1448 switch (insn.mm_i_format.opcode) { 1449 1450 case mm_pool32a_op: 1451 switch (insn.mm_x_format.func) { 1452 case mm_lwxs_op: 1453 reg = insn.mm_x_format.rd; 1454 goto loadW; 1455 } 1456 1457 goto sigbus; 1458 1459 case mm_pool32b_op: 1460 switch (insn.mm_m_format.func) { 1461 case mm_lwp_func: 1462 reg = insn.mm_m_format.rd; 1463 if (reg == 31) 1464 goto sigbus; 1465 1466 if (!access_ok(addr, 8)) 1467 goto sigbus; 1468 1469 LoadW(addr, value, res); 1470 if (res) 1471 goto fault; 1472 regs->regs[reg] = value; 1473 addr += 4; 1474 LoadW(addr, value, res); 1475 if (res) 1476 goto fault; 1477 regs->regs[reg + 1] = value; 1478 goto success; 1479 1480 case mm_swp_func: 1481 reg = insn.mm_m_format.rd; 1482 if (reg == 31) 1483 goto sigbus; 1484 1485 if (!access_ok(addr, 8)) 1486 goto sigbus; 1487 1488 value = regs->regs[reg]; 1489 StoreW(addr, value, res); 1490 if (res) 1491 goto fault; 1492 addr += 4; 1493 value = regs->regs[reg + 1]; 1494 StoreW(addr, value, res); 1495 if (res) 1496 goto fault; 1497 goto success; 1498 1499 case mm_ldp_func: 1500 #ifdef CONFIG_64BIT 1501 reg = insn.mm_m_format.rd; 1502 if (reg == 31) 1503 goto sigbus; 1504 1505 if (!access_ok(addr, 16)) 1506 goto sigbus; 1507 1508 LoadDW(addr, value, res); 1509 if (res) 1510 goto fault; 1511 regs->regs[reg] = value; 1512 addr += 8; 1513 LoadDW(addr, value, res); 1514 if (res) 1515 goto fault; 1516 regs->regs[reg + 1] = value; 1517 goto success; 1518 #endif /* CONFIG_64BIT */ 1519 1520 goto sigill; 1521 1522 case mm_sdp_func: 1523 #ifdef CONFIG_64BIT 1524 reg = insn.mm_m_format.rd; 1525 if (reg == 31) 1526 goto sigbus; 1527 1528 if (!access_ok(addr, 16)) 1529 goto sigbus; 1530 1531 value = regs->regs[reg]; 1532 StoreDW(addr, value, res); 1533 if (res) 1534 goto fault; 1535 addr += 8; 1536 value = regs->regs[reg + 1]; 1537 StoreDW(addr, value, res); 1538 if (res) 1539 goto fault; 1540 goto success; 1541 #endif /* CONFIG_64BIT */ 1542 1543 goto sigill; 1544 1545 case 
mm_lwm32_func: 1546 reg = insn.mm_m_format.rd; 1547 rvar = reg & 0xf; 1548 if ((rvar > 9) || !reg) 1549 goto sigill; 1550 if (reg & 0x10) { 1551 if (!access_ok(addr, 4 * (rvar + 1))) 1552 goto sigbus; 1553 } else { 1554 if (!access_ok(addr, 4 * rvar)) 1555 goto sigbus; 1556 } 1557 if (rvar == 9) 1558 rvar = 8; 1559 for (i = 16; rvar; rvar--, i++) { 1560 LoadW(addr, value, res); 1561 if (res) 1562 goto fault; 1563 addr += 4; 1564 regs->regs[i] = value; 1565 } 1566 if ((reg & 0xf) == 9) { 1567 LoadW(addr, value, res); 1568 if (res) 1569 goto fault; 1570 addr += 4; 1571 regs->regs[30] = value; 1572 } 1573 if (reg & 0x10) { 1574 LoadW(addr, value, res); 1575 if (res) 1576 goto fault; 1577 regs->regs[31] = value; 1578 } 1579 goto success; 1580 1581 case mm_swm32_func: 1582 reg = insn.mm_m_format.rd; 1583 rvar = reg & 0xf; 1584 if ((rvar > 9) || !reg) 1585 goto sigill; 1586 if (reg & 0x10) { 1587 if (!access_ok(addr, 4 * (rvar + 1))) 1588 goto sigbus; 1589 } else { 1590 if (!access_ok(addr, 4 * rvar)) 1591 goto sigbus; 1592 } 1593 if (rvar == 9) 1594 rvar = 8; 1595 for (i = 16; rvar; rvar--, i++) { 1596 value = regs->regs[i]; 1597 StoreW(addr, value, res); 1598 if (res) 1599 goto fault; 1600 addr += 4; 1601 } 1602 if ((reg & 0xf) == 9) { 1603 value = regs->regs[30]; 1604 StoreW(addr, value, res); 1605 if (res) 1606 goto fault; 1607 addr += 4; 1608 } 1609 if (reg & 0x10) { 1610 value = regs->regs[31]; 1611 StoreW(addr, value, res); 1612 if (res) 1613 goto fault; 1614 } 1615 goto success; 1616 1617 case mm_ldm_func: 1618 #ifdef CONFIG_64BIT 1619 reg = insn.mm_m_format.rd; 1620 rvar = reg & 0xf; 1621 if ((rvar > 9) || !reg) 1622 goto sigill; 1623 if (reg & 0x10) { 1624 if (!access_ok(addr, 8 * (rvar + 1))) 1625 goto sigbus; 1626 } else { 1627 if (!access_ok(addr, 8 * rvar)) 1628 goto sigbus; 1629 } 1630 if (rvar == 9) 1631 rvar = 8; 1632 1633 for (i = 16; rvar; rvar--, i++) { 1634 LoadDW(addr, value, res); 1635 if (res) 1636 goto fault; 1637 addr += 4; 1638 regs->regs[i] = value; 1639 } 1640 if ((reg & 0xf) == 9) { 1641 LoadDW(addr, value, res); 1642 if (res) 1643 goto fault; 1644 addr += 8; 1645 regs->regs[30] = value; 1646 } 1647 if (reg & 0x10) { 1648 LoadDW(addr, value, res); 1649 if (res) 1650 goto fault; 1651 regs->regs[31] = value; 1652 } 1653 goto success; 1654 #endif /* CONFIG_64BIT */ 1655 1656 goto sigill; 1657 1658 case mm_sdm_func: 1659 #ifdef CONFIG_64BIT 1660 reg = insn.mm_m_format.rd; 1661 rvar = reg & 0xf; 1662 if ((rvar > 9) || !reg) 1663 goto sigill; 1664 if (reg & 0x10) { 1665 if (!access_ok(addr, 8 * (rvar + 1))) 1666 goto sigbus; 1667 } else { 1668 if (!access_ok(addr, 8 * rvar)) 1669 goto sigbus; 1670 } 1671 if (rvar == 9) 1672 rvar = 8; 1673 1674 for (i = 16; rvar; rvar--, i++) { 1675 value = regs->regs[i]; 1676 StoreDW(addr, value, res); 1677 if (res) 1678 goto fault; 1679 addr += 8; 1680 } 1681 if ((reg & 0xf) == 9) { 1682 value = regs->regs[30]; 1683 StoreDW(addr, value, res); 1684 if (res) 1685 goto fault; 1686 addr += 8; 1687 } 1688 if (reg & 0x10) { 1689 value = regs->regs[31]; 1690 StoreDW(addr, value, res); 1691 if (res) 1692 goto fault; 1693 } 1694 goto success; 1695 #endif /* CONFIG_64BIT */ 1696 1697 goto sigill; 1698 1699 /* LWC2, SWC2, LDC2, SDC2 are not serviced */ 1700 } 1701 1702 goto sigbus; 1703 1704 case mm_pool32c_op: 1705 switch (insn.mm_m_format.func) { 1706 case mm_lwu_func: 1707 reg = insn.mm_m_format.rd; 1708 goto loadWU; 1709 } 1710 1711 /* LL,SC,LLD,SCD are not serviced */ 1712 goto sigbus; 1713 1714 #ifdef CONFIG_MIPS_FP_SUPPORT 1715 case 
mm_pool32f_op: 1716 switch (insn.mm_x_format.func) { 1717 case mm_lwxc1_func: 1718 case mm_swxc1_func: 1719 case mm_ldxc1_func: 1720 case mm_sdxc1_func: 1721 goto fpu_emul; 1722 } 1723 1724 goto sigbus; 1725 1726 case mm_ldc132_op: 1727 case mm_sdc132_op: 1728 case mm_lwc132_op: 1729 case mm_swc132_op: { 1730 void __user *fault_addr = NULL; 1731 1732 fpu_emul: 1733 /* roll back jump/branch */ 1734 regs->cp0_epc = origpc; 1735 regs->regs[31] = orig31; 1736 1737 die_if_kernel("Unaligned FP access in kernel code", regs); 1738 BUG_ON(!used_math()); 1739 BUG_ON(!is_fpu_owner()); 1740 1741 res = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, 1742 &fault_addr); 1743 own_fpu(1); /* restore FPU state */ 1744 1745 /* If something went wrong, signal */ 1746 process_fpemu_return(res, fault_addr, 0); 1747 1748 if (res == 0) 1749 goto success; 1750 return; 1751 } 1752 #endif /* CONFIG_MIPS_FP_SUPPORT */ 1753 1754 case mm_lh32_op: 1755 reg = insn.mm_i_format.rt; 1756 goto loadHW; 1757 1758 case mm_lhu32_op: 1759 reg = insn.mm_i_format.rt; 1760 goto loadHWU; 1761 1762 case mm_lw32_op: 1763 reg = insn.mm_i_format.rt; 1764 goto loadW; 1765 1766 case mm_sh32_op: 1767 reg = insn.mm_i_format.rt; 1768 goto storeHW; 1769 1770 case mm_sw32_op: 1771 reg = insn.mm_i_format.rt; 1772 goto storeW; 1773 1774 case mm_ld32_op: 1775 reg = insn.mm_i_format.rt; 1776 goto loadDW; 1777 1778 case mm_sd32_op: 1779 reg = insn.mm_i_format.rt; 1780 goto storeDW; 1781 1782 case mm_pool16c_op: 1783 switch (insn.mm16_m_format.func) { 1784 case mm_lwm16_op: 1785 reg = insn.mm16_m_format.rlist; 1786 rvar = reg + 1; 1787 if (!access_ok(addr, 4 * rvar)) 1788 goto sigbus; 1789 1790 for (i = 16; rvar; rvar--, i++) { 1791 LoadW(addr, value, res); 1792 if (res) 1793 goto fault; 1794 addr += 4; 1795 regs->regs[i] = value; 1796 } 1797 LoadW(addr, value, res); 1798 if (res) 1799 goto fault; 1800 regs->regs[31] = value; 1801 1802 goto success; 1803 1804 case mm_swm16_op: 1805 reg = insn.mm16_m_format.rlist; 1806 rvar = reg + 1; 1807 if (!access_ok(addr, 4 * rvar)) 1808 goto sigbus; 1809 1810 for (i = 16; rvar; rvar--, i++) { 1811 value = regs->regs[i]; 1812 StoreW(addr, value, res); 1813 if (res) 1814 goto fault; 1815 addr += 4; 1816 } 1817 value = regs->regs[31]; 1818 StoreW(addr, value, res); 1819 if (res) 1820 goto fault; 1821 1822 goto success; 1823 1824 } 1825 1826 goto sigbus; 1827 1828 case mm_lhu16_op: 1829 reg = reg16to32[insn.mm16_rb_format.rt]; 1830 goto loadHWU; 1831 1832 case mm_lw16_op: 1833 reg = reg16to32[insn.mm16_rb_format.rt]; 1834 goto loadW; 1835 1836 case mm_sh16_op: 1837 reg = reg16to32st[insn.mm16_rb_format.rt]; 1838 goto storeHW; 1839 1840 case mm_sw16_op: 1841 reg = reg16to32st[insn.mm16_rb_format.rt]; 1842 goto storeW; 1843 1844 case mm_lwsp16_op: 1845 reg = insn.mm16_r5_format.rt; 1846 goto loadW; 1847 1848 case mm_swsp16_op: 1849 reg = insn.mm16_r5_format.rt; 1850 goto storeW; 1851 1852 case mm_lwgp16_op: 1853 reg = reg16to32[insn.mm16_r3_format.rt]; 1854 goto loadW; 1855 1856 default: 1857 goto sigill; 1858 } 1859 1860 loadHW: 1861 if (!access_ok(addr, 2)) 1862 goto sigbus; 1863 1864 LoadHW(addr, value, res); 1865 if (res) 1866 goto fault; 1867 regs->regs[reg] = value; 1868 goto success; 1869 1870 loadHWU: 1871 if (!access_ok(addr, 2)) 1872 goto sigbus; 1873 1874 LoadHWU(addr, value, res); 1875 if (res) 1876 goto fault; 1877 regs->regs[reg] = value; 1878 goto success; 1879 1880 loadW: 1881 if (!access_ok(addr, 4)) 1882 goto sigbus; 1883 1884 LoadW(addr, value, res); 1885 if (res) 1886 goto fault; 1887 
regs->regs[reg] = value; 1888 goto success; 1889 1890 loadWU: 1891 #ifdef CONFIG_64BIT 1892 /* 1893 * A 32-bit kernel might be running on a 64-bit processor. But 1894 * if we're on a 32-bit processor and an i-cache incoherency 1895 * or race makes us see a 64-bit instruction here the sdl/sdr 1896 * would blow up, so for now we don't handle unaligned 64-bit 1897 * instructions on 32-bit kernels. 1898 */ 1899 if (!access_ok(addr, 4)) 1900 goto sigbus; 1901 1902 LoadWU(addr, value, res); 1903 if (res) 1904 goto fault; 1905 regs->regs[reg] = value; 1906 goto success; 1907 #endif /* CONFIG_64BIT */ 1908 1909 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1910 goto sigill; 1911 1912 loadDW: 1913 #ifdef CONFIG_64BIT 1914 /* 1915 * A 32-bit kernel might be running on a 64-bit processor. But 1916 * if we're on a 32-bit processor and an i-cache incoherency 1917 * or race makes us see a 64-bit instruction here the sdl/sdr 1918 * would blow up, so for now we don't handle unaligned 64-bit 1919 * instructions on 32-bit kernels. 1920 */ 1921 if (!access_ok(addr, 8)) 1922 goto sigbus; 1923 1924 LoadDW(addr, value, res); 1925 if (res) 1926 goto fault; 1927 regs->regs[reg] = value; 1928 goto success; 1929 #endif /* CONFIG_64BIT */ 1930 1931 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1932 goto sigill; 1933 1934 storeHW: 1935 if (!access_ok(addr, 2)) 1936 goto sigbus; 1937 1938 value = regs->regs[reg]; 1939 StoreHW(addr, value, res); 1940 if (res) 1941 goto fault; 1942 goto success; 1943 1944 storeW: 1945 if (!access_ok(addr, 4)) 1946 goto sigbus; 1947 1948 value = regs->regs[reg]; 1949 StoreW(addr, value, res); 1950 if (res) 1951 goto fault; 1952 goto success; 1953 1954 storeDW: 1955 #ifdef CONFIG_64BIT 1956 /* 1957 * A 32-bit kernel might be running on a 64-bit processor. But 1958 * if we're on a 32-bit processor and an i-cache incoherency 1959 * or race makes us see a 64-bit instruction here the sdl/sdr 1960 * would blow up, so for now we don't handle unaligned 64-bit 1961 * instructions on 32-bit kernels. 1962 */ 1963 if (!access_ok(addr, 8)) 1964 goto sigbus; 1965 1966 value = regs->regs[reg]; 1967 StoreDW(addr, value, res); 1968 if (res) 1969 goto fault; 1970 goto success; 1971 #endif /* CONFIG_64BIT */ 1972 1973 /* Cannot handle 64-bit instructions in 32-bit kernel */ 1974 goto sigill; 1975 1976 success: 1977 regs->cp0_epc = contpc; /* advance or branch */ 1978 1979 #ifdef CONFIG_DEBUG_FS 1980 unaligned_instructions++; 1981 #endif 1982 return; 1983 1984 fault: 1985 /* roll back jump/branch */ 1986 regs->cp0_epc = origpc; 1987 regs->regs[31] = orig31; 1988 /* Did we have an exception handler installed? 
*/ 1989 if (fixup_exception(regs)) 1990 return; 1991 1992 die_if_kernel("Unhandled kernel unaligned access", regs); 1993 force_sig(SIGSEGV, current); 1994 1995 return; 1996 1997 sigbus: 1998 die_if_kernel("Unhandled kernel unaligned access", regs); 1999 force_sig(SIGBUS, current); 2000 2001 return; 2002 2003 sigill: 2004 die_if_kernel 2005 ("Unhandled kernel unaligned access or invalid instruction", regs); 2006 force_sig(SIGILL, current); 2007 } 2008 2009 static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) 2010 { 2011 unsigned long value; 2012 unsigned int res; 2013 int reg; 2014 unsigned long orig31; 2015 u16 __user *pc16; 2016 unsigned long origpc; 2017 union mips16e_instruction mips16inst, oldinst; 2018 unsigned int opcode; 2019 int extended = 0; 2020 2021 origpc = regs->cp0_epc; 2022 orig31 = regs->regs[31]; 2023 pc16 = (unsigned short __user *)msk_isa16_mode(origpc); 2024 /* 2025 * This load never faults. 2026 */ 2027 __get_user(mips16inst.full, pc16); 2028 oldinst = mips16inst; 2029 2030 /* skip EXTEND instruction */ 2031 if (mips16inst.ri.opcode == MIPS16e_extend_op) { 2032 extended = 1; 2033 pc16++; 2034 __get_user(mips16inst.full, pc16); 2035 } else if (delay_slot(regs)) { 2036 /* skip jump instructions */ 2037 /* JAL/JALX are 32 bits but have OPCODE in first short int */ 2038 if (mips16inst.ri.opcode == MIPS16e_jal_op) 2039 pc16++; 2040 pc16++; 2041 if (get_user(mips16inst.full, pc16)) 2042 goto sigbus; 2043 } 2044 2045 opcode = mips16inst.ri.opcode; 2046 switch (opcode) { 2047 case MIPS16e_i64_op: /* I64 or RI64 instruction */ 2048 switch (mips16inst.i64.func) { /* I64/RI64 func field check */ 2049 case MIPS16e_ldpc_func: 2050 case MIPS16e_ldsp_func: 2051 reg = reg16to32[mips16inst.ri64.ry]; 2052 goto loadDW; 2053 2054 case MIPS16e_sdsp_func: 2055 reg = reg16to32[mips16inst.ri64.ry]; 2056 goto writeDW; 2057 2058 case MIPS16e_sdrasp_func: 2059 reg = 29; /* GPRSP */ 2060 goto writeDW; 2061 } 2062 2063 goto sigbus; 2064 2065 case MIPS16e_swsp_op: 2066 reg = reg16to32[mips16inst.ri.rx]; 2067 if (extended && cpu_has_mips16e2) 2068 switch (mips16inst.ri.imm >> 5) { 2069 case 0: /* SWSP */ 2070 case 1: /* SWGP */ 2071 break; 2072 case 2: /* SHGP */ 2073 opcode = MIPS16e_sh_op; 2074 break; 2075 default: 2076 goto sigbus; 2077 } 2078 break; 2079 2080 case MIPS16e_lwpc_op: 2081 reg = reg16to32[mips16inst.ri.rx]; 2082 break; 2083 2084 case MIPS16e_lwsp_op: 2085 reg = reg16to32[mips16inst.ri.rx]; 2086 if (extended && cpu_has_mips16e2) 2087 switch (mips16inst.ri.imm >> 5) { 2088 case 0: /* LWSP */ 2089 case 1: /* LWGP */ 2090 break; 2091 case 2: /* LHGP */ 2092 opcode = MIPS16e_lh_op; 2093 break; 2094 case 4: /* LHUGP */ 2095 opcode = MIPS16e_lhu_op; 2096 break; 2097 default: 2098 goto sigbus; 2099 } 2100 break; 2101 2102 case MIPS16e_i8_op: 2103 if (mips16inst.i8.func != MIPS16e_swrasp_func) 2104 goto sigbus; 2105 reg = 29; /* GPRSP */ 2106 break; 2107 2108 default: 2109 reg = reg16to32[mips16inst.rri.ry]; 2110 break; 2111 } 2112 2113 switch (opcode) { 2114 2115 case MIPS16e_lb_op: 2116 case MIPS16e_lbu_op: 2117 case MIPS16e_sb_op: 2118 goto sigbus; 2119 2120 case MIPS16e_lh_op: 2121 if (!access_ok(addr, 2)) 2122 goto sigbus; 2123 2124 LoadHW(addr, value, res); 2125 if (res) 2126 goto fault; 2127 MIPS16e_compute_return_epc(regs, &oldinst); 2128 regs->regs[reg] = value; 2129 break; 2130 2131 case MIPS16e_lhu_op: 2132 if (!access_ok(addr, 2)) 2133 goto sigbus; 2134 2135 LoadHWU(addr, value, res); 2136 if (res) 2137 goto fault; 2138 MIPS16e_compute_return_epc(regs, 
&oldinst); 2139 regs->regs[reg] = value; 2140 break; 2141 2142 case MIPS16e_lw_op: 2143 case MIPS16e_lwpc_op: 2144 case MIPS16e_lwsp_op: 2145 if (!access_ok(addr, 4)) 2146 goto sigbus; 2147 2148 LoadW(addr, value, res); 2149 if (res) 2150 goto fault; 2151 MIPS16e_compute_return_epc(regs, &oldinst); 2152 regs->regs[reg] = value; 2153 break; 2154 2155 case MIPS16e_lwu_op: 2156 #ifdef CONFIG_64BIT 2157 /* 2158 * A 32-bit kernel might be running on a 64-bit processor. But 2159 * if we're on a 32-bit processor and an i-cache incoherency 2160 * or race makes us see a 64-bit instruction here the sdl/sdr 2161 * would blow up, so for now we don't handle unaligned 64-bit 2162 * instructions on 32-bit kernels. 2163 */ 2164 if (!access_ok(addr, 4)) 2165 goto sigbus; 2166 2167 LoadWU(addr, value, res); 2168 if (res) 2169 goto fault; 2170 MIPS16e_compute_return_epc(regs, &oldinst); 2171 regs->regs[reg] = value; 2172 break; 2173 #endif /* CONFIG_64BIT */ 2174 2175 /* Cannot handle 64-bit instructions in 32-bit kernel */ 2176 goto sigill; 2177 2178 case MIPS16e_ld_op: 2179 loadDW: 2180 #ifdef CONFIG_64BIT 2181 /* 2182 * A 32-bit kernel might be running on a 64-bit processor. But 2183 * if we're on a 32-bit processor and an i-cache incoherency 2184 * or race makes us see a 64-bit instruction here the sdl/sdr 2185 * would blow up, so for now we don't handle unaligned 64-bit 2186 * instructions on 32-bit kernels. 2187 */ 2188 if (!access_ok(addr, 8)) 2189 goto sigbus; 2190 2191 LoadDW(addr, value, res); 2192 if (res) 2193 goto fault; 2194 MIPS16e_compute_return_epc(regs, &oldinst); 2195 regs->regs[reg] = value; 2196 break; 2197 #endif /* CONFIG_64BIT */ 2198 2199 /* Cannot handle 64-bit instructions in 32-bit kernel */ 2200 goto sigill; 2201 2202 case MIPS16e_sh_op: 2203 if (!access_ok(addr, 2)) 2204 goto sigbus; 2205 2206 MIPS16e_compute_return_epc(regs, &oldinst); 2207 value = regs->regs[reg]; 2208 StoreHW(addr, value, res); 2209 if (res) 2210 goto fault; 2211 break; 2212 2213 case MIPS16e_sw_op: 2214 case MIPS16e_swsp_op: 2215 case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */ 2216 if (!access_ok(addr, 4)) 2217 goto sigbus; 2218 2219 MIPS16e_compute_return_epc(regs, &oldinst); 2220 value = regs->regs[reg]; 2221 StoreW(addr, value, res); 2222 if (res) 2223 goto fault; 2224 break; 2225 2226 case MIPS16e_sd_op: 2227 writeDW: 2228 #ifdef CONFIG_64BIT 2229 /* 2230 * A 32-bit kernel might be running on a 64-bit processor. But 2231 * if we're on a 32-bit processor and an i-cache incoherency 2232 * or race makes us see a 64-bit instruction here the sdl/sdr 2233 * would blow up, so for now we don't handle unaligned 64-bit 2234 * instructions on 32-bit kernels. 2235 */ 2236 if (!access_ok(addr, 8)) 2237 goto sigbus; 2238 2239 MIPS16e_compute_return_epc(regs, &oldinst); 2240 value = regs->regs[reg]; 2241 StoreDW(addr, value, res); 2242 if (res) 2243 goto fault; 2244 break; 2245 #endif /* CONFIG_64BIT */ 2246 2247 /* Cannot handle 64-bit instructions in 32-bit kernel */ 2248 goto sigill; 2249 2250 default: 2251 /* 2252 * Pheeee... We encountered an yet unknown instruction or 2253 * cache coherence problem. Die sucker, die ... 2254 */ 2255 goto sigill; 2256 } 2257 2258 #ifdef CONFIG_DEBUG_FS 2259 unaligned_instructions++; 2260 #endif 2261 2262 return; 2263 2264 fault: 2265 /* roll back jump/branch */ 2266 regs->cp0_epc = origpc; 2267 regs->regs[31] = orig31; 2268 /* Did we have an exception handler installed? 
	 */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV, current);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS, current);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL, current);
}

asmlinkage void do_ade(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	unsigned int __user *pc;
	mm_segment_t seg;

	prev_state = exception_enter();
	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
			1, regs, regs->cp0_badvaddr);
	/*
	 * Did we catch a fault trying to load an instruction?
	 */
	if (regs->cp0_badvaddr == regs->cp0_epc)
		goto sigbus;

	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
		goto sigbus;
	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
		goto sigbus;

	/*
	 * Do branch emulation only if we didn't forward the exception.
	 * This is all so ugly ...
	 */

	/*
	 * Are we running in microMIPS mode?
	 */
	if (get_isa16_mode(regs->cp0_epc)) {
		/*
		 * Did we catch a fault trying to load an instruction in
		 * 16-bit mode?
		 */
		if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
			goto sigbus;
		if (unaligned_action == UNALIGNED_ACTION_SHOW)
			show_registers(regs);

		if (cpu_has_mmips) {
			seg = get_fs();
			if (!user_mode(regs))
				set_fs(KERNEL_DS);
			emulate_load_store_microMIPS(regs,
				(void __user *)regs->cp0_badvaddr);
			set_fs(seg);

			return;
		}

		if (cpu_has_mips16) {
			seg = get_fs();
			if (!user_mode(regs))
				set_fs(KERNEL_DS);
			emulate_load_store_MIPS16e(regs,
				(void __user *)regs->cp0_badvaddr);
			set_fs(seg);

			return;
		}

		goto sigbus;
	}

	if (unaligned_action == UNALIGNED_ACTION_SHOW)
		show_registers(regs);
	pc = (unsigned int __user *)exception_epc(regs);

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);
	emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
	set_fs(seg);

	return;

sigbus:
	die_if_kernel("Kernel unaligned instruction access", regs);
	force_sig(SIGBUS, current);

	/*
	 * XXX On return from the signal handler we should advance the epc
	 */
	exception_exit(prev_state);
}

#ifdef CONFIG_DEBUG_FS
static int __init debugfs_unaligned(void)
{
	struct dentry *d;

	if (!mips_debugfs_dir)
		return -ENODEV;
	d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
			       mips_debugfs_dir, &unaligned_instructions);
	if (!d)
		return -ENOMEM;
	d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
			       mips_debugfs_dir, &unaligned_action);
	if (!d)
		return -ENOMEM;
	return 0;
}
arch_initcall(debugfs_unaligned);
#endif