xref: /openbmc/linux/arch/mips/kernel/unaligned.c (revision 04b3c795)
1 /*
2  * Handle unaligned accesses by emulation.
3  *
4  * This file is subject to the terms and conditions of the GNU General Public
5  * License.  See the file "COPYING" in the main directory of this archive
6  * for more details.
7  *
8  * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
9  * Copyright (C) 1999 Silicon Graphics, Inc.
10  * Copyright (C) 2014 Imagination Technologies Ltd.
11  *
12  * This file contains exception handler for address error exception with the
13  * special capability to execute faulting instructions in software.  The
14  * handler does not try to handle the case when the program counter points
15  * to an address not aligned to a word boundary.
16  *
17  * Putting data to unaligned addresses is a bad practice even on Intel where
18  * only the performance is affected.  Much worse is that such code is non-
19  * portable.  Due to several programs that die on MIPS due to alignment
20  * problems I decided to implement this handler anyway though I originally
21  * didn't intend to do this at all for user code.
22  *
23  * For now I enable fixing of address errors by default to make life easier.
 * I however intend to disable this at some point in the future when the alignment
25  * problems with user programs have been fixed.	 For programmers this is the
26  * right way to go.
27  *
28  * Fixing address errors is a per process option.  The option is inherited
29  * across fork(2) and execve(2) calls.	If you really want to use the
30  * option in your user programs - I discourage the use of the software
31  * emulation strongly - use the following code in your userland stuff:
32  *
33  * #include <sys/sysmips.h>
34  *
35  * ...
36  * sysmips(MIPS_FIXADE, x);
37  * ...
38  *
39  * The argument x is 0 for disabling software emulation, enabled otherwise.
40  *
41  * Below a little program to play around with this feature.
42  *
43  * #include <stdio.h>
44  * #include <sys/sysmips.h>
45  *
46  * struct foo {
47  *	   unsigned char bar[8];
48  * };
49  *
50  * main(int argc, char *argv[])
51  * {
52  *	   struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
53  *	   unsigned int *p = (unsigned int *) (x.bar + 3);
54  *	   int i;
55  *
56  *	   if (argc > 1)
57  *		   sysmips(MIPS_FIXADE, atoi(argv[1]));
58  *
59  *	   printf("*p = %08lx\n", *p);
60  *
61  *	   *p = 0xdeadface;
62  *
63  *	   for(i = 0; i <= 7; i++)
64  *	   printf("%02x ", x.bar[i]);
65  *	   printf("\n");
66  * }
67  *
 * Coprocessor loads are not supported; I think this case is unimportant
 * in practice.
70  *
71  * TODO: Handle ndc (attempted store to doubleword in uncached memory)
72  *	 exception for the R6000.
73  *	 A store crossing a page boundary might be executed only partially.
74  *	 Undo the partial store in this case.
75  */
76 #include <linux/context_tracking.h>
77 #include <linux/mm.h>
78 #include <linux/signal.h>
79 #include <linux/smp.h>
80 #include <linux/sched.h>
81 #include <linux/debugfs.h>
82 #include <linux/perf_event.h>
83 
84 #include <asm/asm.h>
85 #include <asm/branch.h>
86 #include <asm/byteorder.h>
87 #include <asm/cop2.h>
88 #include <asm/debug.h>
89 #include <asm/fpu.h>
90 #include <asm/fpu_emulator.h>
91 #include <asm/inst.h>
92 #include <asm/unaligned-emul.h>
93 #include <asm/mmu_context.h>
94 #include <linux/uaccess.h>
95 
/* Action taken when a user program performs an unaligned access. */
enum {
	UNALIGNED_ACTION_QUIET,
	UNALIGNED_ACTION_SIGNAL,
	UNALIGNED_ACTION_SHOW,
};
#ifdef CONFIG_DEBUG_FS
/* Emulation statistics / policy knob (presumably exposed via debugfs
 * elsewhere in this file — wiring not visible in this chunk). */
static u32 unaligned_instructions;	/* count of emulated accesses */
static u32 unaligned_action;		/* one of UNALIGNED_ACTION_* */
#else
/* Without debugfs there is no way to change the policy: stay quiet. */
#define unaligned_action UNALIGNED_ACTION_QUIET
#endif
extern void show_registers(struct pt_regs *regs);
108 
/*
 * emulate_load_store_insn() - emulate one classic-ISA (non-microMIPS)
 * unaligned load or store.
 * @regs: trapping context; GPRs and EPC are updated on success.
 * @addr: the misaligned data address that raised the address error.
 * @pc:   address of the faulting instruction word.
 *
 * On success the access is emulated and the EPC is advanced past the
 * instruction (branch delay slots are resolved by compute_return_epc()).
 * On failure the saved PC/$31 are rolled back and a signal is forced:
 * SIGSEGV if the emulated access itself faulted, SIGBUS for a bad
 * address, SIGILL for an unrecognised instruction; die_if_kernel()
 * fires first for unhandled kernel-mode faults.
 */
static void emulate_load_store_insn(struct pt_regs *regs,
	void __user *addr, unsigned int __user *pc)
{
	unsigned long origpc, orig31, value;
	union mips_instruction insn;
	unsigned int res;	/* set non-zero by a Load/Store helper on fault */
#ifdef	CONFIG_EVA
	mm_segment_t seg;	/* saved address limit around EVA accesses */
#endif
	/* Save PC and ra so a failed emulation can be rolled back. */
	origpc = (unsigned long)pc;
	orig31 = regs->regs[31];

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

	/*
	 * This load never faults.
	 */
	__get_user(insn.word, pc);

	switch (insn.i_format.opcode) {
		/*
		 * These are instructions that a compiler doesn't generate.  We
		 * can assume therefore that the code is MIPS-aware and
		 * really buggy.  Emulating these instructions would break the
		 * semantics anyway.
		 */
	case ll_op:
	case lld_op:
	case sc_op:
	case scd_op:

		/*
		 * For these instructions the only way to create an address
		 * error is an attempted access to kernel/supervisor address
		 * space.
		 */
	case ldl_op:
	case ldr_op:
	case lwl_op:
	case lwr_op:
	case sdl_op:
	case sdr_op:
	case swl_op:
	case swr_op:
	case lb_op:
	case lbu_op:
	case sb_op:
		goto sigbus;

		/*
		 * The remaining opcodes are the ones that are really of
		 * interest.
		 */
	case spec3_op:
		/* DSP ASE indexed loads: lwx/lhx. */
		if (insn.dsp_format.func == lx_op) {
			switch (insn.dsp_format.op) {
			case lwx_op:
				if (!access_ok(addr, 4))
					goto sigbus;
				LoadW(addr, value, res);
				if (res)
					goto fault;
				compute_return_epc(regs);
				regs->regs[insn.dsp_format.rd] = value;
				break;
			case lhx_op:
				if (!access_ok(addr, 2))
					goto sigbus;
				LoadHW(addr, value, res);
				if (res)
					goto fault;
				compute_return_epc(regs);
				regs->regs[insn.dsp_format.rd] = value;
				break;
			default:
				goto sigill;
			}
		}
#ifdef CONFIG_EVA
		else {
			/*
			 * we can land here only from kernel accessing user
			 * memory, so we need to "switch" the address limit to
			 * user space, so that address check can work properly.
			 */
			seg = get_fs();
			set_fs(USER_DS);
			/* Every exit below must restore the saved limit. */
			switch (insn.spec3_format.func) {
			case lhe_op:
				if (!access_ok(addr, 2)) {
					set_fs(seg);
					goto sigbus;
				}
				LoadHWE(addr, value, res);
				if (res) {
					set_fs(seg);
					goto fault;
				}
				compute_return_epc(regs);
				regs->regs[insn.spec3_format.rt] = value;
				break;
			case lwe_op:
				if (!access_ok(addr, 4)) {
					set_fs(seg);
					goto sigbus;
				}
				LoadWE(addr, value, res);
				if (res) {
					set_fs(seg);
					goto fault;
				}
				compute_return_epc(regs);
				regs->regs[insn.spec3_format.rt] = value;
				break;
			case lhue_op:
				if (!access_ok(addr, 2)) {
					set_fs(seg);
					goto sigbus;
				}
				LoadHWUE(addr, value, res);
				if (res) {
					set_fs(seg);
					goto fault;
				}
				compute_return_epc(regs);
				regs->regs[insn.spec3_format.rt] = value;
				break;
			case she_op:
				if (!access_ok(addr, 2)) {
					set_fs(seg);
					goto sigbus;
				}
				compute_return_epc(regs);
				value = regs->regs[insn.spec3_format.rt];
				StoreHWE(addr, value, res);
				if (res) {
					set_fs(seg);
					goto fault;
				}
				break;
			case swe_op:
				if (!access_ok(addr, 4)) {
					set_fs(seg);
					goto sigbus;
				}
				compute_return_epc(regs);
				value = regs->regs[insn.spec3_format.rt];
				StoreWE(addr, value, res);
				if (res) {
					set_fs(seg);
					goto fault;
				}
				break;
			default:
				set_fs(seg);
				goto sigill;
			}
			set_fs(seg);
		}
#endif
		break;
	case lh_op:
		if (!access_ok(addr, 2))
			goto sigbus;

		/* With EVA, user memory needs the E-variant accessors. */
		if (IS_ENABLED(CONFIG_EVA)) {
			if (uaccess_kernel())
				LoadHW(addr, value, res);
			else
				LoadHWE(addr, value, res);
		} else {
			LoadHW(addr, value, res);
		}

		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lw_op:
		if (!access_ok(addr, 4))
			goto sigbus;

		if (IS_ENABLED(CONFIG_EVA)) {
			if (uaccess_kernel())
				LoadW(addr, value, res);
			else
				LoadWE(addr, value, res);
		} else {
			LoadW(addr, value, res);
		}

		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lhu_op:
		if (!access_ok(addr, 2))
			goto sigbus;

		if (IS_ENABLED(CONFIG_EVA)) {
			if (uaccess_kernel())
				LoadHWU(addr, value, res);
			else
				LoadHWUE(addr, value, res);
		} else {
			LoadHWU(addr, value, res);
		}

		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(addr, 4))
			goto sigbus;

		LoadWU(addr, value, res);
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case ld_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(addr, 8))
			goto sigbus;

		LoadDW(addr, value, res);
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case sh_op:
		if (!access_ok(addr, 2))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];

		if (IS_ENABLED(CONFIG_EVA)) {
			if (uaccess_kernel())
				StoreHW(addr, value, res);
			else
				StoreHWE(addr, value, res);
		} else {
			StoreHW(addr, value, res);
		}

		if (res)
			goto fault;
		break;

	case sw_op:
		if (!access_ok(addr, 4))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];

		if (IS_ENABLED(CONFIG_EVA)) {
			if (uaccess_kernel())
				StoreW(addr, value, res);
			else
				StoreWE(addr, value, res);
		} else {
			StoreW(addr, value, res);
		}

		if (res)
			goto fault;
		break;

	case sd_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(addr, 8))
			goto sigbus;

		compute_return_epc(regs);
		value = regs->regs[insn.i_format.rt];
		StoreDW(addr, value, res);
		if (res)
			goto fault;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

#ifdef CONFIG_MIPS_FP_SUPPORT

	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
	case cop1x_op: {
		/* FP accesses are handed to the full FPU emulator. */
		void __user *fault_addr = NULL;

		die_if_kernel("Unaligned FP access in kernel code", regs);
		BUG_ON(!used_math());

		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);
		own_fpu(1);	/* Restore FPU state. */

		/* Signal if something went wrong. */
		process_fpemu_return(res, fault_addr, 0);

		if (res == 0)
			break;
		return;
	}
#endif /* CONFIG_MIPS_FP_SUPPORT */

#ifdef CONFIG_CPU_HAS_MSA

	case msa_op: {
		unsigned int wd, preempted;
		enum msa_2b_fmt df;
		union fpureg *fpr;

		if (!cpu_has_msa)
			goto sigill;

		/*
		 * If we've reached this point then userland should have taken
		 * the MSA disabled exception & initialised vector context at
		 * some point in the past.
		 */
		BUG_ON(!thread_msa_context_live());

		df = insn.msa_mi10_format.df;
		wd = insn.msa_mi10_format.wd;
		fpr = &current->thread.fpu.fpr[wd];

		switch (insn.msa_mi10_format.func) {
		case msa_ld_op:
			if (!access_ok(addr, sizeof(*fpr)))
				goto sigbus;

			do {
				/*
				 * If we have live MSA context keep track of
				 * whether we get preempted in order to avoid
				 * the register context we load being clobbered
				 * by the live context as it's saved during
				 * preemption. If we don't have live context
				 * then it can't be saved to clobber the value
				 * we load.
				 */
				preempted = test_thread_flag(TIF_USEDMSA);

				res = __copy_from_user_inatomic(fpr, addr,
								sizeof(*fpr));
				if (res)
					goto fault;

				/*
				 * Update the hardware register if it is in use
				 * by the task in this quantum, in order to
				 * avoid having to save & restore the whole
				 * vector context.
				 */
				preempt_disable();
				if (test_thread_flag(TIF_USEDMSA)) {
					write_msa_wr(wd, fpr, df);
					preempted = 0;
				}
				preempt_enable();
			} while (preempted);
			break;

		case msa_st_op:
			if (!access_ok(addr, sizeof(*fpr)))
				goto sigbus;

			/*
			 * Update from the hardware register if it is in use by
			 * the task in this quantum, in order to avoid having to
			 * save & restore the whole vector context.
			 */
			preempt_disable();
			if (test_thread_flag(TIF_USEDMSA))
				read_msa_wr(wd, fpr, df);
			preempt_enable();

			res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
			if (res)
				goto fault;
			break;

		default:
			goto sigbus;
		}

		compute_return_epc(regs);
		break;
	}
#endif /* CONFIG_CPU_HAS_MSA */

#ifndef CONFIG_CPU_MIPSR6
	/*
	 * COP2 is available to implementor for application specific use.
	 * It's up to applications to register a notifier chain and do
	 * whatever they have to do, including possible sending of signals.
	 *
	 * This instruction has been reallocated in Release 6
	 */
	case lwc2_op:
		cu2_notifier_call_chain(CU2_LWC2_OP, regs);
		break;

	case ldc2_op:
		cu2_notifier_call_chain(CU2_LDC2_OP, regs);
		break;

	case swc2_op:
		cu2_notifier_call_chain(CU2_SWC2_OP, regs);
		break;

	case sdc2_op:
		cu2_notifier_call_chain(CU2_SDC2_OP, regs);
		break;
#endif
	default:
		/*
		 * Pheeee...  We encountered a yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}

	/* Successful emulation: account for it. */
#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif

	return;

fault:
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL);
}
609 
/* Recode table from 16-bit register notation to 32-bit GPR. */
const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };

/* Recode table from 16-bit STORE register notation to 32-bit GPR.
 * Identical to reg16to32 except that encoding 0 maps to $0 (the zero
 * register) rather than $16. */
static const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
615 
616 static void emulate_load_store_microMIPS(struct pt_regs *regs,
617 					 void __user *addr)
618 {
619 	unsigned long value;
620 	unsigned int res;
621 	int i;
622 	unsigned int reg = 0, rvar;
623 	unsigned long orig31;
624 	u16 __user *pc16;
625 	u16 halfword;
626 	unsigned int word;
627 	unsigned long origpc, contpc;
628 	union mips_instruction insn;
629 	struct mm_decoded_insn mminsn;
630 
631 	origpc = regs->cp0_epc;
632 	orig31 = regs->regs[31];
633 
634 	mminsn.micro_mips_mode = 1;
635 
636 	/*
637 	 * This load never faults.
638 	 */
639 	pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
640 	__get_user(halfword, pc16);
641 	pc16++;
642 	contpc = regs->cp0_epc + 2;
643 	word = ((unsigned int)halfword << 16);
644 	mminsn.pc_inc = 2;
645 
646 	if (!mm_insn_16bit(halfword)) {
647 		__get_user(halfword, pc16);
648 		pc16++;
649 		contpc = regs->cp0_epc + 4;
650 		mminsn.pc_inc = 4;
651 		word |= halfword;
652 	}
653 	mminsn.insn = word;
654 
655 	if (get_user(halfword, pc16))
656 		goto fault;
657 	mminsn.next_pc_inc = 2;
658 	word = ((unsigned int)halfword << 16);
659 
660 	if (!mm_insn_16bit(halfword)) {
661 		pc16++;
662 		if (get_user(halfword, pc16))
663 			goto fault;
664 		mminsn.next_pc_inc = 4;
665 		word |= halfword;
666 	}
667 	mminsn.next_insn = word;
668 
669 	insn = (union mips_instruction)(mminsn.insn);
670 	if (mm_isBranchInstr(regs, mminsn, &contpc))
671 		insn = (union mips_instruction)(mminsn.next_insn);
672 
673 	/*  Parse instruction to find what to do */
674 
675 	switch (insn.mm_i_format.opcode) {
676 
677 	case mm_pool32a_op:
678 		switch (insn.mm_x_format.func) {
679 		case mm_lwxs_op:
680 			reg = insn.mm_x_format.rd;
681 			goto loadW;
682 		}
683 
684 		goto sigbus;
685 
686 	case mm_pool32b_op:
687 		switch (insn.mm_m_format.func) {
688 		case mm_lwp_func:
689 			reg = insn.mm_m_format.rd;
690 			if (reg == 31)
691 				goto sigbus;
692 
693 			if (!access_ok(addr, 8))
694 				goto sigbus;
695 
696 			LoadW(addr, value, res);
697 			if (res)
698 				goto fault;
699 			regs->regs[reg] = value;
700 			addr += 4;
701 			LoadW(addr, value, res);
702 			if (res)
703 				goto fault;
704 			regs->regs[reg + 1] = value;
705 			goto success;
706 
707 		case mm_swp_func:
708 			reg = insn.mm_m_format.rd;
709 			if (reg == 31)
710 				goto sigbus;
711 
712 			if (!access_ok(addr, 8))
713 				goto sigbus;
714 
715 			value = regs->regs[reg];
716 			StoreW(addr, value, res);
717 			if (res)
718 				goto fault;
719 			addr += 4;
720 			value = regs->regs[reg + 1];
721 			StoreW(addr, value, res);
722 			if (res)
723 				goto fault;
724 			goto success;
725 
726 		case mm_ldp_func:
727 #ifdef CONFIG_64BIT
728 			reg = insn.mm_m_format.rd;
729 			if (reg == 31)
730 				goto sigbus;
731 
732 			if (!access_ok(addr, 16))
733 				goto sigbus;
734 
735 			LoadDW(addr, value, res);
736 			if (res)
737 				goto fault;
738 			regs->regs[reg] = value;
739 			addr += 8;
740 			LoadDW(addr, value, res);
741 			if (res)
742 				goto fault;
743 			regs->regs[reg + 1] = value;
744 			goto success;
745 #endif /* CONFIG_64BIT */
746 
747 			goto sigill;
748 
749 		case mm_sdp_func:
750 #ifdef CONFIG_64BIT
751 			reg = insn.mm_m_format.rd;
752 			if (reg == 31)
753 				goto sigbus;
754 
755 			if (!access_ok(addr, 16))
756 				goto sigbus;
757 
758 			value = regs->regs[reg];
759 			StoreDW(addr, value, res);
760 			if (res)
761 				goto fault;
762 			addr += 8;
763 			value = regs->regs[reg + 1];
764 			StoreDW(addr, value, res);
765 			if (res)
766 				goto fault;
767 			goto success;
768 #endif /* CONFIG_64BIT */
769 
770 			goto sigill;
771 
772 		case mm_lwm32_func:
773 			reg = insn.mm_m_format.rd;
774 			rvar = reg & 0xf;
775 			if ((rvar > 9) || !reg)
776 				goto sigill;
777 			if (reg & 0x10) {
778 				if (!access_ok(addr, 4 * (rvar + 1)))
779 					goto sigbus;
780 			} else {
781 				if (!access_ok(addr, 4 * rvar))
782 					goto sigbus;
783 			}
784 			if (rvar == 9)
785 				rvar = 8;
786 			for (i = 16; rvar; rvar--, i++) {
787 				LoadW(addr, value, res);
788 				if (res)
789 					goto fault;
790 				addr += 4;
791 				regs->regs[i] = value;
792 			}
793 			if ((reg & 0xf) == 9) {
794 				LoadW(addr, value, res);
795 				if (res)
796 					goto fault;
797 				addr += 4;
798 				regs->regs[30] = value;
799 			}
800 			if (reg & 0x10) {
801 				LoadW(addr, value, res);
802 				if (res)
803 					goto fault;
804 				regs->regs[31] = value;
805 			}
806 			goto success;
807 
808 		case mm_swm32_func:
809 			reg = insn.mm_m_format.rd;
810 			rvar = reg & 0xf;
811 			if ((rvar > 9) || !reg)
812 				goto sigill;
813 			if (reg & 0x10) {
814 				if (!access_ok(addr, 4 * (rvar + 1)))
815 					goto sigbus;
816 			} else {
817 				if (!access_ok(addr, 4 * rvar))
818 					goto sigbus;
819 			}
820 			if (rvar == 9)
821 				rvar = 8;
822 			for (i = 16; rvar; rvar--, i++) {
823 				value = regs->regs[i];
824 				StoreW(addr, value, res);
825 				if (res)
826 					goto fault;
827 				addr += 4;
828 			}
829 			if ((reg & 0xf) == 9) {
830 				value = regs->regs[30];
831 				StoreW(addr, value, res);
832 				if (res)
833 					goto fault;
834 				addr += 4;
835 			}
836 			if (reg & 0x10) {
837 				value = regs->regs[31];
838 				StoreW(addr, value, res);
839 				if (res)
840 					goto fault;
841 			}
842 			goto success;
843 
844 		case mm_ldm_func:
845 #ifdef CONFIG_64BIT
846 			reg = insn.mm_m_format.rd;
847 			rvar = reg & 0xf;
848 			if ((rvar > 9) || !reg)
849 				goto sigill;
850 			if (reg & 0x10) {
851 				if (!access_ok(addr, 8 * (rvar + 1)))
852 					goto sigbus;
853 			} else {
854 				if (!access_ok(addr, 8 * rvar))
855 					goto sigbus;
856 			}
857 			if (rvar == 9)
858 				rvar = 8;
859 
860 			for (i = 16; rvar; rvar--, i++) {
861 				LoadDW(addr, value, res);
862 				if (res)
863 					goto fault;
864 				addr += 4;
865 				regs->regs[i] = value;
866 			}
867 			if ((reg & 0xf) == 9) {
868 				LoadDW(addr, value, res);
869 				if (res)
870 					goto fault;
871 				addr += 8;
872 				regs->regs[30] = value;
873 			}
874 			if (reg & 0x10) {
875 				LoadDW(addr, value, res);
876 				if (res)
877 					goto fault;
878 				regs->regs[31] = value;
879 			}
880 			goto success;
881 #endif /* CONFIG_64BIT */
882 
883 			goto sigill;
884 
885 		case mm_sdm_func:
886 #ifdef CONFIG_64BIT
887 			reg = insn.mm_m_format.rd;
888 			rvar = reg & 0xf;
889 			if ((rvar > 9) || !reg)
890 				goto sigill;
891 			if (reg & 0x10) {
892 				if (!access_ok(addr, 8 * (rvar + 1)))
893 					goto sigbus;
894 			} else {
895 				if (!access_ok(addr, 8 * rvar))
896 					goto sigbus;
897 			}
898 			if (rvar == 9)
899 				rvar = 8;
900 
901 			for (i = 16; rvar; rvar--, i++) {
902 				value = regs->regs[i];
903 				StoreDW(addr, value, res);
904 				if (res)
905 					goto fault;
906 				addr += 8;
907 			}
908 			if ((reg & 0xf) == 9) {
909 				value = regs->regs[30];
910 				StoreDW(addr, value, res);
911 				if (res)
912 					goto fault;
913 				addr += 8;
914 			}
915 			if (reg & 0x10) {
916 				value = regs->regs[31];
917 				StoreDW(addr, value, res);
918 				if (res)
919 					goto fault;
920 			}
921 			goto success;
922 #endif /* CONFIG_64BIT */
923 
924 			goto sigill;
925 
926 			/*  LWC2, SWC2, LDC2, SDC2 are not serviced */
927 		}
928 
929 		goto sigbus;
930 
931 	case mm_pool32c_op:
932 		switch (insn.mm_m_format.func) {
933 		case mm_lwu_func:
934 			reg = insn.mm_m_format.rd;
935 			goto loadWU;
936 		}
937 
938 		/*  LL,SC,LLD,SCD are not serviced */
939 		goto sigbus;
940 
941 #ifdef CONFIG_MIPS_FP_SUPPORT
942 	case mm_pool32f_op:
943 		switch (insn.mm_x_format.func) {
944 		case mm_lwxc1_func:
945 		case mm_swxc1_func:
946 		case mm_ldxc1_func:
947 		case mm_sdxc1_func:
948 			goto fpu_emul;
949 		}
950 
951 		goto sigbus;
952 
953 	case mm_ldc132_op:
954 	case mm_sdc132_op:
955 	case mm_lwc132_op:
956 	case mm_swc132_op: {
957 		void __user *fault_addr = NULL;
958 
959 fpu_emul:
960 		/* roll back jump/branch */
961 		regs->cp0_epc = origpc;
962 		regs->regs[31] = orig31;
963 
964 		die_if_kernel("Unaligned FP access in kernel code", regs);
965 		BUG_ON(!used_math());
966 		BUG_ON(!is_fpu_owner());
967 
968 		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
969 					       &fault_addr);
970 		own_fpu(1);	/* restore FPU state */
971 
972 		/* If something went wrong, signal */
973 		process_fpemu_return(res, fault_addr, 0);
974 
975 		if (res == 0)
976 			goto success;
977 		return;
978 	}
979 #endif /* CONFIG_MIPS_FP_SUPPORT */
980 
981 	case mm_lh32_op:
982 		reg = insn.mm_i_format.rt;
983 		goto loadHW;
984 
985 	case mm_lhu32_op:
986 		reg = insn.mm_i_format.rt;
987 		goto loadHWU;
988 
989 	case mm_lw32_op:
990 		reg = insn.mm_i_format.rt;
991 		goto loadW;
992 
993 	case mm_sh32_op:
994 		reg = insn.mm_i_format.rt;
995 		goto storeHW;
996 
997 	case mm_sw32_op:
998 		reg = insn.mm_i_format.rt;
999 		goto storeW;
1000 
1001 	case mm_ld32_op:
1002 		reg = insn.mm_i_format.rt;
1003 		goto loadDW;
1004 
1005 	case mm_sd32_op:
1006 		reg = insn.mm_i_format.rt;
1007 		goto storeDW;
1008 
1009 	case mm_pool16c_op:
1010 		switch (insn.mm16_m_format.func) {
1011 		case mm_lwm16_op:
1012 			reg = insn.mm16_m_format.rlist;
1013 			rvar = reg + 1;
1014 			if (!access_ok(addr, 4 * rvar))
1015 				goto sigbus;
1016 
1017 			for (i = 16; rvar; rvar--, i++) {
1018 				LoadW(addr, value, res);
1019 				if (res)
1020 					goto fault;
1021 				addr += 4;
1022 				regs->regs[i] = value;
1023 			}
1024 			LoadW(addr, value, res);
1025 			if (res)
1026 				goto fault;
1027 			regs->regs[31] = value;
1028 
1029 			goto success;
1030 
1031 		case mm_swm16_op:
1032 			reg = insn.mm16_m_format.rlist;
1033 			rvar = reg + 1;
1034 			if (!access_ok(addr, 4 * rvar))
1035 				goto sigbus;
1036 
1037 			for (i = 16; rvar; rvar--, i++) {
1038 				value = regs->regs[i];
1039 				StoreW(addr, value, res);
1040 				if (res)
1041 					goto fault;
1042 				addr += 4;
1043 			}
1044 			value = regs->regs[31];
1045 			StoreW(addr, value, res);
1046 			if (res)
1047 				goto fault;
1048 
1049 			goto success;
1050 
1051 		}
1052 
1053 		goto sigbus;
1054 
1055 	case mm_lhu16_op:
1056 		reg = reg16to32[insn.mm16_rb_format.rt];
1057 		goto loadHWU;
1058 
1059 	case mm_lw16_op:
1060 		reg = reg16to32[insn.mm16_rb_format.rt];
1061 		goto loadW;
1062 
1063 	case mm_sh16_op:
1064 		reg = reg16to32st[insn.mm16_rb_format.rt];
1065 		goto storeHW;
1066 
1067 	case mm_sw16_op:
1068 		reg = reg16to32st[insn.mm16_rb_format.rt];
1069 		goto storeW;
1070 
1071 	case mm_lwsp16_op:
1072 		reg = insn.mm16_r5_format.rt;
1073 		goto loadW;
1074 
1075 	case mm_swsp16_op:
1076 		reg = insn.mm16_r5_format.rt;
1077 		goto storeW;
1078 
1079 	case mm_lwgp16_op:
1080 		reg = reg16to32[insn.mm16_r3_format.rt];
1081 		goto loadW;
1082 
1083 	default:
1084 		goto sigill;
1085 	}
1086 
1087 loadHW:
1088 	if (!access_ok(addr, 2))
1089 		goto sigbus;
1090 
1091 	LoadHW(addr, value, res);
1092 	if (res)
1093 		goto fault;
1094 	regs->regs[reg] = value;
1095 	goto success;
1096 
1097 loadHWU:
1098 	if (!access_ok(addr, 2))
1099 		goto sigbus;
1100 
1101 	LoadHWU(addr, value, res);
1102 	if (res)
1103 		goto fault;
1104 	regs->regs[reg] = value;
1105 	goto success;
1106 
1107 loadW:
1108 	if (!access_ok(addr, 4))
1109 		goto sigbus;
1110 
1111 	LoadW(addr, value, res);
1112 	if (res)
1113 		goto fault;
1114 	regs->regs[reg] = value;
1115 	goto success;
1116 
1117 loadWU:
1118 #ifdef CONFIG_64BIT
1119 	/*
1120 	 * A 32-bit kernel might be running on a 64-bit processor.  But
1121 	 * if we're on a 32-bit processor and an i-cache incoherency
1122 	 * or race makes us see a 64-bit instruction here the sdl/sdr
1123 	 * would blow up, so for now we don't handle unaligned 64-bit
1124 	 * instructions on 32-bit kernels.
1125 	 */
1126 	if (!access_ok(addr, 4))
1127 		goto sigbus;
1128 
1129 	LoadWU(addr, value, res);
1130 	if (res)
1131 		goto fault;
1132 	regs->regs[reg] = value;
1133 	goto success;
1134 #endif /* CONFIG_64BIT */
1135 
1136 	/* Cannot handle 64-bit instructions in 32-bit kernel */
1137 	goto sigill;
1138 
1139 loadDW:
1140 #ifdef CONFIG_64BIT
1141 	/*
1142 	 * A 32-bit kernel might be running on a 64-bit processor.  But
1143 	 * if we're on a 32-bit processor and an i-cache incoherency
1144 	 * or race makes us see a 64-bit instruction here the sdl/sdr
1145 	 * would blow up, so for now we don't handle unaligned 64-bit
1146 	 * instructions on 32-bit kernels.
1147 	 */
1148 	if (!access_ok(addr, 8))
1149 		goto sigbus;
1150 
1151 	LoadDW(addr, value, res);
1152 	if (res)
1153 		goto fault;
1154 	regs->regs[reg] = value;
1155 	goto success;
1156 #endif /* CONFIG_64BIT */
1157 
1158 	/* Cannot handle 64-bit instructions in 32-bit kernel */
1159 	goto sigill;
1160 
1161 storeHW:
1162 	if (!access_ok(addr, 2))
1163 		goto sigbus;
1164 
1165 	value = regs->regs[reg];
1166 	StoreHW(addr, value, res);
1167 	if (res)
1168 		goto fault;
1169 	goto success;
1170 
1171 storeW:
1172 	if (!access_ok(addr, 4))
1173 		goto sigbus;
1174 
1175 	value = regs->regs[reg];
1176 	StoreW(addr, value, res);
1177 	if (res)
1178 		goto fault;
1179 	goto success;
1180 
1181 storeDW:
1182 #ifdef CONFIG_64BIT
1183 	/*
1184 	 * A 32-bit kernel might be running on a 64-bit processor.  But
1185 	 * if we're on a 32-bit processor and an i-cache incoherency
1186 	 * or race makes us see a 64-bit instruction here the sdl/sdr
1187 	 * would blow up, so for now we don't handle unaligned 64-bit
1188 	 * instructions on 32-bit kernels.
1189 	 */
1190 	if (!access_ok(addr, 8))
1191 		goto sigbus;
1192 
1193 	value = regs->regs[reg];
1194 	StoreDW(addr, value, res);
1195 	if (res)
1196 		goto fault;
1197 	goto success;
1198 #endif /* CONFIG_64BIT */
1199 
1200 	/* Cannot handle 64-bit instructions in 32-bit kernel */
1201 	goto sigill;
1202 
1203 success:
1204 	regs->cp0_epc = contpc;	/* advance or branch */
1205 
1206 #ifdef CONFIG_DEBUG_FS
1207 	unaligned_instructions++;
1208 #endif
1209 	return;
1210 
1211 fault:
1212 	/* roll back jump/branch */
1213 	regs->cp0_epc = origpc;
1214 	regs->regs[31] = orig31;
1215 	/* Did we have an exception handler installed? */
1216 	if (fixup_exception(regs))
1217 		return;
1218 
1219 	die_if_kernel("Unhandled kernel unaligned access", regs);
1220 	force_sig(SIGSEGV);
1221 
1222 	return;
1223 
1224 sigbus:
1225 	die_if_kernel("Unhandled kernel unaligned access", regs);
1226 	force_sig(SIGBUS);
1227 
1228 	return;
1229 
1230 sigill:
1231 	die_if_kernel
1232 	    ("Unhandled kernel unaligned access or invalid instruction", regs);
1233 	force_sig(SIGILL);
1234 }
1235 
/*
 * Emulate an unaligned load or store encoded as a MIPS16e instruction.
 *
 * Called from do_ade() with @addr = cp0_badvaddr, the misaligned data
 * address.  The faulting instruction is re-fetched from the program
 * counter (ISA bit masked off), decoded, and the access is replayed in
 * software via the LoadHW/LoadW/StoreHW/... helpers, which tolerate any
 * alignment.  On success the epc is advanced past the instruction; on
 * failure the pre-exception pc and $31 are restored and a signal is
 * forced on the task.
 */
static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
{
	unsigned long value;
	unsigned int res;
	int reg;		/* GPR number targeted by the access */
	unsigned long orig31;	/* $31 saved for rollback on fault */
	u16 __user *pc16;
	unsigned long origpc;	/* epc saved for rollback on fault */
	union mips16e_instruction mips16inst, oldinst;
	unsigned int opcode;
	int extended = 0;	/* set when an EXTEND prefix was seen */

	origpc = regs->cp0_epc;
	orig31 = regs->regs[31];
	pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
	/*
	 * This load never faults.
	 */
	__get_user(mips16inst.full, pc16);
	/* Keep the original first halfword for branch-delay epc computation. */
	oldinst = mips16inst;

	/* skip EXTEND instruction */
	if (mips16inst.ri.opcode == MIPS16e_extend_op) {
		extended = 1;
		pc16++;
		__get_user(mips16inst.full, pc16);
	} else if (delay_slot(regs)) {
		/*  skip jump instructions */
		/*  JAL/JALX are 32 bits but have OPCODE in first short int */
		if (mips16inst.ri.opcode == MIPS16e_jal_op)
			pc16++;
		pc16++;
		if (get_user(mips16inst.full, pc16))
			goto sigbus;
	}

	/*
	 * First pass: decode far enough to find which GPR the access
	 * targets.  For extended SWSP/LWSP on MIPS16e2-capable CPUs the
	 * imm field may redirect us to a halfword opcode instead.
	 */
	opcode = mips16inst.ri.opcode;
	switch (opcode) {
	case MIPS16e_i64_op:	/* I64 or RI64 instruction */
		switch (mips16inst.i64.func) {	/* I64/RI64 func field check */
		case MIPS16e_ldpc_func:
		case MIPS16e_ldsp_func:
			reg = reg16to32[mips16inst.ri64.ry];
			goto loadDW;

		case MIPS16e_sdsp_func:
			reg = reg16to32[mips16inst.ri64.ry];
			goto writeDW;

		case MIPS16e_sdrasp_func:
			reg = 29;	/* GPRSP */
			goto writeDW;
		}

		goto sigbus;

	case MIPS16e_swsp_op:
		reg = reg16to32[mips16inst.ri.rx];
		if (extended && cpu_has_mips16e2)
			switch (mips16inst.ri.imm >> 5) {
			case 0:		/* SWSP */
			case 1:		/* SWGP */
				break;
			case 2:		/* SHGP */
				opcode = MIPS16e_sh_op;
				break;
			default:
				goto sigbus;
			}
		break;

	case MIPS16e_lwpc_op:
		reg = reg16to32[mips16inst.ri.rx];
		break;

	case MIPS16e_lwsp_op:
		reg = reg16to32[mips16inst.ri.rx];
		if (extended && cpu_has_mips16e2)
			switch (mips16inst.ri.imm >> 5) {
			case 0:		/* LWSP */
			case 1:		/* LWGP */
				break;
			case 2:		/* LHGP */
				opcode = MIPS16e_lh_op;
				break;
			case 4:		/* LHUGP */
				opcode = MIPS16e_lhu_op;
				break;
			default:
				goto sigbus;
			}
		break;

	case MIPS16e_i8_op:
		/* Only the SWRASP form of the I8 group is a memory access. */
		if (mips16inst.i8.func != MIPS16e_swrasp_func)
			goto sigbus;
		reg = 29;	/* GPRSP */
		break;

	default:
		reg = reg16to32[mips16inst.rri.ry];
		break;
	}

	/*
	 * Second pass: perform the access itself.  The epc is advanced
	 * (via MIPS16e_compute_return_epc() on the saved first halfword)
	 * only after the access can no longer fault, so a fault rolls
	 * back cleanly.
	 */
	switch (opcode) {

	case MIPS16e_lb_op:
	case MIPS16e_lbu_op:
	case MIPS16e_sb_op:
		/* Byte accesses can never be misaligned. */
		goto sigbus;

	case MIPS16e_lh_op:
		if (!access_ok(addr, 2))
			goto sigbus;

		LoadHW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lhu_op:
		if (!access_ok(addr, 2))
			goto sigbus;

		LoadHWU(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lw_op:
	case MIPS16e_lwpc_op:
	case MIPS16e_lwsp_op:
		if (!access_ok(addr, 4))
			goto sigbus;

		LoadW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;

	case MIPS16e_lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(addr, 4))
			goto sigbus;

		LoadWU(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case MIPS16e_ld_op:
loadDW:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(addr, 8))
			goto sigbus;

		LoadDW(addr, value, res);
		if (res)
			goto fault;
		MIPS16e_compute_return_epc(regs, &oldinst);
		regs->regs[reg] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case MIPS16e_sh_op:
		if (!access_ok(addr, 2))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreHW(addr, value, res);
		if (res)
			goto fault;
		break;

	case MIPS16e_sw_op:
	case MIPS16e_swsp_op:
	case MIPS16e_i8_op:	/* actually - MIPS16e_swrasp_func */
		if (!access_ok(addr, 4))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreW(addr, value, res);
		if (res)
			goto fault;
		break;

	case MIPS16e_sd_op:
writeDW:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(addr, 8))
			goto sigbus;

		MIPS16e_compute_return_epc(regs, &oldinst);
		value = regs->regs[reg];
		StoreDW(addr, value, res);
		if (res)
			goto fault;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	default:
		/*
		 * Pheeee...  We encountered an yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}

#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif

	return;

fault:
	/* roll back jump/branch */
	regs->cp0_epc = origpc;
	regs->regs[31] = orig31;
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS);

	return;

sigill:
	die_if_kernel
	    ("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL);
}
1515 
/*
 * Address error exception handler.
 *
 * Decides whether the unaligned access may be emulated (per-task
 * TIF_FIXADE flag for user mode, global unaligned_action for all) and
 * dispatches to the microMIPS, MIPS16e or classic-MIPS emulator based
 * on the ISA mode encoded in the epc.  If emulation is refused or the
 * fault was an instruction fetch, SIGBUS is raised instead.
 *
 * NOTE(review): the successful-emulation paths return without calling
 * exception_exit(prev_state); only the sigbus path does.  Verify the
 * context-tracking balance is intentional.
 */
asmlinkage void do_ade(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	unsigned int __user *pc;
	mm_segment_t seg;

	prev_state = exception_enter();
	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
			1, regs, regs->cp0_badvaddr);
	/*
	 * Did we catch a fault trying to load an instruction?
	 */
	if (regs->cp0_badvaddr == regs->cp0_epc)
		goto sigbus;

	/* User task that has not opted in via sysmips(MIPS_FIXADE)? */
	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
		goto sigbus;
	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
		goto sigbus;

	/*
	 * Do branch emulation only if we didn't forward the exception.
	 * This is all so but ugly ...
	 */

	/*
	 * Are we running in microMIPS mode?
	 */
	if (get_isa16_mode(regs->cp0_epc)) {
		/*
		 * Did we catch a fault trying to load an instruction in
		 * 16-bit mode?
		 */
		if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
			goto sigbus;
		if (unaligned_action == UNALIGNED_ACTION_SHOW)
			show_registers(regs);

		if (cpu_has_mmips) {
			/*
			 * Widen the address limit for kernel-mode faults so
			 * the user-access helpers inside the emulator may
			 * touch kernel addresses; restore it afterwards.
			 */
			seg = get_fs();
			if (!user_mode(regs))
				set_fs(KERNEL_DS);
			emulate_load_store_microMIPS(regs,
				(void __user *)regs->cp0_badvaddr);
			set_fs(seg);

			return;
		}

		if (cpu_has_mips16) {
			seg = get_fs();
			if (!user_mode(regs))
				set_fs(KERNEL_DS);
			emulate_load_store_MIPS16e(regs,
				(void __user *)regs->cp0_badvaddr);
			set_fs(seg);

			return;
		}

		goto sigbus;
	}

	if (unaligned_action == UNALIGNED_ACTION_SHOW)
		show_registers(regs);
	/* Classic MIPS: the faulting instruction is a full 32-bit word. */
	pc = (unsigned int __user *)exception_epc(regs);

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);
	emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
	set_fs(seg);

	return;

sigbus:
	die_if_kernel("Kernel unaligned instruction access", regs);
	force_sig(SIGBUS);

	/*
	 * XXX On return from the signal handler we should advance the epc
	 */
	exception_exit(prev_state);
}
1600 
#ifdef CONFIG_DEBUG_FS
/*
 * Expose the unaligned-access statistics under the MIPS debugfs
 * directory:
 *
 *   unaligned_instructions - read-only count of emulated accesses
 *   unaligned_action       - read/write policy knob (emulate / signal /
 *                            show registers)
 *
 * Always returns 0; debugfs creation failures are deliberately ignored.
 * Octal permissions are used instead of the deprecated S_IRUGO /
 * S_IWUSR symbolic macros, per kernel style (checkpatch).
 */
static int __init debugfs_unaligned(void)
{
	debugfs_create_u32("unaligned_instructions", 0444, mips_debugfs_dir,
			   &unaligned_instructions);
	debugfs_create_u32("unaligned_action", 0644,
			   mips_debugfs_dir, &unaligned_action);
	return 0;
}
arch_initcall(debugfs_unaligned);
#endif
1612