xref: /openbmc/linux/arch/mips/kernel/unaligned.c (revision afb46f79)
1 /*
2  * Handle unaligned accesses by emulation.
3  *
4  * This file is subject to the terms and conditions of the GNU General Public
5  * License.  See the file "COPYING" in the main directory of this archive
6  * for more details.
7  *
8  * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
9  * Copyright (C) 1999 Silicon Graphics, Inc.
10  * Copyright (C) 2014 Imagination Technologies Ltd.
11  *
12  * This file contains the exception handler for address error exceptions,
13  * with the special capability to execute faulting instructions in software.
14  * The handler does not try to handle the case when the program counter
15  * points to an address not aligned to a word boundary.
16  *
17  * Putting data at unaligned addresses is bad practice even on Intel, where
18  * only performance is affected.  Much worse is that such code is
19  * non-portable.  Because several programs die on MIPS due to alignment
20  * problems, I decided to implement this handler anyway, though I originally
21  * didn't intend to do this at all for user code.
22  *
23  * For now I enable fixing of address errors by default to make life easier.
24  * However, I intend to disable this at some point in the future, once the
25  * alignment problems in user programs have been fixed.  For programmers this
26  * is the right way to go.
27  *
28  * Fixing address errors is a per-process option.  The option is inherited
29  * across fork(2) and execve(2) calls.  If you really want to use this
30  * option in your user programs - I strongly discourage the use of the
31  * software emulation - use the following code in your userland stuff:
32  *
33  * #include <sys/sysmips.h>
34  *
35  * ...
36  * sysmips(MIPS_FIXADE, x);
37  * ...
38  *
39  * The argument x is 0 to disable software emulation; non-zero enables it.
40  *
41  * Below is a little program to play around with this feature.
42  *
43  * #include <stdio.h>
 * #include <stdlib.h>	/* for atoi() */
44  * #include <sys/sysmips.h>
45  *
46  * struct foo {
47  *	   unsigned char bar[8];
48  * };
49  *
50  * int main(int argc, char *argv[])
51  * {
52  *	   struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
53  *	   unsigned int *p = (unsigned int *) (x.bar + 3);
54  *	   int i;
55  *
56  *	   if (argc > 1)
57  *		   sysmips(MIPS_FIXADE, atoi(argv[1]));
58  *
59  *	   printf("*p = %08x\n", *p);
60  *
61  *	   *p = 0xdeadface;
62  *
63  *	   for (i = 0; i <= 7; i++)
64  *		   printf("%02x ", x.bar[i]);
65  *	   printf("\n");
66  * }
67  *
68  * Coprocessor loads are not supported; I think this case is unimportant
69  * in practice.
70  *
71  * TODO: Handle ndc (attempted store to doubleword in uncached memory)
72  *	 exception for the R6000.
73  *	 A store crossing a page boundary might be executed only partially.
74  *	 Undo the partial store in this case.
75  */
76 #include <linux/context_tracking.h>
77 #include <linux/mm.h>
78 #include <linux/signal.h>
79 #include <linux/smp.h>
80 #include <linux/sched.h>
81 #include <linux/debugfs.h>
82 #include <linux/perf_event.h>
83 
84 #include <asm/asm.h>
85 #include <asm/branch.h>
86 #include <asm/byteorder.h>
87 #include <asm/cop2.h>
88 #include <asm/fpu.h>
89 #include <asm/fpu_emulator.h>
90 #include <asm/inst.h>
91 #include <asm/uaccess.h>
94 
95 #define STR(x)	__STR(x)
96 #define __STR(x)  #x
97 
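/*
 * unaligned_action selects what do_ade() does with an unaligned access:
 * QUIET fixes it up silently, SIGNAL skips the fixup and sends SIGBUS,
 * and SHOW fixes it up after dumping the registers via show_registers().
 * With CONFIG_DEBUG_FS the action is runtime tunable through debugfs
 * (see the bottom of this file); otherwise it is hard-wired to QUIET.
 */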
98 enum {
99 	UNALIGNED_ACTION_QUIET,
100 	UNALIGNED_ACTION_SIGNAL,
101 	UNALIGNED_ACTION_SHOW,
102 };
103 #ifdef CONFIG_DEBUG_FS
104 static u32 unaligned_instructions;
105 static u32 unaligned_action;
106 #else
107 #define unaligned_action UNALIGNED_ACTION_QUIET
108 #endif
109 extern void show_registers(struct pt_regs *regs);
110 
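/*
 * The Load{HW,W,HWU,WU,DW}/Store{HW,W,DW} macros below perform the actual
 * unaligned access.  They expand to inline assembly that assembles the value
 * byte-wise (lb/lbu/sb) or with the lwl/lwr, ldl/ldr, swl/swr and sdl/sdr
 * pairs, and they set 'res' to 0 on success or to -EFAULT, via the
 * .fixup/__ex_table machinery, if one of the accesses faults.  The big- and
 * little-endian variants differ only in the byte offsets used; most of the
 * accesses go through the user_* accessor wrappers so that user memory is
 * reached correctly when CONFIG_EVA is enabled.
 *
 * Typical use, as seen throughout this file:
 *
 *	unsigned long value;
 *	unsigned int res;
 *
 *	LoadW(addr, value, res);
 *	if (res)
 *		goto fault;
 *	regs->regs[reg] = value;
 */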
111 #ifdef __BIG_ENDIAN
112 #define     LoadHW(addr, value, res)  \
113 		__asm__ __volatile__ (".set\tnoat\n"        \
114 			"1:\t"user_lb("%0", "0(%2)")"\n"    \
115 			"2:\t"user_lbu("$1", "1(%2)")"\n\t" \
116 			"sll\t%0, 0x8\n\t"                  \
117 			"or\t%0, $1\n\t"                    \
118 			"li\t%1, 0\n"                       \
119 			"3:\t.set\tat\n\t"                  \
120 			".insn\n\t"                         \
121 			".section\t.fixup,\"ax\"\n\t"       \
122 			"4:\tli\t%1, %3\n\t"                \
123 			"j\t3b\n\t"                         \
124 			".previous\n\t"                     \
125 			".section\t__ex_table,\"a\"\n\t"    \
126 			STR(PTR)"\t1b, 4b\n\t"              \
127 			STR(PTR)"\t2b, 4b\n\t"              \
128 			".previous"                         \
129 			: "=&r" (value), "=r" (res)         \
130 			: "r" (addr), "i" (-EFAULT));
131 
132 #define     LoadW(addr, value, res)   \
133 		__asm__ __volatile__ (                      \
134 			"1:\t"user_lwl("%0", "(%2)")"\n"    \
135 			"2:\t"user_lwr("%0", "3(%2)")"\n\t" \
136 			"li\t%1, 0\n"                       \
137 			"3:\n\t"                            \
138 			".insn\n\t"                         \
139 			".section\t.fixup,\"ax\"\n\t"       \
140 			"4:\tli\t%1, %3\n\t"                \
141 			"j\t3b\n\t"                         \
142 			".previous\n\t"                     \
143 			".section\t__ex_table,\"a\"\n\t"    \
144 			STR(PTR)"\t1b, 4b\n\t"              \
145 			STR(PTR)"\t2b, 4b\n\t"              \
146 			".previous"                         \
147 			: "=&r" (value), "=r" (res)         \
148 			: "r" (addr), "i" (-EFAULT));
149 
150 #define     LoadHWU(addr, value, res) \
151 		__asm__ __volatile__ (                      \
152 			".set\tnoat\n"                      \
153 			"1:\t"user_lbu("%0", "0(%2)")"\n"   \
154 			"2:\t"user_lbu("$1", "1(%2)")"\n\t" \
155 			"sll\t%0, 0x8\n\t"                  \
156 			"or\t%0, $1\n\t"                    \
157 			"li\t%1, 0\n"                       \
158 			"3:\n\t"                            \
159 			".insn\n\t"                         \
160 			".set\tat\n\t"                      \
161 			".section\t.fixup,\"ax\"\n\t"       \
162 			"4:\tli\t%1, %3\n\t"                \
163 			"j\t3b\n\t"                         \
164 			".previous\n\t"                     \
165 			".section\t__ex_table,\"a\"\n\t"    \
166 			STR(PTR)"\t1b, 4b\n\t"              \
167 			STR(PTR)"\t2b, 4b\n\t"              \
168 			".previous"                         \
169 			: "=&r" (value), "=r" (res)         \
170 			: "r" (addr), "i" (-EFAULT));
171 
172 #define     LoadWU(addr, value, res)  \
173 		__asm__ __volatile__ (                      \
174 			"1:\t"user_lwl("%0", "(%2)")"\n"    \
175 			"2:\t"user_lwr("%0", "3(%2)")"\n\t" \
176 			"dsll\t%0, %0, 32\n\t"              \
177 			"dsrl\t%0, %0, 32\n\t"              \
178 			"li\t%1, 0\n"                       \
179 			"3:\n\t"                            \
180 			".insn\n\t"                         \
181 			"\t.section\t.fixup,\"ax\"\n\t"     \
182 			"4:\tli\t%1, %3\n\t"                \
183 			"j\t3b\n\t"                         \
184 			".previous\n\t"                     \
185 			".section\t__ex_table,\"a\"\n\t"    \
186 			STR(PTR)"\t1b, 4b\n\t"              \
187 			STR(PTR)"\t2b, 4b\n\t"              \
188 			".previous"                         \
189 			: "=&r" (value), "=r" (res)         \
190 			: "r" (addr), "i" (-EFAULT));
191 
192 #define     LoadDW(addr, value, res)  \
193 		__asm__ __volatile__ (                      \
194 			"1:\tldl\t%0, (%2)\n"               \
195 			"2:\tldr\t%0, 7(%2)\n\t"            \
196 			"li\t%1, 0\n"                       \
197 			"3:\n\t"                            \
198 			".insn\n\t"                         \
199 			"\t.section\t.fixup,\"ax\"\n\t"     \
200 			"4:\tli\t%1, %3\n\t"                \
201 			"j\t3b\n\t"                         \
202 			".previous\n\t"                     \
203 			".section\t__ex_table,\"a\"\n\t"    \
204 			STR(PTR)"\t1b, 4b\n\t"              \
205 			STR(PTR)"\t2b, 4b\n\t"              \
206 			".previous"                         \
207 			: "=&r" (value), "=r" (res)         \
208 			: "r" (addr), "i" (-EFAULT));
209 
210 #define     StoreHW(addr, value, res) \
211 		__asm__ __volatile__ (                      \
212 			".set\tnoat\n"                      \
213 			"1:\t"user_sb("%1", "1(%2)")"\n"    \
214 			"srl\t$1, %1, 0x8\n"                \
215 			"2:\t"user_sb("$1", "0(%2)")"\n"    \
216 			".set\tat\n\t"                      \
217 			"li\t%0, 0\n"                       \
218 			"3:\n\t"                            \
219 			".insn\n\t"                         \
220 			".section\t.fixup,\"ax\"\n\t"       \
221 			"4:\tli\t%0, %3\n\t"                \
222 			"j\t3b\n\t"                         \
223 			".previous\n\t"                     \
224 			".section\t__ex_table,\"a\"\n\t"    \
225 			STR(PTR)"\t1b, 4b\n\t"              \
226 			STR(PTR)"\t2b, 4b\n\t"              \
227 			".previous"                         \
228 			: "=r" (res)                        \
229 			: "r" (value), "r" (addr), "i" (-EFAULT));
230 
231 #define     StoreW(addr, value, res)  \
232 		__asm__ __volatile__ (                      \
233 			"1:\t"user_swl("%1", "(%2)")"\n"    \
234 			"2:\t"user_swr("%1", "3(%2)")"\n\t" \
235 			"li\t%0, 0\n"                       \
236 			"3:\n\t"                            \
237 			".insn\n\t"                         \
238 			".section\t.fixup,\"ax\"\n\t"       \
239 			"4:\tli\t%0, %3\n\t"                \
240 			"j\t3b\n\t"                         \
241 			".previous\n\t"                     \
242 			".section\t__ex_table,\"a\"\n\t"    \
243 			STR(PTR)"\t1b, 4b\n\t"              \
244 			STR(PTR)"\t2b, 4b\n\t"              \
245 			".previous"                         \
246 		: "=r" (res)                                \
247 		: "r" (value), "r" (addr), "i" (-EFAULT));
248 
249 #define     StoreDW(addr, value, res) \
250 		__asm__ __volatile__ (                      \
251 			"1:\tsdl\t%1, (%2)\n"               \
252 			"2:\tsdr\t%1, 7(%2)\n\t"            \
253 			"li\t%0, 0\n"                       \
254 			"3:\n\t"                            \
255 			".insn\n\t"                         \
256 			".section\t.fixup,\"ax\"\n\t"       \
257 			"4:\tli\t%0, %3\n\t"                \
258 			"j\t3b\n\t"                         \
259 			".previous\n\t"                     \
260 			".section\t__ex_table,\"a\"\n\t"    \
261 			STR(PTR)"\t1b, 4b\n\t"              \
262 			STR(PTR)"\t2b, 4b\n\t"              \
263 			".previous"                         \
264 		: "=r" (res)                                \
265 		: "r" (value), "r" (addr), "i" (-EFAULT));
266 #endif
267 
268 #ifdef __LITTLE_ENDIAN
269 #define     LoadHW(addr, value, res)  \
270 		__asm__ __volatile__ (".set\tnoat\n"        \
271 			"1:\t"user_lb("%0", "1(%2)")"\n"    \
272 			"2:\t"user_lbu("$1", "0(%2)")"\n\t" \
273 			"sll\t%0, 0x8\n\t"                  \
274 			"or\t%0, $1\n\t"                    \
275 			"li\t%1, 0\n"                       \
276 			"3:\t.set\tat\n\t"                  \
277 			".insn\n\t"                         \
278 			".section\t.fixup,\"ax\"\n\t"       \
279 			"4:\tli\t%1, %3\n\t"                \
280 			"j\t3b\n\t"                         \
281 			".previous\n\t"                     \
282 			".section\t__ex_table,\"a\"\n\t"    \
283 			STR(PTR)"\t1b, 4b\n\t"              \
284 			STR(PTR)"\t2b, 4b\n\t"              \
285 			".previous"                         \
286 			: "=&r" (value), "=r" (res)         \
287 			: "r" (addr), "i" (-EFAULT));
288 
289 #define     LoadW(addr, value, res)   \
290 		__asm__ __volatile__ (                      \
291 			"1:\t"user_lwl("%0", "3(%2)")"\n"   \
292 			"2:\t"user_lwr("%0", "(%2)")"\n\t"  \
293 			"li\t%1, 0\n"                       \
294 			"3:\n\t"                            \
295 			".insn\n\t"                         \
296 			".section\t.fixup,\"ax\"\n\t"       \
297 			"4:\tli\t%1, %3\n\t"                \
298 			"j\t3b\n\t"                         \
299 			".previous\n\t"                     \
300 			".section\t__ex_table,\"a\"\n\t"    \
301 			STR(PTR)"\t1b, 4b\n\t"              \
302 			STR(PTR)"\t2b, 4b\n\t"              \
303 			".previous"                         \
304 			: "=&r" (value), "=r" (res)         \
305 			: "r" (addr), "i" (-EFAULT));
306 
307 #define     LoadHWU(addr, value, res) \
308 		__asm__ __volatile__ (                      \
309 			".set\tnoat\n"                      \
310 			"1:\t"user_lbu("%0", "1(%2)")"\n"   \
311 			"2:\t"user_lbu("$1", "0(%2)")"\n\t" \
312 			"sll\t%0, 0x8\n\t"                  \
313 			"or\t%0, $1\n\t"                    \
314 			"li\t%1, 0\n"                       \
315 			"3:\n\t"                            \
316 			".insn\n\t"                         \
317 			".set\tat\n\t"                      \
318 			".section\t.fixup,\"ax\"\n\t"       \
319 			"4:\tli\t%1, %3\n\t"                \
320 			"j\t3b\n\t"                         \
321 			".previous\n\t"                     \
322 			".section\t__ex_table,\"a\"\n\t"    \
323 			STR(PTR)"\t1b, 4b\n\t"              \
324 			STR(PTR)"\t2b, 4b\n\t"              \
325 			".previous"                         \
326 			: "=&r" (value), "=r" (res)         \
327 			: "r" (addr), "i" (-EFAULT));
328 
329 #define     LoadWU(addr, value, res)  \
330 		__asm__ __volatile__ (                      \
331 			"1:\t"user_lwl("%0", "3(%2)")"\n"   \
332 			"2:\t"user_lwr("%0", "(%2)")"\n\t"  \
333 			"dsll\t%0, %0, 32\n\t"              \
334 			"dsrl\t%0, %0, 32\n\t"              \
335 			"li\t%1, 0\n"                       \
336 			"3:\n\t"                            \
337 			".insn\n\t"                         \
338 			"\t.section\t.fixup,\"ax\"\n\t"     \
339 			"4:\tli\t%1, %3\n\t"                \
340 			"j\t3b\n\t"                         \
341 			".previous\n\t"                     \
342 			".section\t__ex_table,\"a\"\n\t"    \
343 			STR(PTR)"\t1b, 4b\n\t"              \
344 			STR(PTR)"\t2b, 4b\n\t"              \
345 			".previous"                         \
346 			: "=&r" (value), "=r" (res)         \
347 			: "r" (addr), "i" (-EFAULT));
348 
349 #define     LoadDW(addr, value, res)  \
350 		__asm__ __volatile__ (                      \
351 			"1:\tldl\t%0, 7(%2)\n"              \
352 			"2:\tldr\t%0, (%2)\n\t"             \
353 			"li\t%1, 0\n"                       \
354 			"3:\n\t"                            \
355 			".insn\n\t"                         \
356 			"\t.section\t.fixup,\"ax\"\n\t"     \
357 			"4:\tli\t%1, %3\n\t"                \
358 			"j\t3b\n\t"                         \
359 			".previous\n\t"                     \
360 			".section\t__ex_table,\"a\"\n\t"    \
361 			STR(PTR)"\t1b, 4b\n\t"              \
362 			STR(PTR)"\t2b, 4b\n\t"              \
363 			".previous"                         \
364 			: "=&r" (value), "=r" (res)         \
365 			: "r" (addr), "i" (-EFAULT));
366 
367 #define     StoreHW(addr, value, res) \
368 		__asm__ __volatile__ (                      \
369 			".set\tnoat\n"                      \
370 			"1:\t"user_sb("%1", "0(%2)")"\n"    \
371 			"srl\t$1, %1, 0x8\n"                \
372 			"2:\t"user_sb("$1", "1(%2)")"\n"    \
373 			".set\tat\n\t"                      \
374 			"li\t%0, 0\n"                       \
375 			"3:\n\t"                            \
376 			".insn\n\t"                         \
377 			".section\t.fixup,\"ax\"\n\t"       \
378 			"4:\tli\t%0, %3\n\t"                \
379 			"j\t3b\n\t"                         \
380 			".previous\n\t"                     \
381 			".section\t__ex_table,\"a\"\n\t"    \
382 			STR(PTR)"\t1b, 4b\n\t"              \
383 			STR(PTR)"\t2b, 4b\n\t"              \
384 			".previous"                         \
385 			: "=r" (res)                        \
386 			: "r" (value), "r" (addr), "i" (-EFAULT));
387 
388 #define     StoreW(addr, value, res)  \
389 		__asm__ __volatile__ (                      \
390 			"1:\t"user_swl("%1", "3(%2)")"\n"   \
391 			"2:\t"user_swr("%1", "(%2)")"\n\t"  \
392 			"li\t%0, 0\n"                       \
393 			"3:\n\t"                            \
394 			".insn\n\t"                         \
395 			".section\t.fixup,\"ax\"\n\t"       \
396 			"4:\tli\t%0, %3\n\t"                \
397 			"j\t3b\n\t"                         \
398 			".previous\n\t"                     \
399 			".section\t__ex_table,\"a\"\n\t"    \
400 			STR(PTR)"\t1b, 4b\n\t"              \
401 			STR(PTR)"\t2b, 4b\n\t"              \
402 			".previous"                         \
403 		: "=r" (res)                                \
404 		: "r" (value), "r" (addr), "i" (-EFAULT));
405 
406 #define     StoreDW(addr, value, res) \
407 		__asm__ __volatile__ (                      \
408 			"1:\tsdl\t%1, 7(%2)\n"              \
409 			"2:\tsdr\t%1, (%2)\n\t"             \
410 			"li\t%0, 0\n"                       \
411 			"3:\n\t"                            \
412 			".insn\n\t"                         \
413 			".section\t.fixup,\"ax\"\n\t"       \
414 			"4:\tli\t%0, %3\n\t"                \
415 			"j\t3b\n\t"                         \
416 			".previous\n\t"                     \
417 			".section\t__ex_table,\"a\"\n\t"    \
418 			STR(PTR)"\t1b, 4b\n\t"              \
419 			STR(PTR)"\t2b, 4b\n\t"              \
420 			".previous"                         \
421 		: "=r" (res)                                \
422 		: "r" (value), "r" (addr), "i" (-EFAULT));
423 #endif
424 
425 static void emulate_load_store_insn(struct pt_regs *regs,
426 	void __user *addr, unsigned int __user *pc)
427 {
428 	union mips_instruction insn;
429 	unsigned long value;
430 	unsigned int res;
431 	unsigned long origpc;
432 	unsigned long orig31;
433 	void __user *fault_addr = NULL;
434 #ifdef	CONFIG_EVA
435 	mm_segment_t seg;
436 #endif
437 	origpc = (unsigned long)pc;
438 	orig31 = regs->regs[31];
439 
440 	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
441 
442 	/*
443 	 * This load never faults.
444 	 */
445 	__get_user(insn.word, pc);
446 
447 	switch (insn.i_format.opcode) {
448 		/*
449 		 * These are instructions that a compiler doesn't generate.  We
450 		 * can therefore assume that the code is MIPS-aware and
451 		 * really buggy.  Emulating these instructions would break the
452 		 * semantics anyway.
453 		 */
454 	case ll_op:
455 	case lld_op:
456 	case sc_op:
457 	case scd_op:
458 
459 		/*
460 		 * For these instructions the only way to create an address
461 		 * error is an attempted access to kernel/supervisor address
462 		 * space.
463 		 */
464 	case ldl_op:
465 	case ldr_op:
466 	case lwl_op:
467 	case lwr_op:
468 	case sdl_op:
469 	case sdr_op:
470 	case swl_op:
471 	case swr_op:
472 	case lb_op:
473 	case lbu_op:
474 	case sb_op:
475 		goto sigbus;
476 
477 		/*
478 		 * The remaining opcodes are the ones that are really of
479 		 * interest.
480 		 */
481 #ifdef CONFIG_EVA
482 	case spec3_op:
483 		/*
484 		 * We can land here only from the kernel accessing user
485 		 * memory, so we need to "switch" the address limit to user
486 		 * space so that the address check can work properly.
487 		 */
488 		seg = get_fs();
489 		set_fs(USER_DS);
490 		switch (insn.spec3_format.func) {
491 		case lhe_op:
492 			if (!access_ok(VERIFY_READ, addr, 2)) {
493 				set_fs(seg);
494 				goto sigbus;
495 			}
496 			LoadHW(addr, value, res);
497 			if (res) {
498 				set_fs(seg);
499 				goto fault;
500 			}
501 			compute_return_epc(regs);
502 			regs->regs[insn.spec3_format.rt] = value;
503 			break;
504 		case lwe_op:
505 			if (!access_ok(VERIFY_READ, addr, 4)) {
506 				set_fs(seg);
507 				goto sigbus;
508 			}
509 			LoadW(addr, value, res);
510 			if (res) {
511 				set_fs(seg);
512 				goto fault;
513 			}
514 			compute_return_epc(regs);
515 			regs->regs[insn.spec3_format.rt] = value;
516 			break;
517 		case lhue_op:
518 			if (!access_ok(VERIFY_READ, addr, 2)) {
519 				set_fs(seg);
520 				goto sigbus;
521 			}
522 			LoadHWU(addr, value, res);
523 			if (res) {
524 				set_fs(seg);
525 				goto fault;
526 			}
527 			compute_return_epc(regs);
528 			regs->regs[insn.spec3_format.rt] = value;
529 			break;
530 		case she_op:
531 			if (!access_ok(VERIFY_WRITE, addr, 2)) {
532 				set_fs(seg);
533 				goto sigbus;
534 			}
535 			compute_return_epc(regs);
536 			value = regs->regs[insn.spec3_format.rt];
537 			StoreHW(addr, value, res);
538 			if (res) {
539 				set_fs(seg);
540 				goto fault;
541 			}
542 			break;
543 		case swe_op:
544 			if (!access_ok(VERIFY_WRITE, addr, 4)) {
545 				set_fs(seg);
546 				goto sigbus;
547 			}
548 			compute_return_epc(regs);
549 			value = regs->regs[insn.spec3_format.rt];
550 			StoreW(addr, value, res);
551 			if (res) {
552 				set_fs(seg);
553 				goto fault;
554 			}
555 			break;
556 		default:
557 			set_fs(seg);
558 			goto sigill;
559 		}
560 		set_fs(seg);
561 		break;
562 #endif
563 	case lh_op:
564 		if (!access_ok(VERIFY_READ, addr, 2))
565 			goto sigbus;
566 
567 		LoadHW(addr, value, res);
568 		if (res)
569 			goto fault;
570 		compute_return_epc(regs);
571 		regs->regs[insn.i_format.rt] = value;
572 		break;
573 
574 	case lw_op:
575 		if (!access_ok(VERIFY_READ, addr, 4))
576 			goto sigbus;
577 
578 		LoadW(addr, value, res);
579 		if (res)
580 			goto fault;
581 		compute_return_epc(regs);
582 		regs->regs[insn.i_format.rt] = value;
583 		break;
584 
585 	case lhu_op:
586 		if (!access_ok(VERIFY_READ, addr, 2))
587 			goto sigbus;
588 
589 		LoadHWU(addr, value, res);
590 		if (res)
591 			goto fault;
592 		compute_return_epc(regs);
593 		regs->regs[insn.i_format.rt] = value;
594 		break;
595 
596 	case lwu_op:
597 #ifdef CONFIG_64BIT
598 		/*
599 		 * A 32-bit kernel might be running on a 64-bit processor.  But
600 		 * if we're on a 32-bit processor and an i-cache incoherency
601 		 * or race makes us see a 64-bit instruction here the sdl/sdr
602 		 * would blow up, so for now we don't handle unaligned 64-bit
603 		 * instructions on 32-bit kernels.
604 		 */
605 		if (!access_ok(VERIFY_READ, addr, 4))
606 			goto sigbus;
607 
608 		LoadWU(addr, value, res);
609 		if (res)
610 			goto fault;
611 		compute_return_epc(regs);
612 		regs->regs[insn.i_format.rt] = value;
613 		break;
614 #endif /* CONFIG_64BIT */
615 
616 		/* Cannot handle 64-bit instructions in 32-bit kernel */
617 		goto sigill;
618 
619 	case ld_op:
620 #ifdef CONFIG_64BIT
621 		/*
622 		 * A 32-bit kernel might be running on a 64-bit processor.  But
623 		 * if we're on a 32-bit processor and an i-cache incoherency
624 		 * or race makes us see a 64-bit instruction here the sdl/sdr
625 		 * would blow up, so for now we don't handle unaligned 64-bit
626 		 * instructions on 32-bit kernels.
627 		 */
628 		if (!access_ok(VERIFY_READ, addr, 8))
629 			goto sigbus;
630 
631 		LoadDW(addr, value, res);
632 		if (res)
633 			goto fault;
634 		compute_return_epc(regs);
635 		regs->regs[insn.i_format.rt] = value;
636 		break;
637 #endif /* CONFIG_64BIT */
638 
639 		/* Cannot handle 64-bit instructions in 32-bit kernel */
640 		goto sigill;
641 
642 	case sh_op:
643 		if (!access_ok(VERIFY_WRITE, addr, 2))
644 			goto sigbus;
645 
646 		compute_return_epc(regs);
647 		value = regs->regs[insn.i_format.rt];
648 		StoreHW(addr, value, res);
649 		if (res)
650 			goto fault;
651 		break;
652 
653 	case sw_op:
654 		if (!access_ok(VERIFY_WRITE, addr, 4))
655 			goto sigbus;
656 
657 		compute_return_epc(regs);
658 		value = regs->regs[insn.i_format.rt];
659 		StoreW(addr, value, res);
660 		if (res)
661 			goto fault;
662 		break;
663 
664 	case sd_op:
665 #ifdef CONFIG_64BIT
666 		/*
667 		 * A 32-bit kernel might be running on a 64-bit processor.  But
668 		 * if we're on a 32-bit processor and an i-cache incoherency
669 		 * or race makes us see a 64-bit instruction here the sdl/sdr
670 		 * would blow up, so for now we don't handle unaligned 64-bit
671 		 * instructions on 32-bit kernels.
672 		 */
673 		if (!access_ok(VERIFY_WRITE, addr, 8))
674 			goto sigbus;
675 
676 		compute_return_epc(regs);
677 		value = regs->regs[insn.i_format.rt];
678 		StoreDW(addr, value, res);
679 		if (res)
680 			goto fault;
681 		break;
682 #endif /* CONFIG_64BIT */
683 
684 		/* Cannot handle 64-bit instructions in 32-bit kernel */
685 		goto sigill;
686 
687 	case lwc1_op:
688 	case ldc1_op:
689 	case swc1_op:
690 	case sdc1_op:
691 		die_if_kernel("Unaligned FP access in kernel code", regs);
692 		BUG_ON(!used_math());
693 		BUG_ON(!is_fpu_owner());
694 
695 		lose_fpu(1);	/* Save FPU state for the emulator. */
696 		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
697 					       &fault_addr);
698 		own_fpu(1);	/* Restore FPU state. */
699 
700 		/* Signal if something went wrong. */
701 		process_fpemu_return(res, fault_addr);
702 
703 		if (res == 0)
704 			break;
705 		return;
706 
707 	/*
708 	 * COP2 is available to the implementor for application-specific use.
709 	 * It's up to applications to register a notifier chain and do
710 	 * whatever they have to do, including possibly sending signals.
711 	 */
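	/*
	 * A platform hooks into this from its own code, roughly as in the
	 * sketch below; cu2_notifier() comes from <asm/cop2.h>, while the
	 * function and initcall names here are made up for illustration:
	 *
	 *	static int my_cu2_call(struct notifier_block *nb,
	 *			       unsigned long action, void *data)
	 *	{
	 *		struct pt_regs *regs = data;
	 *
	 *		switch (action) {
	 *		case CU2_LWC2_OP:
	 *		case CU2_SWC2_OP:
	 *			... emulate or send a signal here ...
	 *			return NOTIFY_STOP;
	 *		}
	 *		return NOTIFY_OK;
	 *	}
	 *
	 *	static int __init my_cu2_setup(void)
	 *	{
	 *		return cu2_notifier(my_cu2_call, 0);
	 *	}
	 *	early_initcall(my_cu2_setup);
	 */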
712 	case lwc2_op:
713 		cu2_notifier_call_chain(CU2_LWC2_OP, regs);
714 		break;
715 
716 	case ldc2_op:
717 		cu2_notifier_call_chain(CU2_LDC2_OP, regs);
718 		break;
719 
720 	case swc2_op:
721 		cu2_notifier_call_chain(CU2_SWC2_OP, regs);
722 		break;
723 
724 	case sdc2_op:
725 		cu2_notifier_call_chain(CU2_SDC2_OP, regs);
726 		break;
727 
728 	default:
729 		/*
730 		 * Pheeee...  We encountered an as yet unknown instruction or
731 		 * cache coherence problem.  Die sucker, die ...
732 		 */
733 		goto sigill;
734 	}
735 
736 #ifdef CONFIG_DEBUG_FS
737 	unaligned_instructions++;
738 #endif
739 
740 	return;
741 
742 fault:
743 	/* roll back jump/branch */
744 	regs->cp0_epc = origpc;
745 	regs->regs[31] = orig31;
746 	/* Did we have an exception handler installed? */
747 	if (fixup_exception(regs))
748 		return;
749 
750 	die_if_kernel("Unhandled kernel unaligned access", regs);
751 	force_sig(SIGSEGV, current);
752 
753 	return;
754 
755 sigbus:
756 	die_if_kernel("Unhandled kernel unaligned access", regs);
757 	force_sig(SIGBUS, current);
758 
759 	return;
760 
761 sigill:
762 	die_if_kernel("Unhandled kernel unaligned access or invalid instruction",
763 		      regs);
764 	force_sig(SIGILL, current);
765 }
766 
767 /* Recode table from 16-bit register notation to 32-bit GPR. */
768 const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
769 
770 /* Recode table from 16-bit STORE register notation to 32-bit GPR. */
771 const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
772 
773 static void emulate_load_store_microMIPS(struct pt_regs *regs,
774 					 void __user *addr)
775 {
776 	unsigned long value;
777 	unsigned int res;
778 	int i;
779 	unsigned int reg = 0, rvar;
780 	unsigned long orig31;
781 	u16 __user *pc16;
782 	u16 halfword;
783 	unsigned int word;
784 	unsigned long origpc, contpc;
785 	union mips_instruction insn;
786 	struct mm_decoded_insn mminsn;
787 	void __user *fault_addr = NULL;
788 
789 	origpc = regs->cp0_epc;
790 	orig31 = regs->regs[31];
791 
792 	mminsn.micro_mips_mode = 1;
793 
794 	/*
795 	 * This load never faults.
796 	 */
797 	pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
798 	__get_user(halfword, pc16);
799 	pc16++;
800 	contpc = regs->cp0_epc + 2;
801 	word = ((unsigned int)halfword << 16);
802 	mminsn.pc_inc = 2;
803 
804 	if (!mm_insn_16bit(halfword)) {
805 		__get_user(halfword, pc16);
806 		pc16++;
807 		contpc = regs->cp0_epc + 4;
808 		mminsn.pc_inc = 4;
809 		word |= halfword;
810 	}
811 	mminsn.insn = word;
812 
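	/*
	 * Also fetch the following instruction: if the instruction at EPC
	 * turns out to be a branch or jump, the access that faulted was
	 * issued from its delay slot, so mm_isBranchInstr() below makes us
	 * decode next_insn instead and adjusts contpc accordingly.
	 */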
813 	if (get_user(halfword, pc16))
814 		goto fault;
815 	mminsn.next_pc_inc = 2;
816 	word = ((unsigned int)halfword << 16);
817 
818 	if (!mm_insn_16bit(halfword)) {
819 		pc16++;
820 		if (get_user(halfword, pc16))
821 			goto fault;
822 		mminsn.next_pc_inc = 4;
823 		word |= halfword;
824 	}
825 	mminsn.next_insn = word;
826 
827 	insn = (union mips_instruction)(mminsn.insn);
828 	if (mm_isBranchInstr(regs, mminsn, &contpc))
829 		insn = (union mips_instruction)(mminsn.next_insn);
830 
831 	/*  Parse instruction to find what to do */
832 
833 	switch (insn.mm_i_format.opcode) {
834 
835 	case mm_pool32a_op:
836 		switch (insn.mm_x_format.func) {
837 		case mm_lwxs_op:
838 			reg = insn.mm_x_format.rd;
839 			goto loadW;
840 		}
841 
842 		goto sigbus;
843 
844 	case mm_pool32b_op:
845 		switch (insn.mm_m_format.func) {
846 		case mm_lwp_func:
847 			reg = insn.mm_m_format.rd;
848 			if (reg == 31)
849 				goto sigbus;
850 
851 			if (!access_ok(VERIFY_READ, addr, 8))
852 				goto sigbus;
853 
854 			LoadW(addr, value, res);
855 			if (res)
856 				goto fault;
857 			regs->regs[reg] = value;
858 			addr += 4;
859 			LoadW(addr, value, res);
860 			if (res)
861 				goto fault;
862 			regs->regs[reg + 1] = value;
863 			goto success;
864 
865 		case mm_swp_func:
866 			reg = insn.mm_m_format.rd;
867 			if (reg == 31)
868 				goto sigbus;
869 
870 			if (!access_ok(VERIFY_WRITE, addr, 8))
871 				goto sigbus;
872 
873 			value = regs->regs[reg];
874 			StoreW(addr, value, res);
875 			if (res)
876 				goto fault;
877 			addr += 4;
878 			value = regs->regs[reg + 1];
879 			StoreW(addr, value, res);
880 			if (res)
881 				goto fault;
882 			goto success;
883 
884 		case mm_ldp_func:
885 #ifdef CONFIG_64BIT
886 			reg = insn.mm_m_format.rd;
887 			if (reg == 31)
888 				goto sigbus;
889 
890 			if (!access_ok(VERIFY_READ, addr, 16))
891 				goto sigbus;
892 
893 			LoadDW(addr, value, res);
894 			if (res)
895 				goto fault;
896 			regs->regs[reg] = value;
897 			addr += 8;
898 			LoadDW(addr, value, res);
899 			if (res)
900 				goto fault;
901 			regs->regs[reg + 1] = value;
902 			goto success;
903 #endif /* CONFIG_64BIT */
904 
905 			goto sigill;
906 
907 		case mm_sdp_func:
908 #ifdef CONFIG_64BIT
909 			reg = insn.mm_m_format.rd;
910 			if (reg == 31)
911 				goto sigbus;
912 
913 			if (!access_ok(VERIFY_WRITE, addr, 16))
914 				goto sigbus;
915 
916 			value = regs->regs[reg];
917 			StoreDW(addr, value, res);
918 			if (res)
919 				goto fault;
920 			addr += 8;
921 			value = regs->regs[reg + 1];
922 			StoreDW(addr, value, res);
923 			if (res)
924 				goto fault;
925 			goto success;
926 #endif /* CONFIG_64BIT */
927 
928 			goto sigill;
929 
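		/*
		 * LWM32/SWM32 (and LDM/SDM below) encode a register list in
		 * rd: the low four bits give the number of registers to
		 * transfer starting at $16 (the value 9 meaning $16-$23 plus
		 * $30), and bit 4 requests $31 as well.
		 */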
930 		case mm_lwm32_func:
931 			reg = insn.mm_m_format.rd;
932 			rvar = reg & 0xf;
933 			if ((rvar > 9) || !reg)
934 				goto sigill;
935 			if (reg & 0x10) {
936 				if (!access_ok
937 				    (VERIFY_READ, addr, 4 * (rvar + 1)))
938 					goto sigbus;
939 			} else {
940 				if (!access_ok(VERIFY_READ, addr, 4 * rvar))
941 					goto sigbus;
942 			}
943 			if (rvar == 9)
944 				rvar = 8;
945 			for (i = 16; rvar; rvar--, i++) {
946 				LoadW(addr, value, res);
947 				if (res)
948 					goto fault;
949 				addr += 4;
950 				regs->regs[i] = value;
951 			}
952 			if ((reg & 0xf) == 9) {
953 				LoadW(addr, value, res);
954 				if (res)
955 					goto fault;
956 				addr += 4;
957 				regs->regs[30] = value;
958 			}
959 			if (reg & 0x10) {
960 				LoadW(addr, value, res);
961 				if (res)
962 					goto fault;
963 				regs->regs[31] = value;
964 			}
965 			goto success;
966 
967 		case mm_swm32_func:
968 			reg = insn.mm_m_format.rd;
969 			rvar = reg & 0xf;
970 			if ((rvar > 9) || !reg)
971 				goto sigill;
972 			if (reg & 0x10) {
973 				if (!access_ok
974 				    (VERIFY_WRITE, addr, 4 * (rvar + 1)))
975 					goto sigbus;
976 			} else {
977 				if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
978 					goto sigbus;
979 			}
980 			if (rvar == 9)
981 				rvar = 8;
982 			for (i = 16; rvar; rvar--, i++) {
983 				value = regs->regs[i];
984 				StoreW(addr, value, res);
985 				if (res)
986 					goto fault;
987 				addr += 4;
988 			}
989 			if ((reg & 0xf) == 9) {
990 				value = regs->regs[30];
991 				StoreW(addr, value, res);
992 				if (res)
993 					goto fault;
994 				addr += 4;
995 			}
996 			if (reg & 0x10) {
997 				value = regs->regs[31];
998 				StoreW(addr, value, res);
999 				if (res)
1000 					goto fault;
1001 			}
1002 			goto success;
1003 
1004 		case mm_ldm_func:
1005 #ifdef CONFIG_64BIT
1006 			reg = insn.mm_m_format.rd;
1007 			rvar = reg & 0xf;
1008 			if ((rvar > 9) || !reg)
1009 				goto sigill;
1010 			if (reg & 0x10) {
1011 				if (!access_ok
1012 				    (VERIFY_READ, addr, 8 * (rvar + 1)))
1013 					goto sigbus;
1014 			} else {
1015 				if (!access_ok(VERIFY_READ, addr, 8 * rvar))
1016 					goto sigbus;
1017 			}
1018 			if (rvar == 9)
1019 				rvar = 8;
1020 
1021 			for (i = 16; rvar; rvar--, i++) {
1022 				LoadDW(addr, value, res);
1023 				if (res)
1024 					goto fault;
1025 				addr += 4;
1026 				regs->regs[i] = value;
1027 			}
1028 			if ((reg & 0xf) == 9) {
1029 				LoadDW(addr, value, res);
1030 				if (res)
1031 					goto fault;
1032 				addr += 8;
1033 				regs->regs[30] = value;
1034 			}
1035 			if (reg & 0x10) {
1036 				LoadDW(addr, value, res);
1037 				if (res)
1038 					goto fault;
1039 				regs->regs[31] = value;
1040 			}
1041 			goto success;
1042 #endif /* CONFIG_64BIT */
1043 
1044 			goto sigill;
1045 
1046 		case mm_sdm_func:
1047 #ifdef CONFIG_64BIT
1048 			reg = insn.mm_m_format.rd;
1049 			rvar = reg & 0xf;
1050 			if ((rvar > 9) || !reg)
1051 				goto sigill;
1052 			if (reg & 0x10) {
1053 				if (!access_ok
1054 				    (VERIFY_WRITE, addr, 8 * (rvar + 1)))
1055 					goto sigbus;
1056 			} else {
1057 				if (!access_ok(VERIFY_WRITE, addr, 8 * rvar))
1058 					goto sigbus;
1059 			}
1060 			if (rvar == 9)
1061 				rvar = 8;
1062 
1063 			for (i = 16; rvar; rvar--, i++) {
1064 				value = regs->regs[i];
1065 				StoreDW(addr, value, res);
1066 				if (res)
1067 					goto fault;
1068 				addr += 8;
1069 			}
1070 			if ((reg & 0xf) == 9) {
1071 				value = regs->regs[30];
1072 				StoreDW(addr, value, res);
1073 				if (res)
1074 					goto fault;
1075 				addr += 8;
1076 			}
1077 			if (reg & 0x10) {
1078 				value = regs->regs[31];
1079 				StoreDW(addr, value, res);
1080 				if (res)
1081 					goto fault;
1082 			}
1083 			goto success;
1084 #endif /* CONFIG_64BIT */
1085 
1086 			goto sigill;
1087 
1088 			/*  LWC2, SWC2, LDC2, SDC2 are not serviced */
1089 		}
1090 
1091 		goto sigbus;
1092 
1093 	case mm_pool32c_op:
1094 		switch (insn.mm_m_format.func) {
1095 		case mm_lwu_func:
1096 			reg = insn.mm_m_format.rd;
1097 			goto loadWU;
1098 		}
1099 
1100 		/*  LL,SC,LLD,SCD are not serviced */
1101 		goto sigbus;
1102 
1103 	case mm_pool32f_op:
1104 		switch (insn.mm_x_format.func) {
1105 		case mm_lwxc1_func:
1106 		case mm_swxc1_func:
1107 		case mm_ldxc1_func:
1108 		case mm_sdxc1_func:
1109 			goto fpu_emul;
1110 		}
1111 
1112 		goto sigbus;
1113 
1114 	case mm_ldc132_op:
1115 	case mm_sdc132_op:
1116 	case mm_lwc132_op:
1117 	case mm_swc132_op:
1118 fpu_emul:
1119 		/* roll back jump/branch */
1120 		regs->cp0_epc = origpc;
1121 		regs->regs[31] = orig31;
1122 
1123 		die_if_kernel("Unaligned FP access in kernel code", regs);
1124 		BUG_ON(!used_math());
1125 		BUG_ON(!is_fpu_owner());
1126 
1127 		lose_fpu(1);	/* save the FPU state for the emulator */
1128 		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
1129 					       &fault_addr);
1130 		own_fpu(1);	/* restore FPU state */
1131 
1132 		/* If something went wrong, signal */
1133 		process_fpemu_return(res, fault_addr);
1134 
1135 		if (res == 0)
1136 			goto success;
1137 		return;
1138 
1139 	case mm_lh32_op:
1140 		reg = insn.mm_i_format.rt;
1141 		goto loadHW;
1142 
1143 	case mm_lhu32_op:
1144 		reg = insn.mm_i_format.rt;
1145 		goto loadHWU;
1146 
1147 	case mm_lw32_op:
1148 		reg = insn.mm_i_format.rt;
1149 		goto loadW;
1150 
1151 	case mm_sh32_op:
1152 		reg = insn.mm_i_format.rt;
1153 		goto storeHW;
1154 
1155 	case mm_sw32_op:
1156 		reg = insn.mm_i_format.rt;
1157 		goto storeW;
1158 
1159 	case mm_ld32_op:
1160 		reg = insn.mm_i_format.rt;
1161 		goto loadDW;
1162 
1163 	case mm_sd32_op:
1164 		reg = insn.mm_i_format.rt;
1165 		goto storeDW;
1166 
1167 	case mm_pool16c_op:
1168 		switch (insn.mm16_m_format.func) {
1169 		case mm_lwm16_op:
1170 			reg = insn.mm16_m_format.rlist;
1171 			rvar = reg + 1;
1172 			if (!access_ok(VERIFY_READ, addr, 4 * rvar))
1173 				goto sigbus;
1174 
1175 			for (i = 16; rvar; rvar--, i++) {
1176 				LoadW(addr, value, res);
1177 				if (res)
1178 					goto fault;
1179 				addr += 4;
1180 				regs->regs[i] = value;
1181 			}
1182 			LoadW(addr, value, res);
1183 			if (res)
1184 				goto fault;
1185 			regs->regs[31] = value;
1186 
1187 			goto success;
1188 
1189 		case mm_swm16_op:
1190 			reg = insn.mm16_m_format.rlist;
1191 			rvar = reg + 1;
1192 			if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
1193 				goto sigbus;
1194 
1195 			for (i = 16; rvar; rvar--, i++) {
1196 				value = regs->regs[i];
1197 				StoreW(addr, value, res);
1198 				if (res)
1199 					goto fault;
1200 				addr += 4;
1201 			}
1202 			value = regs->regs[31];
1203 			StoreW(addr, value, res);
1204 			if (res)
1205 				goto fault;
1206 
1207 			goto success;
1208 
1209 		}
1210 
1211 		goto sigbus;
1212 
1213 	case mm_lhu16_op:
1214 		reg = reg16to32[insn.mm16_rb_format.rt];
1215 		goto loadHWU;
1216 
1217 	case mm_lw16_op:
1218 		reg = reg16to32[insn.mm16_rb_format.rt];
1219 		goto loadW;
1220 
1221 	case mm_sh16_op:
1222 		reg = reg16to32st[insn.mm16_rb_format.rt];
1223 		goto storeHW;
1224 
1225 	case mm_sw16_op:
1226 		reg = reg16to32st[insn.mm16_rb_format.rt];
1227 		goto storeW;
1228 
1229 	case mm_lwsp16_op:
1230 		reg = insn.mm16_r5_format.rt;
1231 		goto loadW;
1232 
1233 	case mm_swsp16_op:
1234 		reg = insn.mm16_r5_format.rt;
1235 		goto storeW;
1236 
1237 	case mm_lwgp16_op:
1238 		reg = reg16to32[insn.mm16_r3_format.rt];
1239 		goto loadW;
1240 
1241 	default:
1242 		goto sigill;
1243 	}
1244 
1245 loadHW:
1246 	if (!access_ok(VERIFY_READ, addr, 2))
1247 		goto sigbus;
1248 
1249 	LoadHW(addr, value, res);
1250 	if (res)
1251 		goto fault;
1252 	regs->regs[reg] = value;
1253 	goto success;
1254 
1255 loadHWU:
1256 	if (!access_ok(VERIFY_READ, addr, 2))
1257 		goto sigbus;
1258 
1259 	LoadHWU(addr, value, res);
1260 	if (res)
1261 		goto fault;
1262 	regs->regs[reg] = value;
1263 	goto success;
1264 
1265 loadW:
1266 	if (!access_ok(VERIFY_READ, addr, 4))
1267 		goto sigbus;
1268 
1269 	LoadW(addr, value, res);
1270 	if (res)
1271 		goto fault;
1272 	regs->regs[reg] = value;
1273 	goto success;
1274 
1275 loadWU:
1276 #ifdef CONFIG_64BIT
1277 	/*
1278 	 * A 32-bit kernel might be running on a 64-bit processor.  But
1279 	 * if we're on a 32-bit processor and an i-cache incoherency
1280 	 * or race makes us see a 64-bit instruction here the sdl/sdr
1281 	 * would blow up, so for now we don't handle unaligned 64-bit
1282 	 * instructions on 32-bit kernels.
1283 	 */
1284 	if (!access_ok(VERIFY_READ, addr, 4))
1285 		goto sigbus;
1286 
1287 	LoadWU(addr, value, res);
1288 	if (res)
1289 		goto fault;
1290 	regs->regs[reg] = value;
1291 	goto success;
1292 #endif /* CONFIG_64BIT */
1293 
1294 	/* Cannot handle 64-bit instructions in 32-bit kernel */
1295 	goto sigill;
1296 
1297 loadDW:
1298 #ifdef CONFIG_64BIT
1299 	/*
1300 	 * A 32-bit kernel might be running on a 64-bit processor.  But
1301 	 * if we're on a 32-bit processor and an i-cache incoherency
1302 	 * or race makes us see a 64-bit instruction here the sdl/sdr
1303 	 * would blow up, so for now we don't handle unaligned 64-bit
1304 	 * instructions on 32-bit kernels.
1305 	 */
1306 	if (!access_ok(VERIFY_READ, addr, 8))
1307 		goto sigbus;
1308 
1309 	LoadDW(addr, value, res);
1310 	if (res)
1311 		goto fault;
1312 	regs->regs[reg] = value;
1313 	goto success;
1314 #endif /* CONFIG_64BIT */
1315 
1316 	/* Cannot handle 64-bit instructions in 32-bit kernel */
1317 	goto sigill;
1318 
1319 storeHW:
1320 	if (!access_ok(VERIFY_WRITE, addr, 2))
1321 		goto sigbus;
1322 
1323 	value = regs->regs[reg];
1324 	StoreHW(addr, value, res);
1325 	if (res)
1326 		goto fault;
1327 	goto success;
1328 
1329 storeW:
1330 	if (!access_ok(VERIFY_WRITE, addr, 4))
1331 		goto sigbus;
1332 
1333 	value = regs->regs[reg];
1334 	StoreW(addr, value, res);
1335 	if (res)
1336 		goto fault;
1337 	goto success;
1338 
1339 storeDW:
1340 #ifdef CONFIG_64BIT
1341 	/*
1342 	 * A 32-bit kernel might be running on a 64-bit processor.  But
1343 	 * if we're on a 32-bit processor and an i-cache incoherency
1344 	 * or race makes us see a 64-bit instruction here the sdl/sdr
1345 	 * would blow up, so for now we don't handle unaligned 64-bit
1346 	 * instructions on 32-bit kernels.
1347 	 */
1348 	if (!access_ok(VERIFY_WRITE, addr, 8))
1349 		goto sigbus;
1350 
1351 	value = regs->regs[reg];
1352 	StoreDW(addr, value, res);
1353 	if (res)
1354 		goto fault;
1355 	goto success;
1356 #endif /* CONFIG_64BIT */
1357 
1358 	/* Cannot handle 64-bit instructions in 32-bit kernel */
1359 	goto sigill;
1360 
1361 success:
1362 	regs->cp0_epc = contpc;	/* advance or branch */
1363 
1364 #ifdef CONFIG_DEBUG_FS
1365 	unaligned_instructions++;
1366 #endif
1367 	return;
1368 
1369 fault:
1370 	/* roll back jump/branch */
1371 	regs->cp0_epc = origpc;
1372 	regs->regs[31] = orig31;
1373 	/* Did we have an exception handler installed? */
1374 	if (fixup_exception(regs))
1375 		return;
1376 
1377 	die_if_kernel("Unhandled kernel unaligned access", regs);
1378 	force_sig(SIGSEGV, current);
1379 
1380 	return;
1381 
1382 sigbus:
1383 	die_if_kernel("Unhandled kernel unaligned access", regs);
1384 	force_sig(SIGBUS, current);
1385 
1386 	return;
1387 
1388 sigill:
1389 	die_if_kernel("Unhandled kernel unaligned access or invalid instruction",
1390 		      regs);
1391 	force_sig(SIGILL, current);
1392 }
1393 
1394 static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
1395 {
1396 	unsigned long value;
1397 	unsigned int res;
1398 	int reg;
1399 	unsigned long orig31;
1400 	u16 __user *pc16;
1401 	unsigned long origpc;
1402 	union mips16e_instruction mips16inst, oldinst;
1403 
1404 	origpc = regs->cp0_epc;
1405 	orig31 = regs->regs[31];
1406 	pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
1407 	/*
1408 	 * This load never faults.
1409 	 */
1410 	__get_user(mips16inst.full, pc16);
1411 	oldinst = mips16inst;
1412 
1413 	/* skip EXTEND instruction */
1414 	if (mips16inst.ri.opcode == MIPS16e_extend_op) {
1415 		pc16++;
1416 		__get_user(mips16inst.full, pc16);
1417 	} else if (delay_slot(regs)) {
1418 		/*  skip jump instructions */
1419 		/*  JAL/JALX are 32 bits but have OPCODE in first short int */
1420 		if (mips16inst.ri.opcode == MIPS16e_jal_op)
1421 			pc16++;
1422 		pc16++;
1423 		if (get_user(mips16inst.full, pc16))
1424 			goto sigbus;
1425 	}
1426 
1427 	switch (mips16inst.ri.opcode) {
1428 	case MIPS16e_i64_op:	/* I64 or RI64 instruction */
1429 		switch (mips16inst.i64.func) {	/* I64/RI64 func field check */
1430 		case MIPS16e_ldpc_func:
1431 		case MIPS16e_ldsp_func:
1432 			reg = reg16to32[mips16inst.ri64.ry];
1433 			goto loadDW;
1434 
1435 		case MIPS16e_sdsp_func:
1436 			reg = reg16to32[mips16inst.ri64.ry];
1437 			goto writeDW;
1438 
1439 		case MIPS16e_sdrasp_func:
1440 			reg = 29;	/* GPRSP */
1441 			goto writeDW;
1442 		}
1443 
1444 		goto sigbus;
1445 
1446 	case MIPS16e_swsp_op:
1447 	case MIPS16e_lwpc_op:
1448 	case MIPS16e_lwsp_op:
1449 		reg = reg16to32[mips16inst.ri.rx];
1450 		break;
1451 
1452 	case MIPS16e_i8_op:
1453 		if (mips16inst.i8.func != MIPS16e_swrasp_func)
1454 			goto sigbus;
1455 		reg = 29;	/* GPRSP */
1456 		break;
1457 
1458 	default:
1459 		reg = reg16to32[mips16inst.rri.ry];
1460 		break;
1461 	}
1462 
1463 	switch (mips16inst.ri.opcode) {
1464 
1465 	case MIPS16e_lb_op:
1466 	case MIPS16e_lbu_op:
1467 	case MIPS16e_sb_op:
1468 		goto sigbus;
1469 
1470 	case MIPS16e_lh_op:
1471 		if (!access_ok(VERIFY_READ, addr, 2))
1472 			goto sigbus;
1473 
1474 		LoadHW(addr, value, res);
1475 		if (res)
1476 			goto fault;
1477 		MIPS16e_compute_return_epc(regs, &oldinst);
1478 		regs->regs[reg] = value;
1479 		break;
1480 
1481 	case MIPS16e_lhu_op:
1482 		if (!access_ok(VERIFY_READ, addr, 2))
1483 			goto sigbus;
1484 
1485 		LoadHWU(addr, value, res);
1486 		if (res)
1487 			goto fault;
1488 		MIPS16e_compute_return_epc(regs, &oldinst);
1489 		regs->regs[reg] = value;
1490 		break;
1491 
1492 	case MIPS16e_lw_op:
1493 	case MIPS16e_lwpc_op:
1494 	case MIPS16e_lwsp_op:
1495 		if (!access_ok(VERIFY_READ, addr, 4))
1496 			goto sigbus;
1497 
1498 		LoadW(addr, value, res);
1499 		if (res)
1500 			goto fault;
1501 		MIPS16e_compute_return_epc(regs, &oldinst);
1502 		regs->regs[reg] = value;
1503 		break;
1504 
1505 	case MIPS16e_lwu_op:
1506 #ifdef CONFIG_64BIT
1507 		/*
1508 		 * A 32-bit kernel might be running on a 64-bit processor.  But
1509 		 * if we're on a 32-bit processor and an i-cache incoherency
1510 		 * or race makes us see a 64-bit instruction here the sdl/sdr
1511 		 * would blow up, so for now we don't handle unaligned 64-bit
1512 		 * instructions on 32-bit kernels.
1513 		 */
1514 		if (!access_ok(VERIFY_READ, addr, 4))
1515 			goto sigbus;
1516 
1517 		LoadWU(addr, value, res);
1518 		if (res)
1519 			goto fault;
1520 		MIPS16e_compute_return_epc(regs, &oldinst);
1521 		regs->regs[reg] = value;
1522 		break;
1523 #endif /* CONFIG_64BIT */
1524 
1525 		/* Cannot handle 64-bit instructions in 32-bit kernel */
1526 		goto sigill;
1527 
1528 	case MIPS16e_ld_op:
1529 loadDW:
1530 #ifdef CONFIG_64BIT
1531 		/*
1532 		 * A 32-bit kernel might be running on a 64-bit processor.  But
1533 		 * if we're on a 32-bit processor and an i-cache incoherency
1534 		 * or race makes us see a 64-bit instruction here the sdl/sdr
1535 		 * would blow up, so for now we don't handle unaligned 64-bit
1536 		 * instructions on 32-bit kernels.
1537 		 */
1538 		if (!access_ok(VERIFY_READ, addr, 8))
1539 			goto sigbus;
1540 
1541 		LoadDW(addr, value, res);
1542 		if (res)
1543 			goto fault;
1544 		MIPS16e_compute_return_epc(regs, &oldinst);
1545 		regs->regs[reg] = value;
1546 		break;
1547 #endif /* CONFIG_64BIT */
1548 
1549 		/* Cannot handle 64-bit instructions in 32-bit kernel */
1550 		goto sigill;
1551 
1552 	case MIPS16e_sh_op:
1553 		if (!access_ok(VERIFY_WRITE, addr, 2))
1554 			goto sigbus;
1555 
1556 		MIPS16e_compute_return_epc(regs, &oldinst);
1557 		value = regs->regs[reg];
1558 		StoreHW(addr, value, res);
1559 		if (res)
1560 			goto fault;
1561 		break;
1562 
1563 	case MIPS16e_sw_op:
1564 	case MIPS16e_swsp_op:
1565 	case MIPS16e_i8_op:	/* actually - MIPS16e_swrasp_func */
1566 		if (!access_ok(VERIFY_WRITE, addr, 4))
1567 			goto sigbus;
1568 
1569 		MIPS16e_compute_return_epc(regs, &oldinst);
1570 		value = regs->regs[reg];
1571 		StoreW(addr, value, res);
1572 		if (res)
1573 			goto fault;
1574 		break;
1575 
1576 	case MIPS16e_sd_op:
1577 writeDW:
1578 #ifdef CONFIG_64BIT
1579 		/*
1580 		 * A 32-bit kernel might be running on a 64-bit processor.  But
1581 		 * if we're on a 32-bit processor and an i-cache incoherency
1582 		 * or race makes us see a 64-bit instruction here the sdl/sdr
1583 		 * would blow up, so for now we don't handle unaligned 64-bit
1584 		 * instructions on 32-bit kernels.
1585 		 */
1586 		if (!access_ok(VERIFY_WRITE, addr, 8))
1587 			goto sigbus;
1588 
1589 		MIPS16e_compute_return_epc(regs, &oldinst);
1590 		value = regs->regs[reg];
1591 		StoreDW(addr, value, res);
1592 		if (res)
1593 			goto fault;
1594 		break;
1595 #endif /* CONFIG_64BIT */
1596 
1597 		/* Cannot handle 64-bit instructions in 32-bit kernel */
1598 		goto sigill;
1599 
1600 	default:
1601 		/*
1602 		 * Pheeee...  We encountered an as yet unknown instruction or
1603 		 * cache coherence problem.  Die sucker, die ...
1604 		 */
1605 		goto sigill;
1606 	}
1607 
1608 #ifdef CONFIG_DEBUG_FS
1609 	unaligned_instructions++;
1610 #endif
1611 
1612 	return;
1613 
1614 fault:
1615 	/* roll back jump/branch */
1616 	regs->cp0_epc = origpc;
1617 	regs->regs[31] = orig31;
1618 	/* Did we have an exception handler installed? */
1619 	if (fixup_exception(regs))
1620 		return;
1621 
1622 	die_if_kernel("Unhandled kernel unaligned access", regs);
1623 	force_sig(SIGSEGV, current);
1624 
1625 	return;
1626 
1627 sigbus:
1628 	die_if_kernel("Unhandled kernel unaligned access", regs);
1629 	force_sig(SIGBUS, current);
1630 
1631 	return;
1632 
1633 sigill:
1634 	die_if_kernel("Unhandled kernel unaligned access or invalid instruction",
1635 		      regs);
1636 	force_sig(SIGILL, current);
1637 }
1638 
1639 asmlinkage void do_ade(struct pt_regs *regs)
1640 {
1641 	enum ctx_state prev_state;
1642 	unsigned int __user *pc;
1643 	mm_segment_t seg;
1644 
1645 	prev_state = exception_enter();
1646 	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
1647 			1, regs, regs->cp0_badvaddr);
1648 	/*
1649 	 * Did we catch a fault trying to load an instruction?
1650 	 */
1651 	if (regs->cp0_badvaddr == regs->cp0_epc)
1652 		goto sigbus;
1653 
1654 	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
1655 		goto sigbus;
1656 	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
1657 		goto sigbus;
1658 
1659 	/*
1660 	 * Do branch emulation only if we didn't forward the exception.
1661 	 * This is all so very ugly ...
1662 	 */
1663 
1664 	/*
1665 	 * Are we running in microMIPS mode?
1666 	 */
1667 	if (get_isa16_mode(regs->cp0_epc)) {
1668 		/*
1669 		 * Did we catch a fault trying to load an instruction in
1670 		 * 16-bit mode?
1671 		 */
1672 		if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
1673 			goto sigbus;
1674 		if (unaligned_action == UNALIGNED_ACTION_SHOW)
1675 			show_registers(regs);
1676 
1677 		if (cpu_has_mmips) {
1678 			seg = get_fs();
1679 			if (!user_mode(regs))
1680 				set_fs(KERNEL_DS);
1681 			emulate_load_store_microMIPS(regs,
1682 				(void __user *)regs->cp0_badvaddr);
1683 			set_fs(seg);
1684 
1685 			return;
1686 		}
1687 
1688 		if (cpu_has_mips16) {
1689 			seg = get_fs();
1690 			if (!user_mode(regs))
1691 				set_fs(KERNEL_DS);
1692 			emulate_load_store_MIPS16e(regs,
1693 				(void __user *)regs->cp0_badvaddr);
1694 			set_fs(seg);
1695 
1696 			return;
1697 		}
1698 
1699 		goto sigbus;
1700 	}
1701 
1702 	if (unaligned_action == UNALIGNED_ACTION_SHOW)
1703 		show_registers(regs);
1704 	pc = (unsigned int __user *)exception_epc(regs);
1705 
1706 	seg = get_fs();
1707 	if (!user_mode(regs))
1708 		set_fs(KERNEL_DS);
1709 	emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
1710 	set_fs(seg);
1711 
1712 	return;
1713 
1714 sigbus:
1715 	die_if_kernel("Kernel unaligned instruction access", regs);
1716 	force_sig(SIGBUS, current);
1717 
1718 	/*
1719 	 * XXX On return from the signal handler we should advance the epc
1720 	 */
1721 	exception_exit(prev_state);
1722 }
1723 
1724 #ifdef CONFIG_DEBUG_FS
1725 extern struct dentry *mips_debugfs_dir;
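/*
 * Expose the emulation statistics and the action knob via debugfs.
 * Assuming debugfs is mounted at /sys/kernel/debug and mips_debugfs_dir
 * is the "mips" directory there, usage from the shell looks roughly like:
 *
 *	cat /sys/kernel/debug/mips/unaligned_instructions
 *	echo 1 > /sys/kernel/debug/mips/unaligned_action
 *
 * where the action value follows the enum above (0 = QUIET, 1 = SIGNAL,
 * 2 = SHOW).
 */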
1726 static int __init debugfs_unaligned(void)
1727 {
1728 	struct dentry *d;
1729 
1730 	if (!mips_debugfs_dir)
1731 		return -ENODEV;
1732 	d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
1733 			       mips_debugfs_dir, &unaligned_instructions);
1734 	if (!d)
1735 		return -ENOMEM;
1736 	d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
1737 			       mips_debugfs_dir, &unaligned_action);
1738 	if (!d)
1739 		return -ENOMEM;
1740 	return 0;
1741 }
1742 __initcall(debugfs_unaligned);
1743 #endif
1744