xref: /openbmc/linux/arch/mips/kernel/unaligned.c (revision 6774def6)
1 /*
2  * Handle unaligned accesses by emulation.
3  *
4  * This file is subject to the terms and conditions of the GNU General Public
5  * License.  See the file "COPYING" in the main directory of this archive
6  * for more details.
7  *
8  * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
9  * Copyright (C) 1999 Silicon Graphics, Inc.
10  * Copyright (C) 2014 Imagination Technologies Ltd.
11  *
12  * This file contains the exception handler for the address error exception,
13  * with the special capability to execute faulting instructions in software.
14  * The handler does not try to handle the case when the program counter
15  * points to an address not aligned to a word boundary.
16  *
17  * Putting data at unaligned addresses is bad practice even on Intel, where
18  * only performance is affected.  Much worse is that such code is not
19  * portable.  Because several programs die on MIPS due to alignment
20  * problems, I decided to implement this handler anyway, though I originally
21  * didn't intend to do this at all for user code.
22  *
23  * For now I enable fixing of address errors by default to make life easier.
24  * However, I intend to disable this at some point in the future, once the
25  * alignment problems in user programs have been fixed.  For programmers this
26  * is the right way to go.
27  *
28  * Fixing address errors is a per-process option.  The option is inherited
29  * across fork(2) and execve(2) calls.  If you really want to use this
30  * option in your user programs - I strongly discourage relying on the
31  * software emulation - use the following code in your userland stuff:
32  *
33  * #include <sys/sysmips.h>
34  *
35  * ...
36  * sysmips(MIPS_FIXADE, x);
37  * ...
38  *
39  * The argument x is 0 to disable software emulation; any other value enables it.
40  *
41  * Below is a little program to play around with this feature.
42  *
43  * #include <stdio.h>
 * #include <stdlib.h>
44  * #include <sys/sysmips.h>
45  *
46  * struct foo {
47  *	   unsigned char bar[8];
48  * };
49  *
50  * int main(int argc, char *argv[])
51  * {
52  *	   struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
53  *	   unsigned int *p = (unsigned int *) (x.bar + 3);
54  *	   int i;
55  *
56  *	   if (argc > 1)
57  *		   sysmips(MIPS_FIXADE, atoi(argv[1]));
58  *
59  *	   printf("*p = %08x\n", *p);
60  *
61  *	   *p = 0xdeadface;
62  *
63  *	   for (i = 0; i <= 7; i++)
64  *		   printf("%02x ", x.bar[i]);
65  *	   printf("\n");
66  * }
67  *
68  * Coprocessor loads are not supported; I think this case is unimportant
69  * in practice.
70  *
71  * TODO: Handle ndc (attempted store to doubleword in uncached memory)
72  *	 exception for the R6000.
73  *	 A store crossing a page boundary might be executed only partially.
74  *	 Undo the partial store in this case.
75  */
76 #include <linux/context_tracking.h>
77 #include <linux/mm.h>
78 #include <linux/signal.h>
79 #include <linux/smp.h>
80 #include <linux/sched.h>
81 #include <linux/debugfs.h>
82 #include <linux/perf_event.h>
83 
84 #include <asm/asm.h>
85 #include <asm/branch.h>
86 #include <asm/byteorder.h>
87 #include <asm/cop2.h>
88 #include <asm/fpu.h>
89 #include <asm/fpu_emulator.h>
90 #include <asm/inst.h>
91 #include <asm/uaccess.h>
94 
95 #define STR(x)	__STR(x)
96 #define __STR(x)  #x
97 
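/*
 * Action taken on an unaligned access, selectable at run time through
 * debugfs when CONFIG_DEBUG_FS is enabled (see debugfs_unaligned() at
 * the bottom of this file):  QUIET silently emulates the access,
 * SIGNAL forwards a SIGBUS instead of emulating, and SHOW dumps the
 * registers before emulating.
 */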
98 enum {
99 	UNALIGNED_ACTION_QUIET,
100 	UNALIGNED_ACTION_SIGNAL,
101 	UNALIGNED_ACTION_SHOW,
102 };
103 #ifdef CONFIG_DEBUG_FS
104 static u32 unaligned_instructions;
105 static u32 unaligned_action;
106 #else
107 #define unaligned_action UNALIGNED_ACTION_QUIET
108 #endif
109 extern void show_registers(struct pt_regs *regs);
110 
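/*
 * The LoadHW/LoadW/LoadHWU/LoadWU/LoadDW and StoreHW/StoreW/StoreDW
 * macros below emulate a single unaligned access with a pair of
 * partial accesses that the hardware accepts at any alignment
 * (lwl/lwr, ldl/ldr, swl/swr, sdl/sdr, or plain byte accesses for
 * halfwords).  Each accessor carries an __ex_table entry, so a fault
 * inside the macro branches to the local fixup code which stores
 * -EFAULT in "res" instead of killing the access outright; on success
 * "res" is 0.  A typical (illustrative) call sequence looks like:
 *
 *	unsigned long value;
 *	unsigned int res;
 *
 *	LoadW(addr, value, res);
 *	if (res)
 *		goto fault;
 *
 * Big- and little-endian variants are provided because the byte
 * offsets used by the partial accesses depend on the endianness.
 */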
111 #ifdef __BIG_ENDIAN
112 #define     LoadHW(addr, value, res)  \
113 		__asm__ __volatile__ (".set\tnoat\n"        \
114 			"1:\t"user_lb("%0", "0(%2)")"\n"    \
115 			"2:\t"user_lbu("$1", "1(%2)")"\n\t" \
116 			"sll\t%0, 0x8\n\t"                  \
117 			"or\t%0, $1\n\t"                    \
118 			"li\t%1, 0\n"                       \
119 			"3:\t.set\tat\n\t"                  \
120 			".insn\n\t"                         \
121 			".section\t.fixup,\"ax\"\n\t"       \
122 			"4:\tli\t%1, %3\n\t"                \
123 			"j\t3b\n\t"                         \
124 			".previous\n\t"                     \
125 			".section\t__ex_table,\"a\"\n\t"    \
126 			STR(PTR)"\t1b, 4b\n\t"              \
127 			STR(PTR)"\t2b, 4b\n\t"              \
128 			".previous"                         \
129 			: "=&r" (value), "=r" (res)         \
130 			: "r" (addr), "i" (-EFAULT));
131 
132 #define     LoadW(addr, value, res)   \
133 		__asm__ __volatile__ (                      \
134 			"1:\t"user_lwl("%0", "(%2)")"\n"    \
135 			"2:\t"user_lwr("%0", "3(%2)")"\n\t" \
136 			"li\t%1, 0\n"                       \
137 			"3:\n\t"                            \
138 			".insn\n\t"                         \
139 			".section\t.fixup,\"ax\"\n\t"       \
140 			"4:\tli\t%1, %3\n\t"                \
141 			"j\t3b\n\t"                         \
142 			".previous\n\t"                     \
143 			".section\t__ex_table,\"a\"\n\t"    \
144 			STR(PTR)"\t1b, 4b\n\t"              \
145 			STR(PTR)"\t2b, 4b\n\t"              \
146 			".previous"                         \
147 			: "=&r" (value), "=r" (res)         \
148 			: "r" (addr), "i" (-EFAULT));
149 
150 #define     LoadHWU(addr, value, res) \
151 		__asm__ __volatile__ (                      \
152 			".set\tnoat\n"                      \
153 			"1:\t"user_lbu("%0", "0(%2)")"\n"   \
154 			"2:\t"user_lbu("$1", "1(%2)")"\n\t" \
155 			"sll\t%0, 0x8\n\t"                  \
156 			"or\t%0, $1\n\t"                    \
157 			"li\t%1, 0\n"                       \
158 			"3:\n\t"                            \
159 			".insn\n\t"                         \
160 			".set\tat\n\t"                      \
161 			".section\t.fixup,\"ax\"\n\t"       \
162 			"4:\tli\t%1, %3\n\t"                \
163 			"j\t3b\n\t"                         \
164 			".previous\n\t"                     \
165 			".section\t__ex_table,\"a\"\n\t"    \
166 			STR(PTR)"\t1b, 4b\n\t"              \
167 			STR(PTR)"\t2b, 4b\n\t"              \
168 			".previous"                         \
169 			: "=&r" (value), "=r" (res)         \
170 			: "r" (addr), "i" (-EFAULT));
171 
172 #define     LoadWU(addr, value, res)  \
173 		__asm__ __volatile__ (                      \
174 			"1:\t"user_lwl("%0", "(%2)")"\n"    \
175 			"2:\t"user_lwr("%0", "3(%2)")"\n\t" \
176 			"dsll\t%0, %0, 32\n\t"              \
177 			"dsrl\t%0, %0, 32\n\t"              \
178 			"li\t%1, 0\n"                       \
179 			"3:\n\t"                            \
180 			".insn\n\t"                         \
181 			"\t.section\t.fixup,\"ax\"\n\t"     \
182 			"4:\tli\t%1, %3\n\t"                \
183 			"j\t3b\n\t"                         \
184 			".previous\n\t"                     \
185 			".section\t__ex_table,\"a\"\n\t"    \
186 			STR(PTR)"\t1b, 4b\n\t"              \
187 			STR(PTR)"\t2b, 4b\n\t"              \
188 			".previous"                         \
189 			: "=&r" (value), "=r" (res)         \
190 			: "r" (addr), "i" (-EFAULT));
191 
192 #define     LoadDW(addr, value, res)  \
193 		__asm__ __volatile__ (                      \
194 			"1:\tldl\t%0, (%2)\n"               \
195 			"2:\tldr\t%0, 7(%2)\n\t"            \
196 			"li\t%1, 0\n"                       \
197 			"3:\n\t"                            \
198 			".insn\n\t"                         \
199 			"\t.section\t.fixup,\"ax\"\n\t"     \
200 			"4:\tli\t%1, %3\n\t"                \
201 			"j\t3b\n\t"                         \
202 			".previous\n\t"                     \
203 			".section\t__ex_table,\"a\"\n\t"    \
204 			STR(PTR)"\t1b, 4b\n\t"              \
205 			STR(PTR)"\t2b, 4b\n\t"              \
206 			".previous"                         \
207 			: "=&r" (value), "=r" (res)         \
208 			: "r" (addr), "i" (-EFAULT));
209 
210 #define     StoreHW(addr, value, res) \
211 		__asm__ __volatile__ (                      \
212 			".set\tnoat\n"                      \
213 			"1:\t"user_sb("%1", "1(%2)")"\n"    \
214 			"srl\t$1, %1, 0x8\n"                \
215 			"2:\t"user_sb("$1", "0(%2)")"\n"    \
216 			".set\tat\n\t"                      \
217 			"li\t%0, 0\n"                       \
218 			"3:\n\t"                            \
219 			".insn\n\t"                         \
220 			".section\t.fixup,\"ax\"\n\t"       \
221 			"4:\tli\t%0, %3\n\t"                \
222 			"j\t3b\n\t"                         \
223 			".previous\n\t"                     \
224 			".section\t__ex_table,\"a\"\n\t"    \
225 			STR(PTR)"\t1b, 4b\n\t"              \
226 			STR(PTR)"\t2b, 4b\n\t"              \
227 			".previous"                         \
228 			: "=r" (res)                        \
229 			: "r" (value), "r" (addr), "i" (-EFAULT));
230 
231 #define     StoreW(addr, value, res)  \
232 		__asm__ __volatile__ (                      \
233 			"1:\t"user_swl("%1", "(%2)")"\n"    \
234 			"2:\t"user_swr("%1", "3(%2)")"\n\t" \
235 			"li\t%0, 0\n"                       \
236 			"3:\n\t"                            \
237 			".insn\n\t"                         \
238 			".section\t.fixup,\"ax\"\n\t"       \
239 			"4:\tli\t%0, %3\n\t"                \
240 			"j\t3b\n\t"                         \
241 			".previous\n\t"                     \
242 			".section\t__ex_table,\"a\"\n\t"    \
243 			STR(PTR)"\t1b, 4b\n\t"              \
244 			STR(PTR)"\t2b, 4b\n\t"              \
245 			".previous"                         \
246 		: "=r" (res)                                \
247 		: "r" (value), "r" (addr), "i" (-EFAULT));
248 
249 #define     StoreDW(addr, value, res) \
250 		__asm__ __volatile__ (                      \
251 			"1:\tsdl\t%1,(%2)\n"                \
252 			"2:\tsdr\t%1, 7(%2)\n\t"            \
253 			"li\t%0, 0\n"                       \
254 			"3:\n\t"                            \
255 			".insn\n\t"                         \
256 			".section\t.fixup,\"ax\"\n\t"       \
257 			"4:\tli\t%0, %3\n\t"                \
258 			"j\t3b\n\t"                         \
259 			".previous\n\t"                     \
260 			".section\t__ex_table,\"a\"\n\t"    \
261 			STR(PTR)"\t1b, 4b\n\t"              \
262 			STR(PTR)"\t2b, 4b\n\t"              \
263 			".previous"                         \
264 		: "=r" (res)                                \
265 		: "r" (value), "r" (addr), "i" (-EFAULT));
266 #endif
267 
268 #ifdef __LITTLE_ENDIAN
269 #define     LoadHW(addr, value, res)  \
270 		__asm__ __volatile__ (".set\tnoat\n"        \
271 			"1:\t"user_lb("%0", "1(%2)")"\n"    \
272 			"2:\t"user_lbu("$1", "0(%2)")"\n\t" \
273 			"sll\t%0, 0x8\n\t"                  \
274 			"or\t%0, $1\n\t"                    \
275 			"li\t%1, 0\n"                       \
276 			"3:\t.set\tat\n\t"                  \
277 			".insn\n\t"                         \
278 			".section\t.fixup,\"ax\"\n\t"       \
279 			"4:\tli\t%1, %3\n\t"                \
280 			"j\t3b\n\t"                         \
281 			".previous\n\t"                     \
282 			".section\t__ex_table,\"a\"\n\t"    \
283 			STR(PTR)"\t1b, 4b\n\t"              \
284 			STR(PTR)"\t2b, 4b\n\t"              \
285 			".previous"                         \
286 			: "=&r" (value), "=r" (res)         \
287 			: "r" (addr), "i" (-EFAULT));
288 
289 #define     LoadW(addr, value, res)   \
290 		__asm__ __volatile__ (                      \
291 			"1:\t"user_lwl("%0", "3(%2)")"\n"   \
292 			"2:\t"user_lwr("%0", "(%2)")"\n\t"  \
293 			"li\t%1, 0\n"                       \
294 			"3:\n\t"                            \
295 			".insn\n\t"                         \
296 			".section\t.fixup,\"ax\"\n\t"       \
297 			"4:\tli\t%1, %3\n\t"                \
298 			"j\t3b\n\t"                         \
299 			".previous\n\t"                     \
300 			".section\t__ex_table,\"a\"\n\t"    \
301 			STR(PTR)"\t1b, 4b\n\t"              \
302 			STR(PTR)"\t2b, 4b\n\t"              \
303 			".previous"                         \
304 			: "=&r" (value), "=r" (res)         \
305 			: "r" (addr), "i" (-EFAULT));
306 
307 #define     LoadHWU(addr, value, res) \
308 		__asm__ __volatile__ (                      \
309 			".set\tnoat\n"                      \
310 			"1:\t"user_lbu("%0", "1(%2)")"\n"   \
311 			"2:\t"user_lbu("$1", "0(%2)")"\n\t" \
312 			"sll\t%0, 0x8\n\t"                  \
313 			"or\t%0, $1\n\t"                    \
314 			"li\t%1, 0\n"                       \
315 			"3:\n\t"                            \
316 			".insn\n\t"                         \
317 			".set\tat\n\t"                      \
318 			".section\t.fixup,\"ax\"\n\t"       \
319 			"4:\tli\t%1, %3\n\t"                \
320 			"j\t3b\n\t"                         \
321 			".previous\n\t"                     \
322 			".section\t__ex_table,\"a\"\n\t"    \
323 			STR(PTR)"\t1b, 4b\n\t"              \
324 			STR(PTR)"\t2b, 4b\n\t"              \
325 			".previous"                         \
326 			: "=&r" (value), "=r" (res)         \
327 			: "r" (addr), "i" (-EFAULT));
328 
329 #define     LoadWU(addr, value, res)  \
330 		__asm__ __volatile__ (                      \
331 			"1:\t"user_lwl("%0", "3(%2)")"\n"   \
332 			"2:\t"user_lwr("%0", "(%2)")"\n\t"  \
333 			"dsll\t%0, %0, 32\n\t"              \
334 			"dsrl\t%0, %0, 32\n\t"              \
335 			"li\t%1, 0\n"                       \
336 			"3:\n\t"                            \
337 			".insn\n\t"                         \
338 			"\t.section\t.fixup,\"ax\"\n\t"     \
339 			"4:\tli\t%1, %3\n\t"                \
340 			"j\t3b\n\t"                         \
341 			".previous\n\t"                     \
342 			".section\t__ex_table,\"a\"\n\t"    \
343 			STR(PTR)"\t1b, 4b\n\t"              \
344 			STR(PTR)"\t2b, 4b\n\t"              \
345 			".previous"                         \
346 			: "=&r" (value), "=r" (res)         \
347 			: "r" (addr), "i" (-EFAULT));
348 
349 #define     LoadDW(addr, value, res)  \
350 		__asm__ __volatile__ (                      \
351 			"1:\tldl\t%0, 7(%2)\n"              \
352 			"2:\tldr\t%0, (%2)\n\t"             \
353 			"li\t%1, 0\n"                       \
354 			"3:\n\t"                            \
355 			".insn\n\t"                         \
356 			"\t.section\t.fixup,\"ax\"\n\t"     \
357 			"4:\tli\t%1, %3\n\t"                \
358 			"j\t3b\n\t"                         \
359 			".previous\n\t"                     \
360 			".section\t__ex_table,\"a\"\n\t"    \
361 			STR(PTR)"\t1b, 4b\n\t"              \
362 			STR(PTR)"\t2b, 4b\n\t"              \
363 			".previous"                         \
364 			: "=&r" (value), "=r" (res)         \
365 			: "r" (addr), "i" (-EFAULT));
366 
367 #define     StoreHW(addr, value, res) \
368 		__asm__ __volatile__ (                      \
369 			".set\tnoat\n"                      \
370 			"1:\t"user_sb("%1", "0(%2)")"\n"    \
371 			"srl\t$1,%1, 0x8\n"                 \
372 			"2:\t"user_sb("$1", "1(%2)")"\n"    \
373 			".set\tat\n\t"                      \
374 			"li\t%0, 0\n"                       \
375 			"3:\n\t"                            \
376 			".insn\n\t"                         \
377 			".section\t.fixup,\"ax\"\n\t"       \
378 			"4:\tli\t%0, %3\n\t"                \
379 			"j\t3b\n\t"                         \
380 			".previous\n\t"                     \
381 			".section\t__ex_table,\"a\"\n\t"    \
382 			STR(PTR)"\t1b, 4b\n\t"              \
383 			STR(PTR)"\t2b, 4b\n\t"              \
384 			".previous"                         \
385 			: "=r" (res)                        \
386 			: "r" (value), "r" (addr), "i" (-EFAULT));
387 
388 #define     StoreW(addr, value, res)  \
389 		__asm__ __volatile__ (                      \
390 			"1:\t"user_swl("%1", "3(%2)")"\n"   \
391 			"2:\t"user_swr("%1", "(%2)")"\n\t"  \
392 			"li\t%0, 0\n"                       \
393 			"3:\n\t"                            \
394 			".insn\n\t"                         \
395 			".section\t.fixup,\"ax\"\n\t"       \
396 			"4:\tli\t%0, %3\n\t"                \
397 			"j\t3b\n\t"                         \
398 			".previous\n\t"                     \
399 			".section\t__ex_table,\"a\"\n\t"    \
400 			STR(PTR)"\t1b, 4b\n\t"              \
401 			STR(PTR)"\t2b, 4b\n\t"              \
402 			".previous"                         \
403 		: "=r" (res)                                \
404 		: "r" (value), "r" (addr), "i" (-EFAULT));
405 
406 #define     StoreDW(addr, value, res) \
407 		__asm__ __volatile__ (                      \
408 			"1:\tsdl\t%1, 7(%2)\n"              \
409 			"2:\tsdr\t%1, (%2)\n\t"             \
410 			"li\t%0, 0\n"                       \
411 			"3:\n\t"                            \
412 			".insn\n\t"                         \
413 			".section\t.fixup,\"ax\"\n\t"       \
414 			"4:\tli\t%0, %3\n\t"                \
415 			"j\t3b\n\t"                         \
416 			".previous\n\t"                     \
417 			".section\t__ex_table,\"a\"\n\t"    \
418 			STR(PTR)"\t1b, 4b\n\t"              \
419 			STR(PTR)"\t2b, 4b\n\t"              \
420 			".previous"                         \
421 		: "=r" (res)                                \
422 		: "r" (value), "r" (addr), "i" (-EFAULT));
423 #endif
424 
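/*
 * Emulate one classic MIPS32/MIPS64 (and, with CONFIG_EVA, EVA) load
 * or store that raised an address error:  decode the word at @pc,
 * range-check @addr with access_ok(), perform the access with the
 * Load/Store helpers above, write back the destination register and
 * advance the EPC via compute_return_epc() (which also handles branch
 * delay slots).  Unaligned FP accesses are handed to the FPU emulator
 * and COP2 accesses to the COP2 notifier chain.  On a fault the EPC
 * and $31 are rolled back so the exception fixup sees the original
 * state.
 */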
425 static void emulate_load_store_insn(struct pt_regs *regs,
426 	void __user *addr, unsigned int __user *pc)
427 {
428 	union mips_instruction insn;
429 	unsigned long value;
430 	unsigned int res;
431 	unsigned long origpc;
432 	unsigned long orig31;
433 	void __user *fault_addr = NULL;
434 #ifdef	CONFIG_EVA
435 	mm_segment_t seg;
436 #endif
437 	origpc = (unsigned long)pc;
438 	orig31 = regs->regs[31];
439 
440 	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
441 
442 	/*
443 	 * This load never faults.
444 	 */
445 	__get_user(insn.word, pc);
446 
447 	switch (insn.i_format.opcode) {
448 		/*
449 		 * These are instructions that a compiler doesn't generate.  We
450 		 * can therefore assume that the code is MIPS-aware and
451 		 * really buggy.  Emulating these instructions would break the
452 		 * semantics anyway.
453 		 */
454 	case ll_op:
455 	case lld_op:
456 	case sc_op:
457 	case scd_op:
458 
459 		/*
460 		 * For these instructions the only way to create an address
461 		 * error is an attempted access to kernel/supervisor address
462 		 * space.
463 		 */
464 	case ldl_op:
465 	case ldr_op:
466 	case lwl_op:
467 	case lwr_op:
468 	case sdl_op:
469 	case sdr_op:
470 	case swl_op:
471 	case swr_op:
472 	case lb_op:
473 	case lbu_op:
474 	case sb_op:
475 		goto sigbus;
476 
477 		/*
478 		 * The remaining opcodes are the ones that are really of
479 		 * interest.
480 		 */
481 #ifdef CONFIG_EVA
482 	case spec3_op:
483 		/*
484 		 * We can land here only when the kernel is accessing user
485 		 * memory, so we need to "switch" the address limit to user
486 		 * space so that the address checks work properly.
487 		 */
488 		seg = get_fs();
489 		set_fs(USER_DS);
490 		switch (insn.spec3_format.func) {
491 		case lhe_op:
492 			if (!access_ok(VERIFY_READ, addr, 2)) {
493 				set_fs(seg);
494 				goto sigbus;
495 			}
496 			LoadHW(addr, value, res);
497 			if (res) {
498 				set_fs(seg);
499 				goto fault;
500 			}
501 			compute_return_epc(regs);
502 			regs->regs[insn.spec3_format.rt] = value;
503 			break;
504 		case lwe_op:
505 			if (!access_ok(VERIFY_READ, addr, 4)) {
506 				set_fs(seg);
507 				goto sigbus;
508 			}
509 			LoadW(addr, value, res);
510 			if (res) {
511 				set_fs(seg);
512 				goto fault;
513 			}
514 			compute_return_epc(regs);
515 			regs->regs[insn.spec3_format.rt] = value;
516 			break;
517 		case lhue_op:
518 			if (!access_ok(VERIFY_READ, addr, 2)) {
519 				set_fs(seg);
520 				goto sigbus;
521 			}
522 			LoadHWU(addr, value, res);
523 			if (res) {
524 				set_fs(seg);
525 				goto fault;
526 			}
527 			compute_return_epc(regs);
528 			regs->regs[insn.spec3_format.rt] = value;
529 			break;
530 		case she_op:
531 			if (!access_ok(VERIFY_WRITE, addr, 2)) {
532 				set_fs(seg);
533 				goto sigbus;
534 			}
535 			compute_return_epc(regs);
536 			value = regs->regs[insn.spec3_format.rt];
537 			StoreHW(addr, value, res);
538 			if (res) {
539 				set_fs(seg);
540 				goto fault;
541 			}
542 			break;
543 		case swe_op:
544 			if (!access_ok(VERIFY_WRITE, addr, 4)) {
545 				set_fs(seg);
546 				goto sigbus;
547 			}
548 			compute_return_epc(regs);
549 			value = regs->regs[insn.spec3_format.rt];
550 			StoreW(addr, value, res);
551 			if (res) {
552 				set_fs(seg);
553 				goto fault;
554 			}
555 			break;
556 		default:
557 			set_fs(seg);
558 			goto sigill;
559 		}
560 		set_fs(seg);
561 		break;
562 #endif
563 	case lh_op:
564 		if (!access_ok(VERIFY_READ, addr, 2))
565 			goto sigbus;
566 
567 		LoadHW(addr, value, res);
568 		if (res)
569 			goto fault;
570 		compute_return_epc(regs);
571 		regs->regs[insn.i_format.rt] = value;
572 		break;
573 
574 	case lw_op:
575 		if (!access_ok(VERIFY_READ, addr, 4))
576 			goto sigbus;
577 
578 		LoadW(addr, value, res);
579 		if (res)
580 			goto fault;
581 		compute_return_epc(regs);
582 		regs->regs[insn.i_format.rt] = value;
583 		break;
584 
585 	case lhu_op:
586 		if (!access_ok(VERIFY_READ, addr, 2))
587 			goto sigbus;
588 
589 		LoadHWU(addr, value, res);
590 		if (res)
591 			goto fault;
592 		compute_return_epc(regs);
593 		regs->regs[insn.i_format.rt] = value;
594 		break;
595 
596 	case lwu_op:
597 #ifdef CONFIG_64BIT
598 		/*
599 		 * A 32-bit kernel might be running on a 64-bit processor.  But
600 		 * if we're on a 32-bit processor and an i-cache incoherency
601 		 * or race makes us see a 64-bit instruction here the sdl/sdr
602 		 * would blow up, so for now we don't handle unaligned 64-bit
603 		 * instructions on 32-bit kernels.
604 		 */
605 		if (!access_ok(VERIFY_READ, addr, 4))
606 			goto sigbus;
607 
608 		LoadWU(addr, value, res);
609 		if (res)
610 			goto fault;
611 		compute_return_epc(regs);
612 		regs->regs[insn.i_format.rt] = value;
613 		break;
614 #endif /* CONFIG_64BIT */
615 
616 		/* Cannot handle 64-bit instructions in 32-bit kernel */
617 		goto sigill;
618 
619 	case ld_op:
620 #ifdef CONFIG_64BIT
621 		/*
622 		 * A 32-bit kernel might be running on a 64-bit processor.  But
623 		 * if we're on a 32-bit processor and an i-cache incoherency
624 		 * or race makes us see a 64-bit instruction here the sdl/sdr
625 		 * would blow up, so for now we don't handle unaligned 64-bit
626 		 * instructions on 32-bit kernels.
627 		 */
628 		if (!access_ok(VERIFY_READ, addr, 8))
629 			goto sigbus;
630 
631 		LoadDW(addr, value, res);
632 		if (res)
633 			goto fault;
634 		compute_return_epc(regs);
635 		regs->regs[insn.i_format.rt] = value;
636 		break;
637 #endif /* CONFIG_64BIT */
638 
639 		/* Cannot handle 64-bit instructions in 32-bit kernel */
640 		goto sigill;
641 
642 	case sh_op:
643 		if (!access_ok(VERIFY_WRITE, addr, 2))
644 			goto sigbus;
645 
646 		compute_return_epc(regs);
647 		value = regs->regs[insn.i_format.rt];
648 		StoreHW(addr, value, res);
649 		if (res)
650 			goto fault;
651 		break;
652 
653 	case sw_op:
654 		if (!access_ok(VERIFY_WRITE, addr, 4))
655 			goto sigbus;
656 
657 		compute_return_epc(regs);
658 		value = regs->regs[insn.i_format.rt];
659 		StoreW(addr, value, res);
660 		if (res)
661 			goto fault;
662 		break;
663 
664 	case sd_op:
665 #ifdef CONFIG_64BIT
666 		/*
667 		 * A 32-bit kernel might be running on a 64-bit processor.  But
668 		 * if we're on a 32-bit processor and an i-cache incoherency
669 		 * or race makes us see a 64-bit instruction here the sdl/sdr
670 		 * would blow up, so for now we don't handle unaligned 64-bit
671 		 * instructions on 32-bit kernels.
672 		 */
673 		if (!access_ok(VERIFY_WRITE, addr, 8))
674 			goto sigbus;
675 
676 		compute_return_epc(regs);
677 		value = regs->regs[insn.i_format.rt];
678 		StoreDW(addr, value, res);
679 		if (res)
680 			goto fault;
681 		break;
682 #endif /* CONFIG_64BIT */
683 
684 		/* Cannot handle 64-bit instructions in 32-bit kernel */
685 		goto sigill;
686 
687 	case lwc1_op:
688 	case ldc1_op:
689 	case swc1_op:
690 	case sdc1_op:
691 		die_if_kernel("Unaligned FP access in kernel code", regs);
692 		BUG_ON(!used_math());
693 
694 		lose_fpu(1);	/* Save FPU state for the emulator. */
695 		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
696 					       &fault_addr);
697 		own_fpu(1);	/* Restore FPU state. */
698 
699 		/* Signal if something went wrong. */
700 		process_fpemu_return(res, fault_addr);
701 
702 		if (res == 0)
703 			break;
704 		return;
705 
706 	/*
707 	 * COP2 is available to the implementor for application-specific use.
708 	 * It's up to applications to register a notifier chain and do
709 	 * whatever they have to do, including possibly sending signals.
710 	 */
711 	case lwc2_op:
712 		cu2_notifier_call_chain(CU2_LWC2_OP, regs);
713 		break;
714 
715 	case ldc2_op:
716 		cu2_notifier_call_chain(CU2_LDC2_OP, regs);
717 		break;
718 
719 	case swc2_op:
720 		cu2_notifier_call_chain(CU2_SWC2_OP, regs);
721 		break;
722 
723 	case sdc2_op:
724 		cu2_notifier_call_chain(CU2_SDC2_OP, regs);
725 		break;
726 
727 	default:
728 		/*
729 		 * Pheeee...  We encountered an as yet unknown instruction or
730 		 * cache coherence problem.  Die sucker, die ...
731 		 */
732 		goto sigill;
733 	}
734 
735 #ifdef CONFIG_DEBUG_FS
736 	unaligned_instructions++;
737 #endif
738 
739 	return;
740 
741 fault:
742 	/* roll back jump/branch */
743 	regs->cp0_epc = origpc;
744 	regs->regs[31] = orig31;
745 	/* Did we have an exception handler installed? */
746 	if (fixup_exception(regs))
747 		return;
748 
749 	die_if_kernel("Unhandled kernel unaligned access", regs);
750 	force_sig(SIGSEGV, current);
751 
752 	return;
753 
754 sigbus:
755 	die_if_kernel("Unhandled kernel unaligned access", regs);
756 	force_sig(SIGBUS, current);
757 
758 	return;
759 
760 sigill:
761 	die_if_kernel
762 	    ("Unhandled kernel unaligned access or invalid instruction", regs);
763 	force_sig(SIGILL, current);
764 }
765 
766 /* Recode table from 16-bit register notation to 32-bit GPR. */
767 const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
768 
769 /* Recode table from 16-bit STORE register notation to 32-bit GPR. */
770 const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
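/*
 * Both tables map the 3-bit register fields used by MIPS16e and the
 * 16-bit microMIPS encodings onto full GPR numbers:  encodings 0-7
 * normally select $16, $17, $2..$7.  The store variant maps encoding
 * 0 to $0 instead, so that the compressed store forms can name the
 * zero register.
 */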
771 
772 static void emulate_load_store_microMIPS(struct pt_regs *regs,
773 					 void __user *addr)
774 {
775 	unsigned long value;
776 	unsigned int res;
777 	int i;
778 	unsigned int reg = 0, rvar;
779 	unsigned long orig31;
780 	u16 __user *pc16;
781 	u16 halfword;
782 	unsigned int word;
783 	unsigned long origpc, contpc;
784 	union mips_instruction insn;
785 	struct mm_decoded_insn mminsn;
786 	void __user *fault_addr = NULL;
787 
788 	origpc = regs->cp0_epc;
789 	orig31 = regs->regs[31];
790 
791 	mminsn.micro_mips_mode = 1;
792 
793 	/*
794 	 * This load never faults.
795 	 */
796 	pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
797 	__get_user(halfword, pc16);
798 	pc16++;
799 	contpc = regs->cp0_epc + 2;
800 	word = ((unsigned int)halfword << 16);
801 	mminsn.pc_inc = 2;
802 
803 	if (!mm_insn_16bit(halfword)) {
804 		__get_user(halfword, pc16);
805 		pc16++;
806 		contpc = regs->cp0_epc + 4;
807 		mminsn.pc_inc = 4;
808 		word |= halfword;
809 	}
810 	mminsn.insn = word;
811 
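	/*
	 * Also fetch the instruction following the one at EPC:  if the
	 * instruction at EPC turns out to be a branch or jump, the access
	 * that faulted was issued from its delay slot, so the instruction
	 * to emulate is the next one and mm_isBranchInstr() redirects
	 * contpc to the branch target.
	 */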
812 	if (get_user(halfword, pc16))
813 		goto fault;
814 	mminsn.next_pc_inc = 2;
815 	word = ((unsigned int)halfword << 16);
816 
817 	if (!mm_insn_16bit(halfword)) {
818 		pc16++;
819 		if (get_user(halfword, pc16))
820 			goto fault;
821 		mminsn.next_pc_inc = 4;
822 		word |= halfword;
823 	}
824 	mminsn.next_insn = word;
825 
826 	insn = (union mips_instruction)(mminsn.insn);
827 	if (mm_isBranchInstr(regs, mminsn, &contpc))
828 		insn = (union mips_instruction)(mminsn.next_insn);
829 
830 	/*  Parse instruction to find what to do */
831 
832 	switch (insn.mm_i_format.opcode) {
833 
834 	case mm_pool32a_op:
835 		switch (insn.mm_x_format.func) {
836 		case mm_lwxs_op:
837 			reg = insn.mm_x_format.rd;
838 			goto loadW;
839 		}
840 
841 		goto sigbus;
842 
843 	case mm_pool32b_op:
844 		switch (insn.mm_m_format.func) {
845 		case mm_lwp_func:
846 			reg = insn.mm_m_format.rd;
847 			if (reg == 31)
848 				goto sigbus;
849 
850 			if (!access_ok(VERIFY_READ, addr, 8))
851 				goto sigbus;
852 
853 			LoadW(addr, value, res);
854 			if (res)
855 				goto fault;
856 			regs->regs[reg] = value;
857 			addr += 4;
858 			LoadW(addr, value, res);
859 			if (res)
860 				goto fault;
861 			regs->regs[reg + 1] = value;
862 			goto success;
863 
864 		case mm_swp_func:
865 			reg = insn.mm_m_format.rd;
866 			if (reg == 31)
867 				goto sigbus;
868 
869 			if (!access_ok(VERIFY_WRITE, addr, 8))
870 				goto sigbus;
871 
872 			value = regs->regs[reg];
873 			StoreW(addr, value, res);
874 			if (res)
875 				goto fault;
876 			addr += 4;
877 			value = regs->regs[reg + 1];
878 			StoreW(addr, value, res);
879 			if (res)
880 				goto fault;
881 			goto success;
882 
883 		case mm_ldp_func:
884 #ifdef CONFIG_64BIT
885 			reg = insn.mm_m_format.rd;
886 			if (reg == 31)
887 				goto sigbus;
888 
889 			if (!access_ok(VERIFY_READ, addr, 16))
890 				goto sigbus;
891 
892 			LoadDW(addr, value, res);
893 			if (res)
894 				goto fault;
895 			regs->regs[reg] = value;
896 			addr += 8;
897 			LoadDW(addr, value, res);
898 			if (res)
899 				goto fault;
900 			regs->regs[reg + 1] = value;
901 			goto success;
902 #endif /* CONFIG_64BIT */
903 
904 			goto sigill;
905 
906 		case mm_sdp_func:
907 #ifdef CONFIG_64BIT
908 			reg = insn.mm_m_format.rd;
909 			if (reg == 31)
910 				goto sigbus;
911 
912 			if (!access_ok(VERIFY_WRITE, addr, 16))
913 				goto sigbus;
914 
915 			value = regs->regs[reg];
916 			StoreDW(addr, value, res);
917 			if (res)
918 				goto fault;
919 			addr += 8;
920 			value = regs->regs[reg + 1];
921 			StoreDW(addr, value, res);
922 			if (res)
923 				goto fault;
924 			goto success;
925 #endif /* CONFIG_64BIT */
926 
927 			goto sigill;
928 
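		/*
		 * For LWM32/SWM32 (and LDM/SDM below) the rd field encodes a
		 * register list rather than a single register:  the low four
		 * bits give the number of registers loaded/stored starting at
		 * $16, the value 9 means $16-$23 plus $30, and bit 4 adds $31
		 * to the list.
		 */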
929 		case mm_lwm32_func:
930 			reg = insn.mm_m_format.rd;
931 			rvar = reg & 0xf;
932 			if ((rvar > 9) || !reg)
933 				goto sigill;
934 			if (reg & 0x10) {
935 				if (!access_ok
936 				    (VERIFY_READ, addr, 4 * (rvar + 1)))
937 					goto sigbus;
938 			} else {
939 				if (!access_ok(VERIFY_READ, addr, 4 * rvar))
940 					goto sigbus;
941 			}
942 			if (rvar == 9)
943 				rvar = 8;
944 			for (i = 16; rvar; rvar--, i++) {
945 				LoadW(addr, value, res);
946 				if (res)
947 					goto fault;
948 				addr += 4;
949 				regs->regs[i] = value;
950 			}
951 			if ((reg & 0xf) == 9) {
952 				LoadW(addr, value, res);
953 				if (res)
954 					goto fault;
955 				addr += 4;
956 				regs->regs[30] = value;
957 			}
958 			if (reg & 0x10) {
959 				LoadW(addr, value, res);
960 				if (res)
961 					goto fault;
962 				regs->regs[31] = value;
963 			}
964 			goto success;
965 
966 		case mm_swm32_func:
967 			reg = insn.mm_m_format.rd;
968 			rvar = reg & 0xf;
969 			if ((rvar > 9) || !reg)
970 				goto sigill;
971 			if (reg & 0x10) {
972 				if (!access_ok
973 				    (VERIFY_WRITE, addr, 4 * (rvar + 1)))
974 					goto sigbus;
975 			} else {
976 				if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
977 					goto sigbus;
978 			}
979 			if (rvar == 9)
980 				rvar = 8;
981 			for (i = 16; rvar; rvar--, i++) {
982 				value = regs->regs[i];
983 				StoreW(addr, value, res);
984 				if (res)
985 					goto fault;
986 				addr += 4;
987 			}
988 			if ((reg & 0xf) == 9) {
989 				value = regs->regs[30];
990 				StoreW(addr, value, res);
991 				if (res)
992 					goto fault;
993 				addr += 4;
994 			}
995 			if (reg & 0x10) {
996 				value = regs->regs[31];
997 				StoreW(addr, value, res);
998 				if (res)
999 					goto fault;
1000 			}
1001 			goto success;
1002 
1003 		case mm_ldm_func:
1004 #ifdef CONFIG_64BIT
1005 			reg = insn.mm_m_format.rd;
1006 			rvar = reg & 0xf;
1007 			if ((rvar > 9) || !reg)
1008 				goto sigill;
1009 			if (reg & 0x10) {
1010 				if (!access_ok
1011 				    (VERIFY_READ, addr, 8 * (rvar + 1)))
1012 					goto sigbus;
1013 			} else {
1014 				if (!access_ok(VERIFY_READ, addr, 8 * rvar))
1015 					goto sigbus;
1016 			}
1017 			if (rvar == 9)
1018 				rvar = 8;
1019 
1020 			for (i = 16; rvar; rvar--, i++) {
1021 				LoadDW(addr, value, res);
1022 				if (res)
1023 					goto fault;
1024 				addr += 4;
1025 				regs->regs[i] = value;
1026 			}
1027 			if ((reg & 0xf) == 9) {
1028 				LoadDW(addr, value, res);
1029 				if (res)
1030 					goto fault;
1031 				addr += 8;
1032 				regs->regs[30] = value;
1033 			}
1034 			if (reg & 0x10) {
1035 				LoadDW(addr, value, res);
1036 				if (res)
1037 					goto fault;
1038 				regs->regs[31] = value;
1039 			}
1040 			goto success;
1041 #endif /* CONFIG_64BIT */
1042 
1043 			goto sigill;
1044 
1045 		case mm_sdm_func:
1046 #ifdef CONFIG_64BIT
1047 			reg = insn.mm_m_format.rd;
1048 			rvar = reg & 0xf;
1049 			if ((rvar > 9) || !reg)
1050 				goto sigill;
1051 			if (reg & 0x10) {
1052 				if (!access_ok
1053 				    (VERIFY_WRITE, addr, 8 * (rvar + 1)))
1054 					goto sigbus;
1055 			} else {
1056 				if (!access_ok(VERIFY_WRITE, addr, 8 * rvar))
1057 					goto sigbus;
1058 			}
1059 			if (rvar == 9)
1060 				rvar = 8;
1061 
1062 			for (i = 16; rvar; rvar--, i++) {
1063 				value = regs->regs[i];
1064 				StoreDW(addr, value, res);
1065 				if (res)
1066 					goto fault;
1067 				addr += 8;
1068 			}
1069 			if ((reg & 0xf) == 9) {
1070 				value = regs->regs[30];
1071 				StoreDW(addr, value, res);
1072 				if (res)
1073 					goto fault;
1074 				addr += 8;
1075 			}
1076 			if (reg & 0x10) {
1077 				value = regs->regs[31];
1078 				StoreDW(addr, value, res);
1079 				if (res)
1080 					goto fault;
1081 			}
1082 			goto success;
1083 #endif /* CONFIG_64BIT */
1084 
1085 			goto sigill;
1086 
1087 			/*  LWC2, SWC2, LDC2, SDC2 are not serviced */
1088 		}
1089 
1090 		goto sigbus;
1091 
1092 	case mm_pool32c_op:
1093 		switch (insn.mm_m_format.func) {
1094 		case mm_lwu_func:
1095 			reg = insn.mm_m_format.rd;
1096 			goto loadWU;
1097 		}
1098 
1099 		/*  LL,SC,LLD,SCD are not serviced */
1100 		goto sigbus;
1101 
1102 	case mm_pool32f_op:
1103 		switch (insn.mm_x_format.func) {
1104 		case mm_lwxc1_func:
1105 		case mm_swxc1_func:
1106 		case mm_ldxc1_func:
1107 		case mm_sdxc1_func:
1108 			goto fpu_emul;
1109 		}
1110 
1111 		goto sigbus;
1112 
1113 	case mm_ldc132_op:
1114 	case mm_sdc132_op:
1115 	case mm_lwc132_op:
1116 	case mm_swc132_op:
1117 fpu_emul:
1118 		/* roll back jump/branch */
1119 		regs->cp0_epc = origpc;
1120 		regs->regs[31] = orig31;
1121 
1122 		die_if_kernel("Unaligned FP access in kernel code", regs);
1123 		BUG_ON(!used_math());
1124 		BUG_ON(!is_fpu_owner());
1125 
1126 		lose_fpu(1);	/* save the FPU state for the emulator */
1127 		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
1128 					       &fault_addr);
1129 		own_fpu(1);	/* restore FPU state */
1130 
1131 		/* If something went wrong, signal */
1132 		process_fpemu_return(res, fault_addr);
1133 
1134 		if (res == 0)
1135 			goto success;
1136 		return;
1137 
1138 	case mm_lh32_op:
1139 		reg = insn.mm_i_format.rt;
1140 		goto loadHW;
1141 
1142 	case mm_lhu32_op:
1143 		reg = insn.mm_i_format.rt;
1144 		goto loadHWU;
1145 
1146 	case mm_lw32_op:
1147 		reg = insn.mm_i_format.rt;
1148 		goto loadW;
1149 
1150 	case mm_sh32_op:
1151 		reg = insn.mm_i_format.rt;
1152 		goto storeHW;
1153 
1154 	case mm_sw32_op:
1155 		reg = insn.mm_i_format.rt;
1156 		goto storeW;
1157 
1158 	case mm_ld32_op:
1159 		reg = insn.mm_i_format.rt;
1160 		goto loadDW;
1161 
1162 	case mm_sd32_op:
1163 		reg = insn.mm_i_format.rt;
1164 		goto storeDW;
1165 
1166 	case mm_pool16c_op:
1167 		switch (insn.mm16_m_format.func) {
1168 		case mm_lwm16_op:
1169 			reg = insn.mm16_m_format.rlist;
1170 			rvar = reg + 1;
1171 			if (!access_ok(VERIFY_READ, addr, 4 * rvar))
1172 				goto sigbus;
1173 
1174 			for (i = 16; rvar; rvar--, i++) {
1175 				LoadW(addr, value, res);
1176 				if (res)
1177 					goto fault;
1178 				addr += 4;
1179 				regs->regs[i] = value;
1180 			}
1181 			LoadW(addr, value, res);
1182 			if (res)
1183 				goto fault;
1184 			regs->regs[31] = value;
1185 
1186 			goto success;
1187 
1188 		case mm_swm16_op:
1189 			reg = insn.mm16_m_format.rlist;
1190 			rvar = reg + 1;
1191 			if (!access_ok(VERIFY_WRITE, addr, 4 * rvar))
1192 				goto sigbus;
1193 
1194 			for (i = 16; rvar; rvar--, i++) {
1195 				value = regs->regs[i];
1196 				StoreW(addr, value, res);
1197 				if (res)
1198 					goto fault;
1199 				addr += 4;
1200 			}
1201 			value = regs->regs[31];
1202 			StoreW(addr, value, res);
1203 			if (res)
1204 				goto fault;
1205 
1206 			goto success;
1207 
1208 		}
1209 
1210 		goto sigbus;
1211 
1212 	case mm_lhu16_op:
1213 		reg = reg16to32[insn.mm16_rb_format.rt];
1214 		goto loadHWU;
1215 
1216 	case mm_lw16_op:
1217 		reg = reg16to32[insn.mm16_rb_format.rt];
1218 		goto loadW;
1219 
1220 	case mm_sh16_op:
1221 		reg = reg16to32st[insn.mm16_rb_format.rt];
1222 		goto storeHW;
1223 
1224 	case mm_sw16_op:
1225 		reg = reg16to32st[insn.mm16_rb_format.rt];
1226 		goto storeW;
1227 
1228 	case mm_lwsp16_op:
1229 		reg = insn.mm16_r5_format.rt;
1230 		goto loadW;
1231 
1232 	case mm_swsp16_op:
1233 		reg = insn.mm16_r5_format.rt;
1234 		goto storeW;
1235 
1236 	case mm_lwgp16_op:
1237 		reg = reg16to32[insn.mm16_r3_format.rt];
1238 		goto loadW;
1239 
1240 	default:
1241 		goto sigill;
1242 	}
1243 
1244 loadHW:
1245 	if (!access_ok(VERIFY_READ, addr, 2))
1246 		goto sigbus;
1247 
1248 	LoadHW(addr, value, res);
1249 	if (res)
1250 		goto fault;
1251 	regs->regs[reg] = value;
1252 	goto success;
1253 
1254 loadHWU:
1255 	if (!access_ok(VERIFY_READ, addr, 2))
1256 		goto sigbus;
1257 
1258 	LoadHWU(addr, value, res);
1259 	if (res)
1260 		goto fault;
1261 	regs->regs[reg] = value;
1262 	goto success;
1263 
1264 loadW:
1265 	if (!access_ok(VERIFY_READ, addr, 4))
1266 		goto sigbus;
1267 
1268 	LoadW(addr, value, res);
1269 	if (res)
1270 		goto fault;
1271 	regs->regs[reg] = value;
1272 	goto success;
1273 
1274 loadWU:
1275 #ifdef CONFIG_64BIT
1276 	/*
1277 	 * A 32-bit kernel might be running on a 64-bit processor.  But
1278 	 * if we're on a 32-bit processor and an i-cache incoherency
1279 	 * or race makes us see a 64-bit instruction here the sdl/sdr
1280 	 * would blow up, so for now we don't handle unaligned 64-bit
1281 	 * instructions on 32-bit kernels.
1282 	 */
1283 	if (!access_ok(VERIFY_READ, addr, 4))
1284 		goto sigbus;
1285 
1286 	LoadWU(addr, value, res);
1287 	if (res)
1288 		goto fault;
1289 	regs->regs[reg] = value;
1290 	goto success;
1291 #endif /* CONFIG_64BIT */
1292 
1293 	/* Cannot handle 64-bit instructions in 32-bit kernel */
1294 	goto sigill;
1295 
1296 loadDW:
1297 #ifdef CONFIG_64BIT
1298 	/*
1299 	 * A 32-bit kernel might be running on a 64-bit processor.  But
1300 	 * if we're on a 32-bit processor and an i-cache incoherency
1301 	 * or race makes us see a 64-bit instruction here the sdl/sdr
1302 	 * would blow up, so for now we don't handle unaligned 64-bit
1303 	 * instructions on 32-bit kernels.
1304 	 */
1305 	if (!access_ok(VERIFY_READ, addr, 8))
1306 		goto sigbus;
1307 
1308 	LoadDW(addr, value, res);
1309 	if (res)
1310 		goto fault;
1311 	regs->regs[reg] = value;
1312 	goto success;
1313 #endif /* CONFIG_64BIT */
1314 
1315 	/* Cannot handle 64-bit instructions in 32-bit kernel */
1316 	goto sigill;
1317 
1318 storeHW:
1319 	if (!access_ok(VERIFY_WRITE, addr, 2))
1320 		goto sigbus;
1321 
1322 	value = regs->regs[reg];
1323 	StoreHW(addr, value, res);
1324 	if (res)
1325 		goto fault;
1326 	goto success;
1327 
1328 storeW:
1329 	if (!access_ok(VERIFY_WRITE, addr, 4))
1330 		goto sigbus;
1331 
1332 	value = regs->regs[reg];
1333 	StoreW(addr, value, res);
1334 	if (res)
1335 		goto fault;
1336 	goto success;
1337 
1338 storeDW:
1339 #ifdef CONFIG_64BIT
1340 	/*
1341 	 * A 32-bit kernel might be running on a 64-bit processor.  But
1342 	 * if we're on a 32-bit processor and an i-cache incoherency
1343 	 * or race makes us see a 64-bit instruction here the sdl/sdr
1344 	 * would blow up, so for now we don't handle unaligned 64-bit
1345 	 * instructions on 32-bit kernels.
1346 	 */
1347 	if (!access_ok(VERIFY_WRITE, addr, 8))
1348 		goto sigbus;
1349 
1350 	value = regs->regs[reg];
1351 	StoreDW(addr, value, res);
1352 	if (res)
1353 		goto fault;
1354 	goto success;
1355 #endif /* CONFIG_64BIT */
1356 
1357 	/* Cannot handle 64-bit instructions in 32-bit kernel */
1358 	goto sigill;
1359 
1360 success:
1361 	regs->cp0_epc = contpc;	/* advance or branch */
1362 
1363 #ifdef CONFIG_DEBUG_FS
1364 	unaligned_instructions++;
1365 #endif
1366 	return;
1367 
1368 fault:
1369 	/* roll back jump/branch */
1370 	regs->cp0_epc = origpc;
1371 	regs->regs[31] = orig31;
1372 	/* Did we have an exception handler installed? */
1373 	if (fixup_exception(regs))
1374 		return;
1375 
1376 	die_if_kernel("Unhandled kernel unaligned access", regs);
1377 	force_sig(SIGSEGV, current);
1378 
1379 	return;
1380 
1381 sigbus:
1382 	die_if_kernel("Unhandled kernel unaligned access", regs);
1383 	force_sig(SIGBUS, current);
1384 
1385 	return;
1386 
1387 sigill:
1388 	die_if_kernel
1389 	    ("Unhandled kernel unaligned access or invalid instruction", regs);
1390 	force_sig(SIGILL, current);
1391 }
1392 
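/*
 * Emulate a MIPS16e load or store.  The EXTEND prefix is skipped to
 * get at the real operation, and when the access was issued from a
 * branch delay slot the jump itself is skipped as well (JAL/JALX are
 * 32-bit, i.e. two halfwords).  MIPS16e_compute_return_epc() advances
 * the EPC once the access has succeeded.
 */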
1393 static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user *addr)
1394 {
1395 	unsigned long value;
1396 	unsigned int res;
1397 	int reg;
1398 	unsigned long orig31;
1399 	u16 __user *pc16;
1400 	unsigned long origpc;
1401 	union mips16e_instruction mips16inst, oldinst;
1402 
1403 	origpc = regs->cp0_epc;
1404 	orig31 = regs->regs[31];
1405 	pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
1406 	/*
1407 	 * This load never faults.
1408 	 */
1409 	__get_user(mips16inst.full, pc16);
1410 	oldinst = mips16inst;
1411 
1412 	/* skip EXTEND instruction */
1413 	if (mips16inst.ri.opcode == MIPS16e_extend_op) {
1414 		pc16++;
1415 		__get_user(mips16inst.full, pc16);
1416 	} else if (delay_slot(regs)) {
1417 		/*  skip jump instructions */
1418 		/*  JAL/JALX are 32 bits but have the opcode in the first halfword */
1419 		if (mips16inst.ri.opcode == MIPS16e_jal_op)
1420 			pc16++;
1421 		pc16++;
1422 		if (get_user(mips16inst.full, pc16))
1423 			goto sigbus;
1424 	}
1425 
1426 	switch (mips16inst.ri.opcode) {
1427 	case MIPS16e_i64_op:	/* I64 or RI64 instruction */
1428 		switch (mips16inst.i64.func) {	/* I64/RI64 func field check */
1429 		case MIPS16e_ldpc_func:
1430 		case MIPS16e_ldsp_func:
1431 			reg = reg16to32[mips16inst.ri64.ry];
1432 			goto loadDW;
1433 
1434 		case MIPS16e_sdsp_func:
1435 			reg = reg16to32[mips16inst.ri64.ry];
1436 			goto writeDW;
1437 
1438 		case MIPS16e_sdrasp_func:
1439 			reg = 29;	/* GPRSP */
1440 			goto writeDW;
1441 		}
1442 
1443 		goto sigbus;
1444 
1445 	case MIPS16e_swsp_op:
1446 	case MIPS16e_lwpc_op:
1447 	case MIPS16e_lwsp_op:
1448 		reg = reg16to32[mips16inst.ri.rx];
1449 		break;
1450 
1451 	case MIPS16e_i8_op:
1452 		if (mips16inst.i8.func != MIPS16e_swrasp_func)
1453 			goto sigbus;
1454 		reg = 29;	/* GPRSP */
1455 		break;
1456 
1457 	default:
1458 		reg = reg16to32[mips16inst.rri.ry];
1459 		break;
1460 	}
1461 
1462 	switch (mips16inst.ri.opcode) {
1463 
1464 	case MIPS16e_lb_op:
1465 	case MIPS16e_lbu_op:
1466 	case MIPS16e_sb_op:
1467 		goto sigbus;
1468 
1469 	case MIPS16e_lh_op:
1470 		if (!access_ok(VERIFY_READ, addr, 2))
1471 			goto sigbus;
1472 
1473 		LoadHW(addr, value, res);
1474 		if (res)
1475 			goto fault;
1476 		MIPS16e_compute_return_epc(regs, &oldinst);
1477 		regs->regs[reg] = value;
1478 		break;
1479 
1480 	case MIPS16e_lhu_op:
1481 		if (!access_ok(VERIFY_READ, addr, 2))
1482 			goto sigbus;
1483 
1484 		LoadHWU(addr, value, res);
1485 		if (res)
1486 			goto fault;
1487 		MIPS16e_compute_return_epc(regs, &oldinst);
1488 		regs->regs[reg] = value;
1489 		break;
1490 
1491 	case MIPS16e_lw_op:
1492 	case MIPS16e_lwpc_op:
1493 	case MIPS16e_lwsp_op:
1494 		if (!access_ok(VERIFY_READ, addr, 4))
1495 			goto sigbus;
1496 
1497 		LoadW(addr, value, res);
1498 		if (res)
1499 			goto fault;
1500 		MIPS16e_compute_return_epc(regs, &oldinst);
1501 		regs->regs[reg] = value;
1502 		break;
1503 
1504 	case MIPS16e_lwu_op:
1505 #ifdef CONFIG_64BIT
1506 		/*
1507 		 * A 32-bit kernel might be running on a 64-bit processor.  But
1508 		 * if we're on a 32-bit processor and an i-cache incoherency
1509 		 * or race makes us see a 64-bit instruction here the sdl/sdr
1510 		 * would blow up, so for now we don't handle unaligned 64-bit
1511 		 * instructions on 32-bit kernels.
1512 		 */
1513 		if (!access_ok(VERIFY_READ, addr, 4))
1514 			goto sigbus;
1515 
1516 		LoadWU(addr, value, res);
1517 		if (res)
1518 			goto fault;
1519 		MIPS16e_compute_return_epc(regs, &oldinst);
1520 		regs->regs[reg] = value;
1521 		break;
1522 #endif /* CONFIG_64BIT */
1523 
1524 		/* Cannot handle 64-bit instructions in 32-bit kernel */
1525 		goto sigill;
1526 
1527 	case MIPS16e_ld_op:
1528 loadDW:
1529 #ifdef CONFIG_64BIT
1530 		/*
1531 		 * A 32-bit kernel might be running on a 64-bit processor.  But
1532 		 * if we're on a 32-bit processor and an i-cache incoherency
1533 		 * or race makes us see a 64-bit instruction here the sdl/sdr
1534 		 * would blow up, so for now we don't handle unaligned 64-bit
1535 		 * instructions on 32-bit kernels.
1536 		 */
1537 		if (!access_ok(VERIFY_READ, addr, 8))
1538 			goto sigbus;
1539 
1540 		LoadDW(addr, value, res);
1541 		if (res)
1542 			goto fault;
1543 		MIPS16e_compute_return_epc(regs, &oldinst);
1544 		regs->regs[reg] = value;
1545 		break;
1546 #endif /* CONFIG_64BIT */
1547 
1548 		/* Cannot handle 64-bit instructions in 32-bit kernel */
1549 		goto sigill;
1550 
1551 	case MIPS16e_sh_op:
1552 		if (!access_ok(VERIFY_WRITE, addr, 2))
1553 			goto sigbus;
1554 
1555 		MIPS16e_compute_return_epc(regs, &oldinst);
1556 		value = regs->regs[reg];
1557 		StoreHW(addr, value, res);
1558 		if (res)
1559 			goto fault;
1560 		break;
1561 
1562 	case MIPS16e_sw_op:
1563 	case MIPS16e_swsp_op:
1564 	case MIPS16e_i8_op:	/* actually - MIPS16e_swrasp_func */
1565 		if (!access_ok(VERIFY_WRITE, addr, 4))
1566 			goto sigbus;
1567 
1568 		MIPS16e_compute_return_epc(regs, &oldinst);
1569 		value = regs->regs[reg];
1570 		StoreW(addr, value, res);
1571 		if (res)
1572 			goto fault;
1573 		break;
1574 
1575 	case MIPS16e_sd_op:
1576 writeDW:
1577 #ifdef CONFIG_64BIT
1578 		/*
1579 		 * A 32-bit kernel might be running on a 64-bit processor.  But
1580 		 * if we're on a 32-bit processor and an i-cache incoherency
1581 		 * or race makes us see a 64-bit instruction here the sdl/sdr
1582 		 * would blow up, so for now we don't handle unaligned 64-bit
1583 		 * instructions on 32-bit kernels.
1584 		 */
1585 		if (!access_ok(VERIFY_WRITE, addr, 8))
1586 			goto sigbus;
1587 
1588 		MIPS16e_compute_return_epc(regs, &oldinst);
1589 		value = regs->regs[reg];
1590 		StoreDW(addr, value, res);
1591 		if (res)
1592 			goto fault;
1593 		break;
1594 #endif /* CONFIG_64BIT */
1595 
1596 		/* Cannot handle 64-bit instructions in 32-bit kernel */
1597 		goto sigill;
1598 
1599 	default:
1600 		/*
1601 		 * Pheeee...  We encountered an as yet unknown instruction or
1602 		 * cache coherence problem.  Die sucker, die ...
1603 		 */
1604 		goto sigill;
1605 	}
1606 
1607 #ifdef CONFIG_DEBUG_FS
1608 	unaligned_instructions++;
1609 #endif
1610 
1611 	return;
1612 
1613 fault:
1614 	/* roll back jump/branch */
1615 	regs->cp0_epc = origpc;
1616 	regs->regs[31] = orig31;
1617 	/* Did we have an exception handler installed? */
1618 	if (fixup_exception(regs))
1619 		return;
1620 
1621 	die_if_kernel("Unhandled kernel unaligned access", regs);
1622 	force_sig(SIGSEGV, current);
1623 
1624 	return;
1625 
1626 sigbus:
1627 	die_if_kernel("Unhandled kernel unaligned access", regs);
1628 	force_sig(SIGBUS, current);
1629 
1630 	return;
1631 
1632 sigill:
1633 	die_if_kernel
1634 	    ("Unhandled kernel unaligned access or invalid instruction", regs);
1635 	force_sig(SIGILL, current);
1636 }
1637 
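/*
 * Address error exception entry point.  Decide whether the fault may
 * be fixed up at all (instruction fetch faults, user tasks without
 * TIF_FIXADE and the UNALIGNED_ACTION_SIGNAL policy all get a plain
 * SIGBUS), then hand the faulting access to the microMIPS, MIPS16e or
 * classic emulator.  For kernel-mode faults the address limit is
 * temporarily widened to KERNEL_DS so that the user-access macros in
 * the emulators may touch kernel addresses.
 */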
1638 asmlinkage void do_ade(struct pt_regs *regs)
1639 {
1640 	enum ctx_state prev_state;
1641 	unsigned int __user *pc;
1642 	mm_segment_t seg;
1643 
1644 	prev_state = exception_enter();
1645 	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
1646 			1, regs, regs->cp0_badvaddr);
1647 	/*
1648 	 * Did we catch a fault trying to load an instruction?
1649 	 */
1650 	if (regs->cp0_badvaddr == regs->cp0_epc)
1651 		goto sigbus;
1652 
1653 	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
1654 		goto sigbus;
1655 	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
1656 		goto sigbus;
1657 
1658 	/*
1659 	 * Do branch emulation only if we didn't forward the exception.
1660 	 * This is all so very ugly ...
1661 	 */
1662 
1663 	/*
1664 	 * Are we running in microMIPS mode?
1665 	 */
1666 	if (get_isa16_mode(regs->cp0_epc)) {
1667 		/*
1668 		 * Did we catch a fault trying to load an instruction in
1669 		 * 16-bit mode?
1670 		 */
1671 		if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
1672 			goto sigbus;
1673 		if (unaligned_action == UNALIGNED_ACTION_SHOW)
1674 			show_registers(regs);
1675 
1676 		if (cpu_has_mmips) {
1677 			seg = get_fs();
1678 			if (!user_mode(regs))
1679 				set_fs(KERNEL_DS);
1680 			emulate_load_store_microMIPS(regs,
1681 				(void __user *)regs->cp0_badvaddr);
1682 			set_fs(seg);
1683 
1684 			return;
1685 		}
1686 
1687 		if (cpu_has_mips16) {
1688 			seg = get_fs();
1689 			if (!user_mode(regs))
1690 				set_fs(KERNEL_DS);
1691 			emulate_load_store_MIPS16e(regs,
1692 				(void __user *)regs->cp0_badvaddr);
1693 			set_fs(seg);
1694 
1695 			return;
1696 		}
1697 
1698 		goto sigbus;
1699 	}
1700 
1701 	if (unaligned_action == UNALIGNED_ACTION_SHOW)
1702 		show_registers(regs);
1703 	pc = (unsigned int __user *)exception_epc(regs);
1704 
1705 	seg = get_fs();
1706 	if (!user_mode(regs))
1707 		set_fs(KERNEL_DS);
1708 	emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
1709 	set_fs(seg);
1710 
1711 	return;
1712 
1713 sigbus:
1714 	die_if_kernel("Kernel unaligned instruction access", regs);
1715 	force_sig(SIGBUS, current);
1716 
1717 	/*
1718 	 * XXX On return from the signal handler we should advance the epc
1719 	 */
1720 	exception_exit(prev_state);
1721 }
1722 
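/*
 * Expose a counter of emulated accesses and the action knob through
 * debugfs.  Assuming debugfs is mounted in the usual place, something
 * like the following can be used from the shell to watch and tune the
 * behaviour (0 = quiet, 1 = signal, 2 = show):
 *
 *	cat /sys/kernel/debug/mips/unaligned_instructions
 *	echo 1 > /sys/kernel/debug/mips/unaligned_action
 */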
1723 #ifdef CONFIG_DEBUG_FS
1724 extern struct dentry *mips_debugfs_dir;
1725 static int __init debugfs_unaligned(void)
1726 {
1727 	struct dentry *d;
1728 
1729 	if (!mips_debugfs_dir)
1730 		return -ENODEV;
1731 	d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
1732 			       mips_debugfs_dir, &unaligned_instructions);
1733 	if (!d)
1734 		return -ENOMEM;
1735 	d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
1736 			       mips_debugfs_dir, &unaligned_action);
1737 	if (!d)
1738 		return -ENOMEM;
1739 	return 0;
1740 }
1741 __initcall(debugfs_unaligned);
1742 #endif
1743