/*
 * Copyright (C) 2011-2012 Synopsys (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg : May 2011
 *  -Adapted (from .26 to .35)
 *  -original contribution by Tim.yao@amlogic.com
 *
 */

#include <linux/types.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <asm/disasm.h>

#ifdef CONFIG_CPU_BIG_ENDIAN
#define BE		1
#define FIRST_BYTE_16	"swap %1, %1\n swape %1, %1\n"
#define FIRST_BYTE_32	"swape %1, %1\n"
#else
#define BE		0
#define FIRST_BYTE_16
#define FIRST_BYTE_32
#endif

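/*
 * A note on the endian plumbing above (summary of what the helpers below
 * do): BE only selects the shift amounts used when the load helpers glue
 * the bytes back together, while FIRST_BYTE_16/32 prepend a byte swap to
 * the store helpers ("swap" exchanges the halfwords of a register, "swape"
 * reverses the byte order of the whole word), so the stb.ab loop always
 * emits the byte that belongs at the lowest address first.
 */
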
#define __get8_unaligned_check(val, addr, err)		\
	__asm__(					\
	"1:	ldb.ab	%1, [%2, 1]\n"			\
	"2:\n"						\
	"	.section .fixup,\"ax\"\n"		\
	"	.align	4\n"				\
	"3:	mov	%0, 1\n"			\
	"	j	2b\n"				\
	"	.previous\n"				\
	"	.section __ex_table,\"a\"\n"		\
	"	.align	4\n"				\
	"	.long	1b, 3b\n"			\
	"	.previous\n"				\
	: "=r" (err), "=&r" (val), "=r" (addr)		\
	: "0" (err), "2" (addr))

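/*
 * The macro above is the usual exception-table pattern: label 1 is the
 * (possibly faulting) byte load, the .fixup stub at label 3 sets err and
 * jumps back to label 2, and the __ex_table entry (1b, 3b) is what lets
 * the fault handler resume there instead of killing the task.  Roughly,
 * as pseudo-C (sketch only, not part of the build):
 *
 *	val = *(u8 *)addr;	// may fault; the fixup stub then sets err
 *	addr += 1;		// ldb.ab post-increments the address reg
 */
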
#define get16_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v, a = addr;	\
		__get8_unaligned_check(v, a, err);	\
		val =  v << ((BE) ? 8 : 0);		\
		__get8_unaligned_check(v, a, err);	\
		val |= v << ((BE) ? 0 : 8);		\
		if (err)				\
			goto fault;			\
	} while (0)

#define get32_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v, a = addr;	\
		__get8_unaligned_check(v, a, err);	\
		val =  v << ((BE) ? 24 : 0);		\
		__get8_unaligned_check(v, a, err);	\
		val |= v << ((BE) ? 16 : 8);		\
		__get8_unaligned_check(v, a, err);	\
		val |= v << ((BE) ? 8 : 16);		\
		__get8_unaligned_check(v, a, err);	\
		val |= v << ((BE) ? 0 : 24);		\
		if (err)				\
			goto fault;			\
	} while (0)

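/*
 * For reference, on a little-endian build (BE == 0) the two helpers above
 * assemble their result as
 *
 *	val16 = b0 | (b1 << 8);
 *	val32 = b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
 *
 * and the big-endian build simply mirrors the shift amounts.  A fault on
 * any byte sets err, which sends the caller to its local "fault" label.
 */
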
#define put16_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v = val, a = addr;\
							\
		__asm__(				\
		FIRST_BYTE_16				\
		"1:	stb.ab	%1, [%2, 1]\n"		\
		"	lsr %1, %1, 8\n"		\
		"2:	stb	%1, [%2]\n"		\
		"3:\n"					\
		"	.section .fixup,\"ax\"\n"	\
		"	.align	4\n"			\
		"4:	mov	%0, 1\n"		\
		"	j	3b\n"			\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
		"	.align	4\n"			\
		"	.long	1b, 4b\n"		\
		"	.long	2b, 4b\n"		\
		"	.previous\n"			\
		: "=r" (err), "=&r" (v), "=&r" (a)	\
		: "0" (err), "1" (v), "2" (a));		\
							\
		if (err)				\
			goto fault;			\
	} while (0)

#define put32_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v = val, a = addr;\
							\
		__asm__(				\
		FIRST_BYTE_32				\
		"1:	stb.ab	%1, [%2, 1]\n"		\
		"	lsr %1, %1, 8\n"		\
		"2:	stb.ab	%1, [%2, 1]\n"		\
		"	lsr %1, %1, 8\n"		\
		"3:	stb.ab	%1, [%2, 1]\n"		\
		"	lsr %1, %1, 8\n"		\
		"4:	stb	%1, [%2]\n"		\
		"5:\n"					\
		"	.section .fixup,\"ax\"\n"	\
		"	.align	4\n"			\
		"6:	mov	%0, 1\n"		\
		"	j	5b\n"			\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
		"	.align	4\n"			\
		"	.long	1b, 6b\n"		\
		"	.long	2b, 6b\n"		\
		"	.long	3b, 6b\n"		\
		"	.long	4b, 6b\n"		\
		"	.previous\n"			\
		: "=r" (err), "=&r" (v), "=&r" (a)	\
		: "0" (err), "1" (v), "2" (a));		\
							\
		if (err)				\
			goto fault;			\
	} while (0)

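/*
 * The store helpers mirror the load side, but instead of shifting bytes
 * into place they pre-swap the value (FIRST_BYTE_16/32, big endian only)
 * and then emit it lowest-address byte first via stb.ab.  Every stb has
 * its own __ex_table entry, so a fault on any byte sets err; bytes already
 * written are not rolled back, the caller just sees the access as failed.
 */
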
/* sysctl hooks */
int unaligned_enabled __read_mostly = 1;	/* Enabled by default */
int no_unaligned_warning __read_mostly = 1;	/* Only 1 warning by default */

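/*
 * Both knobs are consumed by misaligned_fixup() below and exposed through
 * sysctl; the warning text later in this file names the second one
 * explicitly as /proc/sys/kernel/ignore-unaligned-usertrap.
 */
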
static void fixup_load(struct disasm_state *state, struct pt_regs *regs,
			struct callee_regs *cregs)
{
	int val;

	/* register write back */
	if ((state->aa == 1) || (state->aa == 2)) {
		set_reg(state->wb_reg, state->src1 + state->src2, regs, cregs);

		if (state->aa == 2)
			state->src2 = 0;
	}

	if (state->zz == 0) {
		get32_unaligned_check(val, state->src1 + state->src2);
	} else {
		get16_unaligned_check(val, state->src1 + state->src2);

		if (state->x)
			val = (val << 16) >> 16;
	}

	if (state->pref == 0)
		set_reg(state->dest, val, regs, cregs);

	return;

fault:	state->fault = 1;
}

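/*
 * fixup_load() in a nutshell: disasm_instr() has already split the
 * instruction into an addressing-mode field (aa), a size field (zz: 0 =
 * word, 1 = byte, 2 = half), a sign-extend flag (x) and a prefetch flag
 * (pref).  For the two writeback forms handled here (aa == 1 and aa == 2,
 * i.e. pre- and post-update), the base register gets base + offset; for
 * the post-update form the effective address is the base alone, which is
 * why src2 is cleared when aa == 2.  The
 *
 *	val = (val << 16) >> 16;
 *
 * line is the usual shift trick to sign-extend a 16-bit value held in a
 * signed int.
 */
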
static void fixup_store(struct disasm_state *state, struct pt_regs *regs,
			struct callee_regs *cregs)
{
	/* register write back */
	if ((state->aa == 1) || (state->aa == 2)) {
		set_reg(state->wb_reg, state->src2 + state->src3, regs, cregs);

		if (state->aa == 3)
			state->src3 = 0;
	} else if (state->aa == 3) {
		if (state->zz == 2) {
			set_reg(state->wb_reg, state->src2 + (state->src3 << 1),
				regs, cregs);
		} else if (!state->zz) {
			set_reg(state->wb_reg, state->src2 + (state->src3 << 2),
				regs, cregs);
		} else {
			goto fault;
		}
	}

	/* write fix-up */
	if (!state->zz)
		put32_unaligned_check(state->src1, state->src2 + state->src3);
	else
		put16_unaligned_check(state->src1, state->src2 + state->src3);

	return;

fault:	state->fault = 1;
}

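/*
 * fixup_store() adds one more case: aa == 3 is the scaled (.as) form,
 * where the offset is shifted by the access size before the writeback,
 * hence the << 1 for a halfword and << 2 for a word (anything else falls
 * through to the fault label).  Incidentally, the aa == 3 test inside the
 * (aa == 1 || aa == 2) branch can never be true there; presumably it was
 * meant to mirror the aa == 2 handling in fixup_load().
 */
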
/*
 * Handle an unaligned access
 * Returns 0 if successfully handled, 1 if some error happened
 */
int misaligned_fixup(unsigned long address, struct pt_regs *regs,
		     struct callee_regs *cregs)
{
	struct disasm_state state;
	char buf[TASK_COMM_LEN];

	/* handle user mode only, and only if enabled by the sysadmin */
	if (!user_mode(regs) || !unaligned_enabled)
		return 1;

	if (no_unaligned_warning) {
		pr_warn_once("%s(%d) made unaligned access which was emulated"
			     " by kernel assist.\nThis can degrade application"
			     " performance significantly.\nTo enable further"
			     " logging of such instances, please\n"
			     "echo 0 > /proc/sys/kernel/ignore-unaligned-usertrap\n",
			     get_task_comm(buf, current), task_pid_nr(current));
	} else {
		/* Add rate limiting if it gets down to it */
		pr_warn("%s(%d): unaligned access to/from 0x%lx by PC: 0x%lx\n",
			get_task_comm(buf, current), task_pid_nr(current),
			address, regs->ret);
	}

	disasm_instr(regs->ret, &state, 1, regs, cregs);

	if (state.fault)
		goto fault;

	/* ldb/stb should never raise an unaligned access exception */
	if ((state.zz == 1) || (state.di))
		goto fault;

	if (!state.write)
		fixup_load(&state, regs, cregs);
	else
		fixup_store(&state, regs, cregs);

	if (state.fault)
		goto fault;

	/* clear any remnants of the delay slot */
	if (delay_mode(regs)) {
		regs->ret = regs->bta & ~1U;
		regs->status32 &= ~STATUS_DE_MASK;
	} else {
		regs->ret += state.instr_len;

		/* handle zero-overhead-loop */
		if ((regs->ret == regs->lp_end) && (regs->lp_count)) {
			regs->ret = regs->lp_start;
			regs->lp_count--;
		}
	}

	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
	return 0;

fault:
	pr_err("Alignment trap: fault in fix-up %08lx at [<%08lx>]\n",
		state.words[0], address);

	return 1;
}
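
/*
 * For context, this fixup is reached from the misaligned-access trap path
 * (do_misaligned_access() in arch/arc/kernel/traps.c), along the lines of
 * the sketch below; a non-zero return ends up delivering SIGBUS/BUS_ADRALN
 * to the task.  Helper names vary between kernel versions, so treat this
 * purely as an illustration:
 *
 *	if (misaligned_fixup(address, regs, cregs) != 0)
 *		return do_misaligned_error(address, regs);
 *	return 0;
 */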