xref: /openbmc/linux/arch/powerpc/kernel/align.c (revision d0e22329)
/* align.c - handle alignment exceptions for the PowerPC.
 *
 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 * Copyright (c) 1998-1999 TiVo, Inc.
 *   PowerPC 403GCX modifications.
 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *   PowerPC 403GCX/405GP modifications.
 * Copyright (c) 2001-2002 PPC64 team, IBM Corp
 *   64-bit and Power4 support
 * Copyright (c) 2005 Benjamin Herrenschmidt, IBM Corp
 *                    <benh@kernel.crashing.org>
 *   Merge ppc32 and ppc64 implementations
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/emulated_ops.h>
#include <asm/switch_to.h>
#include <asm/disassemble.h>
#include <asm/cpu_has_feature.h>
#include <asm/sstep.h>

struct aligninfo {
	unsigned char len;
	unsigned char flags;
};

#define INVALID	{ 0, 0 }

/* Bits in the flags field */
#define LD	0	/* load */
#define ST	1	/* store */
#define SE	2	/* sign-extend value, or FP ld/st as word */
#define SW	0x20	/* byte swap */
#define E4	0x40	/* SPE endianness is word */
#define E8	0x80	/* SPE endianness is double word */
#ifdef CONFIG_SPE

static struct aligninfo spe_aligninfo[32] = {
	{ 8, LD+E8 },		/* 0 00 00: evldd[x] */
	{ 8, LD+E4 },		/* 0 00 01: evldw[x] */
	{ 8, LD },		/* 0 00 10: evldh[x] */
	INVALID,		/* 0 00 11 */
	{ 2, LD },		/* 0 01 00: evlhhesplat[x] */
	INVALID,		/* 0 01 01 */
	{ 2, LD },		/* 0 01 10: evlhhousplat[x] */
	{ 2, LD+SE },		/* 0 01 11: evlhhossplat[x] */
	{ 4, LD },		/* 0 10 00: evlwhe[x] */
	INVALID,		/* 0 10 01 */
	{ 4, LD },		/* 0 10 10: evlwhou[x] */
	{ 4, LD+SE },		/* 0 10 11: evlwhos[x] */
	{ 4, LD+E4 },		/* 0 11 00: evlwwsplat[x] */
	INVALID,		/* 0 11 01 */
	{ 4, LD },		/* 0 11 10: evlwhsplat[x] */
	INVALID,		/* 0 11 11 */

	{ 8, ST+E8 },		/* 1 00 00: evstdd[x] */
	{ 8, ST+E4 },		/* 1 00 01: evstdw[x] */
	{ 8, ST },		/* 1 00 10: evstdh[x] */
	INVALID,		/* 1 00 11 */
	INVALID,		/* 1 01 00 */
	INVALID,		/* 1 01 01 */
	INVALID,		/* 1 01 10 */
	INVALID,		/* 1 01 11 */
	{ 4, ST },		/* 1 10 00: evstwhe[x] */
	INVALID,		/* 1 10 01 */
	{ 4, ST },		/* 1 10 10: evstwho[x] */
	INVALID,		/* 1 10 11 */
	{ 4, ST+E4 },		/* 1 11 00: evstwwe[x] */
	INVALID,		/* 1 11 01 */
	{ 4, ST+E4 },		/* 1 11 10: evstwwo[x] */
	INVALID,		/* 1 11 11 */
};

#define	EVLDD		0x00
#define	EVLDW		0x01
#define	EVLDH		0x02
#define	EVLHHESPLAT	0x04
#define	EVLHHOUSPLAT	0x06
#define	EVLHHOSSPLAT	0x07
#define	EVLWHE		0x08
#define	EVLWHOU		0x0A
#define	EVLWHOS		0x0B
#define	EVLWWSPLAT	0x0C
#define	EVLWHSPLAT	0x0E
#define	EVSTDD		0x10
#define	EVSTDW		0x11
#define	EVSTDH		0x12
#define	EVSTWHE		0x18
#define	EVSTWHO		0x1A
#define	EVSTWWE		0x1C
#define	EVSTWWO		0x1E

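/*
 * How these indices arise (an example; the opcode values are taken from
 * the SPE ISA, not from this file): emulate_spe() below indexes
 * spe_aligninfo[] and these constants with (instr >> 1) & 0x1f, i.e. the
 * low five bits of the SPE extended opcode with the bit that
 * distinguishes the indexed [x] form masked off, so both forms of each
 * instruction share one table entry.  For instance, evldd (extended
 * opcode 769) and evlddx (768) both yield index 0x00 == EVLDD.
 */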
/*
 * Emulate SPE loads and stores.
 * Only Book-E has these instructions, and it does true little-endian,
 * so we don't need the address swizzling.
 */
static int emulate_spe(struct pt_regs *regs, unsigned int reg,
		       unsigned int instr)
{
	int ret;
	union {
		u64 ll;
		u32 w[2];
		u16 h[4];
		u8 v[8];
	} data, temp;
	unsigned char __user *p, *addr;
	unsigned long *evr = &current->thread.evr[reg];
	unsigned int nb, flags;

	instr = (instr >> 1) & 0x1f;

	/* DAR has the operand effective address */
	addr = (unsigned char __user *)regs->dar;

	nb = spe_aligninfo[instr].len;
	flags = spe_aligninfo[instr].flags;

	/* Verify the address of the operand */
	if (unlikely(user_mode(regs) &&
		     !access_ok(addr, nb)))
		return -EFAULT;

	/* userland only */
	if (unlikely(!user_mode(regs)))
		return 0;

	flush_spe_to_thread(current);

	/* If we are storing, gather the data from the register values;
	 * otherwise we are loading, so fetch the data from user space.
	 */
	if (flags & ST) {
		data.ll = 0;
		/*
		 * Each case fills only the last nb bytes of 'data'
		 * (h[2]/h[3] for the 4-byte stores, w[1] for the word
		 * stores), since the store loop below writes out
		 * v[8 - nb] through v[7].
		 */
		switch (instr) {
		case EVSTDD:
		case EVSTDW:
		case EVSTDH:
			data.w[0] = *evr;
			data.w[1] = regs->gpr[reg];
			break;
		case EVSTWHE:
			data.h[2] = *evr >> 16;
			data.h[3] = regs->gpr[reg] >> 16;
			break;
		case EVSTWHO:
			data.h[2] = *evr & 0xffff;
			data.h[3] = regs->gpr[reg] & 0xffff;
			break;
		case EVSTWWE:
			data.w[1] = *evr;
			break;
		case EVSTWWO:
			data.w[1] = regs->gpr[reg];
			break;
		default:
			return -EINVAL;
		}
	} else {
		temp.ll = data.ll = 0;
		ret = 0;
		p = addr;

		/*
		 * Deliberate fall-through: the fetched bytes end up
		 * right-justified in temp (v[8 - nb] through v[7]).
		 */
		switch (nb) {
		case 8:
			ret |= __get_user_inatomic(temp.v[0], p++);
			ret |= __get_user_inatomic(temp.v[1], p++);
			ret |= __get_user_inatomic(temp.v[2], p++);
			ret |= __get_user_inatomic(temp.v[3], p++);
			/* fall through */
		case 4:
			ret |= __get_user_inatomic(temp.v[4], p++);
			ret |= __get_user_inatomic(temp.v[5], p++);
			/* fall through */
		case 2:
			ret |= __get_user_inatomic(temp.v[6], p++);
			ret |= __get_user_inatomic(temp.v[7], p++);
			if (unlikely(ret))
				return -EFAULT;
		}

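		/*
		 * Distribute the fetched bytes into the layout each
		 * instruction expects.  For example, evlhhesplat loads a
		 * single halfword (now sitting in temp.h[3]) and its case
		 * below splats it into the even halfword of both words,
		 * h[0] and h[2].
		 */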
		switch (instr) {
		case EVLDD:
		case EVLDW:
		case EVLDH:
			data.ll = temp.ll;
			break;
		case EVLHHESPLAT:
			data.h[0] = temp.h[3];
			data.h[2] = temp.h[3];
			break;
		case EVLHHOUSPLAT:
		case EVLHHOSSPLAT:
			data.h[1] = temp.h[3];
			data.h[3] = temp.h[3];
			break;
		case EVLWHE:
			data.h[0] = temp.h[2];
			data.h[2] = temp.h[3];
			break;
		case EVLWHOU:
		case EVLWHOS:
			data.h[1] = temp.h[2];
			data.h[3] = temp.h[3];
			break;
		case EVLWWSPLAT:
			data.w[0] = temp.w[1];
			data.w[1] = temp.w[1];
			break;
		case EVLWHSPLAT:
			data.h[0] = temp.h[2];
			data.h[1] = temp.h[2];
			data.h[2] = temp.h[3];
			data.h[3] = temp.h[3];
			break;
		default:
			return -EINVAL;
		}
	}

	if (flags & SW) {
		switch (flags & 0xf0) {
		case E8:
			data.ll = swab64(data.ll);
			break;
		case E4:
			data.w[0] = swab32(data.w[0]);
			data.w[1] = swab32(data.w[1]);
			break;
		/* Otherwise it's halfword endianness */
		default:
			data.h[0] = swab16(data.h[0]);
			data.h[1] = swab16(data.h[1]);
			data.h[2] = swab16(data.h[2]);
			data.h[3] = swab16(data.h[3]);
			break;
		}
	}

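	/*
	 * Sign-extend the loaded value(s) where the table flags ask for
	 * it.  Worked example: evlhhossplat of the halfword 0x8000 leaves
	 * h[1] == h[3] == 0x8000, and the (s16) casts below widen each
	 * word to 0xffff8000.
	 */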
	if (flags & SE) {
		data.w[0] = (s16)data.h[1];
		data.w[1] = (s16)data.h[3];
	}

	/* Store result to memory or update registers */
	if (flags & ST) {
		ret = 0;
		p = addr;
		/* Deliberate fall-through, mirroring the load path above */
		switch (nb) {
		case 8:
			ret |= __put_user_inatomic(data.v[0], p++);
			ret |= __put_user_inatomic(data.v[1], p++);
			ret |= __put_user_inatomic(data.v[2], p++);
			ret |= __put_user_inatomic(data.v[3], p++);
			/* fall through */
		case 4:
			ret |= __put_user_inatomic(data.v[4], p++);
			ret |= __put_user_inatomic(data.v[5], p++);
			/* fall through */
		case 2:
			ret |= __put_user_inatomic(data.v[6], p++);
			ret |= __put_user_inatomic(data.v[7], p++);
		}
		if (unlikely(ret))
			return -EFAULT;
	} else {
		*evr = data.w[0];
		regs->gpr[reg] = data.w[1];
	}

	return 1;
}
#endif /* CONFIG_SPE */
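
/*
 * Usage sketch: a user thread on an SPE-capable core that executes, say,
 * "evldd r3,0(r4)" with an unaligned address in r4 may take an alignment
 * interrupt.  The exception handler then enters fix_alignment() below,
 * which decodes the instruction image and hands it to emulate_spe() with
 * DAR holding the faulting effective address.
 */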

/*
 * Called on alignment exception.  Attempts to fix it up.
 *
 * Return 1 on success
 * Return 0 if unable to handle the interrupt
 * Return -EFAULT if data address is bad
 * Other negative return values indicate that the instruction can't
 * be emulated, and the process should be given a SIGBUS.
 */
int fix_alignment(struct pt_regs *regs)
{
	unsigned int instr;
	struct instruction_op op;
	int r, type;

	/*
	 * We require a complete register set; if not, then our assembly
	 * is broken
	 */
	CHECK_FULL_REGS(regs);

	if (unlikely(__get_user(instr, (unsigned int __user *)regs->nip)))
		return -EFAULT;
	if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) {
		/* We don't handle PPC little-endian any more... */
		if (cpu_has_feature(CPU_FTR_PPC_LE))
			return -EIO;
		instr = swab32(instr);
	}

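	/*
	 * SPE loads and stores all carry primary opcode 4 (a value from
	 * the SPE ISA, assumed here for illustration); for example,
	 * "evldd r3,0(r4)" encodes 4 in the top six bits and the target
	 * register in the next five, which is what the shifts below
	 * extract.
	 */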
#ifdef CONFIG_SPE
	if ((instr >> 26) == 0x4) {
		int reg = (instr >> 21) & 0x1f;
		PPC_WARN_ALIGNMENT(spe, regs);
		return emulate_spe(regs, reg, instr);
	}
#endif

	/*
	 * ISA 3.0 (such as P9) copy, copy_first, paste and paste_last alignment
	 * check.
	 *
	 * Send a SIGBUS to the process that caused the fault.
	 *
	 * We do not emulate these because paste may contain additional metadata
	 * when pasting to a co-processor. Furthermore, paste_last is the
	 * synchronisation point for preceding copy/paste sequences.
	 *
	 * The mask below ignores the Rc bit and the extended-opcode bit that
	 * differs between the copy and paste forms, so one compare catches
	 * the whole family.
	 */
	if ((instr & 0xfc0006fe) == (PPC_INST_COPY & 0xfc0006fe))
		return -EIO;

	r = analyse_instr(&op, regs, instr);
	if (r < 0)
		return -EINVAL;

	type = GETTYPE(op.type);
	if (!OP_IS_LOAD_STORE(type)) {
		if (op.type != CACHEOP + DCBZ)
			return -EINVAL;
		PPC_WARN_ALIGNMENT(dcbz, regs);
		r = emulate_dcbz(op.ea, regs);
	} else {
		if (type == LARX || type == STCX)
			return -EIO;
		PPC_WARN_ALIGNMENT(unaligned, regs);
		r = emulate_loadstore(regs, &op);
	}

	if (!r)
		return 1;
	return r;
}
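
/*
 * For reference, a sketch of how a caller is expected to consume these
 * return values, along the lines of alignment_exception() in
 * arch/powerpc/kernel/traps.c (details assumed, not part of this file):
 *
 *	fixed = fix_alignment(regs);
 *	if (fixed == 1)
 *		regs->nip += 4;		(emulated; continue after it)
 *	else if (fixed == -EFAULT)
 *		(deliver a memory-fault signal for the data address)
 *	else
 *		(deliver SIGBUS to the process)
 */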