/*
 * arch/xtensa/kernel/align.S
 *
 * Handle unalignment exceptions in kernel space.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file "COPYING" in the main directory of
 * this archive for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica, Inc.
 * Copyright (C) 2014 Cadence Design Systems Inc.
 *
 * Rewritten by Chris Zankel <chris@zankel.net>
 *
 * Based on work from Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 * and Marc Gauthier <marc@tensilica.com, marc@alimni.uwaterloo.ca>
 */

#include <linux/linkage.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>

#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION

/*  First-level exception handler for unaligned exceptions.
 *
 *  Note: This handler works for kernel exceptions.  With
 *        UNALIGNED_USER_EXCEPTION defined below it also fixes up
 *        unaligned user accesses; otherwise those get a seg fault.
 */

/* Big and little endian 16-bit values are located in
 * different halves of a register.  HWORD_START helps to
 * abstract the notion of extracting a 16-bit value from a
 * register.
 * We also have to define new shifting instructions because
 * lsb and msb are on 'opposite' ends in a register for
 * different endian machines.
 *
 * Assume a memory region in ascending address:
 *	0 1 2 3|4 5 6 7
 *
 * When loading one word into a register, the content of that register is:
 *  LE	3 2 1 0, 7 6 5 4
 *  BE	0 1 2 3, 4 5 6 7
 *
 * Masking the bits of the higher/lower address means:
 *  LE	X X 0 0, 0 0 X X
 *  BE	0 0 X X, X X 0 0
 *
 * Shifting to higher/lower addresses, means:
 *  LE	shift left / shift right
 *  BE	shift right / shift left
 *
 * Extracting 16 bits from a 32 bit reg. value to higher/lower address means:
 *  LE	mask 0 0 X X / shift left
 *  BE	shift left / mask 0 0 X X
 */
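
/* As a rough, illustrative C model of the scheme described above (not
 * part of the kernel; the helper name is made up): on a little-endian
 * core, an unaligned 32-bit load can be emulated by reading the two
 * aligned words around the address and funnel-shifting them together,
 * which is roughly what the __ssa8/__src_b macros below express.
 *
 *	#include <stdint.h>
 *
 *	static uint32_t emulated_l32_le(const uint32_t *base,
 *					unsigned int byte_off)
 *	{
 *		unsigned int s = byte_off * 8;	// shift amount in bits
 *		uint32_t lo = base[0];		// word at the lower address
 *		uint32_t hi = base[1];		// word at the higher address
 *
 *		// low bytes come from lo, the rest spill in from hi
 *		return s ? (lo >> s) | (hi << (32 - s)) : lo;
 *	}
 *
 * On a big-endian core the shift directions are mirrored, as noted in
 * the table above.
 */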

#define UNALIGNED_USER_EXCEPTION

#if XCHAL_HAVE_BE

#define HWORD_START	16
#define	INSN_OP0	28
#define	INSN_T		24
#define	INSN_OP1	16

.macro __src_b	r, w0, w1;	src	\r, \w0, \w1;	.endm
.macro __ssa8	r;		ssa8b	\r;		.endm
.macro __ssa8r	r;		ssa8l	\r;		.endm
.macro __sh	r, s;		srl	\r, \s;		.endm
.macro __sl	r, s;		sll	\r, \s;		.endm
.macro __exth	r, s;		extui	\r, \s, 0, 16;	.endm
.macro __extl	r, s;		slli	\r, \s, 16;	.endm

#else

#define HWORD_START	0
#define	INSN_OP0	0
#define	INSN_T		4
#define	INSN_OP1	12

.macro __src_b	r, w0, w1;	src	\r, \w1, \w0;	.endm
.macro __ssa8	r;		ssa8l	\r;		.endm
.macro __ssa8r	r;		ssa8b	\r;		.endm
.macro __sh	r, s;		sll	\r, \s;		.endm
.macro __sl	r, s;		srl	\r, \s;		.endm
.macro __exth	r, s;		slli	\r, \s, 16;	.endm
.macro __extl	r, s;		extui	\r, \s, 0, 16;	.endm

#endif

/*
 *	xxxx xxxx = imm8 field
 *	     yyyy = imm4 field
 *	     ssss = s field
 *	     tttt = t field
 *
 *			 16		    0
 *		          -------------------
 *	L32I.N		  yyyy ssss tttt 1000
 *	S32I.N	          yyyy ssss tttt 1001
 *
 *	       23			    0
 *		-----------------------------
 *	res	          0000           0010
 *	L16UI	xxxx xxxx 0001 ssss tttt 0010
 *	L32I	xxxx xxxx 0010 ssss tttt 0010
 *	XXX	          0011 ssss tttt 0010
 *	XXX	          0100 ssss tttt 0010
 *	S16I	xxxx xxxx 0101 ssss tttt 0010
 *	S32I	xxxx xxxx 0110 ssss tttt 0010
 *	XXX	          0111 ssss tttt 0010
 *	XXX	          1000 ssss tttt 0010
 *	L16SI	xxxx xxxx 1001 ssss tttt 0010
 *	XXX	          1010           0010
 *      **L32AI	xxxx xxxx 1011 ssss tttt 0010 unsupported
 *	XXX	          1100           0010
 *	XXX	          1101           0010
 *	XXX	          1110           0010
 *	**S32RI	xxxx xxxx 1111 ssss tttt 0010 unsupported
 *		-----------------------------
 *                           ^         ^    ^
 *    sub-opcode (NIBBLE_R) -+         |    |
 *       t field (NIBBLE_T) -----------+    |
 *  major opcode (NIBBLE_OP0) --------------+
 */

#define OP0_L32I_N	0x8		/* load immediate narrow */
#define OP0_S32I_N	0x9		/* store immediate narrow */
#define OP1_SI_MASK	0x4		/* OP1 bit set for stores */
#define OP1_SI_BIT	2		/* OP1 bit number for stores */

#define OP1_L32I	0x2
#define OP1_L16UI	0x1
#define OP1_L16SI	0x9
#define OP1_L32AI	0xb

#define OP1_S32I	0x6
#define OP1_S16I	0x5
#define OP1_S32RI	0xf
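
/* As an illustrative sketch only (not additional kernel code), the
 * nibbles above map to the extui extractions used below roughly like
 * this, with the INSN_* offsets defined for the configured endianness:
 *
 *	op0 = (insn >> INSN_OP0) & 0xf;	// major opcode, e.g. OP0_L32I_N
 *	t   = (insn >> INSN_T)   & 0xf;	// source/target register number
 *	op1 = (insn >> INSN_OP1) & 0xf;	// sub-opcode, e.g. OP1_L32I, OP1_S16I
 *
 * Note that every store sub-opcode in the table (S16I = 0101,
 * S32I = 0110, S32RI = 1111) has bit OP1_SI_BIT set, while all load
 * sub-opcodes have it clear; the _bbci.l test further down relies on
 * exactly this property.
 */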

/*
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */


ENTRY(fast_unaligned)

	/* Note: We don't expect the address to be aligned on a word
	 *       boundary. After all, the processor generated that exception,
	 *       and an aligned address would indicate a hardware fault.
	 */

	/* Save some working registers */

	s32i	a4, a2, PT_AREG4
	s32i	a5, a2, PT_AREG5
	s32i	a6, a2, PT_AREG6
	s32i	a7, a2, PT_AREG7
	s32i	a8, a2, PT_AREG8

	rsr	a0, depc
	s32i	a0, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3

	rsr	a3, excsave1
	movi	a4, fast_unaligned_fixup
	s32i	a4, a3, EXC_TABLE_FIXUP

	/* Keep value of SAR in a0 */

	rsr	a0, sar
	rsr	a8, excvaddr		# load unaligned memory address

	/* Now, identify one of the following load/store instructions.
	 *
	 * The only possible danger of a double exception on the
	 * following l32i instructions is kernel code in vmalloc
	 * memory. The processor was just executing at the EPC_1
	 * address, and indeed, already fetched the instruction.  That
	 * guarantees a TLB mapping, which hasn't been replaced by
	 * this unaligned exception handler that uses only static TLB
	 * mappings. However, high-level interrupt handlers might
	 * modify TLB entries, so for the generic case, we register a
	 * TABLE_FIXUP handler here, too.
	 */

	/* a3...a8 saved on stack, a2 = SP */

	/* Extract the instruction that caused the unaligned access. */

	rsr	a7, epc1	# load exception address
	movi	a3, ~3
	and	a3, a3, a7	# mask lower bits

	l32i	a4, a3, 0	# load 2 words
	l32i	a5, a3, 4

	__ssa8	a7
	__src_b	a4, a4, a5	# a4 has the instruction

	/* Analyze the instruction (load or store?). */

	extui	a5, a4, INSN_OP0, 4	# get insn.op0 nibble

#if XCHAL_HAVE_DENSITY
	_beqi	a5, OP0_L32I_N, .Lload	# L32I.N, jump
	addi	a6, a5, -OP0_S32I_N
	_beqz	a6, .Lstore		# S32I.N, do a store
#endif
	/* 'store indicator bit' not set, jump */
	_bbci.l	a4, OP1_SI_BIT + INSN_OP1, .Lload

	/* Store: Jump to table entry to get the value in the source register. */

.Lstore:movi	a5, .Lstore_table	# table
	extui	a6, a4, INSN_T, 4	# get source register
	addx8	a5, a6, a5
	jx	a5			# jump into table
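
	/* Both .Lstore_table and .Lload_table below consist of 8-byte
	 * slots (each entry is padded with .align 8), so the addx8 above
	 * computes table + 8 * regno and jumps straight to the slot for
	 * the register encoded in the t field.
	 */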

	/* Invalid instruction, CRITICAL! */
.Linvalid_instruction_load:
	j	.Linvalid_instruction

	/* Load: Load memory address. */

.Lload: movi	a3, ~3
	and	a3, a3, a8		# align memory address

	__ssa8	a8
#ifdef UNALIGNED_USER_EXCEPTION
	addi	a3, a3, 8
	l32e	a5, a3, -8
	l32e	a6, a3, -4
#else
	l32i	a5, a3, 0
	l32i	a6, a3, 4
#endif
	__src_b	a3, a5, a6		# a3 has the data word

#if XCHAL_HAVE_DENSITY
	addi	a7, a7, 2		# increment PC (assume 16-bit insn)

	extui	a5, a4, INSN_OP0, 4
	_beqi	a5, OP0_L32I_N, 1f	# l32i.n: jump

	addi	a7, a7, 1
#else
	addi	a7, a7, 3
#endif

	extui	a5, a4, INSN_OP1, 4
	_beqi	a5, OP1_L32I, 1f	# l32i: jump

	extui	a3, a3, 0, 16		# extract lower 16 bits
	_beqi	a5, OP1_L16UI, 1f
	addi	a5, a5, -OP1_L16SI
	_bnez	a5, .Linvalid_instruction_load

	/* sign extend value */

	slli	a3, a3, 16
	srai	a3, a3, 16

	/* Set target register. */

1:

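	/* The emulated load may be the last instruction of a zero-overhead
	 * loop.  Since EPC1 is advanced by hand here, the loopback the
	 * hardware would normally perform has to be emulated as well: if
	 * the new PC reaches LEND while LCOUNT is non-zero, decrement
	 * LCOUNT and continue at LBEG instead.
	 */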
#if XCHAL_HAVE_LOOPS
	rsr	a5, lend		# check if we reached LEND
	bne	a7, a5, 1f
	rsr	a5, lcount		# and LCOUNT != 0
	beqz	a5, 1f
	addi	a5, a5, -1		# decrement LCOUNT and set
	rsr	a7, lbeg		# set PC to LBEGIN
	wsr	a5, lcount
#endif

1:	wsr	a7, epc1		# skip load instruction
	extui	a4, a4, INSN_T, 4	# extract target register
	movi	a5, .Lload_table
	addx8	a4, a4, a5
	jx	a4			# jump to entry for target register

	.align	8
.Lload_table:
	s32i	a3, a2, PT_AREG0;	_j .Lexit;	.align 8
	mov	a1, a3;			_j .Lexit;	.align 8 # fishy??
	s32i	a3, a2, PT_AREG2;	_j .Lexit;	.align 8
	s32i	a3, a2, PT_AREG3;	_j .Lexit;	.align 8
	s32i	a3, a2, PT_AREG4;	_j .Lexit;	.align 8
	s32i	a3, a2, PT_AREG5;	_j .Lexit;	.align 8
	s32i	a3, a2, PT_AREG6;	_j .Lexit;	.align 8
	s32i	a3, a2, PT_AREG7;	_j .Lexit;	.align 8
	s32i	a3, a2, PT_AREG8;	_j .Lexit;	.align 8
	mov	a9, a3		;	_j .Lexit;	.align 8
	mov	a10, a3		;	_j .Lexit;	.align 8
	mov	a11, a3		;	_j .Lexit;	.align 8
	mov	a12, a3		;	_j .Lexit;	.align 8
	mov	a13, a3		;	_j .Lexit;	.align 8
	mov	a14, a3		;	_j .Lexit;	.align 8
	mov	a15, a3		;	_j .Lexit;	.align 8

.Lstore_table:
	l32i	a3, a2, PT_AREG0;	_j 1f;	.align 8
	mov	a3, a1;			_j 1f;	.align 8	# fishy??
	l32i	a3, a2, PT_AREG2;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG3;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG4;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG5;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG6;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG7;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG8;	_j 1f;	.align 8
	mov	a3, a9		;	_j 1f;	.align 8
	mov	a3, a10		;	_j 1f;	.align 8
	mov	a3, a11		;	_j 1f;	.align 8
	mov	a3, a12		;	_j 1f;	.align 8
	mov	a3, a13		;	_j 1f;	.align 8
	mov	a3, a14		;	_j 1f;	.align 8
	mov	a3, a15		;	_j 1f;	.align 8

1: 	# a7: instruction pointer, a4: instruction, a3: value

	movi	a6, 0			# mask: ffffffff:00000000

#if XCHAL_HAVE_DENSITY
	addi	a7, a7, 2		# incr. PC, assume 16-bit instruction

	extui	a5, a4, INSN_OP0, 4	# extract OP0
	addi	a5, a5, -OP0_S32I_N
	_beqz	a5, 1f			# s32i.n: jump

	addi	a7, a7, 1		# increment PC, 32-bit instruction
#else
	addi	a7, a7, 3		# increment PC, 32-bit instruction
#endif

	extui	a5, a4, INSN_OP1, 4	# extract OP1
	_beqi	a5, OP1_S32I, 1f	# jump if 32 bit store
	_bnei	a5, OP1_S16I, .Linvalid_instruction_store

	movi	a5, -1
	__extl	a3, a3			# get 16-bit value
	__exth	a6, a5			# get 16-bit mask ffffffff:ffff0000

	/* Get memory address */

1:
#if XCHAL_HAVE_LOOPS
	rsr	a4, lend		# check if we reached LEND
	bne	a7, a4, 1f
	rsr	a4, lcount		# and LCOUNT != 0
	beqz	a4, 1f
	addi	a4, a4, -1		# decrement LCOUNT and set
	rsr	a7, lbeg		# set PC to LBEGIN
	wsr	a4, lcount
#endif

1:	wsr	a7, epc1		# skip store instruction
	movi	a4, ~3
	and	a4, a4, a8		# align memory address

	/* Insert value into memory */

	movi	a5, -1			# mask: ffffffff:XXXX0000
#ifdef UNALIGNED_USER_EXCEPTION
	addi	a4, a4, 8
#endif

	__ssa8r a8
	__src_b	a7, a5, a6		# lo-mask  F..F0..0 (BE) 0..0F..F (LE)
	__src_b	a6, a6, a5		# hi-mask  0..0F..F (BE) F..F0..0 (LE)
#ifdef UNALIGNED_USER_EXCEPTION
	l32e	a5, a4, -8
#else
	l32i	a5, a4, 0		# load lower address word
#endif
	and	a5, a5, a7		# mask
	__sh	a7, a3			# shift value
	or	a5, a5, a7		# or with original value
#ifdef UNALIGNED_USER_EXCEPTION
	s32e	a5, a4, -8
	l32e	a7, a4, -4
#else
	s32i	a5, a4, 0		# store
	l32i	a7, a4, 4		# same for upper address word
#endif
	__sl	a5, a3
	and	a6, a7, a6
	or	a6, a6, a5
#ifdef UNALIGNED_USER_EXCEPTION
	s32e	a6, a4, -4
#else
	s32i	a6, a4, 4
#endif
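
	/* A rough C model of the net effect of the read-modify-write
	 * sequence above for a little-endian core (illustrative only;
	 * the helper name is made up and this is not part of the kernel):
	 *
	 *	static void emulated_s32_le(uint32_t *base,
	 *				    unsigned int byte_off, uint32_t val)
	 *	{
	 *		unsigned int s = byte_off * 8;	// shift in bits, 0..24
	 *		uint32_t keep = (1u << s) - 1;	// bytes below the store
	 *
	 *		base[0] = (base[0] & keep) | (val << s);
	 *		if (s)				// value straddles two words
	 *			base[1] = (base[1] & ~keep) | (val >> (32 - s));
	 *	}
	 *
	 * A 16-bit store works the same way, except that only 16 bits of
	 * the value are inserted (see __extl/__exth above).
	 */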

	/* Done. restore stack and return */

.Lexit:
	movi	a4, 0
	rsr	a3, excsave1
	s32i	a4, a3, EXC_TABLE_FIXUP

	/* Restore working registers */

	l32i	a8, a2, PT_AREG8
	l32i	a7, a2, PT_AREG7
	l32i	a6, a2, PT_AREG6
	l32i	a5, a2, PT_AREG5
	l32i	a4, a2, PT_AREG4
	l32i	a3, a2, PT_AREG3

	/* restore SAR and return */

	wsr	a0, sar
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_AREG2
	rfe

	/* We cannot handle this exception. */

	.extern _kernel_exception
.Linvalid_instruction_store:
.Linvalid_instruction:

	movi	a4, 0
	rsr	a3, excsave1
	s32i	a4, a3, EXC_TABLE_FIXUP

	/* Restore a4...a8 and SAR, set SP, and jump to default exception. */

	l32i	a8, a2, PT_AREG8
	l32i	a7, a2, PT_AREG7
	l32i	a6, a2, PT_AREG6
	l32i	a5, a2, PT_AREG5
	l32i	a4, a2, PT_AREG4
	wsr	a0, sar
	mov	a1, a2

	rsr	a0, ps
	bbsi.l  a0, PS_UM_BIT, 1f     # jump if user mode

	movi	a0, _kernel_exception
	jx	a0

1:	movi	a0, _user_exception
	jx	a0

ENDPROC(fast_unaligned)

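/* fast_unaligned_fixup is registered in EXC_TABLE_FIXUP by the handler
 * above.  It runs when one of the handler's own memory accesses takes a
 * double exception (e.g. because a TLB entry was replaced in the
 * meantime); it restores the state saved by fast_unaligned and
 * re-dispatches the new exception through the normal fast-handler
 * tables as a regular exception.
 */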
ENTRY(fast_unaligned_fixup)

	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE
	wsr	a3, excsave1

	l32i	a8, a2, PT_AREG8
	l32i	a7, a2, PT_AREG7
	l32i	a6, a2, PT_AREG6
	l32i	a5, a2, PT_AREG5
	l32i	a4, a2, PT_AREG4
	l32i	a0, a2, PT_AREG2
	xsr	a0, depc			# restore depc and a0
	wsr	a0, sar

	rsr	a0, exccause
	s32i	a0, a2, PT_DEPC			# mark as a regular exception

	rsr	a0, ps
	bbsi.l  a0, PS_UM_BIT, 1f		# jump if user mode

	rsr	a0, exccause
	addx4	a0, a0, a3              	# find entry in table
	l32i	a0, a0, EXC_TABLE_FAST_KERNEL   # load handler
	l32i	a3, a2, PT_AREG3
	jx	a0
1:
	rsr	a0, exccause
	addx4	a0, a0, a3              	# find entry in table
	l32i	a0, a0, EXC_TABLE_FAST_USER     # load handler
	l32i	a3, a2, PT_AREG3
	jx	a0

ENDPROC(fast_unaligned_fixup)

#endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */