/*
 * arch/xtensa/kernel/align.S
 *
 * Handle unalignment exceptions in kernel space.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file "COPYING" in the main directory of
 * this archive for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica, Inc.
 *
 * Rewritten by Chris Zankel <chris@zankel.net>
 *
 * Based on work from Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 * and Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
 */

#include <linux/linkage.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/thread_info.h>

#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION

/*  First-level exception handler for unaligned exceptions.
 *
 *  Note: This handler fixes up unaligned kernel accesses.  Because
 *        UNALIGNED_USER_EXCEPTION is defined below, unaligned user
 *        accesses are fixed up here as well (via the user-privilege
 *        l32e/s32e forms); without it, they would get a seg fault.
 */

/* Big and little endian 16-bit values are located in
 * different halves of a register.  HWORD_START helps to
 * abstract the notion of extracting a 16-bit value from a
 * register.
 * We also have to define new shifting instructions because
 * lsb and msb are on 'opposite' ends in a register for
 * different endian machines.
 *
 * Assume a memory region in ascending address:
 *	0 1 2 3|4 5 6 7
 *
 * When loading one word into a register, the content of that register is:
 *  LE	3 2 1 0, 7 6 5 4
 *  BE	0 1 2 3, 4 5 6 7
 *
 * Masking the bits of the higher/lower address means:
 *  LE	X X 0 0, 0 0 X X
 *  BE	0 0 X X, X X 0 0
 *
 * Shifting to higher/lower addresses means:
 *  LE	shift left / shift right
 *  BE	shift right / shift left
 *
 * Extracting 16 bits from a 32-bit reg. value to higher/lower address means:
 *  LE	mask 0 0 X X / shift left
 *  BE	shift left / mask 0 0 X X
 */
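
/* Illustrative only (not part of the build): the shift-direction rule
 * above as a C sketch of what the __sh macro below implements;
 * to_higher_addr is a hypothetical name.  Moving a value toward higher
 * memory addresses within a register word is a left shift on LE and a
 * right shift on BE:
 *
 *	#include <stdint.h>
 *
 *	static uint32_t to_higher_addr(uint32_t v, unsigned bytes)
 *	{
 *	#ifdef __BIG_ENDIAN
 *		return v >> (bytes * 8);	// __sh on BE is srl
 *	#else
 *		return v << (bytes * 8);	// __sh on LE is sll
 *	#endif
 *	}
 */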

#define UNALIGNED_USER_EXCEPTION

#if XCHAL_HAVE_BE

#define HWORD_START	16
#define	INSN_OP0	28
#define	INSN_T		24
#define	INSN_OP1	16

.macro __src_b	r, w0, w1;	src	\r, \w0, \w1;	.endm
.macro __ssa8	r;		ssa8b	\r;		.endm
.macro __ssa8r	r;		ssa8l	\r;		.endm
.macro __sh	r, s;		srl	\r, \s;		.endm
.macro __sl	r, s;		sll	\r, \s;		.endm
.macro __exth	r, s;		extui	\r, \s, 0, 16;	.endm
.macro __extl	r, s;		slli	\r, \s, 16;	.endm

#else

#define HWORD_START	0
#define	INSN_OP0	0
#define	INSN_T		4
#define	INSN_OP1	12

.macro __src_b	r, w0, w1;	src	\r, \w1, \w0;	.endm
.macro __ssa8	r;		ssa8l	\r;		.endm
.macro __ssa8r	r;		ssa8b	\r;		.endm
.macro __sh	r, s;		sll	\r, \s;		.endm
.macro __sl	r, s;		srl	\r, \s;		.endm
.macro __exth	r, s;		slli	\r, \s, 16;	.endm
.macro __extl	r, s;		extui	\r, \s, 0, 16;	.endm

#endif

/*
 *	xxxx xxxx = imm8 field
 *	     yyyy = imm4 field
 *	     ssss = s field
 *	     tttt = t field
 *
 *			 16		    0
 *			  -------------------
 *	L32I.N		  yyyy ssss tttt 1000
 *	S32I.N		  yyyy ssss tttt 1001
 *
 *	       23			    0
 *		-----------------------------
 *	res	          0000           0010
 *	L16UI	xxxx xxxx 0001 ssss tttt 0010
 *	L32I	xxxx xxxx 0010 ssss tttt 0010
 *	XXX	          0011 ssss tttt 0010
 *	XXX	          0100 ssss tttt 0010
 *	S16I	xxxx xxxx 0101 ssss tttt 0010
 *	S32I	xxxx xxxx 0110 ssss tttt 0010
 *	XXX	          0111 ssss tttt 0010
 *	XXX	          1000 ssss tttt 0010
 *	L16SI	xxxx xxxx 1001 ssss tttt 0010
 *	XXX	          1010           0010
 *	**L32AI	xxxx xxxx 1011 ssss tttt 0010 unsupported
 *	XXX	          1100           0010
 *	XXX	          1101           0010
 *	XXX	          1110           0010
 *	**S32RI	xxxx xxxx 1111 ssss tttt 0010 unsupported
 *		-----------------------------
 *                           ^         ^    ^
 *    sub-opcode (NIBBLE_R) -+         |    |
 *       t field (NIBBLE_T) -----------+    |
 *  major opcode (NIBBLE_OP0) --------------+
 */

#define OP0_L32I_N	0x8		/* load immediate narrow */
#define OP0_S32I_N	0x9		/* store immediate narrow */
#define OP1_SI_MASK	0x4		/* OP1 bit set for stores */
#define OP1_SI_BIT	2		/* OP1 bit number for stores */

#define OP1_L32I	0x2
#define OP1_L16UI	0x1
#define OP1_L16SI	0x9
#define OP1_L32AI	0xb

#define OP1_S32I	0x6
#define OP1_S16I	0x5
#define OP1_S32RI	0xf
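
/* Illustrative only (not part of the build): how the fields above sit in
 * the 24-bit instruction word on a little-endian core (INSN_OP0 = 0,
 * INSN_T = 4, INSN_OP1 = 12); decode_op is a hypothetical name.
 *
 *	#include <stdint.h>
 *
 *	static int decode_op(uint32_t insn)
 *	{
 *		unsigned op0 = (insn >> 0)  & 0xf;  // major opcode nibble
 *		unsigned t   = (insn >> 4)  & 0xf;  // target/source register
 *		unsigned op1 = (insn >> 12) & 0xf;  // sub-opcode nibble
 *
 *		(void)op0; (void)t;
 *		return op1 & OP1_SI_MASK;  // nonzero for S16I/S32I/S32RI
 *	}
 *
 * On big-endian cores the same fields sit at the top of the word
 * (INSN_OP0 = 28, INSN_T = 24, INSN_OP1 = 16), hence the macros.
 */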

/*
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
 *   a3:	dispatch table
 *   depc:	a2, original value saved on stack (PT_DEPC)
 *   excsave_1:	a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	     <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */


ENTRY(fast_unaligned)

	/* Note: We don't expect the address to be aligned on a word
	 *       boundary; the processor raised this exception precisely
	 *       because it isn't.  If it were aligned, this would be a
	 *       hardware fault.
	 */

	/* Save some working registers. */

	s32i	a4, a2, PT_AREG4
	s32i	a5, a2, PT_AREG5
	s32i	a6, a2, PT_AREG6
	s32i	a7, a2, PT_AREG7
	s32i	a8, a2, PT_AREG8

	rsr	a0, DEPC
	xsr	a3, EXCSAVE_1
	s32i	a0, a2, PT_AREG2
	s32i	a3, a2, PT_AREG3

	/* Keep the value of SAR in a0. */

	rsr	a0, SAR
	rsr	a8, EXCVADDR		# load unaligned memory address

	/* Now, identify one of the following load/store instructions.
	 *
	 * The only possible danger of a double exception on the
	 * following l32i instructions is kernel code in vmalloc
	 * memory.  The processor was just executing at the EPC_1
	 * address, and indeed, already fetched the instruction.  That
	 * guarantees a TLB mapping, which hasn't been replaced by
	 * this unaligned exception handler that uses only static TLB
	 * mappings.  However, high-level interrupt handlers might
	 * modify TLB entries, so for the generic case, we register a
	 * TABLE_FIXUP handler here, too.
	 */

	/* a0 and a2...a8 are saved on the stack; a2 = SP */

	/* Extract the instruction that caused the unaligned access. */

	rsr	a7, EPC_1	# load exception address
	movi	a3, ~3
	and	a3, a3, a7	# mask lower bits

	l32i	a4, a3, 0	# load 2 words
	l32i	a5, a3, 4

	__ssa8	a7
	__src_b	a4, a4, a5	# a4 has the instruction
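
	/* Illustrative only (not part of the build): the two-word fetch
	 * above as a C sketch for a little-endian core; fetch_insn is a
	 * hypothetical name.
	 *
	 *	static uint32_t fetch_insn(uintptr_t pc)
	 *	{
	 *		const uint32_t *base = (const uint32_t *)(pc & ~3ul);
	 *		uint32_t w0 = base[0], w1 = base[1];	// two l32i
	 *		unsigned sar = (pc & 3) * 8;		// __ssa8
	 *
	 *		if (sar == 0)				// __src_b
	 *			return w0;
	 *		return (w0 >> sar) | (w1 << (32 - sar));
	 *	}
	 *
	 * The instruction ends up in the low bytes of the result, which is
	 * all the field extraction below looks at.
	 */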

	/* Analyze the instruction (load or store?). */

	extui	a5, a4, INSN_OP0, 4	# get insn.op0 nibble

#if XCHAL_HAVE_NARROW
	_beqi	a5, OP0_L32I_N, .Lload	# L32I.N, jump
	addi	a6, a5, -OP0_S32I_N
	_beqz	a6, .Lstore		# S32I.N, do a store
#endif
	/* If the 'store indicator' bit is not set, this is a load: jump. */
	_bbci.l	a4, OP1_SI_BIT + INSN_OP1, .Lload

	/* Store: Jump to table entry to get the value in the source register. */

.Lstore:movi	a5, .Lstore_table	# table
	extui	a6, a4, INSN_T, 4	# get source register
	addx8	a5, a6, a5
	jx	a5			# jump into table

	/* Invalid instruction, CRITICAL! */
.Linvalid_instruction_load:
	j	.Linvalid_instruction

	/* Load: Load memory address. */

.Lload: movi	a3, ~3
	and	a3, a3, a8		# align memory address

	__ssa8	a8
#ifdef UNALIGNED_USER_EXCEPTION
	addi	a3, a3, 8
	l32e	a5, a3, -8
	l32e	a6, a3, -4
#else
	l32i	a5, a3, 0
	l32i	a6, a3, 4
#endif
	__src_b	a3, a5, a6		# a3 has the data word

#if XCHAL_HAVE_NARROW
	addi	a7, a7, 2		# increment PC (assume 16-bit insn)

	extui	a5, a4, INSN_OP0, 4
	_beqi	a5, OP0_L32I_N, 1f	# l32i.n: jump

	addi	a7, a7, 1
#else
	addi	a7, a7, 3
#endif

	extui	a5, a4, INSN_OP1, 4
	_beqi	a5, OP1_L32I, 1f	# l32i: jump

	extui	a3, a3, 0, 16		# extract lower 16 bits
	_beqi	a5, OP1_L16UI, 1f
	addi	a5, a5, -OP1_L16SI
	_bnez	a5, .Linvalid_instruction_load

	/* sign extend value */

	slli	a3, a3, 16
	srai	a3, a3, 16

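	/* Illustrative only (not part of the build): the halfword cases in
	 * C.  For L16UI the extui above has already zero-extended; for
	 * L16SI the slli/srai pair is the usual sign-extension idiom
	 * (arithmetic right shift assumed):
	 *
	 *	static int32_t sext16(uint32_t v)
	 *	{
	 *		return (int32_t)(v << 16) >> 16;   // slli; srai
	 *	}
	 */
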
	/* Set target register. */

1:

#if XCHAL_HAVE_LOOP
	rsr	a3, LEND		# check if we reached LEND
	bne	a7, a3, 1f
	rsr	a3, LCOUNT		# and LCOUNT != 0
	beqz	a3, 1f
	addi	a3, a3, -1		# decrement LCOUNT and set
	rsr	a7, LBEG		# set PC to LBEG
	wsr	a3, LCOUNT
#endif
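
	/* Illustrative only (not part of the build): the zero-overhead-loop
	 * fixup above in C.  Hardware loops branch from LEND back to LBEG
	 * while LCOUNT != 0; since we advance the PC by hand, we must
	 * emulate that here.  next_pc is a hypothetical name.
	 *
	 *	static unsigned long next_pc(unsigned long pc,
	 *				     unsigned long lbeg,
	 *				     unsigned long lend,
	 *				     unsigned long *lcount)
	 *	{
	 *		if (pc == lend && *lcount != 0) {
	 *			(*lcount)--;		// addi a3, a3, -1
	 *			return lbeg;		// rsr a7, LBEG
	 *		}
	 *		return pc;
	 *	}
	 */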

1:	wsr	a7, EPC_1		# skip load instruction
	extui	a4, a4, INSN_T, 4	# extract target register
	movi	a5, .Lload_table
	addx8	a4, a4, a5
	jx	a4			# jump to entry for target register

	.align	8
.Lload_table:
	s32i	a3, a2, PT_AREG0;	_j .Lexit;	.align 8
	mov	a1, a3;			_j .Lexit;	.align 8 # fishy??
	s32i	a3, a2, PT_AREG2;	_j .Lexit;	.align 8
	s32i	a3, a2, PT_AREG3;	_j .Lexit;	.align 8
	s32i	a3, a2, PT_AREG4;	_j .Lexit;	.align 8
	s32i	a3, a2, PT_AREG5;	_j .Lexit;	.align 8
	s32i	a3, a2, PT_AREG6;	_j .Lexit;	.align 8
	s32i	a3, a2, PT_AREG7;	_j .Lexit;	.align 8
	s32i	a3, a2, PT_AREG8;	_j .Lexit;	.align 8
	mov	a9, a3		;	_j .Lexit;	.align 8
	mov	a10, a3		;	_j .Lexit;	.align 8
	mov	a11, a3		;	_j .Lexit;	.align 8
	mov	a12, a3		;	_j .Lexit;	.align 8
	mov	a13, a3		;	_j .Lexit;	.align 8
	mov	a14, a3		;	_j .Lexit;	.align 8
	mov	a15, a3		;	_j .Lexit;	.align 8

.Lstore_table:
	l32i	a3, a2, PT_AREG0;	_j 1f;	.align 8
	mov	a3, a1;			_j 1f;	.align 8	# fishy??
	l32i	a3, a2, PT_AREG2;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG3;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG4;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG5;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG6;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG7;	_j 1f;	.align 8
	l32i	a3, a2, PT_AREG8;	_j 1f;	.align 8
	mov	a3, a9		;	_j 1f;	.align 8
	mov	a3, a10		;	_j 1f;	.align 8
	mov	a3, a11		;	_j 1f;	.align 8
	mov	a3, a12		;	_j 1f;	.align 8
	mov	a3, a13		;	_j 1f;	.align 8
	mov	a3, a14		;	_j 1f;	.align 8
	mov	a3, a15		;	_j 1f;	.align 8

1:	# a7: instruction pointer, a4: instruction, a3: value

	movi	a6, 0			# mask: ffffffff:00000000

#if XCHAL_HAVE_NARROW
	addi	a7, a7, 2		# increment PC, assume 16-bit insn

	extui	a5, a4, INSN_OP0, 4	# extract OP0
	addi	a5, a5, -OP0_S32I_N
	_beqz	a5, 1f			# s32i.n: jump

	addi	a7, a7, 1		# increment PC, 32-bit instruction
#else
	addi	a7, a7, 3		# increment PC, 32-bit instruction
#endif

	extui	a5, a4, INSN_OP1, 4	# extract OP1
	_beqi	a5, OP1_S32I, 1f	# jump if 32-bit store
	_bnei	a5, OP1_S16I, .Linvalid_instruction_store

	movi	a5, -1
	__extl	a3, a3			# get 16-bit value
	__exth	a6, a5			# get 16-bit mask ffffffff:ffff0000
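
	/* Illustrative only (not part of the build): the S16I setup above
	 * in C on a little-endian core, where __extl is extui (zero-extend)
	 * and __exth is slli:
	 *
	 *	uint32_t value = v & 0xffff;		// __extl a3, a3
	 *	uint32_t mask  = 0xffffffffu << 16;	// __exth a6, a5
	 *
	 * For a 32-bit store, the value stays whole and the mask stays 0;
	 * the rotated lo/hi masks built below decide which bytes of the two
	 * memory words are preserved.
	 */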

	/* Get memory address */

1:
#if XCHAL_HAVE_LOOP
	rsr	a3, LEND		# check if we reached LEND
	bne	a7, a3, 1f
	rsr	a3, LCOUNT		# and LCOUNT != 0
	beqz	a3, 1f
	addi	a3, a3, -1		# decrement LCOUNT and set
	rsr	a7, LBEG		# set PC to LBEG
	wsr	a3, LCOUNT
#endif

1:	wsr	a7, EPC_1		# skip store instruction
	movi	a4, ~3
	and	a4, a4, a8		# align memory address

	/* Insert value into memory */

	movi	a5, -1			# mask: ffffffff:XXXX0000
#ifdef UNALIGNED_USER_EXCEPTION
	addi	a4, a4, 8
#endif

	__ssa8r a8
	__src_b	a7, a5, a6		# lo-mask  F..F0..0 (BE) 0..0F..F (LE)
	__src_b	a6, a6, a5		# hi-mask  0..0F..F (BE) F..F0..0 (LE)
#ifdef UNALIGNED_USER_EXCEPTION
	l32e	a5, a4, -8
#else
	l32i	a5, a4, 0		# load lower address word
#endif
	and	a5, a5, a7		# mask
	__sh	a7, a3			# shift value
	or	a5, a5, a7		# or with original value
#ifdef UNALIGNED_USER_EXCEPTION
	s32e	a5, a4, -8
	l32e	a7, a4, -4
#else
	s32i	a5, a4, 0		# store
	l32i	a7, a4, 4		# same for upper address word
#endif
	__sl	a5, a3
	and	a6, a7, a6
	or	a6, a6, a5
#ifdef UNALIGNED_USER_EXCEPTION
	s32e	a6, a4, -4
#else
	s32i	a6, a4, 4
#endif
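
	/* Illustrative only (not part of the build): the read-modify-write
	 * above in C for a 32-bit store on a little-endian core;
	 * store32_unaligned is a hypothetical name.
	 *
	 *	static void store32_unaligned(uintptr_t addr, uint32_t v)
	 *	{
	 *		uint32_t *base = (uint32_t *)(addr & ~3ul);
	 *		unsigned k = addr & 3;			// byte offset
	 *		uint32_t keep = (1u << (8 * k)) - 1;	// lo-mask
	 *
	 *		if (k == 0) {		// aligned: no exception here
	 *			base[0] = v;
	 *			return;
	 *		}
	 *		base[0] = (base[0] & keep)  | (v << (8 * k));
	 *		base[1] = (base[1] & ~keep) | (v >> (8 * (4 - k)));
	 *	}
	 */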

	/* Done.  Restore stack and return. */

.Lexit:
	movi	a4, 0
	rsr	a3, EXCSAVE_1
	s32i	a4, a3, EXC_TABLE_FIXUP

	/* Restore working registers (a8 was clobbered by EXCVADDR above). */

	l32i	a8, a2, PT_AREG8
	l32i	a7, a2, PT_AREG7
	l32i	a6, a2, PT_AREG6
	l32i	a5, a2, PT_AREG5
	l32i	a4, a2, PT_AREG4
	l32i	a3, a2, PT_AREG3

	/* Restore SAR and return. */

	wsr	a0, SAR
	l32i	a0, a2, PT_AREG0
	l32i	a2, a2, PT_AREG2
	rfe

	/* We cannot handle this exception. */

	.extern _kernel_exception
.Linvalid_instruction_store:
.Linvalid_instruction:

	/* Restore a4...a8 and SAR, set SP, and jump to default exception. */

	l32i	a8, a2, PT_AREG8
	l32i	a7, a2, PT_AREG7
	l32i	a6, a2, PT_AREG6
	l32i	a5, a2, PT_AREG5
	l32i	a4, a2, PT_AREG4
	wsr	a0, SAR
	mov	a1, a2

	rsr	a0, PS
	bbsi.l	a0, PS_UM_SHIFT, 1f	# jump if user mode (PS.UM set)

	movi	a0, _kernel_exception
	jx	a0

1:	movi	a0, _user_exception
	jx	a0


#endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */