/* arch/riscv/lib/uaccess.S (revision 2cf1c348) */
#include <linux/linkage.h>
#include <asm-generic/export.h>
#include <asm/asm.h>
#include <asm/asm-extable.h>
#include <asm/csr.h>

	.macro fixup op reg addr lbl
100:
	\op \reg, \addr
	_asm_extable	100b, \lbl
	.endm

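	/*
	 * Example expansion (illustrative): "fixup lb a5, 0(a1), 10f"
	 * emits
	 *
	 *	100:	lb	a5, 0(a1)
	 *		_asm_extable	100b, 10f
	 *
	 * i.e. the potentially faulting access plus an exception-table
	 * entry pairing it with local label 10, so a fault at 100:
	 * resumes at the fixup code instead of oopsing.
	 */
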
ENTRY(__asm_copy_to_user)
ENTRY(__asm_copy_from_user)

	/* Enable access to user memory */
	li t6, SR_SUM
	csrs CSR_STATUS, t6
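	/*
	 * Note: SR_SUM is the sstatus.SUM bit ("permit Supervisor User
	 * Memory access"); while it is set, S-mode loads and stores to
	 * user pages are allowed instead of faulting.
	 */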

	/* Save the size so the fault path can report it */
	mv	t5, a2

	/*
	 * Register allocation for code below:
	 * a0 - start of uncopied dst
	 * a1 - start of uncopied src
	 * a2 - size
	 * t0 - end of uncopied dst
	 */
	add	t0, a0, a2

	/*
	 * Use byte copy if the size is too small.
	 * SZREG holds 4 for RV32 and 8 for RV64.
	 */
	li	a3, 9*SZREG /* at least one unrolled word_copy pass plus alignment slack */
	bltu	a2, a3, .Lbyte_copy_tail
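	/*
	 * Worked example (RV64, SZREG = 8): the threshold is 9*SZREG = 72
	 * bytes, enough for up to 7 bytes of dst alignment plus one full
	 * 64-byte word_copy iteration; anything smaller is byte-copied.
	 */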

	/*
	 * Copy the first bytes until dst is aligned to a word boundary.
	 * a0 - start of dst
	 * t1 - start of aligned dst
	 */
	addi	t1, a0, SZREG-1
	andi	t1, t1, ~(SZREG-1)
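	/*
	 * e.g. with a0 = 0x1003 and SZREG = 8: 0x1003 + 7 = 0x100a and
	 * 0x100a & ~7 = 0x1008, i.e. t1 is a0 rounded up to the next
	 * word boundary.
	 */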
	/* dst is already aligned, skip */
	beq	a0, t1, .Lskip_align_dst
1:
	/* a5 - one byte for copying data */
	fixup lb      a5, 0(a1), 10f
	addi	a1, a1, 1	/* src */
	fixup sb      a5, 0(a0), 10f
	addi	a0, a0, 1	/* dst */
	bltu	a0, t1, 1b	/* t1 - start of aligned dst */

.Lskip_align_dst:
	/*
	 * Now dst is aligned.
	 * Use shift-copy if src is misaligned.
	 * Use word-copy if both src and dst are aligned, since the
	 * extra shifting done by shift-copy is unnecessary there.
	 */
	/* a1 - start of src */
	andi	a3, a1, SZREG-1
	bnez	a3, .Lshift_copy

.Lword_copy:
	/*
	 * Both src and dst are aligned: unrolled word copy.
	 *
	 * a0 - start of aligned dst
	 * a1 - start of aligned src
	 * t0 - end of aligned dst
	 */
	addi	t0, t0, -(8*SZREG) /* bias the limit so the unrolled loop cannot overrun */
2:
	fixup REG_L   a4,        0(a1), 10f
	fixup REG_L   a5,    SZREG(a1), 10f
	fixup REG_L   a6,  2*SZREG(a1), 10f
	fixup REG_L   a7,  3*SZREG(a1), 10f
	fixup REG_L   t1,  4*SZREG(a1), 10f
	fixup REG_L   t2,  5*SZREG(a1), 10f
	fixup REG_L   t3,  6*SZREG(a1), 10f
	fixup REG_L   t4,  7*SZREG(a1), 10f
	fixup REG_S   a4,        0(a0), 10f
	fixup REG_S   a5,    SZREG(a0), 10f
	fixup REG_S   a6,  2*SZREG(a0), 10f
	fixup REG_S   a7,  3*SZREG(a0), 10f
	fixup REG_S   t1,  4*SZREG(a0), 10f
	fixup REG_S   t2,  5*SZREG(a0), 10f
	fixup REG_S   t3,  6*SZREG(a0), 10f
	fixup REG_S   t4,  7*SZREG(a0), 10f
	addi	a0, a0, 8*SZREG
	addi	a1, a1, 8*SZREG
	bltu	a0, t0, 2b
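	/*
	 * Illustrative C-equivalent of the unrolled loop above (a sketch,
	 * not part of the build; t0 is already biased by -8*SZREG):
	 *
	 *	unsigned long *d = (unsigned long *)a0;
	 *	unsigned long *s = (unsigned long *)a1;
	 *	while ((unsigned long)d < t0) {
	 *		unsigned long w[8];
	 *		for (int i = 0; i < 8; i++)	// eight loads first,
	 *			w[i] = s[i];
	 *		for (int i = 0; i < 8; i++)	// then eight stores
	 *			d[i] = w[i];
	 *		d += 8;
	 *		s += 8;
	 *	}
	 */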

	addi	t0, t0, 8*SZREG /* revert to original value */
	j	.Lbyte_copy_tail

.Lshift_copy:

	/*
	 * Word copy with shifting.
	 * For a misaligned copy we still perform word-sized loads, but
	 * from the word-aligned address below src, combining each value
	 * fetched in the previous iteration with the current one via
	 * shifts.
	 * This is safe because the rounded-down load never strays more
	 * than a word from the requested range.
	 *
	 * a0 - start of aligned dst
	 * a1 - start of src
	 * a3 - a1 & (SZREG-1), the misalignment of src
	 * t0 - end of uncopied dst
	 * t1 - end of aligned dst
	 */
	/* calculate the aligned word boundary for dst */
	andi	t1, t0, ~(SZREG-1)
	/* convert unaligned src to aligned src */
	andi	a1, a1, ~(SZREG-1)

	/*
	 * Calculate the shifts:
	 * t3 - prev shift
	 * t4 - current shift
	 */
	slli	t3, a3, 3 /* convert the byte count in a3 to bits */
	li	a5, SZREG*8
	sub	t4, a5, t3
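	/*
	 * Worked example (RV64): if src is misaligned by a3 = 3 bytes,
	 * then t3 = 24 and t4 = 64 - 24 = 40, and each output word is
	 * assembled as (prev >> 24) | (next << 40).
	 */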

	/* Load the first word to combine with the second word */
	fixup REG_L   a5, 0(a1), 10f

3:
	/*
	 * Main shifting copy
	 *
	 * a0 - start of aligned dst
	 * a1 - start of aligned src
	 * t1 - end of aligned dst
	 */

	/* At least one iteration will be executed */
	srl	a4, a5, t3
	fixup REG_L   a5, SZREG(a1), 10f
	addi	a1, a1, SZREG
	sll	a2, a5, t4
	or	a2, a2, a4
	fixup REG_S   a2, 0(a0), 10f
	addi	a0, a0, SZREG
	bltu	a0, t1, 3b
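	/*
	 * Illustrative C-equivalent of the loop above (a sketch, not part
	 * of the build; prev is the word preloaded into a5):
	 *
	 *	do {
	 *		unsigned long lo = prev >> t3;	// tail of prev word
	 *		prev = *++s;			// next aligned word
	 *		*d++ = lo | (prev << t4);	// head of next word
	 *	} while ((unsigned long)d < t1);
	 */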

	/* Revert src to original unaligned value */
	add	a1, a1, a3

.Lbyte_copy_tail:
	/*
	 * Byte copy anything left.
	 *
	 * a0 - start of remaining dst
	 * a1 - start of remaining src
	 * t0 - end of remaining dst
	 */
	bgeu	a0, t0, .Lout_copy_user  /* check if end of copy */
4:
	fixup lb      a5, 0(a1), 10f
	addi	a1, a1, 1	/* src */
	fixup sb      a5, 0(a0), 10f
	addi	a0, a0, 1	/* dst */
	bltu	a0, t0, 4b	/* t0 - end of dst */

.Lout_copy_user:
	/* Disable access to user memory */
	csrc CSR_STATUS, t6
	li	a0, 0
	ret

	/* Exception fixup code */
10:
	/* Disable access to user memory */
	csrc CSR_STATUS, t6
	mv a0, t5	/* return the saved size: report all bytes as uncopied */
	ret
ENDPROC(__asm_copy_to_user)
ENDPROC(__asm_copy_from_user)
EXPORT_SYMBOL(__asm_copy_to_user)
EXPORT_SYMBOL(__asm_copy_from_user)

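	/*
	 * Calling convention recap (a sketch; the declarations live in the
	 * arch uaccess headers): a0 = dst, a1 = src, a2 = size, and the
	 * return value is the number of bytes not copied (0 on success).
	 * Roughly:
	 *
	 *	unsigned long __asm_copy_from_user(void *to,
	 *			const void __user *from, unsigned long n);
	 *	unsigned long __asm_copy_to_user(void __user *to,
	 *			const void *from, unsigned long n);
	 *
	 * Note that on a fault this implementation reports the whole saved
	 * size as uncopied rather than the exact remainder.
	 */
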
ENTRY(__clear_user)

	/* Enable access to user memory */
	li t6, SR_SUM
	csrs CSR_STATUS, t6

	add a3, a0, a1
	addi t0, a0, SZREG-1
	andi t1, a3, ~(SZREG-1)
	andi t0, t0, ~(SZREG-1)
	/*
	 * a3: terminal address of target region
	 * t0: lowest SZREG-aligned address in target region
	 * t1: highest SZREG-aligned address in target region
	 */
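	/*
	 * Worked example (RV64): with a0 = 0x1003 and a1 = 0x22, a3 =
	 * 0x1025, t0 = 0x1008 and t1 = 0x1020; bytes 0x1003-0x1007 and
	 * 0x1020-0x1024 go through the byte loops, the rest through the
	 * word loop.
	 */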
	bgeu t0, t1, 2f
	bltu a0, t0, 4f
1:
	fixup REG_S, zero, (a0), 11f
	addi a0, a0, SZREG
	bltu a0, t1, 1b
2:
	bltu a0, a3, 5f

3:
	/* Disable access to user memory */
	csrc CSR_STATUS, t6
	li a0, 0
	ret
4: /* Edge case: unaligned head bytes */
	fixup sb, zero, (a0), 11f
	addi a0, a0, 1
	bltu a0, t0, 4b
	j 1b
5: /* Edge case: trailing remainder */
	fixup sb, zero, (a0), 11f
	addi a0, a0, 1
	bltu a0, a3, 5b
	j 3b
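
	/*
	 * Illustrative, functionally equivalent C sketch of __clear_user
	 * (not part of the build):
	 *
	 *	unsigned long p = a0, end = a0 + a1;
	 *	unsigned long lo = (p + SZREG - 1) & ~(SZREG - 1UL);	// t0
	 *	unsigned long hi = end & ~(SZREG - 1UL);		// t1
	 *	while (p < lo && p < end)	// unaligned head (label 4)
	 *		*(char *)p++ = 0;
	 *	for (; p < hi; p += SZREG)	// aligned body (label 1)
	 *		*(unsigned long *)p = 0;
	 *	while (p < end)			// remainder (label 5)
	 *		*(char *)p++ = 0;
	 */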

	/* Exception fixup code */
11:
	/* Disable access to user memory */
	csrc CSR_STATUS, t6
	mv a0, a1	/* return the original size: report all bytes as uncleared */
	ret
ENDPROC(__clear_user)
EXPORT_SYMBOL(__clear_user)