xref: /openbmc/linux/arch/riscv/lib/uaccess.S (revision aa0dc6a7)
1#include <linux/linkage.h>
2#include <asm-generic/export.h>
3#include <asm/asm.h>
4#include <asm/csr.h>
5
/*
	 * fixup op reg addr lbl
	 *
	 * Emit a single user-memory access plus its exception-table entry.
	 * If the access at local label 100 faults, the trap handler looks up
	 * __ex_table and resumes execution at \lbl instead of oopsing.
	 *
	 *   op   - access mnemonic (lb, sb, REG_L, REG_S, ...)
	 *   reg  - data register operand
	 *   addr - memory operand, e.g. 0(a1)
	 *   lbl  - fixup label jumped to when the access faults
	 */
	.macro fixup op reg addr lbl
100:
	\op \reg, \addr
	.section __ex_table,"a"
	.balign RISCV_SZPTR
	RISCV_PTR 100b, \lbl
	.previous
	.endm
14
ENTRY(__asm_copy_to_user)
ENTRY(__asm_copy_from_user)

	/* Enable access to user memory (set SSTATUS.SUM) */
	li	t6, SR_SUM
	csrs	CSR_STATUS, t6

	/*
	 * Save the requested size: the fault handler (10f) returns it as
	 * "bytes not copied" for the whole transfer.
	 */
	mv	t5, a2

	/*
	 * Register allocation for code below:
	 * a0 - start of uncopied dst
	 * a1 - start of uncopied src
	 * a2 - size
	 * t0 - end of uncopied dst
	 */
	add	t0, a0, a2
	bgtu	a0, t0, 5f		/* overflow: nothing sensible to copy */

	/*
	 * Use byte copy only if too small.
	 * SZREG holds 4 for RV32 and 8 for RV64.
	 * The threshold must be 9*SZREG, not 8*SZREG: the alignment loop
	 * below may consume up to SZREG-1 bytes, and word_copy executes its
	 * first 8*SZREG-byte iteration unconditionally.  With an 8*SZREG
	 * threshold as few as 7*SZREG+1 bytes could remain, and that first
	 * iteration would overrun dst/src by up to SZREG-1 bytes.
	 */
	li	a3, 9*SZREG	/* size must cover word_copy's first pass */
	bltu	a2, a3, .Lbyte_copy_tail

	/*
	 * Copy first bytes until dst is aligned to word boundary.
	 * a0 - start of dst
	 * t1 - start of aligned dst
	 */
	addi	t1, a0, SZREG-1
	andi	t1, t1, ~(SZREG-1)
	/* dst is already aligned, skip */
	beq	a0, t1, .Lskip_first_bytes
1:
	/* a5 - one byte for copying data */
	fixup lb      a5, 0(a1), 10f
	addi	a1, a1, 1	/* src */
	fixup sb      a5, 0(a0), 10f
	addi	a0, a0, 1	/* dst */
	bltu	a0, t1, 1b	/* t1 - start of aligned dst */

.Lskip_first_bytes:
	/*
	 * Now dst is aligned.
	 * Use shift-copy if src is misaligned.
	 * Use word-copy if both src and dst are aligned because
	 * word-copy does not require shifting.
	 */
	/* a1 - start of src */
	andi	a3, a1, SZREG-1
	bnez	a3, .Lshift_copy

.Lword_copy:
	/*
	 * Both src and dst are aligned, unrolled word copy
	 *
	 * a0 - start of aligned dst
	 * a1 - start of aligned src
	 * a3 - a1 & mask:(SZREG-1)
	 * t0 - end of aligned dst
	 */
	addi	t0, t0, -(8*SZREG) /* not to over run */
2:
	fixup REG_L   a4,        0(a1), 10f
	fixup REG_L   a5,    SZREG(a1), 10f
	fixup REG_L   a6,  2*SZREG(a1), 10f
	fixup REG_L   a7,  3*SZREG(a1), 10f
	fixup REG_L   t1,  4*SZREG(a1), 10f
	fixup REG_L   t2,  5*SZREG(a1), 10f
	fixup REG_L   t3,  6*SZREG(a1), 10f
	fixup REG_L   t4,  7*SZREG(a1), 10f
	fixup REG_S   a4,        0(a0), 10f
	fixup REG_S   a5,    SZREG(a0), 10f
	fixup REG_S   a6,  2*SZREG(a0), 10f
	fixup REG_S   a7,  3*SZREG(a0), 10f
	fixup REG_S   t1,  4*SZREG(a0), 10f
	fixup REG_S   t2,  5*SZREG(a0), 10f
	fixup REG_S   t3,  6*SZREG(a0), 10f
	fixup REG_S   t4,  7*SZREG(a0), 10f
	addi	a0, a0, 8*SZREG
	addi	a1, a1, 8*SZREG
	bltu	a0, t0, 2b

	addi	t0, t0, 8*SZREG /* revert to original value */
	j	.Lbyte_copy_tail

.Lshift_copy:

	/*
	 * Word copy with shifting.
	 * For misaligned copy we still perform aligned word copy, but
	 * we need to use the value fetched from the previous iteration and
	 * do some shifts.
	 * This is safe because reading is less than a word size.
	 *
	 * a0 - start of aligned dst
	 * a1 - start of src
	 * a3 - a1 & mask:(SZREG-1)
	 * t0 - end of uncopied dst
	 * t1 - end of aligned dst
	 */
	/* calculating aligned word boundary for dst */
	andi	t1, t0, ~(SZREG-1)
	/* Converting unaligned src to aligned src */
	andi	a1, a1, ~(SZREG-1)

	/*
	 * Calculate shifts
	 * t3 - prev shift
	 * t4 - current shift
	 */
	slli	t3, a3, 3 /* byte offset -> bit count; LGREG (2 on RV32) would be wrong */
	li	a5, SZREG*8
	sub	t4, a5, t3

	/* Load the first word to combine with second word */
	fixup REG_L   a5, 0(a1), 10f

3:
	/* Main shifting copy
	 *
	 * a0 - start of aligned dst
	 * a1 - start of aligned src
	 * t1 - end of aligned dst
	 */

	/* At least one iteration will be executed */
	srl	a4, a5, t3
	fixup REG_L   a5, SZREG(a1), 10f
	addi	a1, a1, SZREG
	sll	a2, a5, t4
	or	a2, a2, a4
	fixup REG_S   a2, 0(a0), 10f
	addi	a0, a0, SZREG
	bltu	a0, t1, 3b

	/* Revert src to original unaligned value  */
	add	a1, a1, a3

.Lbyte_copy_tail:
	/*
	 * Byte copy anything left.
	 *
	 * a0 - start of remaining dst
	 * a1 - start of remaining src
	 * t0 - end of remaining dst
	 */
	bgeu	a0, t0, 5f
4:
	fixup lb      a5, 0(a1), 10f
	addi	a1, a1, 1	/* src */
	fixup sb      a5, 0(a0), 10f
	addi	a0, a0, 1	/* dst */
	bltu	a0, t0, 4b	/* t0 - end of dst */

5:
	/* Disable access to user memory */
	csrc CSR_STATUS, t6
	li	a0, 0		/* success: 0 bytes left uncopied */
	ret
ENDPROC(__asm_copy_to_user)
ENDPROC(__asm_copy_from_user)
EXPORT_SYMBOL(__asm_copy_to_user)
EXPORT_SYMBOL(__asm_copy_from_user)
181
182
ENTRY(__clear_user)

	/*
	 * unsigned long __clear_user(void __user *addr (a0), unsigned long n (a1))
	 * Zero-fills n bytes of user memory; returns 0 on success.
	 * On a fault the handler at 11f reports the size as "bytes not cleared".
	 */

	/* Enable access to user memory (set SSTATUS.SUM) */
	li t6, SR_SUM
	csrs CSR_STATUS, t6

	add a3, a0, a1
	addi t0, a0, SZREG-1
	andi t1, a3, ~(SZREG-1)
	andi t0, t0, ~(SZREG-1)
	/*
	 * a3: terminal address of target region
	 * t0: lowest SZREG-aligned address in target region
	 * t1: highest SZREG-aligned address in target region
	 */
	bgeu t0, t1, 2f		/* no aligned middle: byte-clear everything */
	bltu a0, t0, 4f		/* unaligned head bytes first */
1:
	/* Clear the aligned middle one register-width store at a time */
	fixup REG_S, zero, (a0), 11f
	addi a0, a0, SZREG
	bltu a0, t1, 1b
2:
	bltu a0, a3, 5f		/* tail bytes remain */

3:
	/* Disable access to user memory */
	csrc CSR_STATUS, t6
	li a0, 0		/* success */
	ret
4: /* Edge case: unaligned head, clear byte-wise up to t0 */
	fixup sb, zero, (a0), 11f
	addi a0, a0, 1
	bltu a0, t0, 4b
	j 1b
5: /* Edge case: remainder, clear byte-wise up to a3 */
	fixup sb, zero, (a0), 11f
	addi a0, a0, 1
	bltu a0, a3, 5b
	j 3b
ENDPROC(__clear_user)
EXPORT_SYMBOL(__clear_user)
224
.section .fixup,"ax"
	.balign 4
	/* Fixup code for __copy_user(10) and __clear_user(11) */
10:
	/*
	 * Disable access to user memory.
	 * Must be csrc (clear SUM), matching the normal exit paths; the
	 * previous csrs *set* SUM, leaving user access enabled after a fault.
	 */
	csrc CSR_STATUS, t6
	mv a0, t5	/* t5 = saved size: report whole copy as failed */
	ret
11:
	/* Disable access to user memory (csrc, not csrs — see above) */
	csrc CSR_STATUS, t6
	mv a0, a1	/* a1 still holds size: report nothing cleared */
	ret
	.previous
238