xref: /openbmc/linux/arch/riscv/lib/uaccess.S (revision f21e49be)
#include <linux/linkage.h>
#include <asm-generic/export.h>
#include <asm/asm.h>
#include <asm/csr.h>

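	/*
	 * The fixup macro wraps a single user-memory access: the access
	 * instruction is emitted at local label 100 and a matching
	 * __ex_table entry (fault address, fixup label) is recorded, so a
	 * fault on that instruction resumes execution at \lbl.
	 */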
	.macro fixup op reg addr lbl
100:
	\op \reg, \addr
	.section __ex_table,"a"
	.balign RISCV_SZPTR
	RISCV_PTR 100b, \lbl
	.previous
	.endm

ENTRY(__asm_copy_to_user)
ENTRY(__asm_copy_from_user)

	/* Enable access to user memory */
	li t6, SR_SUM
	csrs CSR_STATUS, t6

	/* Save the size so the fixup handler can return it on a fault */
	mv	t5, a2

	/*
	 * Register allocation for code below:
	 * a0 - start of uncopied dst
	 * a1 - start of uncopied src
	 * a2 - size
	 * t0 - end of uncopied dst
	 */
	add	t0, a0, a2
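	/*
	 * Overall flow: small copies go straight to .Lbyte_copy_tail.
	 * Otherwise, byte-copy until dst is SZREG-aligned, copy whole words
	 * (unrolled when src is aligned too, shifting otherwise), then
	 * byte-copy whatever is left.
	 */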

	/*
	 * Use byte copy only if too small.
	 * SZREG holds 4 for RV32 and 8 for RV64
	 */
	li	a3, 9*SZREG /* the copy must be at least this large to use word_copy */
	bltu	a2, a3, .Lbyte_copy_tail

	/*
	 * Copy first bytes until dst is aligned to word boundary.
	 * a0 - start of dst
	 * t1 - start of aligned dst
	 */
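	/*
	 * Round dst up to the next SZREG boundary:
	 * t1 = (a0 + SZREG-1) & ~(SZREG-1), e.g. 0x1003 -> 0x1008 for SZREG == 8.
	 */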
	addi	t1, a0, SZREG-1
	andi	t1, t1, ~(SZREG-1)
	/* dst is already aligned, skip */
	beq	a0, t1, .Lskip_align_dst
1:
	/* a5 - one byte for copying data */
	fixup lb      a5, 0(a1), 10f
	addi	a1, a1, 1	/* src */
	fixup sb      a5, 0(a0), 10f
	addi	a0, a0, 1	/* dst */
	bltu	a0, t1, 1b	/* t1 - start of aligned dst */

.Lskip_align_dst:
	/*
	 * Now dst is aligned.
	 * Use shift-copy if src is misaligned.
	 * Use word-copy if both src and dst are aligned, since in that
	 * case no shifting is required.
	 */
	/* a1 - start of src */
	andi	a3, a1, SZREG-1	/* a3 - src offset within a word */
	bnez	a3, .Lshift_copy

.Lword_copy:
	/*
	 * Both src and dst are aligned, unrolled word copy
	 *
	 * a0 - start of aligned dst
	 * a1 - start of aligned src
	 * t0 - end of aligned dst
	 */
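	/*
	 * Each iteration moves 8*SZREG bytes.  t0 is pulled back by one
	 * full iteration so the loop exits before it can run past the end;
	 * the remaining bytes are handled by .Lbyte_copy_tail.
	 */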
	addi	t0, t0, -(8*SZREG) /* stop one full iteration short of the end */
2:
	fixup REG_L   a4,        0(a1), 10f
	fixup REG_L   a5,    SZREG(a1), 10f
	fixup REG_L   a6,  2*SZREG(a1), 10f
	fixup REG_L   a7,  3*SZREG(a1), 10f
	fixup REG_L   t1,  4*SZREG(a1), 10f
	fixup REG_L   t2,  5*SZREG(a1), 10f
	fixup REG_L   t3,  6*SZREG(a1), 10f
	fixup REG_L   t4,  7*SZREG(a1), 10f
	fixup REG_S   a4,        0(a0), 10f
	fixup REG_S   a5,    SZREG(a0), 10f
	fixup REG_S   a6,  2*SZREG(a0), 10f
	fixup REG_S   a7,  3*SZREG(a0), 10f
	fixup REG_S   t1,  4*SZREG(a0), 10f
	fixup REG_S   t2,  5*SZREG(a0), 10f
	fixup REG_S   t3,  6*SZREG(a0), 10f
	fixup REG_S   t4,  7*SZREG(a0), 10f
	addi	a0, a0, 8*SZREG
	addi	a1, a1, 8*SZREG
	bltu	a0, t0, 2b

	addi	t0, t0, 8*SZREG /* revert to original value */
	j	.Lbyte_copy_tail

.Lshift_copy:

	/*
	 * Word copy with shifting.
	 * For a misaligned src we still perform aligned word accesses, but
	 * each stored word combines the value fetched in the previous
	 * iteration with the one fetched in the current iteration.
	 * This is safe because the extra bytes read lie in the same aligned
	 * word (and hence the same page) as bytes that must be read anyway.
	 *
	 * a0 - start of aligned dst
	 * a1 - start of src
	 * a3 - src offset within a word (a1 & (SZREG-1))
	 * t0 - end of uncopied dst
	 * t1 - end of aligned dst
	 */
	/* Calculate the aligned word boundary for dst */
	andi	t1, t0, ~(SZREG-1)
	/* Round src down to an aligned address */
	andi	a1, a1, ~(SZREG-1)

	/*
	 * Calculate shifts
	 * t3 - prev shift
	 * t4 - current shift
	 */
	slli	t3, a3, 3 /* converting bytes in a3 to bits */
	li	a5, SZREG*8
	sub	t4, a5, t3
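	/*
	 * Example with SZREG == 8 and a3 == 3: t3 == 24 and t4 == 40.
	 * Each stored word is built from the upper 5 bytes of the
	 * previously loaded src word (a5 >> 24) and the lower 3 bytes of
	 * the newly loaded one (a5 << 40).
	 */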

	/* Load the first word to combine with second word */
	fixup REG_L   a5, 0(a1), 10f
3:
	/* Main shifting copy
	 *
	 * a0 - start of aligned dst
	 * a1 - start of aligned src
	 * t1 - end of aligned dst
	 */

	/* At least one iteration will be executed */
	srl	a4, a5, t3	/* a4 - not yet stored bytes of the previous src word */
	fixup REG_L   a5, SZREG(a1), 10f
	addi	a1, a1, SZREG
	sll	a2, a5, t4	/* a2 - leading bytes of the new src word */
	or	a2, a2, a4	/* combine into one full dst word */
	fixup REG_S   a2, 0(a0), 10f
	addi	a0, a0, SZREG
	bltu	a0, t1, 3b

	/* Revert src to original unaligned value */
	add	a1, a1, a3

.Lbyte_copy_tail:
	/*
	 * Byte copy anything left.
	 *
	 * a0 - start of remaining dst
	 * a1 - start of remaining src
	 * t0 - end of remaining dst
	 */
	bgeu	a0, t0, .Lout_copy_user  /* check if end of copy */
4:
	fixup lb      a5, 0(a1), 10f
	addi	a1, a1, 1	/* src */
	fixup sb      a5, 0(a0), 10f
	addi	a0, a0, 1	/* dst */
	bltu	a0, t0, 4b	/* t0 - end of dst */

.Lout_copy_user:
	/* Disable access to user memory */
	csrc CSR_STATUS, t6
	li	a0, 0	/* success: no bytes left uncopied */
	ret
ENDPROC(__asm_copy_to_user)
ENDPROC(__asm_copy_from_user)
EXPORT_SYMBOL(__asm_copy_to_user)
EXPORT_SYMBOL(__asm_copy_from_user)


ENTRY(__clear_user)

	/* Enable access to user memory */
	li t6, SR_SUM
	csrs CSR_STATUS, t6

	add a3, a0, a1
	addi t0, a0, SZREG-1
	andi t1, a3, ~(SZREG-1)
	andi t0, t0, ~(SZREG-1)
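	/*
	 * Example with SZREG == 8, a0 == 0x1003 and a1 == 0x20:
	 * a3 == 0x1023, t0 == 0x1008, t1 == 0x1020.
	 */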
	/*
	 * a3: terminal address of target region
	 * t0: lowest SZREG-aligned address in target region
	 * t1: highest SZREG-aligned address in target region
	 */
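	/*
	 * Clear the unaligned head [a0, t0) byte by byte at 4, the aligned
	 * middle [t0, t1) one register at a time at 1, and the tail
	 * [t1, a3) byte by byte at 5.  If the region contains no aligned
	 * word at all, everything is cleared by the byte loop at 5.
	 */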
	bgeu t0, t1, 2f
	bltu a0, t0, 4f
1:
	fixup REG_S, zero, (a0), 11f
	addi a0, a0, SZREG
	bltu a0, t1, 1b
2:
	bltu a0, a3, 5f

3:
	/* Disable access to user memory */
	csrc CSR_STATUS, t6
	li a0, 0
	ret
4: /* Edge case: unaligned head */
	fixup sb, zero, (a0), 11f
	addi a0, a0, 1
	bltu a0, t0, 4b
	j 1b
5: /* Edge case: trailing bytes */
	fixup sb, zero, (a0), 11f
	addi a0, a0, 1
	bltu a0, a3, 5b
	j 3b
ENDPROC(__clear_user)
EXPORT_SYMBOL(__clear_user)

	.section .fixup,"ax"
	.balign 4
	/* Fixup code for __copy_user(10) and __clear_user(11) */
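	/*
	 * Both handlers report the whole request as failed: label 10
	 * returns the byte count saved in t5 on entry to
	 * __asm_copy_to/from_user, label 11 returns the size argument (a1)
	 * of __clear_user, which is never modified above.
	 */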
10:
	/* Disable access to user memory */
	csrc CSR_STATUS, t6
	mv a0, t5
	ret
11:
	/* Disable access to user memory */
	csrc CSR_STATUS, t6
	mv a0, a1
	ret
	.previous