/*
 * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v2.
 *
 * Functions to copy from and to user space.
 */

#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>

	.macro ALIGN_DESTINATION
	/* check for bad alignment of destination */
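	/*
	 * Note: %ecx below ends up as 8 - (dest & 7), i.e. the number of
	 * bytes needed to bring %rdi up to an 8-byte boundary; that many
	 * bytes are copied one at a time, and they are subtracted from the
	 * count in %edx up front.  If one of these byte accesses faults,
	 * the fixup at 103 adds the not-yet-copied alignment bytes back
	 * into %edx and hands the remainder to copy_user_handle_tail.
	 */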
	movl %edi,%ecx
	andl $7,%ecx
	jz 102f				/* already aligned */
	subl $8,%ecx
	negl %ecx
	subl %ecx,%edx
100:	movb (%rsi),%al
101:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 100b
102:
	.section .fixup,"ax"
103:	addl %ecx,%edx			/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	_ASM_EXTABLE(100b,103b)
	_ASM_EXTABLE(101b,103b)
	.endm

/* Standard copy_to_user with segment limit checking */
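/*
 * The destination range is validated before anything is copied: %rcx is
 * set to dst + len, a carry means the range wraps, and any range ending
 * above the thread's address limit (TI_addr_limit) is rejected via
 * bad_to_user.  The ALTERNATIVE_2 below is patched at boot so that the
 * jump goes to copy_user_generic_unrolled by default, to
 * copy_user_generic_string on CPUs with X86_FEATURE_REP_GOOD, or to
 * copy_user_enhanced_fast_string on CPUs with X86_FEATURE_ERMS.
 */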
ENTRY(_copy_to_user)
	CFI_STARTPROC
	GET_THREAD_INFO(%rax)
	movq %rdi,%rcx
	addq %rdx,%rcx
	jc bad_to_user
	cmpq TI_addr_limit(%rax),%rcx
	ja bad_to_user
	ALTERNATIVE_2 "jmp copy_user_generic_unrolled",		\
		      "jmp copy_user_generic_string",		\
		      X86_FEATURE_REP_GOOD,			\
		      "jmp copy_user_enhanced_fast_string",	\
		      X86_FEATURE_ERMS
	CFI_ENDPROC
ENDPROC(_copy_to_user)

/* Standard copy_from_user with segment limit checking */
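/*
 * Same range check as _copy_to_user, but applied to the source pointer
 * in %rsi; failures go to bad_from_user, which also zeroes the
 * destination buffer.
 */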
ENTRY(_copy_from_user)
	CFI_STARTPROC
	GET_THREAD_INFO(%rax)
	movq %rsi,%rcx
	addq %rdx,%rcx
	jc bad_from_user
	cmpq TI_addr_limit(%rax),%rcx
	ja bad_from_user
	ALTERNATIVE_2 "jmp copy_user_generic_unrolled",		\
		      "jmp copy_user_generic_string",		\
		      X86_FEATURE_REP_GOOD,			\
		      "jmp copy_user_enhanced_fast_string",	\
		      X86_FEATURE_ERMS
	CFI_ENDPROC
ENDPROC(_copy_from_user)

	.section .fixup,"ax"
	/* must zero dest */
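	/*
	 * A failed copy_from_user must not leave stale kernel data in the
	 * destination: %rdi still points at the (untouched) destination,
	 * so rep stosb with %al = 0 and %ecx = count clears it, and the
	 * fall-through to bad_to_user reports the full count as the
	 * number of uncopied bytes.
	 */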
ENTRY(bad_from_user)
bad_from_user:
	CFI_STARTPROC
	movl %edx,%ecx
	xorl %eax,%eax
	rep
	stosb
bad_to_user:
	movl %edx,%eax
	ret
	CFI_ENDPROC
ENDPROC(bad_from_user)
	.previous

/*
 * copy_user_generic_unrolled - memory copy with exception handling.
 * This version is for CPUs like the P4 that don't have efficient
 * microcode for rep movsq.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
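/*
 * Rough structure: counts below 8 bytes go straight to the byte loop at
 * label 20.  Otherwise the destination is aligned to 8 bytes, 64-byte
 * blocks are copied with eight quadword loads into %r8-%r11 followed by
 * eight stores (labels 1-16), leftover quadwords are copied at 18/19,
 * and the final 0-7 bytes at 21/22.  Every load and store has an
 * exception-table entry so that a fault at any point lands in a fixup
 * that recomputes the remaining byte count.
 */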
ENTRY(copy_user_generic_unrolled)
	CFI_STARTPROC
	ASM_STAC
	cmpl $8,%edx
	jb 20f		/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
	movl %edx,%ecx
	andl $63,%edx
	shrl $6,%ecx
	jz 17f
1:	movq (%rsi),%r8
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
5:	movq %r8,(%rdi)
6:	movq %r9,1*8(%rdi)
7:	movq %r10,2*8(%rdi)
8:	movq %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movq %r8,4*8(%rdi)
14:	movq %r9,5*8(%rdi)
15:	movq %r10,6*8(%rdi)
16:	movq %r11,7*8(%rdi)
	leaq 64(%rsi),%rsi
	leaq 64(%rdi),%rdi
	decl %ecx
	jnz 1b
17:	movl %edx,%ecx
	andl $7,%edx
	shrl $3,%ecx
	jz 20f
18:	movq (%rsi),%r8
19:	movq %r8,(%rdi)
	leaq 8(%rsi),%rsi
	leaq 8(%rdi),%rdi
	decl %ecx
	jnz 18b
20:	andl %edx,%edx
	jz 23f
	movl %edx,%ecx
21:	movb (%rsi),%al
22:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 21b
23:	xor %eax,%eax
	ASM_CLAC
	ret

	.section .fixup,"ax"
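	/*
	 * Fault recovery: 30 handles faults in the 64-byte loop, where
	 * %ecx still counts unfinished 64-byte blocks (including the one
	 * that faulted), so the remaining bytes are %ecx*64 + %edx.
	 * 40 handles the quadword loop (remaining = %edx + %ecx*8) and
	 * 50 the byte loop (remaining = %ecx).  All three end up at 60,
	 * which lets copy_user_handle_tail finish or clean up the rest
	 * and return the uncopied byte count in %eax.
	 */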
30:	shll $6,%ecx
	addl %ecx,%edx
	jmp 60f
40:	leal (%rdx,%rcx,8),%edx
	jmp 60f
50:	movl %ecx,%edx
60:	jmp copy_user_handle_tail /* ecx is zerorest also */
	.previous

	_ASM_EXTABLE(1b,30b)
	_ASM_EXTABLE(2b,30b)
	_ASM_EXTABLE(3b,30b)
	_ASM_EXTABLE(4b,30b)
	_ASM_EXTABLE(5b,30b)
	_ASM_EXTABLE(6b,30b)
	_ASM_EXTABLE(7b,30b)
	_ASM_EXTABLE(8b,30b)
	_ASM_EXTABLE(9b,30b)
	_ASM_EXTABLE(10b,30b)
	_ASM_EXTABLE(11b,30b)
	_ASM_EXTABLE(12b,30b)
	_ASM_EXTABLE(13b,30b)
	_ASM_EXTABLE(14b,30b)
	_ASM_EXTABLE(15b,30b)
	_ASM_EXTABLE(16b,30b)
	_ASM_EXTABLE(18b,40b)
	_ASM_EXTABLE(19b,40b)
	_ASM_EXTABLE(21b,50b)
	_ASM_EXTABLE(22b,50b)
	CFI_ENDPROC
ENDPROC(copy_user_generic_unrolled)

/* Some CPUs run faster using the string copy instructions.
 * This is also a lot simpler. Use them when possible.
 *
 * Only copies of up to 4GB are supported. This shouldn't be a problem
 * because the kernel normally only copies page-sized chunks at a time,
 * even if user space passed a longer buffer.
 * Larger copies would also be risky because both Intel and AMD have
 * errata for rep movsq with counts above 4GB. Anyone who wants to lift
 * this limit should keep those errata in mind.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
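/*
 * Rough structure: counts below 8 bytes use the trailing rep movsb
 * directly.  Otherwise the destination is aligned, %ecx = count / 8
 * quadwords are moved with rep movsq, and the remaining count % 8
 * bytes with rep movsb.
 */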
ENTRY(copy_user_generic_string)
	CFI_STARTPROC
	ASM_STAC
	cmpl $8,%edx
	jb 2f		/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
	movl %edx,%ecx
	shrl $3,%ecx
	andl $7,%edx
1:	rep
	movsq
2:	movl %edx,%ecx
3:	rep
	movsb
	xorl %eax,%eax
	ASM_CLAC
	ret

	.section .fixup,"ax"
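	/*
	 * Fault recovery: if rep movsq faults at 1, %rcx holds the
	 * quadwords it has not moved yet, so the remaining bytes are
	 * %edx + %ecx*8; if rep movsb faults at 3, %ecx already holds the
	 * remaining byte count.  Either way the count ends up in %edx for
	 * copy_user_handle_tail.
	 */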
11:	leal (%rdx,%rcx,8),%ecx
12:	movl %ecx,%edx		/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	_ASM_EXTABLE(1b,11b)
	_ASM_EXTABLE(3b,12b)
	CFI_ENDPROC
ENDPROC(copy_user_generic_string)

/*
 * Some CPUs provide enhanced REP MOVSB/STOSB instructions.
 * It is recommended to use them when the feature (ERMS) is enumerated.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
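/*
 * With ERMS a single rep movsb is expected to perform well for all sizes
 * and alignments, so the whole count is moved in one go.  rep movsb
 * decrements %rcx as it copies, so if it faults the fixup at 12 simply
 * forwards the remaining count to copy_user_handle_tail.
 */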
ENTRY(copy_user_enhanced_fast_string)
	CFI_STARTPROC
	ASM_STAC
	movl %edx,%ecx
1:	rep
	movsb
	xorl %eax,%eax
	ASM_CLAC
	ret

	.section .fixup,"ax"
12:	movl %ecx,%edx		/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	_ASM_EXTABLE(1b,12b)
	CFI_ENDPROC
ENDPROC(copy_user_enhanced_fast_string)
269