/*
 * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v2.
 *
 * Functions to copy from and to user space.
 */

#include <linux/linkage.h>
#include <asm/dwarf2.h>

#define FIX_ALIGNMENT 1

#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>

/*
 * By placing feature2 after feature1 in altinstructions section, we logically
 * implement:
 * If CPU has feature2, jmp to alt2 is used
 * else if CPU has feature1, jmp to alt1 is used
 * else jmp to orig is used.
 */
	.macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
0:
	.byte 0xe9	/* 32bit jump */
	.long \orig-1f	/* by default jump to orig */
1:
	.section .altinstr_replacement,"ax"
2:	.byte 0xe9			/* near jump with 32bit immediate */
	.long \alt1-1b /* offset */   /* or alternatively to alt1 */
3:	.byte 0xe9			/* near jump with 32bit immediate */
	.long \alt2-1b /* offset */   /* or alternatively to alt2 */
	.previous

	.section .altinstructions,"a"
	altinstruction_entry 0b,2b,\feature1,5,5
	altinstruction_entry 0b,3b,\feature2,5,5
	.previous
	.endm
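/*
 * A sketch of how this resolves at boot (illustrative, not the actual
 * apply_alternatives() code): the 5-byte "jmp \orig" at 0: is the default;
 * if the CPU has \feature1 it is overwritten with the "jmp \alt1" recorded
 * at 2:, and if it also has \feature2 the later altinstructions entry wins
 * and the "jmp \alt2" from 3: is patched in instead. For the call sites
 * below this effectively yields:
 *
 *	jmp copy_user_generic_unrolled		(no REP_GOOD, no ERMS)
 *	jmp copy_user_generic_string		(REP_GOOD)
 *	jmp copy_user_enhanced_fast_string	(ERMS, wins over REP_GOOD)
 */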

	.macro ALIGN_DESTINATION
#ifdef FIX_ALIGNMENT
	/* check for bad alignment of destination */
	movl %edi,%ecx
	andl $7,%ecx
	jz 102f				/* already aligned */
	subl $8,%ecx
	negl %ecx
	subl %ecx,%edx
100:	movb (%rsi),%al
101:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 100b
102:
	.section .fixup,"ax"
103:	addl %ecx,%edx			/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	_ASM_EXTABLE(100b,103b)
	_ASM_EXTABLE(101b,103b)
#endif
	.endm
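/*
 * Worked example (illustrative): if %rdi ends in ...5, then
 * %ecx = %edi & 7 = 5; subl $8/negl turns that into 8 - 5 = 3, %edx is
 * reduced by 3, and the 100b/101b loop copies 3 bytes so that %rdi lands
 * on the next 8-byte boundary before the main copy starts.
 */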

/* Standard copy_to_user with segment limit checking */
ENTRY(_copy_to_user)
	CFI_STARTPROC
	GET_THREAD_INFO(%rax)
	movq %rdi,%rcx
	addq %rdx,%rcx
	jc bad_to_user
	cmpq TI_addr_limit(%rax),%rcx
	ja bad_to_user
	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS,	\
		copy_user_generic_unrolled,copy_user_generic_string,	\
		copy_user_enhanced_fast_string
	CFI_ENDPROC
ENDPROC(_copy_to_user)
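/*
 * Roughly equivalent C for the range check above (a sketch; the real
 * C-level callers go through access_ok()):
 *
 *	if (dst + len < dst || dst + len > current_thread_info()->addr_limit.seg)
 *		return len;
 *
 * i.e. reject a wrapping address or one beyond the thread's addr_limit,
 * reporting the whole count as uncopied via bad_to_user.
 */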

/* Standard copy_from_user with segment limit checking */
ENTRY(_copy_from_user)
	CFI_STARTPROC
	GET_THREAD_INFO(%rax)
	movq %rsi,%rcx
	addq %rdx,%rcx
	jc bad_from_user
	cmpq TI_addr_limit(%rax),%rcx
	ja bad_from_user
	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS,	\
		copy_user_generic_unrolled,copy_user_generic_string,	\
		copy_user_enhanced_fast_string
	CFI_ENDPROC
ENDPROC(_copy_from_user)

	.section .fixup,"ax"
	/* must zero dest */
ENTRY(bad_from_user)
bad_from_user:
	CFI_STARTPROC
	movl %edx,%ecx
	xorl %eax,%eax
	rep
	stosb
bad_to_user:
	movl %edx,%eax
	ret
	CFI_ENDPROC
ENDPROC(bad_from_user)
	.previous
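/*
 * Note: bad_from_user clears the whole destination (rep stosb of %edx
 * bytes) before falling through to bad_to_user, so a failed copy_from_user
 * never leaves stale kernel memory visible in the buffer; bad_to_user
 * itself only reports %edx as the uncopied count, since nothing was
 * written to user space.
 */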

/*
 * copy_user_generic_unrolled - memory copy with exception handling.
 * This version is for CPUs like P4 that don't have efficient microcode
 * for rep movsq.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
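/*
 * C-level prototype, shown here for reference (declared in
 * asm/uaccess_64.h):
 *
 *	unsigned long
 *	copy_user_generic_unrolled(void *to, const void *from, unsigned len);
 *
 * Labels 1-16 below form the 64-bytes-per-iteration unrolled loop,
 * labels 18/19 copy the remaining quadwords, labels 21/22 the remaining
 * bytes.
 */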
ENTRY(copy_user_generic_unrolled)
	CFI_STARTPROC
	cmpl $8,%edx
	jb 20f		/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
	movl %edx,%ecx
	andl $63,%edx
	shrl $6,%ecx
	jz 17f
1:	movq (%rsi),%r8
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
5:	movq %r8,(%rdi)
6:	movq %r9,1*8(%rdi)
7:	movq %r10,2*8(%rdi)
8:	movq %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movq %r8,4*8(%rdi)
14:	movq %r9,5*8(%rdi)
15:	movq %r10,6*8(%rdi)
16:	movq %r11,7*8(%rdi)
	leaq 64(%rsi),%rsi
	leaq 64(%rdi),%rdi
	decl %ecx
	jnz 1b
17:	movl %edx,%ecx
	andl $7,%edx
	shrl $3,%ecx
	jz 20f
18:	movq (%rsi),%r8
19:	movq %r8,(%rdi)
	leaq 8(%rsi),%rsi
	leaq 8(%rdi),%rdi
	decl %ecx
	jnz 18b
20:	andl %edx,%edx
	jz 23f
	movl %edx,%ecx
21:	movb (%rsi),%al
22:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 21b
23:	xor %eax,%eax
	ret

	.section .fixup,"ax"
30:	shll $6,%ecx
	addl %ecx,%edx
	jmp 60f
40:	lea (%rdx,%rcx,8),%rdx
	jmp 60f
50:	movl %ecx,%edx
60:	jmp copy_user_handle_tail /* ecx is zerorest also */
	.previous
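/*
 * Fault recovery sketch (illustrative): a fault in the 64-byte loop jumps
 * to 30:, where %ecx still counts the 64-byte chunks left, so shll/addl
 * rebuild %edx = chunks*64 + tail bytes. A fault in the quadword loop
 * jumps to 40: (%edx = tail + quadwords*8), and a fault in the byte loop
 * jumps to 50: with %ecx bytes left. copy_user_handle_tail then retries
 * the remainder one byte at a time and returns what could not be copied.
 */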

	_ASM_EXTABLE(1b,30b)
	_ASM_EXTABLE(2b,30b)
	_ASM_EXTABLE(3b,30b)
	_ASM_EXTABLE(4b,30b)
	_ASM_EXTABLE(5b,30b)
	_ASM_EXTABLE(6b,30b)
	_ASM_EXTABLE(7b,30b)
	_ASM_EXTABLE(8b,30b)
	_ASM_EXTABLE(9b,30b)
	_ASM_EXTABLE(10b,30b)
	_ASM_EXTABLE(11b,30b)
	_ASM_EXTABLE(12b,30b)
	_ASM_EXTABLE(13b,30b)
	_ASM_EXTABLE(14b,30b)
	_ASM_EXTABLE(15b,30b)
	_ASM_EXTABLE(16b,30b)
	_ASM_EXTABLE(18b,40b)
	_ASM_EXTABLE(19b,40b)
	_ASM_EXTABLE(21b,50b)
	_ASM_EXTABLE(22b,50b)
	CFI_ENDPROC
ENDPROC(copy_user_generic_unrolled)

/* Some CPUs run faster using the string copy instructions.
 * This is also a lot simpler. Use them when possible.
 *
 * Only 4GB of copy is supported. This shouldn't be a problem
 * because the kernel normally only writes from/to page-sized chunks
 * even if user space passed a longer buffer.
 * Copying more would also be dangerous because both Intel and AMD
 * have errata with rep movsq > 4GB. If someone feels the need to
 * lift this limit, please keep those errata in mind.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
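/*
 * Example (illustrative): for a 29-byte copy, %ecx = 29 >> 3 = 3 quadwords
 * for rep movsq, then %edx = 29 & 7 = 5 bytes for the trailing rep movsb.
 * The C-level prototype (declared in asm/uaccess_64.h) matches the
 * unrolled variant:
 *
 *	unsigned long
 *	copy_user_generic_string(void *to, const void *from, unsigned len);
 */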
ENTRY(copy_user_generic_string)
	CFI_STARTPROC
	andl %edx,%edx
	jz 4f
	cmpl $8,%edx
	jb 2f		/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
	movl %edx,%ecx
	shrl $3,%ecx
	andl $7,%edx
1:	rep
	movsq
2:	movl %edx,%ecx
3:	rep
	movsb
4:	xorl %eax,%eax
	ret

	.section .fixup,"ax"
11:	lea (%rdx,%rcx,8),%rcx
12:	movl %ecx,%edx		/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	_ASM_EXTABLE(1b,11b)
	_ASM_EXTABLE(3b,12b)
	CFI_ENDPROC
ENDPROC(copy_user_generic_string)

/*
 * Some CPUs provide enhanced REP MOVSB/STOSB instructions.
 * It's recommended to use them for these copies when the CPU supports
 * them (ERMS).
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
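/*
 * With ERMS a single rep movsb handles any length and alignment
 * efficiently, so no ALIGN_DESTINATION or quadword/byte split is needed
 * here. On a fault %ecx already holds the bytes left, which is passed
 * straight to copy_user_handle_tail. ALTERNATIVE_JUMP above selects this
 * routine when X86_FEATURE_ERMS is set.
 */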
ENTRY(copy_user_enhanced_fast_string)
	CFI_STARTPROC
	andl %edx,%edx
	jz 2f
	movl %edx,%ecx
1:	rep
	movsb
2:	xorl %eax,%eax
	ret

	.section .fixup,"ax"
12:	movl %ecx,%edx		/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	_ASM_EXTABLE(1b,12b)
	CFI_ENDPROC
ENDPROC(copy_user_enhanced_fast_string)