xref: /openbmc/linux/arch/x86/lib/copy_user_64.S (revision c819e2cf)
/*
 * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v2.
 *
 * Functions to copy from and to user space.
 */

#include <linux/linkage.h>
#include <asm/dwarf2.h>

#define FIX_ALIGNMENT 1

#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>

/*
 * By placing feature2 after feature1 in the .altinstructions section,
 * we logically implement:
 * If CPU has feature2, jmp to alt2 is used
 * else if CPU has feature1, jmp to alt1 is used
 * else jmp to orig is used.
 */
	.macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
0:
	.byte 0xe9	/* 32bit jump */
	.long \orig-1f	/* by default jump to orig */
1:
	.section .altinstr_replacement,"ax"
2:	.byte 0xe9			/* near jump with 32bit immediate */
	.long \alt1-1b /* offset */   /* or alternatively to alt1 */
3:	.byte 0xe9			/* near jump with 32bit immediate */
	.long \alt2-1b /* offset */   /* or alternatively to alt2 */
	.previous

	.section .altinstructions,"a"
	altinstruction_entry 0b,2b,\feature1,5,5
	altinstruction_entry 0b,3b,\feature2,5,5
	.previous
	.endm
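
/*
 * Note: each jump here is 5 bytes (0xe9 plus a 32-bit displacement), which
 * is why the altinstruction entries use lengths 5,5.  The replacement
 * displacements are encoded relative to 1: (the byte after the original
 * jump) so that the replacement bytes can be copied over 0: as-is when the
 * alternative is applied.  The feature2 entry is listed after the feature1
 * entry, so it is applied later and takes priority when both features are
 * set, as described above.
 */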

	.macro ALIGN_DESTINATION
#ifdef FIX_ALIGNMENT
	/* check for bad alignment of destination */
	movl %edi,%ecx
	andl $7,%ecx
	jz 102f				/* already aligned */
	subl $8,%ecx
	negl %ecx
	subl %ecx,%edx
100:	movb (%rsi),%al
101:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 100b
102:
	.section .fixup,"ax"
103:	addl %ecx,%edx			/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	_ASM_EXTABLE(100b,103b)
	_ASM_EXTABLE(101b,103b)
#endif
	.endm
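
/*
 * Worked example (hypothetical values): if %rdi ends in ...3 and %edx is
 * 100, ALIGN_DESTINATION computes %ecx = 8 - 3 = 5, lowers %edx to 95 and
 * copies 5 bytes so that %rdi becomes 8-byte aligned before the main copy.
 * If one of those byte moves faults, the fixup at 103 adds the alignment
 * bytes still pending in %ecx back into %edx and jumps to
 * copy_user_handle_tail with the total remaining length.
 */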

/* Standard copy_to_user with segment limit checking */
ENTRY(_copy_to_user)
	CFI_STARTPROC
	GET_THREAD_INFO(%rax)
	movq %rdi,%rcx
	addq %rdx,%rcx
	jc bad_to_user
	cmpq TI_addr_limit(%rax),%rcx
	ja bad_to_user
	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS,	\
		copy_user_generic_unrolled,copy_user_generic_string,	\
		copy_user_enhanced_fast_string
	CFI_ENDPROC
ENDPROC(_copy_to_user)
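
/*
 * The two branches above are the user-range check: %rcx holds
 * destination + count, "jc" rejects a range that wraps around the address
 * space, and the compare against TI_addr_limit rejects a range that ends
 * beyond the task's address limit.  _copy_from_user below applies the same
 * check to the source pointer in %rsi.
 */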

/* Standard copy_from_user with segment limit checking */
ENTRY(_copy_from_user)
	CFI_STARTPROC
	GET_THREAD_INFO(%rax)
	movq %rsi,%rcx
	addq %rdx,%rcx
	jc bad_from_user
	cmpq TI_addr_limit(%rax),%rcx
	ja bad_from_user
	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS,	\
		copy_user_generic_unrolled,copy_user_generic_string,	\
		copy_user_enhanced_fast_string
	CFI_ENDPROC
ENDPROC(_copy_from_user)

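/*
 * Fixups for a failed range check.  A rejected copy_from_user must not
 * leave kernel memory uninitialized, so bad_from_user zeroes all %edx
 * destination bytes with "rep stosb" and then falls through to bad_to_user,
 * which reports the whole count in %eax as uncopied.  A rejected
 * copy_to_user needs no zeroing and enters at bad_to_user directly.
 */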
	.section .fixup,"ax"
	/* must zero dest */
ENTRY(bad_from_user)
bad_from_user:
	CFI_STARTPROC
	movl %edx,%ecx
	xorl %eax,%eax
	rep
	stosb
bad_to_user:
	movl %edx,%eax
	ret
	CFI_ENDPROC
ENDPROC(bad_from_user)
	.previous

/*
 * copy_user_generic_unrolled - memory copy with exception handling.
 * This version is for CPUs like the P4 that don't have efficient
 * microcode for rep movsq.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
ENTRY(copy_user_generic_unrolled)
	CFI_STARTPROC
	ASM_STAC
	cmpl $8,%edx
	jb 20f		/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
	movl %edx,%ecx
	andl $63,%edx
	shrl $6,%ecx
	jz 17f
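	/*
	 * Main loop: %ecx counts 64-byte blocks, %edx keeps the 0-63 byte
	 * remainder.  Each iteration streams 64 bytes through %r8-%r11.
	 */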
1:	movq (%rsi),%r8
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
5:	movq %r8,(%rdi)
6:	movq %r9,1*8(%rdi)
7:	movq %r10,2*8(%rdi)
8:	movq %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movq %r8,4*8(%rdi)
14:	movq %r9,5*8(%rdi)
15:	movq %r10,6*8(%rdi)
16:	movq %r11,7*8(%rdi)
	leaq 64(%rsi),%rsi
	leaq 64(%rdi),%rdi
	decl %ecx
	jnz 1b
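	/*
	 * 64-byte blocks done: copy the remaining whole quadwords, leaving
	 * the final 0-7 bytes in %edx for the byte loop below.
	 */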
17:	movl %edx,%ecx
	andl $7,%edx
	shrl $3,%ecx
	jz 20f
18:	movq (%rsi),%r8
19:	movq %r8,(%rdi)
	leaq 8(%rsi),%rsi
	leaq 8(%rdi),%rdi
	decl %ecx
	jnz 18b
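	/* Tail: copy the last 0-7 bytes one at a time. */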
20:	andl %edx,%edx
	jz 23f
	movl %edx,%ecx
21:	movb (%rsi),%al
22:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 21b
23:	xor %eax,%eax
	ASM_CLAC
	ret

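/*
 * Fault recovery: each fixup below recomputes the number of bytes still to
 * copy into %edx before handing off to copy_user_handle_tail (see
 * arch/x86/lib/usercopy_64.c), which retries the tail byte by byte and
 * returns the count that could not be copied.
 * 30: fault in the 64-byte loop; convert the remaining block count back to
 *     bytes (shll $6) and add the leftover byte count.
 * 40: fault in the quadword loop; remaining bytes = 8*%ecx + %edx.
 * 50: fault in the byte loop; %ecx already holds the remaining bytes.
 */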
	.section .fixup,"ax"
30:	shll $6,%ecx
	addl %ecx,%edx
	jmp 60f
40:	leal (%rdx,%rcx,8),%edx
	jmp 60f
50:	movl %ecx,%edx
60:	jmp copy_user_handle_tail /* ecx is zerorest also */
	.previous

	_ASM_EXTABLE(1b,30b)
	_ASM_EXTABLE(2b,30b)
	_ASM_EXTABLE(3b,30b)
	_ASM_EXTABLE(4b,30b)
	_ASM_EXTABLE(5b,30b)
	_ASM_EXTABLE(6b,30b)
	_ASM_EXTABLE(7b,30b)
	_ASM_EXTABLE(8b,30b)
	_ASM_EXTABLE(9b,30b)
	_ASM_EXTABLE(10b,30b)
	_ASM_EXTABLE(11b,30b)
	_ASM_EXTABLE(12b,30b)
	_ASM_EXTABLE(13b,30b)
	_ASM_EXTABLE(14b,30b)
	_ASM_EXTABLE(15b,30b)
	_ASM_EXTABLE(16b,30b)
	_ASM_EXTABLE(18b,40b)
	_ASM_EXTABLE(19b,40b)
	_ASM_EXTABLE(21b,50b)
	_ASM_EXTABLE(22b,50b)
	CFI_ENDPROC
ENDPROC(copy_user_generic_unrolled)

/* Some CPUs run faster using the string copy instructions.
 * This is also a lot simpler. Use them when possible.
 *
 * Only copies of up to 4GB are supported. This shouldn't be a problem
 * because the kernel normally only writes from/to page sized chunks
 * even if user space passed a longer buffer.
 * Supporting more would also be dangerous because both Intel and AMD have
 * errata with rep movsq > 4GB. Anyone who feels the need to lift this
 * limit should take those errata into account.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
ENTRY(copy_user_generic_string)
	CFI_STARTPROC
	ASM_STAC
	cmpl $8,%edx
	jb 2f		/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
	movl %edx,%ecx
	shrl $3,%ecx
	andl $7,%edx
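	/*
	 * %ecx = number of whole quadwords, %edx = trailing 0-7 bytes for
	 * the "rep movsb" below.
	 */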
1:	rep
	movsq
2:	movl %edx,%ecx
3:	rep
	movsb
	xorl %eax,%eax
	ASM_CLAC
	ret

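/*
 * Fault recovery: a fault in "rep movsq" leaves the remaining quadword
 * count in %rcx, so 11: rebuilds the byte count as 8*%ecx + %edx; a fault
 * in "rep movsb" enters at 12: with the remaining bytes already in %ecx.
 * Either way the remaining length ends up in %edx for
 * copy_user_handle_tail.
 */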
	.section .fixup,"ax"
11:	leal (%rdx,%rcx,8),%ecx
12:	movl %ecx,%edx		/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	_ASM_EXTABLE(1b,11b)
	_ASM_EXTABLE(3b,12b)
	CFI_ENDPROC
ENDPROC(copy_user_generic_string)

/*
 * Some CPUs provide enhanced REP MOVSB/STOSB instructions (ERMS).
 * It is recommended to use enhanced REP MOVSB/STOSB when the feature
 * is available.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
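
/*
 * With ERMS a single "rep movsb" is expected to perform well for all
 * lengths and alignments, so this variant needs neither ALIGN_DESTINATION
 * nor manual unrolling; on a fault %ecx already holds the number of bytes
 * left to copy for the fixup below.
 */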
ENTRY(copy_user_enhanced_fast_string)
	CFI_STARTPROC
	ASM_STAC
	movl %edx,%ecx
1:	rep
	movsb
	xorl %eax,%eax
	ASM_CLAC
	ret

	.section .fixup,"ax"
12:	movl %ecx,%edx		/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	_ASM_EXTABLE(1b,12b)
	CFI_ENDPROC
ENDPROC(copy_user_enhanced_fast_string)