xref: /openbmc/linux/arch/x86/lib/copy_user_64.S (revision 27cb0a75)
1/*
2 * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
3 * Copyright 2002 Andi Kleen, SuSE Labs.
4 * Subject to the GNU Public License v2.
5 *
6 * Functions to copy from and to user space.
7 */
8
9#include <linux/linkage.h>
10#include <asm/dwarf2.h>
11
12#define FIX_ALIGNMENT 1
13
14#include <asm/current.h>
15#include <asm/asm-offsets.h>
16#include <asm/thread_info.h>
17#include <asm/cpufeature.h>
18
/*
 * ALTERNATIVE_JUMP feature,orig,alt
 *
 * Emit a 5-byte near jump (0xe9 + rel32) to \orig, and record an
 * .altinstructions entry so that at boot the alternatives code patches
 * it into an equally sized jump to \alt when the CPU has \feature set.
 * Both jumps are hand-encoded so that original and replacement are
 * exactly 5 bytes each (the two length bytes below).
 */
19	.macro ALTERNATIVE_JUMP feature,orig,alt
200:
21	.byte 0xe9	/* 32bit jump */
22	.long \orig-1f	/* by default jump to orig */
231:
24	.section .altinstr_replacement,"ax"
252:	.byte 0xe9			/* near jump with 32bit immediate */
26	.long \alt-1b /* offset */   /* or alternatively to alt */
27	.previous
	/* alt_instr record: patch site, replacement, feature bit, lengths */
28	.section .altinstructions,"a"
29	.align 8
30	.quad  0b			/* address of original jump */
31	.quad  2b			/* address of replacement */
32	.byte  \feature			/* when feature is set */
33	.byte  5			/* original length (e9 + rel32) */
34	.byte  5			/* replacement length */
35	.previous
36	.endm
37
/*
 * ALIGN_DESTINATION
 *
 * Byte-copy until the destination pointer is 8-byte aligned.
 * In/out: rdi = dest, rsi = src, edx = remaining byte count (updated).
 * Clobbers ecx and al.  A fault in the alignment loop jumps to the
 * fixup, which restores the not-yet-copied alignment bytes into edx
 * and lets copy_user_handle_tail finish up byte by byte.
 */
38	.macro ALIGN_DESTINATION
39#ifdef FIX_ALIGNMENT
40	/* check for bad alignment of destination */
41	movl %edi,%ecx
42	andl $7,%ecx
43	jz 102f				/* already aligned */
44	subl $8,%ecx
45	negl %ecx			/* ecx = bytes until dest is 8-aligned */
46	subl %ecx,%edx
47100:	movb (%rsi),%al
48101:	movb %al,(%rdi)
49	incq %rsi
50	incq %rdi
51	decl %ecx
52	jnz 100b
53102:
54	.section .fixup,"ax"
	/*
	 * Fault in the alignment loop: add back the ecx alignment bytes
	 * that were not copied.  BUGFIX: the original code did
	 * "addl %r8d,%edx" here, but %r8d is never initialized on this
	 * path -- %ecx is the loop counter (see upstream commit
	 * afd962a9e870, "x86: wrong register was used in align macro").
	 */
55103:	addl %ecx,%edx			/* ecx is zerorest also */
56	jmp copy_user_handle_tail
57	.previous
58
59	.section __ex_table,"a"
60	.align 8
61	.quad 100b,103b
62	.quad 101b,103b
63	.previous
64#endif
65	.endm
66
67/* Standard copy_to_user with segment limit checking */
/*
 * In:  rdi = dest (user), rsi = src, rdx = count
 * Out: eax = number of uncopied bytes (0 on success)
 */
68ENTRY(copy_to_user)
69	CFI_STARTPROC
70	GET_THREAD_INFO(%rax)
71	movq %rdi,%rcx
72	addq %rdx,%rcx			/* rcx = dest + count */
73	jc bad_to_user			/* address wrapped around */
74	cmpq TI_addr_limit(%rax),%rcx
75	jae bad_to_user			/* dest range exceeds addr_limit */
76	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
77	CFI_ENDPROC
ENDPROC(copy_to_user)	/* was missing: sets symbol type/size like the sibling entries */
78
79/* Standard copy_from_user with segment limit checking */
/*
 * In:  rdi = dest, rsi = src (user), rdx = count
 * Out: eax = number of uncopied bytes (0 on success).
 * On a failed access check, bad_from_user zero-fills the destination.
 */
80ENTRY(copy_from_user)
81	CFI_STARTPROC
82	GET_THREAD_INFO(%rax)
83	movq %rsi,%rcx
84	addq %rdx,%rcx			/* rcx = src + count */
85	jc bad_from_user		/* address wrapped around */
86	cmpq TI_addr_limit(%rax),%rcx
87	jae bad_from_user		/* source range exceeds addr_limit */
88	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
89	CFI_ENDPROC
90ENDPROC(copy_from_user)
91
/*
 * copy_user_generic - copy rdx bytes from rsi to rdi with fault
 * handling but no access check here.  Patched at boot to jump to the
 * rep-movs variant when the CPU has X86_FEATURE_REP_GOOD.
 */
92ENTRY(copy_user_generic)
93	CFI_STARTPROC
94	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
95	CFI_ENDPROC
96ENDPROC(copy_user_generic)
97
/*
 * __copy_from_user_inatomic - copy with fault handling; note there is
 * no addr_limit check here, unlike copy_from_user above.
 * NOTE(review): presumably callers have already done access_ok() --
 * confirm against call sites.
 */
98ENTRY(__copy_from_user_inatomic)
99	CFI_STARTPROC
100	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
101	CFI_ENDPROC
102ENDPROC(__copy_from_user_inatomic)
103
104	.section .fixup,"ax"
105	/* must zero dest */
	/*
	 * bad_from_user: the source range failed the access check.  Zero
	 * the whole destination buffer (edx bytes) so the caller never
	 * sees stale data, then report the full count as uncopied.
	 * bad_to_user: the destination range failed the check; nothing
	 * was copied, just return the full count in eax.
	 */
106ENTRY(bad_from_user)
107bad_from_user:
108	CFI_STARTPROC
109	movl %edx,%ecx
110	xorl %eax,%eax
111	rep
112	stosb				/* memset(rdi, 0, edx) */
113bad_to_user:
114	movl %edx,%eax			/* eax = count = uncopied bytes */
115	ret
116	CFI_ENDPROC
117ENDPROC(bad_from_user)
118	.previous
119
120/*
121 * copy_user_generic_unrolled - memory copy with exception handling.
122 * This version is for CPUs like P4 that don't have efficient micro
123 * code for rep movsq
124 *
125 * Input:
126 * rdi destination
127 * rsi source
128 * rdx count
129 *
130 * Output:
131 * eax uncopied bytes or 0 if successful.
132 */
/*
 * Strategy: align the destination to 8 bytes, copy 64-byte chunks with
 * an 8x-unrolled qword loop, then whole qwords, then trailing bytes.
 * Any fault lands in a fixup that recomputes edx = bytes not yet
 * copied and tail-calls copy_user_handle_tail.
 */
133ENTRY(copy_user_generic_unrolled)
134	CFI_STARTPROC
135	cmpl $8,%edx
136	jb 20f		/* less than 8 bytes, go to byte copy loop */
137	ALIGN_DESTINATION
138	movl %edx,%ecx
139	andl $63,%edx			/* edx = bytes left after 64B chunks */
140	shrl $6,%ecx			/* ecx = number of 64-byte chunks */
141	jz 17f
	/* main loop: one 64-byte chunk per iteration, 4+4 qwords */
1421:	movq (%rsi),%r8
1432:	movq 1*8(%rsi),%r9
1443:	movq 2*8(%rsi),%r10
1454:	movq 3*8(%rsi),%r11
1465:	movq %r8,(%rdi)
1476:	movq %r9,1*8(%rdi)
1487:	movq %r10,2*8(%rdi)
1498:	movq %r11,3*8(%rdi)
1509:	movq 4*8(%rsi),%r8
15110:	movq 5*8(%rsi),%r9
15211:	movq 6*8(%rsi),%r10
15312:	movq 7*8(%rsi),%r11
15413:	movq %r8,4*8(%rdi)
15514:	movq %r9,5*8(%rdi)
15615:	movq %r10,6*8(%rdi)
15716:	movq %r11,7*8(%rdi)
158	leaq 64(%rsi),%rsi
159	leaq 64(%rdi),%rdi
160	decl %ecx
161	jnz 1b
	/* copy remaining whole qwords */
16217:	movl %edx,%ecx
163	andl $7,%edx			/* edx = trailing byte count */
164	shrl $3,%ecx			/* ecx = number of qwords */
165	jz 20f
16618:	movq (%rsi),%r8
16719:	movq %r8,(%rdi)
168	leaq 8(%rsi),%rsi
169	leaq 8(%rdi),%rdi
170	decl %ecx
171	jnz 18b
	/* copy trailing bytes */
17220:	andl %edx,%edx
173	jz 23f
174	movl %edx,%ecx
17521:	movb (%rsi),%al
17622:	movb %al,(%rdi)
177	incq %rsi
178	incq %rdi
179	decl %ecx
180	jnz 21b
18123:	xor %eax,%eax			/* success: 0 uncopied bytes */
182	ret
183
	/*
	 * Fault fixups -- rebuild edx = bytes not yet copied:
	 *  30: fault in 64B loop (ecx = chunks left, *64, plus leftover edx)
	 *  40: fault in qword loop (ecx = qwords left, *8, plus edx)
	 *  50: fault in byte loop (ecx = bytes left)
	 */
184	.section .fixup,"ax"
18530:	shll $6,%ecx
186	addl %ecx,%edx
187	jmp 60f
18840:	lea (%rdx,%rcx,8),%rdx
189	jmp 60f
19050:	movl %ecx,%edx
19160:	jmp copy_user_handle_tail /* ecx is zerorest also */
192	.previous
193
194	.section __ex_table,"a"
195	.align 8
196	.quad 1b,30b
197	.quad 2b,30b
198	.quad 3b,30b
199	.quad 4b,30b
200	.quad 5b,30b
201	.quad 6b,30b
202	.quad 7b,30b
203	.quad 8b,30b
204	.quad 9b,30b
205	.quad 10b,30b
206	.quad 11b,30b
207	.quad 12b,30b
208	.quad 13b,30b
209	.quad 14b,30b
210	.quad 15b,30b
211	.quad 16b,30b
212	.quad 18b,40b
213	.quad 19b,40b
214	.quad 21b,50b
215	.quad 22b,50b
216	.previous
217	CFI_ENDPROC
218ENDPROC(copy_user_generic_unrolled)
219
220/* Some CPUs run faster using the string copy instructions.
221 * This is also a lot simpler. Use them when possible.
222 *
223 * Only 4GB of copy is supported. This shouldn't be a problem
224 * because the kernel normally only writes from/to page sized chunks
225 * even if user space passed a longer buffer.
226 * And more would be dangerous because both Intel and AMD have
227 * errata with rep movsq > 4GB. If someone feels the need to fix
228 * this please consider this.
229 *
230 * Input:
231 * rdi destination
232 * rsi source
233 * rdx count
234 *
235 * Output:
236 * eax uncopied bytes or 0 if successful.
237 */
/*
 * Strategy: align the destination, rep movsq the qwords, rep movsb the
 * tail.  Counts are 32-bit (see 4GB note above).  Faults are fixed up
 * by recomputing edx = bytes left and calling copy_user_handle_tail.
 */
238ENTRY(copy_user_generic_string)
239	CFI_STARTPROC
240	andl %edx,%edx
241	jz 4f				/* nothing to copy */
242	cmpl $8,%edx
243	jb 2f		/* less than 8 bytes, go to byte copy loop */
244	ALIGN_DESTINATION
245	movl %edx,%ecx
246	shrl $3,%ecx			/* ecx = qword count for movsq */
247	andl $7,%edx			/* edx = trailing bytes for movsb */
2481:	rep
249	movsq
2502:	movl %edx,%ecx
2513:	rep
252	movsb
2534:	xorl %eax,%eax			/* success: 0 uncopied bytes */
254	ret
255
	/*
	 * Fault fixups:
	 *  11: fault in rep movsq -- rcx qwords left plus edx tail bytes
	 *  12: fault in rep movsb -- ecx bytes left
	 */
256	.section .fixup,"ax"
25711:	lea (%rdx,%rcx,8),%rcx
25812:	movl %ecx,%edx		/* ecx is zerorest also */
259	jmp copy_user_handle_tail
260	.previous
261
262	.section __ex_table,"a"
263	.align 8
264	.quad 1b,11b
265	.quad 3b,12b
266	.previous
267	CFI_ENDPROC
268ENDPROC(copy_user_generic_string)
269