/*
 * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v2.
 *
 * Functions to copy from and to user space.
 */

#include <linux/linkage.h>
#include <asm/dwarf2.h>

#define FIX_ALIGNMENT 1

#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeature.h>

	.macro ALTERNATIVE_JUMP feature,orig,alt
0:
	.byte 0xe9	/* 32bit jump */
	.long \orig-1f	/* by default jump to orig */
1:
	.section .altinstr_replacement,"ax"
2:	.byte 0xe9			/* near jump with 32bit immediate */
	.long \alt-1b /* offset */   /* or alternatively to alt */
	.previous
	.section .altinstructions,"a"
	.align 8
	.quad  0b
	.quad  2b
	.word  \feature			/* when feature is set */
	.byte  5
	.byte  5
	.previous
	.endm
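
/*
 * Each ALTERNATIVE_JUMP expansion drops one record into .altinstructions.
 * A hedged C sketch of that record, matching the directives above (the
 * struct and field names are illustrative; the kernel's real definition
 * is struct alt_instr in <asm/alternative.h>):
 *
 *	struct alt_instr_sketch {
 *		u8  *instr;		// .quad 0b: the 5-byte jmp to patch
 *		u8  *replacement;	// .quad 2b: jmp to \alt
 *		u16  feature;		// .word \feature: X86_FEATURE_* bit
 *		u8   instrlen;		// .byte 5: length of original insn
 *		u8   replacementlen;	// .byte 5: length of replacement
 *	};
 *
 * At boot, apply_alternatives() patches the replacement jump over the
 * original when the CPU reports \feature, so the choice of copy routine
 * is made once instead of being tested on every call.
 */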

	.macro ALIGN_DESTINATION
#ifdef FIX_ALIGNMENT
	/* check for bad alignment of destination */
	movl %edi,%ecx
	andl $7,%ecx
	jz 102f				/* already aligned */
	subl $8,%ecx
	negl %ecx			/* ecx = bytes until 8-byte boundary */
	subl %ecx,%edx			/* take them off the main count */
100:	movb (%rsi),%al
101:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 100b
102:
	.section .fixup,"ax"
103:	addl %ecx,%edx			/* ecx = uncopied alignment bytes;
					   ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	.section __ex_table,"a"		/* (faulting insn, fixup) pairs */
	.align 8
	.quad 100b,103b
	.quad 101b,103b
	.previous
#endif
	.endm
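
/*
 * Worked example of the arithmetic above: for a destination ending in
 * ...011b, %ecx = dst & 7 = 3; subl $8 gives -5 and negl gives 5, the
 * number of bytes needed to reach the next 8-byte boundary.  Those 5
 * bytes are copied one at a time and subtracted from the count in %edx.
 *
 * On a fault, copy_user_handle_tail (arch/x86/lib/usercopy_64.c) takes
 * over: it byte-copies as much as it still can, zeroes the unwritten
 * destination tail when its zerorest argument (passed in %ecx here) is
 * nonzero, and returns the number of bytes left uncopied.
 */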

/* Standard copy_to_user with segment limit checking */
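/*
 * Roughly, in illustrative C (not a real kernel helper):
 *
 *	end = dst + count;
 *	if (end < dst ||				    // wrapped: jc
 *	    end >= current_thread_info()->addr_limit.seg)  // jae
 *		return count;				    // bad_to_user
 *
 * _copy_from_user below applies the same check to the source range.
 */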
ENTRY(_copy_to_user)
	CFI_STARTPROC
	GET_THREAD_INFO(%rax)
	movq %rdi,%rcx
	addq %rdx,%rcx
	jc bad_to_user
	cmpq TI_addr_limit(%rax),%rcx
	jae bad_to_user
	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
	CFI_ENDPROC
ENDPROC(_copy_to_user)

/* Standard copy_from_user with segment limit checking */
ENTRY(_copy_from_user)
	CFI_STARTPROC
	GET_THREAD_INFO(%rax)
	movq %rsi,%rcx
	addq %rdx,%rcx
	jc bad_from_user
	cmpq TI_addr_limit(%rax),%rcx
	jae bad_from_user
	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
	CFI_ENDPROC
ENDPROC(_copy_from_user)

	.section .fixup,"ax"
	/* must zero dest */
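	/*
	 * copy_from_user() must zero the destination even when the
	 * source range check fails: callers may consume the buffer
	 * regardless of the return value, and an untouched buffer
	 * could leak stale kernel data.  %edx still holds the full
	 * count, so the rep stosb below clears all of it.
	 */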
ENTRY(bad_from_user)
bad_from_user:
	CFI_STARTPROC
	movl %edx,%ecx
	xorl %eax,%eax
	rep
	stosb
bad_to_user:
	movl %edx,%eax
	ret
	CFI_ENDPROC
ENDPROC(bad_from_user)
	.previous

/*
 * copy_user_generic_unrolled - memory copy with exception handling.
 * This version is for CPUs like the P4 that don't have efficient
 * microcode for rep movsq.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
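/*
 * Seen from C, both copy variants follow the same contract (a sketch;
 * the kernel declares them in its uaccess headers):
 *
 *	unsigned long copy_user_generic_unrolled(void *to,
 *				const void *from, unsigned len);
 *
 * i.e. a memcpy that, instead of faulting, returns how many of the
 * requested len bytes could not be moved.
 */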
ENTRY(copy_user_generic_unrolled)
	CFI_STARTPROC
	cmpl $8,%edx
	jb 20f		/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
	movl %edx,%ecx
	andl $63,%edx	/* edx = bytes left after the 64-byte chunks */
	shrl $6,%ecx	/* ecx = number of 64-byte chunks */
	jz 17f
1:	movq (%rsi),%r8
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
5:	movq %r8,(%rdi)
6:	movq %r9,1*8(%rdi)
7:	movq %r10,2*8(%rdi)
8:	movq %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movq %r8,4*8(%rdi)
14:	movq %r9,5*8(%rdi)
15:	movq %r10,6*8(%rdi)
16:	movq %r11,7*8(%rdi)
	leaq 64(%rsi),%rsi
	leaq 64(%rdi),%rdi
	decl %ecx
	jnz 1b
17:	movl %edx,%ecx
	andl $7,%edx	/* edx = trailing bytes */
	shrl $3,%ecx	/* ecx = number of quadwords */
	jz 20f
18:	movq (%rsi),%r8
19:	movq %r8,(%rdi)
	leaq 8(%rsi),%rsi
	leaq 8(%rdi),%rdi
	decl %ecx
	jnz 18b
20:	andl %edx,%edx
	jz 23f
	movl %edx,%ecx
21:	movb (%rsi),%al
22:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 21b
23:	xor %eax,%eax
	ret

	.section .fixup,"ax"
30:	shll $6,%ecx	/* remaining = 64*(outstanding chunks)... */
	addl %ecx,%edx	/* ...plus the post-chunk remainder */
	jmp 60f
40:	lea (%rdx,%rcx,8),%rdx	/* remaining = 8*quads + trailing bytes */
	jmp 60f
50:	movl %ecx,%edx	/* remaining = tail byte count */
60:	jmp copy_user_handle_tail /* ecx is zerorest also */
	.previous

	.section __ex_table,"a"
	.align 8
	.quad 1b,30b
	.quad 2b,30b
	.quad 3b,30b
	.quad 4b,30b
	.quad 5b,30b
	.quad 6b,30b
	.quad 7b,30b
	.quad 8b,30b
	.quad 9b,30b
	.quad 10b,30b
	.quad 11b,30b
	.quad 12b,30b
	.quad 13b,30b
	.quad 14b,30b
	.quad 15b,30b
	.quad 16b,30b
	.quad 18b,40b
	.quad 19b,40b
	.quad 21b,50b
	.quad 22b,50b
	.previous
	CFI_ENDPROC
ENDPROC(copy_user_generic_unrolled)

/* Some CPUs run faster using the string copy instructions.
 * This is also a lot simpler. Use them when possible.
 *
 * Only 4GB of copy is supported. This shouldn't be a problem
 * because the kernel normally only writes from/to page-sized chunks
 * even if user space passed a longer buffer.
 * Supporting more would also be dangerous, because both Intel and
 * AMD have errata for rep movsq with counts above 4GB. Anyone who
 * feels the need to lift this limit should weigh those errata first.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
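/*
 * A hedged C sketch of the strategy below; rep_movsq()/rep_movsb()
 * stand in for the string instructions and are not real helpers:
 *
 *	quads = len >> 3;		// shrl $3,%ecx
 *	tail  = len & 7;		// andl $7,%edx
 *	rep_movsq(&dst, &src, quads);	// label 1: bulk copy
 *	rep_movsb(&dst, &src, tail);	// label 3: leftover bytes
 *
 * The counts live in 32-bit registers, hence the 4GB limit noted above.
 */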
ENTRY(copy_user_generic_string)
	CFI_STARTPROC
	andl %edx,%edx
	jz 4f		/* nothing to copy */
	cmpl $8,%edx
	jb 2f		/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
	movl %edx,%ecx
	shrl $3,%ecx	/* ecx = number of quadwords */
	andl $7,%edx	/* edx = trailing bytes */
1:	rep
	movsq
2:	movl %edx,%ecx
3:	rep
	movsb
4:	xorl %eax,%eax
	ret

	.section .fixup,"ax"
11:	lea (%rdx,%rcx,8),%rcx	/* remaining = 8*quads + trailing bytes */
12:	movl %ecx,%edx		/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	.section __ex_table,"a"
	.align 8
	.quad 1b,11b
	.quad 3b,12b
	.previous
	CFI_ENDPROC
ENDPROC(copy_user_generic_string)