xref: /openbmc/linux/arch/x86/lib/copy_user_64.S (revision 9f0cf4ad)
/*
 * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v2.
 *
 * Functions to copy from and to user space.
 */

#include <linux/linkage.h>
#include <asm/dwarf2.h>

#define FIX_ALIGNMENT 1

#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeature.h>

	.macro ALTERNATIVE_JUMP feature,orig,alt
0:
	.byte 0xe9	/* 32bit jump */
	.long \orig-1f	/* by default jump to orig */
1:
	.section .altinstr_replacement,"ax"
2:	.byte 0xe9			/* near jump with 32bit immediate */
	.long \alt-1b /* offset */   /* or alternatively to alt */
	.previous
	.section .altinstructions,"a"
	.align 8
	.quad  0b
	.quad  2b
	.byte  \feature			/* when feature is set */
	.byte  5
	.byte  5
	.previous
	.endm
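
/*
 * Rough sketch of what ALTERNATIVE_JUMP accomplishes: it emits a 5-byte near
 * jump to \orig and records a replacement jump to \alt in .altinstructions,
 * keyed on \feature.  At boot, apply_alternatives() then acts roughly like:
 *
 *	if (boot_cpu_has(feature))
 *		patch the jump at 0b to target \alt;	// take the fast variant
 *	else
 *		leave the default jump to \orig in place;
 */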

	.macro ALIGN_DESTINATION
#ifdef FIX_ALIGNMENT
	/* check for bad alignment of destination */
	movl %edi,%ecx
	andl $7,%ecx
	jz 102f				/* already aligned */
	subl $8,%ecx
	negl %ecx
	subl %ecx,%edx
100:	movb (%rsi),%al
101:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 100b
102:
	.section .fixup,"ax"
103:	addl %ecx,%edx			/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	.section __ex_table,"a"
	.align 8
	.quad 100b,103b
	.quad 101b,103b
	.previous
#endif
	.endm
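
/*
 * Rough sketch of ALIGN_DESTINATION: it byte-copies until %rdi is 8-byte
 * aligned and subtracts the bytes copied from the remaining count in %edx,
 * roughly:
 *
 *	align = (8 - ((unsigned long)dst & 7)) & 7;
 *	len -= align;
 *	while (align--)
 *		*dst++ = *src++;
 *
 * A fault in this prologue jumps to copy_user_handle_tail with the bytes
 * still to be copied left in %edx.
 */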

/* Standard copy_to_user with segment limit checking */
ENTRY(copy_to_user)
	CFI_STARTPROC
	GET_THREAD_INFO(%rax)
	movq %rdi,%rcx
	addq %rdx,%rcx
	jc bad_to_user
	cmpq TI_addr_limit(%rax),%rcx
	jae bad_to_user
	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
	CFI_ENDPROC
ENDPROC(copy_to_user)

/* Standard copy_from_user with segment limit checking */
ENTRY(_copy_from_user)
	CFI_STARTPROC
	GET_THREAD_INFO(%rax)
	movq %rsi,%rcx
	addq %rdx,%rcx
	jc bad_from_user
	cmpq TI_addr_limit(%rax),%rcx
	jae bad_from_user
	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
	CFI_ENDPROC
ENDPROC(_copy_from_user)
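
/*
 * Both entry points above perform the same range check before dispatching:
 * roughly,
 *
 *	if (ptr + len < ptr ||
 *	    ptr + len >= current_thread_info()->addr_limit.seg)
 *		goto bad_to_user / bad_from_user;
 *
 * i.e. the addq/jc pair rejects a wrapping range and the cmpq/jae pair
 * rejects anything reaching past the thread's address limit.
 */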

ENTRY(copy_user_generic)
	CFI_STARTPROC
	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
	CFI_ENDPROC
ENDPROC(copy_user_generic)

ENTRY(__copy_from_user_inatomic)
	CFI_STARTPROC
	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
	CFI_ENDPROC
ENDPROC(__copy_from_user_inatomic)

	.section .fixup,"ax"
	/* must zero dest */
ENTRY(bad_from_user)
bad_from_user:
	CFI_STARTPROC
	movl %edx,%ecx
	xorl %eax,%eax
	rep
	stosb
bad_to_user:
	movl %edx,%eax
	ret
	CFI_ENDPROC
ENDPROC(bad_from_user)
	.previous
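
/*
 * Note on the fixups above: bad_from_user zero-fills the whole destination
 * with rep stosb before falling into bad_to_user, and both paths return the
 * original byte count in %eax, i.e. "nothing was copied", which matches the
 * copy_*_user convention of reporting uncopied bytes.
 */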

/*
 * copy_user_generic_unrolled - memory copy with exception handling.
 * This version is for CPUs like the P4 that don't have efficient microcode
 * for rep movsq.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 *
 * A rough outline of the copy strategy is sketched below.
 */
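
/*
 * Rough outline (the real loop is the hand-unrolled assembly that follows,
 * with one exception-table entry per load/store):
 *
 *	while (len >= 64) {	   eight quadword moves per iteration,
 *		copy 64 bytes;	   four loads in flight before the stores
 *		len -= 64;
 *	}
 *	while (len >= 8) {	   then single quadwords
 *		copy 8 bytes;
 *		len -= 8;
 *	}
 *	while (len) {		   then the trailing bytes
 *		copy 1 byte;
 *		len--;
 *	}
 *	return 0;
 */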
ENTRY(copy_user_generic_unrolled)
	CFI_STARTPROC
	cmpl $8,%edx
	jb 20f		/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
	movl %edx,%ecx
	andl $63,%edx
	shrl $6,%ecx
	jz 17f
1:	movq (%rsi),%r8
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
5:	movq %r8,(%rdi)
6:	movq %r9,1*8(%rdi)
7:	movq %r10,2*8(%rdi)
8:	movq %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movq %r8,4*8(%rdi)
14:	movq %r9,5*8(%rdi)
15:	movq %r10,6*8(%rdi)
16:	movq %r11,7*8(%rdi)
	leaq 64(%rsi),%rsi
	leaq 64(%rdi),%rdi
	decl %ecx
	jnz 1b
17:	movl %edx,%ecx
	andl $7,%edx
	shrl $3,%ecx
	jz 20f
18:	movq (%rsi),%r8
19:	movq %r8,(%rdi)
	leaq 8(%rsi),%rsi
	leaq 8(%rdi),%rdi
	decl %ecx
	jnz 18b
20:	andl %edx,%edx
	jz 23f
	movl %edx,%ecx
21:	movb (%rsi),%al
22:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 21b
23:	xor %eax,%eax
	ret

	.section .fixup,"ax"
30:	shll $6,%ecx
	addl %ecx,%edx
	jmp 60f
40:	lea (%rdx,%rcx,8),%rdx
	jmp 60f
50:	movl %ecx,%edx
60:	jmp copy_user_handle_tail /* ecx is zerorest also */
	.previous
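
/*
 * Note on the fixups above: when a fault hits one of the loops, %ecx still
 * holds the iterations left in that loop, so the remaining byte count is
 * rebuilt before tail handling, roughly:
 *
 *	30:	left = tail_in_edx + 64 * ecx	(fault in the 64-byte loop)
 *	40:	left = tail_in_edx + 8 * ecx	(fault in the quadword loop)
 *	50:	left = ecx			(fault in the byte loop)
 *
 * and copy_user_handle_tail then retries that range byte by byte.
 */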

	.section __ex_table,"a"
	.align 8
	.quad 1b,30b
	.quad 2b,30b
	.quad 3b,30b
	.quad 4b,30b
	.quad 5b,30b
	.quad 6b,30b
	.quad 7b,30b
	.quad 8b,30b
	.quad 9b,30b
	.quad 10b,30b
	.quad 11b,30b
	.quad 12b,30b
	.quad 13b,30b
	.quad 14b,30b
	.quad 15b,30b
	.quad 16b,30b
	.quad 18b,40b
	.quad 19b,40b
	.quad 21b,50b
	.quad 22b,50b
	.previous
	CFI_ENDPROC
ENDPROC(copy_user_generic_unrolled)

/* Some CPUs run faster using the string copy instructions.
 * This is also a lot simpler. Use them when possible.
 *
 * Only 4GB of copy is supported. This shouldn't be a problem
 * because the kernel normally only writes from/to page sized chunks
 * even if user space passed a longer buffer.
 * Copying more would also be dangerous because both Intel and AMD have
 * errata for rep movsq > 4GB; anyone lifting this limit should keep
 * those errata in mind.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 *
 * A rough outline of the fast path is sketched below.
 */
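
/*
 * Rough outline of the fast path (the real code is the rep-string sequence
 * that follows, with fixups feeding copy_user_handle_tail on a fault):
 *
 *	if (len == 0)
 *		return 0;
 *	align the destination if len >= 8;
 *	rep movsq copies len / 8 quadwords;
 *	rep movsb copies the remaining len % 8 bytes;
 *	return 0;
 */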
ENTRY(copy_user_generic_string)
	CFI_STARTPROC
	andl %edx,%edx
	jz 4f
	cmpl $8,%edx
	jb 2f		/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
	movl %edx,%ecx
	shrl $3,%ecx
	andl $7,%edx
1:	rep
	movsq
2:	movl %edx,%ecx
3:	rep
	movsb
4:	xorl %eax,%eax
	ret

	.section .fixup,"ax"
11:	lea (%rdx,%rcx,8),%rcx
12:	movl %ecx,%edx		/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	.section __ex_table,"a"
	.align 8
	.quad 1b,11b
	.quad 3b,12b
	.previous
	CFI_ENDPROC
ENDPROC(copy_user_generic_string)