/*
 * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v2.
 *
 * Functions to copy from and to user space.
 */

#include <linux/linkage.h>
#include <asm/dwarf2.h>

#define FIX_ALIGNMENT 1

#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeature.h>

	.macro ALTERNATIVE_JUMP feature,orig,alt
0:
	.byte 0xe9			/* 32bit jump */
	.long \orig-1f			/* by default jump to orig */
1:
	.section .altinstr_replacement,"ax"
2:	.byte 0xe9			/* near jump with 32bit immediate */
	.long \alt-1b			/* offset */   /* or alternatively to alt */
	.previous
	.section .altinstructions,"a"
	.align 8
	.quad 0b
	.quad 2b
	.byte \feature			/* when feature is set */
	.byte 5
	.byte 5
	.previous
	.endm

	.macro ALIGN_DESTINATION
#ifdef FIX_ALIGNMENT
	/* check for bad alignment of destination */
	movl %edi,%ecx
	andl $7,%ecx
	jz 102f				/* already aligned */
	subl $8,%ecx
	negl %ecx
	subl %ecx,%edx
100:	movb (%rsi),%al
101:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 100b
102:
	.section .fixup,"ax"
103:	addl %ecx,%edx			/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	.section __ex_table,"a"
	.align 8
	.quad 100b,103b
	.quad 101b,103b
	.previous
#endif
	.endm

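/*
 * For orientation: once apply_alternatives() has patched the kernel at
 * boot, the ALTERNATIVE_JUMP macro above behaves, for the entry points
 * below, roughly like the C-level dispatch sketched here.  This is only
 * a sketch (dst/src/len stand for rdi/rsi/rdx); the real code has no
 * runtime test -- the 5-byte jmp itself is rewritten when the CPU sets
 * the feature bit:
 *
 *	if (boot_cpu_has(X86_FEATURE_REP_GOOD))
 *		copy_user_generic_string(dst, src, len);
 *	else
 *		copy_user_generic_unrolled(dst, src, len);
 */
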
/* Standard copy_to_user with segment limit checking */
ENTRY(_copy_to_user)
	CFI_STARTPROC
	GET_THREAD_INFO(%rax)
	movq %rdi,%rcx
	addq %rdx,%rcx
	jc bad_to_user
	cmpq TI_addr_limit(%rax),%rcx
	jae bad_to_user
	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
	CFI_ENDPROC
ENDPROC(_copy_to_user)

/* Standard copy_from_user with segment limit checking */
ENTRY(_copy_from_user)
	CFI_STARTPROC
	GET_THREAD_INFO(%rax)
	movq %rsi,%rcx
	addq %rdx,%rcx
	jc bad_from_user
	cmpq TI_addr_limit(%rax),%rcx
	jae bad_from_user
	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
	CFI_ENDPROC
ENDPROC(_copy_from_user)

ENTRY(copy_user_generic)
	CFI_STARTPROC
	ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
	CFI_ENDPROC
ENDPROC(copy_user_generic)

	.section .fixup,"ax"
	/* must zero dest */
ENTRY(bad_from_user)
bad_from_user:
	CFI_STARTPROC
	movl %edx,%ecx
	xorl %eax,%eax
	rep
	stosb
bad_to_user:
	movl %edx,%eax
	ret
	CFI_ENDPROC
ENDPROC(bad_from_user)
	.previous

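/*
 * For reference, C code does not call these entry points directly; the
 * uaccess wrappers boil down to roughly the following (a simplified
 * sketch, not the literal header code):
 *
 *	unsigned long copy_to_user(void __user *to, const void *from,
 *				   unsigned long n)
 *	{
 *		might_fault();
 *		return _copy_to_user(to, from, n);	// 0, or bytes not copied
 *	}
 *
 * copy_from_user() is the mirror image and additionally depends on the
 * zeroing convention implemented by bad_from_user above and by the
 * fixup paths below: on failure, the uncopied tail of the kernel
 * destination buffer must read as zeroes.
 */
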
/*
 * copy_user_generic_unrolled - memory copy with exception handling.
 * This version is for CPUs like the P4 that don't have efficient
 * microcode for rep movsq.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
ENTRY(copy_user_generic_unrolled)
	CFI_STARTPROC
	cmpl $8,%edx
	jb 20f		/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
	movl %edx,%ecx
	andl $63,%edx
	shrl $6,%ecx
	jz 17f
1:	movq (%rsi),%r8
2:	movq 1*8(%rsi),%r9
3:	movq 2*8(%rsi),%r10
4:	movq 3*8(%rsi),%r11
5:	movq %r8,(%rdi)
6:	movq %r9,1*8(%rdi)
7:	movq %r10,2*8(%rdi)
8:	movq %r11,3*8(%rdi)
9:	movq 4*8(%rsi),%r8
10:	movq 5*8(%rsi),%r9
11:	movq 6*8(%rsi),%r10
12:	movq 7*8(%rsi),%r11
13:	movq %r8,4*8(%rdi)
14:	movq %r9,5*8(%rdi)
15:	movq %r10,6*8(%rdi)
16:	movq %r11,7*8(%rdi)
	leaq 64(%rsi),%rsi
	leaq 64(%rdi),%rdi
	decl %ecx
	jnz 1b
17:	movl %edx,%ecx
	andl $7,%edx
	shrl $3,%ecx
	jz 20f
18:	movq (%rsi),%r8
19:	movq %r8,(%rdi)
	leaq 8(%rsi),%rsi
	leaq 8(%rdi),%rdi
	decl %ecx
	jnz 18b
20:	andl %edx,%edx
	jz 23f
	movl %edx,%ecx
21:	movb (%rsi),%al
22:	movb %al,(%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz 21b
23:	xor %eax,%eax
	ret

	.section .fixup,"ax"
30:	shll $6,%ecx
	addl %ecx,%edx
	jmp 60f
40:	lea (%rdx,%rcx,8),%rdx
	jmp 60f
50:	movl %ecx,%edx
60:	jmp copy_user_handle_tail /* ecx is zerorest also */
	.previous

	.section __ex_table,"a"
	.align 8
	.quad 1b,30b
	.quad 2b,30b
	.quad 3b,30b
	.quad 4b,30b
	.quad 5b,30b
	.quad 6b,30b
	.quad 7b,30b
	.quad 8b,30b
	.quad 9b,30b
	.quad 10b,30b
	.quad 11b,30b
	.quad 12b,30b
	.quad 13b,30b
	.quad 14b,30b
	.quad 15b,30b
	.quad 16b,30b
	.quad 18b,40b
	.quad 19b,40b
	.quad 21b,50b
	.quad 22b,50b
	.previous
	CFI_ENDPROC
ENDPROC(copy_user_generic_unrolled)

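/*
 * The fast path of copy_user_generic_unrolled above corresponds roughly
 * to the following C sketch (copy_64_bytes()/copy_8_bytes() stand in
 * for the movq groups at labels 1-16 and 18/19; the ALIGN_DESTINATION
 * prologue and the faulting paths into copy_user_handle_tail are
 * omitted):
 *
 *	while (len >= 64) {			// 64-byte unrolled blocks
 *		copy_64_bytes(dst, src);
 *		dst += 64; src += 64; len -= 64;
 *	}
 *	while (len >= 8) {			// remaining quadwords
 *		copy_8_bytes(dst, src);
 *		dst += 8; src += 8; len -= 8;
 *	}
 *	while (len--)				// trailing bytes, labels 21/22
 *		*dst++ = *src++;
 *	return 0;				// eax == 0: all bytes copied
 */
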
/* Some CPUs run faster using the string copy instructions.
 * This is also a lot simpler. Use them when possible.
 *
 * Only 4GB of copy is supported. This shouldn't be a problem
 * because the kernel normally only writes from/to page sized chunks
 * even if user space passed a longer buffer.
 * And more would be dangerous because both Intel and AMD have
 * errata with rep movsq > 4GB. If someone feels the need to fix
 * this, please consider those errata.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
ENTRY(copy_user_generic_string)
	CFI_STARTPROC
	andl %edx,%edx
	jz 4f
	cmpl $8,%edx
	jb 2f		/* less than 8 bytes, go to byte copy loop */
	ALIGN_DESTINATION
	movl %edx,%ecx
	shrl $3,%ecx
	andl $7,%edx
1:	rep
	movsq
2:	movl %edx,%ecx
3:	rep
	movsb
4:	xorl %eax,%eax
	ret

	.section .fixup,"ax"
11:	lea (%rdx,%rcx,8),%rcx
12:	movl %ecx,%edx		/* ecx is zerorest also */
	jmp copy_user_handle_tail
	.previous

	.section __ex_table,"a"
	.align 8
	.quad 1b,11b
	.quad 3b,12b
	.previous
	CFI_ENDPROC
ENDPROC(copy_user_generic_string)
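
/*
 * The exception-table fixups in this file all end in a jump to
 * copy_user_handle_tail(), which is implemented in C (arch/x86/lib/
 * usercopy_64.c).  As a hedged sketch -- assuming its parameters simply
 * mirror the register state the fixups leave behind (rdi = dst,
 * rsi = src, rdx = bytes left, ecx = zerorest) -- it retries the tail
 * one byte at a time:
 *
 *	unsigned long copy_user_handle_tail(char *to, char *from,
 *					    unsigned len, unsigned zerorest)
 *	{
 *		char c;
 *
 *		for (; len; len--) {
 *			if (__get_user(c, from++))
 *				break;		// source byte still faults
 *			if (__put_user(c, to++))
 *				break;		// destination byte still faults
 *		}
 *		// with zerorest set (the copy_from_user() case) the
 *		// remaining destination bytes are zeroed before returning
 *		return len;			// uncopied bytes -> eax in callers
 *	}
 */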