/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Normally compiler builtins are used, but sometimes the compiler calls
 * out-of-line code. Based on asm-i386/string.h.
 *
 * This assembly file is rewritten from the memmove_64.c file.
 *	- Copyright 2011 Fenghua Yu <fenghua.yu@intel.com>
 */
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>

#undef memmove

/*
 * Implement memmove(). This can handle overlap between src and dst.
 *
 * Input:
 * rdi: dest
 * rsi: src
 * rdx: count
 *
 * Output:
 * rax: dest
 */
.weak memmove

SYM_FUNC_START_ALIAS(memmove)
ENTRY(__memmove)

	/* Handle copies of 32 bytes or more in the loop below */
	mov %rdi, %rax
	cmp $0x20, %rdx
	jb 1f

	/* Decide forward/backward copy mode */
	cmp %rdi, %rsi
	jge .Lmemmove_begin_forward	/* src >= dst: forward copy is safe */
	mov %rsi, %r8
	add %rdx, %r8
	cmp %rdi, %r8
	jg 2f				/* dst inside [src, src + count): copy backward */

.Lmemmove_begin_forward:
	/* On CPUs with ERMS, do the entire forward copy with rep movsb */
	ALTERNATIVE "", "movq %rdx, %rcx; rep movsb; retq", X86_FEATURE_ERMS

	/*
	 * The movsq instruction has a high startup latency, so small
	 * sizes are handled with general-purpose registers instead.
	 */
	cmp $680, %rdx
	jb 3f
	/*
	 * movsq is only a win when src and dst are equally aligned,
	 * which is checked by comparing the low address bytes.
	 */
	cmpb %dil, %sil
	je 4f
3:
	sub $0x20, %rdx
	/*
	 * We gobble 32 bytes forward in each loop iteration.
	 */
5:
	sub $0x20, %rdx
	movq 0*8(%rsi), %r11
	movq 1*8(%rsi), %r10
	movq 2*8(%rsi), %r9
	movq 3*8(%rsi), %r8
	leaq 4*8(%rsi), %rsi

	movq %r11, 0*8(%rdi)
	movq %r10, 1*8(%rdi)
	movq %r9, 2*8(%rdi)
	movq %r8, 3*8(%rdi)
	leaq 4*8(%rdi), %rdi
	jae 5b				/* loop while 32 or more bytes remain */
	addq $0x20, %rdx		/* undo the bias: %rdx = remaining count */
	jmp 1f
	/*
	 * Handle data forward by movsq.
	 */
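	/*
	 * The count need not be a multiple of 8: the last eight bytes
	 * of the region are loaded into %r11 (and their destination
	 * address into %r10) before rep movsq runs, since the source
	 * tail may be clobbered when the regions overlap; storing the
	 * saved qword afterwards covers the unaligned tail.
	 */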
	.p2align 4
4:
	movq %rdx, %rcx
	movq -8(%rsi, %rdx), %r11
	lea -8(%rdi, %rdx), %r10
	shrq $3, %rcx
	rep movsq
	movq %r11, (%r10)
	jmp 13f
.Lmemmove_end_forward:

	/*
	 * Handle data backward by movsq.
	 */
	.p2align 4
7:
	movq %rdx, %rcx
	movq (%rsi), %r11		/* save the head qword before it is clobbered */
	movq %rdi, %r10
	leaq -8(%rsi, %rdx), %rsi
	leaq -8(%rdi, %rdx), %rdi
	shrq $3, %rcx
	std				/* copy downwards */
	rep movsq
	cld
	movq %r11, (%r10)
	jmp 13f

	/*
	 * Prepare for the backward copy.
	 */
	.p2align 4
2:
	cmp $680, %rdx
	jb 6f
	cmp %dil, %sil
	je 7b
6:
	/*
	 * Point src and dst at the tails of the buffers.
	 */
	addq %rdx, %rsi
	addq %rdx, %rdi
	subq $0x20, %rdx
	/*
	 * We gobble 32 bytes backward in each loop iteration.
	 */
8:
	subq $0x20, %rdx
	movq -1*8(%rsi), %r11
	movq -2*8(%rsi), %r10
	movq -3*8(%rsi), %r9
	movq -4*8(%rsi), %r8
	leaq -4*8(%rsi), %rsi

	movq %r11, -1*8(%rdi)
	movq %r10, -2*8(%rdi)
	movq %r9, -3*8(%rdi)
	movq %r8, -4*8(%rdi)
	leaq -4*8(%rdi), %rdi
	jae 8b				/* loop while 32 or more bytes remain */
	/*
	 * Move src and dst back to the start of the un-copied remainder.
	 */
	addq $0x20, %rdx
	subq %rdx, %rsi
	subq %rdx, %rdi
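	/*
	 * Copy the remaining 0..31 bytes. Each size class below first
	 * loads both the head and the tail of the remainder and only
	 * then stores them, so the two accesses may overlap safely.
	 */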
1:
	cmpq $16, %rdx
	jb 9f
	/*
	 * Move 16 to 31 bytes.
	 */
	movq 0*8(%rsi), %r11
	movq 1*8(%rsi), %r10
	movq -2*8(%rsi, %rdx), %r9
	movq -1*8(%rsi, %rdx), %r8
	movq %r11, 0*8(%rdi)
	movq %r10, 1*8(%rdi)
	movq %r9, -2*8(%rdi, %rdx)
	movq %r8, -1*8(%rdi, %rdx)
	jmp 13f
	.p2align 4
9:
	cmpq $8, %rdx
	jb 10f
	/*
	 * Move 8 to 15 bytes.
	 */
	movq 0*8(%rsi), %r11
	movq -1*8(%rsi, %rdx), %r10
	movq %r11, 0*8(%rdi)
	movq %r10, -1*8(%rdi, %rdx)
	jmp 13f
10:
	cmpq $4, %rdx
	jb 11f
	/*
	 * Move 4 to 7 bytes.
	 */
	movl (%rsi), %r11d
	movl -4(%rsi, %rdx), %r10d
	movl %r11d, (%rdi)
	movl %r10d, -4(%rdi, %rdx)
	jmp 13f
11:
	cmp $2, %rdx
	jb 12f
	/*
	 * Move 2 to 3 bytes.
	 */
	movw (%rsi), %r11w
	movw -2(%rsi, %rdx), %r10w
	movw %r11w, (%rdi)
	movw %r10w, -2(%rdi, %rdx)
	jmp 13f
12:
	cmp $1, %rdx
	jb 13f
	/*
	 * Move the single remaining byte.
	 */
	movb (%rsi), %r11b
	movb %r11b, (%rdi)
13:
	retq
ENDPROC(__memmove)
SYM_FUNC_END_ALIAS(memmove)
EXPORT_SYMBOL(__memmove)
EXPORT_SYMBOL(memmove)