/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Normally compiler builtins are used, but sometimes the compiler calls
 * out-of-line code instead. Based on asm-i386/string.h.
 *
 * This assembly file was rewritten from the memmove_64.c file.
 *	- Copyright 2011 Fenghua Yu <fenghua.yu@intel.com>
 */
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>

#undef memmove

/*
 * Implement memmove(). This can handle overlap between src and dst.
 *
 * Input:
 * rdi: dest
 * rsi: src
 * rdx: count
 *
 * Output:
 * rax: dest
 */
.weak memmove

SYM_FUNC_START_ALIAS(memmove)
SYM_FUNC_START(__memmove)

	mov %rdi, %rax

	/* Decide forward/backward copy mode */
	cmp %rdi, %rsi
	jge .Lmemmove_begin_forward
	mov %rsi, %r8
	add %rdx, %r8
	cmp %rdi, %r8
	jg 2f

	/* FSRM implies ERMS => no length checks, do the copy directly */
.Lmemmove_begin_forward:
	ALTERNATIVE "cmp $0x20, %rdx; jb 1f", "", X86_FEATURE_FSRM
	ALTERNATIVE "", "movq %rdx, %rcx; rep movsb; retq", X86_FEATURE_ERMS

	/*
	 * The movsq instruction has a high startup latency, so small sizes
	 * are handled with general-purpose registers instead.
	 */
	cmp $680, %rdx
	jb 3f
	/*
	 * movsq is only fast when source and destination are mutually
	 * aligned; comparing the low bytes of the two pointers checks that.
	 */

	cmpb %dil, %sil
	je 4f
3:
	sub $0x20, %rdx
	/*
	 * We gobble 32 bytes forward per iteration; the leaq pointer updates
	 * deliberately preserve the flags from the sub, so jae tests whether
	 * another full 32-byte chunk remains.
	 */
5:
	sub $0x20, %rdx
	movq 0*8(%rsi), %r11
	movq 1*8(%rsi), %r10
	movq 2*8(%rsi), %r9
	movq 3*8(%rsi), %r8
	leaq 4*8(%rsi), %rsi

	movq %r11, 0*8(%rdi)
	movq %r10, 1*8(%rdi)
	movq %r9, 2*8(%rdi)
	movq %r8, 3*8(%rdi)
	leaq 4*8(%rdi), %rdi
	jae 5b
	addq $0x20, %rdx
	jmp 1f
	/*
	 * Handle data forward by movsq.
	 */
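	/*
	 * The last quadword of src is loaded into %r11 before the copy, so
	 * it still holds the original bytes even if the rep movsq below
	 * overwrites that part of src; storing it at dest+count-8 afterwards
	 * covers the count%8 remainder with a single overlapping write.
	 */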
	.p2align 4
4:
	movq %rdx, %rcx
	movq -8(%rsi, %rdx), %r11
	lea -8(%rdi, %rdx), %r10
	shrq $3, %rcx
	rep movsq
	movq %r11, (%r10)
	jmp 13f
.Lmemmove_end_forward:

	/*
	 * Handle data backward by movsq: std makes rep movsq walk downward
	 * from the tail, and the first quadword, saved in %r11, is stored
	 * last to cover the remainder at the head.
	 */
	.p2align 4
7:
	movq %rdx, %rcx
	movq (%rsi), %r11
	movq %rdi, %r10
	leaq -8(%rsi, %rdx), %rsi
	leaq -8(%rdi, %rdx), %rdi
	shrq $3, %rcx
	std
	rep movsq
	cld
	movq %r11, (%r10)
	jmp 13f

	/*
	 * Prepare for the backward copy.
	 */
	.p2align 4
2:
	cmp $0x20, %rdx
	jb 1f
	cmp $680, %rdx
	jb 6f
	cmp %dil, %sil
	je 7b
6:
	/*
	 * Point src and dest at the tail of the region for the backward copy.
	 */
	addq %rdx, %rsi
	addq %rdx, %rdi
	subq $0x20, %rdx
	/*
	 * We gobble 32 bytes backward per iteration.
	 */
8:
	subq $0x20, %rdx
	movq -1*8(%rsi), %r11
	movq -2*8(%rsi), %r10
	movq -3*8(%rsi), %r9
	movq -4*8(%rsi), %r8
	leaq -4*8(%rsi), %rsi

	movq %r11, -1*8(%rdi)
	movq %r10, -2*8(%rdi)
	movq %r9, -3*8(%rdi)
	movq %r8, -4*8(%rdi)
	leaq -4*8(%rdi), %rdi
	jae 8b
	/*
	 * Step src and dest back to the start of the not-yet-copied head bytes.
	 */
	addq $0x20, %rdx
	subq %rdx, %rsi
	subq %rdx, %rdi
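	/*
	 * Fewer than 32 bytes remain. Each case below loads all of its
	 * source bytes before doing any stores, so it stays correct when the
	 * regions overlap; the head and tail accesses overlap in the middle
	 * and together cover every remaining byte.
	 */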
1:
	cmpq $16, %rdx
	jb 9f
	/*
	 * Copy 16 to 31 bytes.
	 */
	movq 0*8(%rsi), %r11
	movq 1*8(%rsi), %r10
	movq -2*8(%rsi, %rdx), %r9
	movq -1*8(%rsi, %rdx), %r8
	movq %r11, 0*8(%rdi)
	movq %r10, 1*8(%rdi)
	movq %r9, -2*8(%rdi, %rdx)
	movq %r8, -1*8(%rdi, %rdx)
	jmp 13f
	.p2align 4
9:
	cmpq $8, %rdx
	jb 10f
	/*
	 * Copy 8 to 15 bytes.
	 */
	movq 0*8(%rsi), %r11
	movq -1*8(%rsi, %rdx), %r10
	movq %r11, 0*8(%rdi)
	movq %r10, -1*8(%rdi, %rdx)
	jmp 13f
10:
	cmpq $4, %rdx
	jb 11f
	/*
	 * Copy 4 to 7 bytes.
	 */
	movl (%rsi), %r11d
	movl -4(%rsi, %rdx), %r10d
	movl %r11d, (%rdi)
	movl %r10d, -4(%rdi, %rdx)
	jmp 13f
11:
	cmp $2, %rdx
	jb 12f
	/*
	 * Copy 2 to 3 bytes.
	 */
	movw (%rsi), %r11w
	movw -2(%rsi, %rdx), %r10w
	movw %r11w, (%rdi)
	movw %r10w, -2(%rdi, %rdx)
	jmp 13f
12:
	cmp $1, %rdx
	jb 13f
	/*
	 * Copy one byte.
	 */
	movb (%rsi), %r11b
	movb %r11b, (%rdi)
13:
	retq
SYM_FUNC_END(__memmove)
SYM_FUNC_END_ALIAS(memmove)
EXPORT_SYMBOL(__memmove)
EXPORT_SYMBOL(memmove)