/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Normally the compiler builtins are used, but sometimes the compiler
 * calls out-of-line code instead. Based on asm-i386/string.h.
 *
 * This assembly file was rewritten from the memmove_64.c file.
 *	- Copyright 2011 Fenghua Yu <fenghua.yu@intel.com>
 */
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
#include <asm/export.h>

#undef memmove

.section .noinstr.text, "ax"
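
/*
 * Code placed in .noinstr.text must not be instrumented; keeping
 * memmove() here lets noinstr code (such as the kernel entry paths)
 * call it safely.
 */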

/*
 * Implement memmove(). This can handle overlap between src and dst.
 *
 * Input:
 * rdi: dest
 * rsi: src
 * rdx: count
 *
 * Output:
 * rax: dest
 */
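/*
 * Illustrative note: under the System V AMD64 calling convention these
 * registers carry the arguments of the C prototype
 *
 *	void *memmove(void *dest, const void *src, size_t count);
 *
 * and dest is returned in rax.
 */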
SYM_FUNC_START(__memmove)

	mov %rdi, %rax

	/* Decide forward/backward copy mode */
	cmp %rdi, %rsi
	jge .Lmemmove_begin_forward
	mov %rsi, %r8
	add %rdx, %r8
	cmp %rdi, %r8
	jg 2f
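	/*
	 * The branches above implement, in effect, this C sketch
	 * (illustrative only):
	 *
	 *	if (src >= dest)
	 *		copy forwards;	// copying down is never destructive
	 *	else if (src + count > dest)
	 *		copy backwards;	// the tail of src overlaps dest
	 *	else
	 *		copy forwards;	// the regions are disjoint
	 */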

#define CHECK_LEN	cmp $0x20, %rdx; jb 1f
#define MEMMOVE_BYTES	movq %rdx, %rcx; rep movsb; RET
.Lmemmove_begin_forward:
	ALTERNATIVE_2 __stringify(CHECK_LEN), \
		      __stringify(CHECK_LEN; MEMMOVE_BYTES), X86_FEATURE_ERMS, \
		      __stringify(MEMMOVE_BYTES), X86_FEATURE_FSRM
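	/*
	 * ALTERNATIVE_2 patches in one of three sequences at boot:
	 *   default:		branch to 1f for copies under 32 bytes,
	 *			otherwise fall through to the loops below
	 *   X86_FEATURE_ERMS:	same length check, but copies of 32 bytes
	 *			and up are done with "rep movsb"
	 *   X86_FEATURE_FSRM:	"rep movsb" for every size (fast short
	 *			REP MOVSB makes the length check pointless)
	 */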

	/*
	 * The movsq instruction has a high startup latency, so copies of
	 * fewer than 680 bytes are handled with general-purpose registers
	 * in the unrolled loop below.
	 */
	cmp  $680, %rdx
	jb	3f
	/*
	 * movsq only pays off when source and destination are mutually
	 * aligned, which the low-address-byte comparison below checks.
	 */

	cmpb %dil, %sil
	je 4f
3:
	sub $0x20, %rdx
	/*
	 * We gobble 32 bytes forward in each loop.
	 */
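	/*
	 * %rdx is pre-decremented by 32 above, so the "jae" below keeps
	 * looping while at least 32 bytes remain; the "addq" afterwards
	 * restores the 0..31 byte remainder for the tail code at 1:.
	 */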
5:
	sub $0x20, %rdx
	movq 0*8(%rsi), %r11
	movq 1*8(%rsi), %r10
	movq 2*8(%rsi), %r9
	movq 3*8(%rsi), %r8
	leaq 4*8(%rsi), %rsi

	movq %r11, 0*8(%rdi)
	movq %r10, 1*8(%rdi)
	movq %r9, 2*8(%rdi)
	movq %r8, 3*8(%rdi)
	leaq 4*8(%rdi), %rdi
	jae 5b
	addq $0x20, %rdx
	jmp 1f
	/*
	 * Handle data forward by movsq.
	 */
	.p2align 4
4:
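	/*
	 * "shrq $3" drops count % 8, so the last quadword of src is
	 * loaded up front and stored after "rep movsq" to cover the tail.
	 */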
	movq %rdx, %rcx
	movq -8(%rsi, %rdx), %r11
	lea -8(%rdi, %rdx), %r10
	shrq $3, %rcx
	rep movsq
	movq %r11, (%r10)
	jmp 13f
.Lmemmove_end_forward:

	/*
	 * Handle data backward by movsq.
	 */
	.p2align 4
7:
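	/*
	 * The direction flag (std) makes "rep movsq" copy descending from
	 * the last quadword. The first quadword is saved up front and
	 * stored last to cover the count % 8 bytes at the head.
	 */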
	movq %rdx, %rcx
	movq (%rsi), %r11
	movq %rdi, %r10
	leaq -8(%rsi, %rdx), %rsi
	leaq -8(%rdi, %rdx), %rdi
	shrq $3, %rcx
	std
	rep movsq
	cld
	movq %r11, (%r10)
	jmp 13f

	/*
	 * Prepare for the backward copy.
	 */
	.p2align 4
2:
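	/*
	 * Backward copy entry: apply the same size policy as the forward
	 * path. Under 32 bytes goes to the tail code at 1:; 680 bytes or
	 * more with matching alignment goes to the movsq variant at 7:.
	 */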
	cmp $0x20, %rdx
	jb 1f
	cmp $680, %rdx
	jb 6f
	cmp %dil, %sil
	je 7b
6:
	/*
	 * Point src and dest at the tail of the copy.
	 */
	addq %rdx, %rsi
	addq %rdx, %rdi
	subq $0x20, %rdx
	/*
	 * We gobble 32 bytes backward in each loop.
	 */
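	/*
	 * As in the forward loop, %rdx is biased by -32 so that "jae"
	 * keeps looping while at least 32 bytes remain.
	 */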
8:
	subq $0x20, %rdx
	movq -1*8(%rsi), %r11
	movq -2*8(%rsi), %r10
	movq -3*8(%rsi), %r9
	movq -4*8(%rsi), %r8
	leaq -4*8(%rsi), %rsi

	movq %r11, -1*8(%rdi)
	movq %r10, -2*8(%rdi)
	movq %r9, -3*8(%rdi)
	movq %r8, -4*8(%rdi)
	leaq -4*8(%rdi), %rdi
	jae 8b
	/*
	 * Move src and dest back to the head of the remaining bytes.
	 */
	addq $0x20, %rdx
	subq %rdx, %rsi
	subq %rdx, %rdi
1:
	cmpq $16, %rdx
	jb 9f
	/*
	 * Copy 16 to 31 bytes.
	 */
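	/*
	 * First-and-last trick: load the leading and trailing 16 bytes,
	 * then store them. The stores may overlap for sizes below 32,
	 * which is safe because every load completes before any store.
	 * The smaller size classes below use the same pattern.
	 */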
	movq 0*8(%rsi), %r11
	movq 1*8(%rsi), %r10
	movq -2*8(%rsi, %rdx), %r9
	movq -1*8(%rsi, %rdx), %r8
	movq %r11, 0*8(%rdi)
	movq %r10, 1*8(%rdi)
	movq %r9, -2*8(%rdi, %rdx)
	movq %r8, -1*8(%rdi, %rdx)
	jmp 13f
	.p2align 4
9:
	cmpq $8, %rdx
	jb 10f
	/*
	 * Copy 8 to 15 bytes.
	 */
	movq 0*8(%rsi), %r11
	movq -1*8(%rsi, %rdx), %r10
	movq %r11, 0*8(%rdi)
	movq %r10, -1*8(%rdi, %rdx)
	jmp 13f
10:
	cmpq $4, %rdx
	jb 11f
	/*
	 * Copy 4 to 7 bytes.
	 */
	movl (%rsi), %r11d
	movl -4(%rsi, %rdx), %r10d
	movl %r11d, (%rdi)
	movl %r10d, -4(%rdi, %rdx)
	jmp 13f
11:
	cmp $2, %rdx
	jb 12f
	/*
	 * Copy 2 to 3 bytes.
	 */
	movw (%rsi), %r11w
	movw -2(%rsi, %rdx), %r10w
	movw %r11w, (%rdi)
	movw %r10w, -2(%rdi, %rdx)
	jmp 13f
12:
	cmp $1, %rdx
	jb 13f
	/*
	 * Copy the final byte.
	 */
	movb (%rsi), %r11b
	movb %r11b, (%rdi)
13:
	RET
SYM_FUNC_END(__memmove)
EXPORT_SYMBOL(__memmove)

SYM_FUNC_ALIAS_MEMFUNC(memmove, __memmove)
EXPORT_SYMBOL(memmove)