xref: /openbmc/linux/arch/x86/include/asm/string_64.h (revision b24413180f5600bcb3bb70fbed5cf186b60864bd)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__
#include <linux/jump_label.h>

/* Written 2002 by Andi Kleen */

/* Only used for special circumstances. Stolen from i386/string.h */
static __always_inline void *__inline_memcpy(void *to, const void *from, size_t n)
{
	unsigned long d0, d1, d2;
	asm volatile("rep ; movsl\n\t"
		     "testb $2,%b4\n\t"
		     "je 1f\n\t"
		     "movsw\n"
		     "1:\ttestb $1,%b4\n\t"
		     "je 2f\n\t"
		     "movsb\n"
		     "2:"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from)
		     : "memory");
	return to;
}
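
/*
 * Illustrative sketch (not in the original header): a plain-C equivalent of
 * the copy strategy above, for readers unfamiliar with the inline asm.
 * "rep ; movsl" copies n / 4 32-bit words, then bit 1 and bit 0 of n decide
 * whether a trailing 16-bit and/or 8-bit move is needed.  The helper name
 * __inline_memcpy_c_sketch is made up for illustration only.
 */
static __always_inline void *__inline_memcpy_c_sketch(void *to, const void *from,
						      size_t n)
{
	unsigned char *d = to;
	const unsigned char *s = from;
	size_t i;

	for (i = 0; i < n / 4; i++) {		/* rep ; movsl */
		__builtin_memcpy(d, s, 4);
		d += 4;
		s += 4;
	}
	if (n & 2) {				/* testb $2 -> movsw */
		__builtin_memcpy(d, s, 2);
		d += 2;
		s += 2;
	}
	if (n & 1)				/* testb $1 -> movsb */
		*d = *s;
	return to;
}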

/* Even with __builtin_ the compiler may decide to use the out of line
   function. */

#define __HAVE_ARCH_MEMCPY 1
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);

#ifndef CONFIG_FORTIFY_SOURCE
#ifndef CONFIG_KMEMCHECK
#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
#define memcpy(dst, src, len)					\
({								\
	size_t __len = (len);					\
	void *__ret;						\
	if (__builtin_constant_p(len) && __len >= 64)		\
		__ret = __memcpy((dst), (src), __len);		\
	else							\
		__ret = __builtin_memcpy((dst), (src), __len);	\
	__ret;							\
})
#endif
#else
/*
 * kmemcheck becomes very happy if we use the REP instructions unconditionally,
 * because it means that we know both memory operands in advance.
 */
#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len))
#endif
#endif /* !CONFIG_FORTIFY_SOURCE */
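
/*
 * Illustrative sketch (not in the original header), assuming a pre-4.3 GCC
 * where the memcpy() macro above is active: a copy whose length is a
 * compile-time constant of 64 bytes or more is routed to the out-of-line
 * __memcpy(), while small or runtime-sized copies fall through to
 * __builtin_memcpy().  The struct and helper names below are made up.
 */
struct example_big_blob {
	unsigned char bytes[128];
};

static inline void example_copy_paths(struct example_big_blob *dst,
				      const struct example_big_blob *src,
				      void *p, const void *q, size_t n)
{
	/* Constant length (128 >= 64): expands to __memcpy(). */
	memcpy(dst, src, sizeof(*dst));

	/* Runtime length: expands to __builtin_memcpy(). */
	memcpy(p, q, n);
}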

#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);

#define __HAVE_ARCH_MEMSET16
static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosw"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET32
static inline void *memset32(uint32_t *s, uint32_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosl"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET64
static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosq"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}
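
/*
 * Illustrative sketch (not in the original header): the sized memset
 * variants above take an element count, not a byte count - memset16()
 * stores n 16-bit values, memset32() n 32-bit values and memset64() n
 * 64-bit values, each with a single "rep stos".  The helper below, with a
 * made-up name, fills one line of a 16bpp frame buffer with a pixel value.
 */
static inline void example_fill_line16(uint16_t *line, uint16_t pixel,
				       size_t npixels)
{
	/* npixels elements, i.e. 2 * npixels bytes. */
	memset16(line, pixel, npixels);
}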

#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);
void *__memmove(void *dest, const void *src, size_t count);

int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);

#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)

/*
 * For files that are not instrumented (e.g. mm/slub.c) we
 * should use the not-instrumented versions of the mem* functions.
 */

#undef memcpy
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)

#ifndef __NO_FORTIFY
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
#endif

#endif
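
/*
 * Illustrative sketch (not in the original header): inside a file that is
 * built without KASAN instrumentation, a plain memcpy() call such as the one
 * in the hypothetical helper below resolves to __memcpy() through the macros
 * above, so the copy bypasses shadow checking instead of calling back into
 * instrumented code.  example_uninstrumented_copy() is made up for
 * illustration only.
 */
static inline void example_uninstrumented_copy(void *dst, const void *src,
					       size_t len)
{
	/* Expands to __memcpy(dst, src, len) when the KASAN macros apply. */
	memcpy(dst, src, len);
}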

#define __HAVE_ARCH_MEMCPY_MCSAFE 1
__must_check int memcpy_mcsafe_unrolled(void *dst, const void *src, size_t cnt);
DECLARE_STATIC_KEY_FALSE(mcsafe_key);

/**
 * memcpy_mcsafe - copy memory with indication if a machine check happened
 *
 * @dst:	destination address
 * @src:	source address
 * @cnt:	number of bytes to copy
 *
 * Low level memory copy function that catches machine checks.
 * We only call into the "safe" function on systems that can
 * actually do machine check recovery. Everyone else can just
 * use memcpy().
 *
 * Return 0 for success, -EFAULT for failure.
 */
static __always_inline __must_check int
memcpy_mcsafe(void *dst, const void *src, size_t cnt)
{
#ifdef CONFIG_X86_MCE
	if (static_branch_unlikely(&mcsafe_key))
		return memcpy_mcsafe_unrolled(dst, src, cnt);
	else
#endif
		memcpy(dst, src, cnt);
	return 0;
}
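
/*
 * Illustrative sketch (not in the original header): a typical caller checks
 * the return value and backs out when a machine check was consumed during
 * the copy, e.g. when reading from persistent memory that may contain
 * poisoned cache lines.  The helper name is hypothetical and only shows how
 * a caller might react to the -EFAULT result.
 */
static inline int example_read_from_pmem(void *dst, const void *pmem_src,
					 size_t len)
{
	int ret = memcpy_mcsafe(dst, pmem_src, len);

	if (ret)		/* -EFAULT: a machine check was hit */
		return ret;
	/* The copy completed without consuming poison. */
	return 0;
}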

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void memcpy_flushcache(void *dst, const void *src, size_t cnt);
#endif

#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_64_H */