xref: /openbmc/linux/arch/x86/include/asm/string_64.h (revision c8dbaa22)
#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__
#include <linux/jump_label.h>

/* Written 2002 by Andi Kleen */

/* Only used for special circumstances. Stolen from i386/string.h */
static __always_inline void *__inline_memcpy(void *to, const void *from, size_t n)
{
	unsigned long d0, d1, d2;
	asm volatile("rep ; movsl\n\t"
		     "testb $2,%b4\n\t"
		     "je 1f\n\t"
		     "movsw\n"
		     "1:\ttestb $1,%b4\n\t"
		     "je 2f\n\t"
		     "movsb\n"
		     "2:"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (n / 4), "q" (n), "1" ((long)to), "2" ((long)from)
		     : "memory");
	return to;
}
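/*
 * Illustrative sketch only, not part of this header: a plain-C rendering
 * of what the asm above does. "rep ; movsl" copies n/4 32-bit words, then
 * bits 1 and 0 of n select an extra 16-bit and 8-bit copy for the tail.
 * (__inline_memcpy_c and the u8/u16/u32 spellings are just for the example.)
 *
 *	static void *__inline_memcpy_c(void *to, const void *from, size_t n)
 *	{
 *		u32 *d = to;
 *		const u32 *s = from;
 *		size_t i;
 *
 *		for (i = 0; i < n / 4; i++)	// rep ; movsl
 *			d[i] = s[i];
 *		if (n & 2)			// testb $2 ; movsw
 *			*(u16 *)&d[i] = *(const u16 *)&s[i];
 *		if (n & 1)			// testb $1 ; movsb
 *			((u8 *)to)[n - 1] = ((const u8 *)from)[n - 1];
 *		return to;
 *	}
 */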

/* Even with __builtin_ the compiler may decide to use the out-of-line
   function. */

#define __HAVE_ARCH_MEMCPY 1
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);

#ifndef CONFIG_FORTIFY_SOURCE
#ifndef CONFIG_KMEMCHECK
#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
#define memcpy(dst, src, len)					\
({								\
	size_t __len = (len);					\
	void *__ret;						\
	if (__builtin_constant_p(len) && __len >= 64)		\
		__ret = __memcpy((dst), (src), __len);		\
	else							\
		__ret = __builtin_memcpy((dst), (src), __len);	\
	__ret;							\
})
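/*
 * Illustrative only, assuming a hypothetical caller and a pre-4.3 GCC so
 * that the wrapper above is in effect: constant lengths of 64 bytes or
 * more are routed to the out-of-line __memcpy(), everything else to
 * __builtin_memcpy().
 *
 *	void caller(void *dst, const void *src, size_t n)
 *	{
 *		memcpy(dst, src, 128);	// constant >= 64  -> __memcpy()
 *		memcpy(dst, src, 8);	// small constant  -> __builtin_memcpy()
 *		memcpy(dst, src, n);	// not a constant  -> __builtin_memcpy()
 *	}
 */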
#endif
#else
/*
 * kmemcheck becomes very happy if we use the REP instructions unconditionally,
 * because it means that we know both memory operands in advance.
 */
#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len))
#endif
#endif /* !CONFIG_FORTIFY_SOURCE */

#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);

#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);
void *__memmove(void *dest, const void *src, size_t count);

int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);

#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)

/*
 * For files that are not instrumented (e.g. mm/slub.c) we
 * should use the non-instrumented versions of the mem* functions.
 */

#undef memcpy
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)

#ifndef __NO_FORTIFY
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
#endif

#endif

#define __HAVE_ARCH_MEMCPY_MCSAFE 1
__must_check int memcpy_mcsafe_unrolled(void *dst, const void *src, size_t cnt);
DECLARE_STATIC_KEY_FALSE(mcsafe_key);

/**
 * memcpy_mcsafe - copy memory with indication if a machine check happened
 *
 * @dst:	destination address
 * @src:	source address
 * @cnt:	number of bytes to copy
 *
 * Low-level memory copy function that catches machine checks.
 * We only call into the "safe" function on systems that can
 * actually do machine check recovery. Everyone else can just
 * use memcpy().
 *
 * Return 0 for success, -EFAULT on failure.
 */
static __always_inline __must_check int
memcpy_mcsafe(void *dst, const void *src, size_t cnt)
{
#ifdef CONFIG_X86_MCE
	if (static_branch_unlikely(&mcsafe_key))
		return memcpy_mcsafe_unrolled(dst, src, cnt);
	else
#endif
		memcpy(dst, src, cnt);
	return 0;
}
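/*
 * Illustrative sketch only; the caller below and its -EIO mapping are
 * hypothetical, not part of this header. A consumer copies from
 * possibly-poisoned memory and reports failure if the copy took a
 * machine check.
 *
 *	static int read_from_pmem(void *dst, const void *pmem_src, size_t len)
 *	{
 *		if (memcpy_mcsafe(dst, pmem_src, len))
 *			return -EIO;	// memcpy_mcsafe() returned -EFAULT
 *		return 0;
 *	}
 */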

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void memcpy_flushcache(void *dst, const void *src, size_t cnt);
#endif

#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_64_H */