/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			long ______r;					\
			static struct ftrace_likely_data		\
				__aligned(4)				\
				__section("_ftrace_annotated_branch")	\
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * We use __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )

#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))

#define __trace_if_value(cond) ({			\
	static struct ftrace_branch_data		\
		__aligned(4)				\
		__section("_ftrace_branch")		\
		__if_trace = {				\
			.func = __func__,		\
			.file = __FILE__,		\
			.line = __LINE__,		\
		};					\
	(cond) ?					\
		(__if_trace.miss_hit[1]++,1) :		\
		(__if_trace.miss_hit[0]++,0);		\
})

#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif
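
/*
 * Illustrative sketch, not part of the original header: likely()/unlikely()
 * only annotate which outcome of a branch is expected, they do not change
 * its result. Assuming a hypothetical do_work() helper whose error path is
 * rare:
 *
 *	err = do_work();
 *	if (unlikely(err))
 *		return err;
 */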

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif
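
/*
 * Illustrative sketch, not part of the original header: barrier() is a
 * compiler-only barrier. It keeps the compiler from caching values in
 * registers or reordering accesses across it, but emits no CPU fence.
 * Assuming a hypothetical 'done' flag set from an interrupt handler:
 *
 *	while (!done) {
 *		barrier();
 *		cpu_relax();
 *	}
 *
 * The barrier() forces 'done' to be re-read on every iteration; real code
 * would normally use READ_ONCE() for the load instead.
 */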

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define annotate_reachable() ({						\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.reachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define annotate_unreachable() ({					\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.unreachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define ASM_UNREACHABLE							\
	"999:\n\t"							\
	".pushsection .discard.unreachable\n\t"				\
	".long 999b - .\n\t"						\
	".popsection\n\t"
#else
#define annotate_reachable()
#define annotate_unreachable()
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do {		\
	annotate_unreachable();		\
	__builtin_unreachable();	\
} while (0)
#endif
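
/*
 * Illustrative sketch, not part of the original header: unreachable() tells
 * the compiler (and objtool) that control flow cannot get past this point,
 * e.g. after a switch that covers every possible value:
 *
 *	switch (mode & 3) {
 *	case 0: return handle0();
 *	case 1: return handle1();
 *	case 2: return handle2();
 *	case 3: return handle3();
 *	}
 *	unreachable();
 *
 * (mode and the handle*() helpers are assumptions made for the example.)
 */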

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example, an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__section("___kentry" "+" #sym )			\
	= (unsigned long)&sym;
#endif
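
/*
 * Illustrative sketch, not part of the original header: a table that is only
 * reached through hardware or assembly could be kept alive like this, rather
 * than via KEEP() in the linker script:
 *
 *	const struct vector_entry my_vectors[16] = { ... };
 *	KENTRY(my_vectors);
 *
 * (struct vector_entry and my_vectors are assumptions made for the example.)
 */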

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif
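
/*
 * Illustrative sketch, not part of the original header: RELOC_HIDE() adds an
 * offset to a pointer while hiding the arithmetic from the optimizer, so the
 * compiler cannot assume the result still points into the original object:
 *
 *	struct foo *base = get_base();
 *	struct foo *slot = RELOC_HIDE(base, off);
 *
 * (struct foo, get_base() and off are assumptions made for the example;
 * per-CPU pointer arithmetic is the classic in-tree user of this pattern.)
 */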

#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var)						\
	__asm__ ("" : "=r" (var) : "0" (var))
#endif
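
/*
 * Illustrative sketch, not part of the original header: hiding a value from
 * the optimizer keeps the compiler from folding away work that must really
 * happen, e.g. in a constant-time comparison loop:
 *
 *	diff |= a[i] ^ b[i];
 *	OPTIMIZER_HIDE_VAR(diff);
 *
 * After the macro the compiler no longer "knows" anything about diff's value
 * and cannot short-circuit the remaining iterations.
 */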

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
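
/*
 * Illustrative sketch, not part of the original header: other macros use
 * __UNIQUE_ID() to mint identifiers that will not collide within a
 * translation unit (this fallback is only unique per line, hence
 * "not quite unique"):
 *
 *	static int __UNIQUE_ID(counter);
 *
 * expands to something like __UNIQUE_ID_counter42 when used on line 42.
 */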

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * We can't declare the function 'inline' because __no_sanitize_address
 * conflicts with inlining. Attempting to inline it may cause a build failure.
 * 	https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif

static __no_kasan_or_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions. If the size of the accessed data type exceeds the word size of
 * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
 * fall back to memcpy(). There are at least two memcpy()s: one for the
 * __builtin_memcpy() and one for the macro's copy into the stack-allocated
 * variable '__u'.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
#include <asm/barrier.h>
#include <linux/kasan-checks.h>

#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)
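
/*
 * Illustrative sketch, not part of the original header: READ_ONCE() forces a
 * real load on every invocation, so a polling loop cannot be collapsed into
 * a single cached read:
 *
 *	while (!READ_ONCE(shared_flag))
 *		cpu_relax();
 *
 * (shared_flag is an assumption made for the example; cpu_relax() is the
 * usual busy-wait hint.)
 */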

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
	kasan_check_read(addr, 1);
	return *(unsigned long *)addr;
}

#define WRITE_ONCE(x, val) \
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (__force typeof(x)) (val) }; \
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})
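
/*
 * Illustrative sketch, not part of the original header: the writer side that
 * pairs with the READ_ONCE() polling loop above. WRITE_ONCE() emits exactly
 * one store and, for machine-word-sized types, one that the compiler cannot
 * tear:
 *
 *	WRITE_ONCE(shared_flag, 1);
 *
 * Ordering against other memory accesses still needs explicit barriers or
 * atomics; these macros only constrain the compiler.
 */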

#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym) \
	static void * __section(".discard.addressable") __used \
		__PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
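
/*
 * Illustrative sketch, not part of the original header: a function that is
 * only ever called from inline assembly can be kept from being discarded by
 * the compiler like this:
 *
 *	static void my_asm_target(void) { ... }
 *	__ADDRESSABLE(my_asm_target)
 *
 * (my_asm_target is an assumption made for the example.)
 */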

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off:	the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}
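
/*
 * Illustrative sketch, not part of the original header: if 'entry' holds a
 * 32-bit offset computed as (target - &entry), the absolute pointer is
 * recovered with:
 *
 *	void *p = offset_to_ptr(&entry);
 *
 * Relative references like this keep tables position-independent and are
 * half the size of absolute pointers on 64-bit kernels. (entry and target
 * are assumptions made for the example.)
 */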

#endif /* __ASSEMBLY__ */

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (!(condition))					\
			prefix ## suffix();				\
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
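
/*
 * Illustrative sketch, not part of the original header: the condition must
 * be a compile-time constant, and the build fails when it is false:
 *
 *	compiletime_assert(sizeof(struct my_hdr) == 16,
 *			   "struct my_hdr must stay 16 bytes");
 *
 * (struct my_hdr is an assumption made for the example; BUILD_BUG_ON() is
 * the more common wrapper around this machinery.)
 */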

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")

/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
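
/*
 * Illustrative sketch, not part of the original header: __must_be_array()
 * evaluates to 0 for true arrays and breaks the build for pointers, which is
 * how ARRAY_SIZE() (see linux/kernel.h) rejects pointer arguments:
 *
 *	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
 */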

#endif /* __LINUX_COMPILER_H */