/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			long ______r;					\
			static struct ftrace_likely_data		\
				__aligned(4)				\
				__section("_ftrace_annotated_branch")	\
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * We use __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )

#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))

#define __trace_if_value(cond) ({			\
	static struct ftrace_branch_data		\
		__aligned(4)				\
		__section("_ftrace_branch")		\
		__if_trace = {				\
			.func = __func__,		\
			.file = __FILE__,		\
			.line = __LINE__,		\
		};					\
	(cond) ?					\
		(__if_trace.miss_hit[1]++,1) :		\
		(__if_trace.miss_hit[0]++,0);		\
})
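
/*
 * Illustrative note (not in the original source): under
 * CONFIG_PROFILE_ALL_BRANCHES, a plain conditional such as
 *
 *	if (x > 0)
 *		do_work();
 *
 * has its condition routed through __trace_if_var(), so every
 * non-constant condition increments a miss/hit counter in the
 * "_ftrace_branch" section before it is tested. do_work() is a
 * hypothetical function used only for this sketch.
 */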

#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif
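
/*
 * Usage sketch (illustrative, not part of this header): annotate only
 * branches whose direction is heavily biased, such as error paths:
 *
 *	void *p = kmalloc(size, GFP_KERNEL);
 *	if (unlikely(!p))
 *		return -ENOMEM;
 *
 * A wrong annotation pessimizes code layout, which is exactly what the
 * branch profiler above exists to catch.
 */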

/* Optimization barrier */
#ifndef barrier
/* The "volatile" is due to gcc bugs */
# define barrier() __asm__ __volatile__("": : :"memory")
#endif
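
/*
 * Usage sketch (illustrative): barrier() prevents the compiler from
 * caching a value in a register across the barrier or reordering
 * accesses around it, as in a polling loop:
 *
 *	while (!done)
 *		barrier();	forces 'done' to be re-read each pass
 *
 * Note this is a compiler-only barrier: it emits no fence instruction
 * and does not order accesses made by other CPUs.
 */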

#ifndef barrier_data
/*
 * This version is used, for example, to prevent dead-store elimination
 * on @ptr, where gcc and llvm may behave differently when otherwise
 * using a normal barrier(): while gcc gets along with a normal
 * barrier(), llvm needs an explicit input variable to be assumed
 * clobbered. The issue is as follows: while the inline asm might
 * access any memory it wants, the compiler could have fit all of
 * @ptr into registers instead, and since @ptr never escaped from
 * there, it could prove that the inline asm wasn't touching any of
 * it. This version works well with both compilers, i.e. we're telling
 * the compiler that the inline asm absolutely may see the contents
 * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
 */
# define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
#endif
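
/*
 * Usage sketch (illustrative): wiping sensitive data whose stores would
 * otherwise look dead to the optimizer:
 *
 *	memset(key, 0, sizeof(key));
 *	barrier_data(key);	the memset() can no longer be elided
 *
 * This is essentially what memzero_explicit() does.
 */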

/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__-based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define annotate_reachable() ({						\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.reachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define annotate_unreachable() ({					\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.unreachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define ASM_UNREACHABLE							\
	"999:\n\t"							\
	".pushsection .discard.unreachable\n\t"				\
	".long 999b - .\n\t"						\
	".popsection\n\t"

/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(".rodata..c_jump_table")

#else
#define annotate_reachable()
#define annotate_unreachable()
#define __annotate_jump_table
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do {		\
	annotate_unreachable();		\
	__builtin_unreachable();	\
} while (0)
#endif
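
/*
 * Usage sketch (illustrative): tell the compiler (and objtool) that
 * control flow cannot reach a point, e.g. after an exhaustive switch:
 *
 *	switch (state) {
 *	case STATE_A: return handle_a();
 *	case STATE_B: return handle_b();
 *	}
 *	unreachable();
 *
 * STATE_A/STATE_B and the handlers are hypothetical names used only for
 * this sketch.
 */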

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in
 * the linker script. For example, an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__attribute__((__section__("___kentry+" #sym)))		\
	= (unsigned long)&sym;
#endif
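
/*
 * Usage sketch (illustrative, hypothetical names): keep a symbol alive
 * that only firmware or hardware ever reaches:
 *
 *	void my_vector_handler(void);
 *	KENTRY(my_vector_handler);
 *
 * The emitted __kentry_my_vector_handler pointer lands in the
 * "___kentry+my_vector_handler" section, which the linker script is
 * expected to KEEP() so dead-code elimination cannot drop the handler.
 */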

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif
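
/*
 * Illustrative note (not in the original source): RELOC_HIDE() launders
 * @ptr through an unsigned long so the compiler cannot see that the
 * result is derived from @ptr, and therefore cannot apply its usual
 * same-object and offset-range assumptions to @ptr + @off. Per-CPU
 * address math is the classic user, sketched with stand-in names:
 *
 *	ptr = RELOC_HIDE(&per_cpu_var, per_cpu_offset(cpu));
 */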

#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var)						\
	__asm__ ("" : "=r" (var) : "0" (var))
#endif
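
/*
 * Usage sketch (illustrative): defeat value tracking, e.g. to keep a
 * constant-time comparison from being short-circuited:
 *
 *	unsigned long neq = a ^ b;
 *	OPTIMIZER_HIDE_VAR(neq);
 *	return neq != 0;
 *
 * After the asm, the compiler must treat 'neq' as an arbitrary value,
 * which is what crypto_memneq() relies on.
 */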

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif

/**
 * data_race - mark an expression as containing intentional data races
 *
 * This data_race() macro is useful for situations in which data races
 * should be forgiven.  One example is diagnostic code that accesses
 * shared variables but is not a part of the core synchronization design.
 *
 * This macro *does not* affect normal code generation, but is a hint
 * to tooling that data races here are to be ignored.
 */
#define data_race(expr)							\
({									\
	__unqual_scalar_typeof(({ expr; })) __v = ({			\
		__kcsan_disable_current();				\
		expr;							\
	});								\
	__kcsan_enable_current();					\
	__v;								\
})
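
/*
 * Usage sketch (illustrative, hypothetical field name): a diagnostic
 * read that races with writers by design:
 *
 *	pr_debug("pending: %lu\n", data_race(q->nr_pending));
 *
 * The read still happens normally; KCSAN just stops reporting it.
 */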

#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym) \
	static void * __section(".discard.addressable") __used \
		__UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)&sym;
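
/*
 * Usage sketch (illustrative, hypothetical function): force the symbol
 * to exist so inline assembler can reference it by name:
 *
 *	static void my_patch_site(void) { }
 *	__ADDRESSABLE(my_patch_site);
 *
 * The generated pointer sits in .discard.addressable, which is dropped
 * at link time; only the reference it creates matters.
 */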

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off:	the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}
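
/*
 * Usage sketch (illustrative): tables of 32-bit relative offsets store
 * "target - slot" instead of full pointers:
 *
 *	const int *slot = &table[i];	hypothetical offset table
 *	void *target = offset_to_ptr(slot);
 *
 * i.e. target == (char *)slot + *slot, halving table size on 64-bit and
 * avoiding runtime relocations.
 */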

#endif /* __ASSEMBLY__ */

/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
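
/*
 * Illustrative note (not in the original source): this is what lets
 * ARRAY_SIZE() reject plain pointers at compile time:
 *
 *	int buf[8];
 *	ARRAY_SIZE(buf);	fine, evaluates to 8
 *	int *p = buf;
 *	ARRAY_SIZE(p);		build error via __must_be_array()
 *
 * For a real array, (a) and &(a)[0] have different types, so
 * __same_type() is 0 and BUILD_BUG_ON_ZERO() compiles away.
 */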

/*
 * This is needed in functions which generate the stack canary, see
 * arch/x86/kernel/smpboot.c::start_secondary() for an example.
 */
#define prevent_tail_call_optimization()	mb()
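
/*
 * Illustrative note (not in the original source): because the mb() runs
 * after the preceding function call, the compiler cannot turn that call
 * into a tail call, so the current stack frame (holding the freshly
 * initialized canary) stays live for the call's duration.
 */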

#include <asm/rwonce.h>

#endif /* __LINUX_COMPILER_H */