xref: /openbmc/linux/include/linux/compiler.h (revision 0edbfea5)
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#ifndef __ASSEMBLY__

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __kernel	__attribute__((address_space(0)))
# define __safe		__attribute__((safe))
# define __force	__attribute__((force))
# define __nocast	__attribute__((nocast))
# define __iomem	__attribute__((noderef, address_space(2)))
# define __must_hold(x)	__attribute__((context(x,1,1)))
# define __acquires(x)	__attribute__((context(x,0,1)))
# define __releases(x)	__attribute__((context(x,1,0)))
# define __acquire(x)	__context__(x,1)
# define __release(x)	__context__(x,-1)
# define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu	__attribute__((noderef, address_space(3)))
# define __pmem		__attribute__((noderef, address_space(5)))
#ifdef CONFIG_SPARSE_RCU_POINTER
# define __rcu		__attribute__((noderef, address_space(4)))
#else /* CONFIG_SPARSE_RCU_POINTER */
# define __rcu
#endif /* CONFIG_SPARSE_RCU_POINTER */
# define __private	__attribute__((noderef))
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
#else /* __CHECKER__ */
# define __user
# define __kernel
# define __safe
# define __force
# define __nocast
# define __iomem
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
# define __builtin_warning(x, y...) (1)
# define __must_hold(x)
# define __acquires(x)
# define __releases(x)
# define __acquire(x) (void)0
# define __release(x) (void)0
# define __cond_lock(x,c) (c)
# define __percpu
# define __rcu
# define __pmem
# define __private
# define ACCESS_PRIVATE(p, member) ((p)->member)
#endif /* __CHECKER__ */
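
/*
 * Editorial example, not part of the original header: a minimal sketch of
 * how these sparse annotations are typically used (the function names here
 * are hypothetical).
 *
 *	long my_read(char __user *buf, size_t len);
 *
 * '__user' marks a userspace pointer; when building with sparse
 * (__CHECKER__ set), dereferencing it directly triggers a warning.
 *
 *	void my_lock(spinlock_t *l) __acquires(l);
 *	void my_unlock(spinlock_t *l) __releases(l);
 *
 * The context annotations let sparse check that lock acquisitions and
 * releases balance along every code path.
 */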

/* Indirect macros required for expanded argument pasting, e.g. __LINE__. */
#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)

#ifdef __KERNEL__

#ifdef __GNUC__
#include <linux/compiler-gcc.h>
#endif

#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
#define notrace __attribute__((hotpatch(0,0)))
#else
#define notrace __attribute__((no_instrument_function))
#endif

/* The Intel compiler defines __GNUC__, so here we overwrite any
 * implementations coming from the header files above.
 */
#ifdef __INTEL_COMPILER
# include <linux/compiler-intel.h>
#endif

/* The Clang compiler defines __GNUC__, so here we overwrite any
 * implementations coming from the header files above.
 */
#ifdef __clang__
#include <linux/compiler-clang.h>
#endif

/*
 * Generic compiler-dependent macros required for the kernel build go
 * below this comment.  Actual compiler/compiler-version-specific
 * implementations come from the header files above.
 */

struct ftrace_branch_data {
	const char *func;
	const char *file;
	unsigned line;
	union {
		struct {
			unsigned long correct;
			unsigned long incorrect;
		};
		struct {
			unsigned long miss;
			unsigned long hit;
		};
		unsigned long miss_hit[2];
	};
};

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect) ({					\
			int ______r;					\
			static struct ftrace_branch_data		\
				__attribute__((__aligned__(4)))		\
				__attribute__((section("_ftrace_annotated_branch"))) \
				______f = {				\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
			______r = likely_notrace(x);			\
			ftrace_likely_update(&______f, ______r, expect); \
			______r;					\
		})

/*
 * We use __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
# endif
# ifndef unlikely
#  define unlikely(x)	(__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
	({								\
		int ______r;						\
		static struct ftrace_branch_data			\
			__attribute__((__aligned__(4)))			\
			__attribute__((section("_ftrace_branch")))	\
			______f = {					\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
		______r = !!(cond);					\
		______f.miss_hit[______r]++;				\
		______r;						\
	}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif
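
/*
 * Editorial example, not part of the original header: likely()/unlikely()
 * only steer the compiler's code placement; correctness never depends on
 * them.  'err' is a hypothetical error flag in a hot path:
 *
 *	if (unlikely(err))
 *		return -EINVAL;
 *
 * With branch profiling enabled, the definitions above also record how
 * often each annotated branch matched its annotation.
 */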

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif

/* Unreachable code */
#ifndef unreachable
# define unreachable() do { } while (1)
#endif

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif
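
/*
 * Editorial note, not part of the original header: RELOC_HIDE() routes the
 * pointer arithmetic through an unsigned long so the compiler cannot assume
 * the result still points into the object behind 'ptr'; the per-cpu
 * accessors are the classic user.  A hedged sketch with hypothetical names:
 *
 *	struct foo *remote = RELOC_HIDE(&per_cpu_var, cpu_offset);
 */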

#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
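
/*
 * Editorial example, not part of the original header: this fallback pastes
 * the prefix with __LINE__, so on line 42 of a file
 *
 *	static int __UNIQUE_ID(counter);
 *
 * expands to 'static int __UNIQUE_ID_counter42;'.  Compiler-specific
 * headers may override this with __COUNTER__, which stays unique even
 * across several uses on the same line.
 */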

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * This function is not 'inline' because __no_sanitize_address conflicts
 * with inlining.  Attempting to inline it may cause a build failure:
 * 	https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
static __no_sanitize_address __maybe_unused
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}
#else
static __always_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}
#endif

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes.  The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
 * compiler is aware of some particular ordering.  One way to make the
 * compiler aware of ordering is to put the two invocations of READ_ONCE,
 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
 *
 * In contrast to ACCESS_ONCE, these two macros will also work on aggregate
 * data types like structs or unions.  If the size of the accessed data
 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy().  There are at
 * least two memcpy()s: one for the __builtin_memcpy() and one for the
 * macro's copy of the variable through '__u', allocated on the stack.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */

#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory accesses from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

#define WRITE_ONCE(x, val) \
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (__force typeof(x)) (val) }; \
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})
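
/*
 * Editorial example, not part of the original header: a flag shared between
 * an interrupt handler and process context on the same CPU, where the only
 * requirement is that each access happen exactly once.  'wakeup_pending' is
 * a hypothetical variable:
 *
 *	irq handler:	WRITE_ONCE(wakeup_pending, 1);
 *	process side:	while (!READ_ONCE(wakeup_pending))
 *				cpu_relax();
 */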

/**
 * smp_cond_acquire() - Spin wait for cond with ACQUIRE ordering
 * @cond: boolean expression to wait for
 *
 * Equivalent to using smp_load_acquire() on the condition variable but employs
 * the control dependency of the wait to reduce the barrier on many platforms.
 *
 * The control dependency provides a LOAD->STORE order and the additional RMB
 * provides LOAD->LOAD order; together they provide LOAD->{LOAD,STORE} order,
 * a.k.a. ACQUIRE.
 */
#define smp_cond_acquire(cond)	do {		\
	while (!(cond))				\
		cpu_relax();			\
	smp_rmb(); /* ctrl + rmb := acquire */	\
} while (0)
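
/*
 * Editorial example, not part of the original header: pairing
 * smp_cond_acquire() with a release store, using hypothetical variables:
 *
 *	writer:	data = compute();
 *		smp_store_release(&data_ready, 1);
 *
 *	reader:	smp_cond_acquire(data_ready);
 *		use(data);	(guaranteed to see the value published above)
 */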

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#ifdef __KERNEL__
/*
 * Allow us to mark functions as 'deprecated' and have gcc emit a nice
 * warning for each use, in hopes of speeding the function's removal.
 * Usage is:
 * 		int __deprecated foo(void)
 */
#ifndef __deprecated
# define __deprecated		/* unimplemented */
#endif

#ifdef MODULE
#define __deprecated_for_modules __deprecated
#else
#define __deprecated_for_modules
#endif

#ifndef __must_check
#define __must_check
#endif

#ifndef CONFIG_ENABLE_MUST_CHECK
#undef __must_check
#define __must_check
#endif
#ifndef CONFIG_ENABLE_WARN_DEPRECATED
#undef __deprecated
#undef __deprecated_for_modules
#define __deprecated
#define __deprecated_for_modules
#endif

#ifndef __malloc
#define __malloc
#endif

/*
 * Allow us to avoid 'defined but not used' warnings on functions and data,
 * as well as force them to be emitted to the assembly file.
 *
 * As of gcc 3.4, static functions that are not marked with attribute((used))
 * may be elided from the assembly file.  As of gcc 3.4, static data not so
 * marked will not be elided, but this may change in a future gcc version.
 *
 * NOTE: Because distributions shipped with a backported unit-at-a-time
 * compiler in gcc 3.3, we must define __used to be __attribute__((used))
 * for gcc >= 3.3 instead of 3.4.
 *
 * In prior versions of gcc, such functions and data would be emitted, but
 * would be warned about except with attribute((unused)).
 *
 * Mark functions that are referenced only in inline assembly as __used so
 * the code is emitted even though it appears to be unreferenced.
 */
#ifndef __used
# define __used			/* unimplemented */
#endif

#ifndef __maybe_unused
# define __maybe_unused		/* unimplemented */
#endif

#ifndef __always_unused
# define __always_unused	/* unimplemented */
#endif

#ifndef noinline
#define noinline
#endif

/*
 * Rather than using noinline to prevent stack consumption, use
 * noinline_for_stack instead, for documentation reasons.
 */
#define noinline_for_stack noinline

#ifndef __always_inline
#define __always_inline inline
#endif

#endif /* __KERNEL__ */

/*
 * From the GCC manual:
 *
 * Many functions do not examine any values except their arguments,
 * and have no effects except the return value.  Basically this is
 * just a slightly stricter class than the `pure' attribute above,
 * since the function is not allowed to read global memory.
 *
 * Note that a function that has pointer arguments and examines the
 * data pointed to must _not_ be declared `const'.  Likewise, a
 * function that calls a non-`const' function usually must not be
 * `const'.  It does not make sense for a `const' function to return
 * `void'.
 */
#ifndef __attribute_const__
# define __attribute_const__	/* unimplemented */
#endif
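
/*
 * Editorial example, not part of the original header: a function whose
 * result depends only on its argument and which reads no global memory, so
 * the compiler may fold repeated calls with the same argument.  The name is
 * hypothetical:
 *
 *	static int __attribute_const__ parity8(u8 x);
 */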

/*
 * Tell gcc if a function is cold.  The compiler will assume any path
 * directly leading to the call is unlikely.
 */

#ifndef __cold
#define __cold
#endif

/* Simple shorthand for a section definition */
#ifndef __section
# define __section(S) __attribute__ ((__section__(#S)))
#endif
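
/*
 * Editorial example, not part of the original header: __section() places an
 * object in a named section, typically so a linker script can gather such
 * objects into an iterable table.  '.my_table' and the struct are
 * hypothetical:
 *
 *	static struct my_entry entry __section(.my_table) = { .fn = f };
 *
 * Note that the macro stringifies its argument, so the section name is
 * written without quotes.
 */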

#ifndef __visible
#define __visible
#endif

/*
 * Assume alignment of return value.
 */
#ifndef __assume_aligned
#define __assume_aligned(a, ...)
#endif


/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif

/* Is this type a native word size -- useful for atomic operations */
#ifndef __native_word
# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#endif
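
/*
 * Editorial example, not part of the original header: __same_type() is how
 * ARRAY_SIZE() rejects plain pointers, and __native_word() gates the
 * atomic-type assertion further below.  Given:
 *
 *	int a[10], *p = a;
 *
 * __same_type(a, p) evaluates to 0 (array vs. pointer), while
 * __native_word(long) evaluates to 1.
 */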

/* Compile-time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
/*
 * Sparse complains of variable sized arrays due to the temporary variable in
 * __compiletime_assert.  Unfortunately we can't just expand it out to make
 * sparse see a constant array size without breaking compiletime_assert on old
 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
 */
# ifndef __CHECKER__
#  define __compiletime_error_fallback(condition) \
	do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
# endif
#endif
#ifndef __compiletime_error_fallback
# define __compiletime_error_fallback(condition) do { } while (0)
#endif

#define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		bool __cond = !(condition);				\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (__cond)						\
			prefix ## suffix();				\
		__compiletime_error_fallback(__cond);			\
	} while (0)

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")
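
/*
 * Editorial example, not part of the original header:
 *
 *	compiletime_assert(sizeof(long) >= 4, "long is too narrow");
 *
 * If the condition is compile-time false, the call to the undefined
 * __compiletime_assert_NNN() survives optimization and trips
 * __compiletime_error(), or the fallback declares a negative-sized
 * array; either way the build breaks with the supplied message.
 */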

/*
 * Prevent the compiler from merging or refetching accesses.  The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering.  One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * ACCESS_ONCE will only work on scalar types.  For union types, ACCESS_ONCE
 * on a union member will work as long as the size of the member matches the
 * size of the union and the size is smaller than word size.
 *
 * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
 * between process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 *
 * If possible, use READ_ONCE()/WRITE_ONCE() instead.
 */
#define __ACCESS_ONCE(x) ({ \
	 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
	(volatile typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
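
/*
 * Editorial note, not part of the original header: the dummy '__var'
 * initialization above is what enforces the scalar-only rule; a struct
 * cannot be initialized from a cast of 0, so non-scalar uses fail to
 * build.  A hedged sketch with a hypothetical variable:
 *
 *	int snapshot = ACCESS_ONCE(shared_counter);	(OK: scalar)
 */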

/**
 * lockless_dereference() - safely load a pointer for later dereference
 * @p: The pointer to load
 *
 * Similar to rcu_dereference(), but for situations where the pointed-to
 * object's lifetime is managed by something other than RCU.  That
 * "something other" might be reference counting or simple immortality.
 */
#define lockless_dereference(p) \
({ \
	typeof(p) _________p1 = READ_ONCE(p); \
	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
	(_________p1); \
})
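
/*
 * Editorial example, not part of the original header: loading a pointer
 * published with a release store, where the object's lifetime is handled
 * by reference counting rather than RCU.  Names are hypothetical:
 *
 *	writer:	p->val = 1;
 *		smp_store_release(&global_p, p);
 *
 *	reader:	q = lockless_dereference(global_p);
 *		if (q)
 *			use(q->val);	(dependency-ordered after the load)
 */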

/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
#ifdef CONFIG_KPROBES
# define __kprobes	__attribute__((__section__(".kprobes.text")))
# define nokprobe_inline	__always_inline
#else
# define __kprobes
# define nokprobe_inline	inline
#endif
#endif /* __LINUX_COMPILER_H */