#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#ifndef __ASSEMBLY__

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __kernel	__attribute__((address_space(0)))
# define __safe		__attribute__((safe))
# define __force	__attribute__((force))
# define __nocast	__attribute__((nocast))
# define __iomem	__attribute__((noderef, address_space(2)))
# define __must_hold(x)	__attribute__((context(x,1,1)))
# define __acquires(x)	__attribute__((context(x,0,1)))
# define __releases(x)	__attribute__((context(x,1,0)))
# define __acquire(x)	__context__(x,1)
# define __release(x)	__context__(x,-1)
# define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu	__attribute__((noderef, address_space(3)))
# define __rcu		__attribute__((noderef, address_space(4)))
# define __private	__attribute__((noderef))
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
#else /* __CHECKER__ */
# ifdef STRUCTLEAK_PLUGIN
#  define __user __attribute__((user))
# else
#  define __user
# endif
# define __kernel
# define __safe
# define __force
# define __nocast
# define __iomem
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
# define __builtin_warning(x, y...) (1)
# define __must_hold(x)
# define __acquires(x)
# define __releases(x)
# define __acquire(x) (void)0
# define __release(x) (void)0
# define __cond_lock(x,c) (c)
# define __percpu
# define __rcu
# define __private
# define ACCESS_PRIVATE(p, member) ((p)->member)
#endif /* __CHECKER__ */
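
/*
 * Illustrative sketch (the function name is hypothetical, not from this
 * header): a driver routine using the sparse annotations above might be
 * declared as
 *
 *	static int frob_regs(void __iomem *regs, u32 __user *out);
 *
 * Under sparse (__CHECKER__), dereferencing regs or out directly warns:
 * both are noderef pointers in foreign address spaces, so accesses must
 * go through helpers such as readl()/writel() or copy_to_user(), and any
 * deliberate cast across address spaces needs __force.
 */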

/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)
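
/*
 * Example of why the indirection matters: with __LINE__ == 42,
 * __PASTE(__key_, __LINE__) expands its arguments first and yields the
 * identifier __key_42, whereas pasting directly via
 * ___PASTE(__key_, __LINE__) would produce the literal token
 * __key___LINE__.  (__key_ is just an illustrative prefix.)
 */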

#ifdef __KERNEL__

#ifdef __GNUC__
#include <linux/compiler-gcc.h>
#endif

#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
#define notrace __attribute__((hotpatch(0,0)))
#else
#define notrace __attribute__((no_instrument_function))
#endif

/* The Intel compiler defines __GNUC__, so we overwrite the
 * implementations coming from the above header files here.
 */
#ifdef __INTEL_COMPILER
# include <linux/compiler-intel.h>
#endif

/* The Clang compiler also defines __GNUC__, so we likewise overwrite
 * the implementations coming from the above header files here.
 */
#ifdef __clang__
#include <linux/compiler-clang.h>
#endif

/*
 * Generic compiler-dependent macros required for kernel
 * build go below this comment. Actual compiler/compiler version
 * specific implementations come from the above header files
 */

struct ftrace_branch_data {
	const char *func;
	const char *file;
	unsigned line;
	union {
		struct {
			unsigned long correct;
			unsigned long incorrect;
		};
		struct {
			unsigned long miss;
			unsigned long hit;
		};
		unsigned long miss_hit[2];
	};
};

struct ftrace_likely_data {
	struct ftrace_branch_data	data;
	unsigned long			constant;
};

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
 * to disable branch tracing on a per file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			int ______r;					\
			static struct ftrace_likely_data		\
				__attribute__((__aligned__(4)))		\
				__attribute__((section("_ftrace_annotated_branch"))) \
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
	({								\
		int ______r;						\
		static struct ftrace_branch_data			\
			__attribute__((__aligned__(4)))			\
			__attribute__((section("_ftrace_branch")))	\
			______f = {					\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
		______r = !!(cond);					\
		______f.miss_hit[______r]++;				\
		______r;						\
	}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif
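
/*
 * Example (illustrative): a hot-path check such as
 *
 *	if (unlikely(!skb))
 *		return -ENOMEM;
 *
 * compiles to __builtin_expect(!!(!skb), 0), telling the compiler to
 * lay out the code with the error path out of line and the common case
 * as the straight-line fall-through.
 */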

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif
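
/*
 * Example (illustrative): barrier_data() is what lets a sensitive-data
 * wipe survive dead-store elimination, in the style of
 * memzero_explicit():
 *
 *	memset(key, 0, sizeof(key));
 *	barrier_data(key);
 *
 * The barrier makes the compiler assume the zeroed memory may still be
 * observed, so the memset() cannot be optimized away.
 */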

/* Unreachable code */
#ifndef unreachable
# define unreachable() do { } while (1)
#endif

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__attribute__((section("___kentry" "+" #sym ), used))	\
	= (unsigned long)&sym;
#endif
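
/*
 * Example (illustrative; the handler name is hypothetical): keeping an
 * exception-vector entry alive across linker section garbage collection:
 *
 *	void early_trap_entry(void);
 *	KENTRY(early_trap_entry);
 *
 * This emits a __used pointer to the symbol in a ___kentry section, so
 * the linker sees a reference even though no C code calls it.
 */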

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif
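
/*
 * Example (illustrative): per-CPU style address arithmetic, where the
 * compiler must not assume the result still points into the original
 * object (my_cpu_offset is a hypothetical offset here):
 *
 *	p = RELOC_HIDE(&var, my_cpu_offset);
 *
 * Laundering the pointer through an unsigned long hides the relocation
 * from the optimizer's alias and bounds analysis.
 */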

#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
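
/*
 * Example: __UNIQUE_ID(foo) used on line 57 expands to __UNIQUE_ID_foo57.
 * It is only "not quite" unique because two expansions with the same
 * prefix on the same line still collide.
 */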

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * This function is not 'inline' because __no_sanitize_address conflicts
 * with inlining. Attempting to inline it may cause a build failure.
 * 	https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
static __no_sanitize_address __maybe_unused
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}
#else
static __always_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}
#endif

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
 * compiler is aware of some particular ordering.  One way to make the
 * compiler aware of ordering is to put the two invocations of READ_ONCE,
 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
 *
 * In contrast to ACCESS_ONCE these two macros will also work on aggregate
 * data types like structs or unions. If the size of the accessed data
 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There are at
 * least two memcpy()s: one for the __builtin_memcpy() and one for the
 * copy of the stack-allocated '__u' variable out of the macro.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */

#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

#define WRITE_ONCE(x, val) \
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (__force typeof(x)) (val) }; \
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})
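
/*
 * Example (illustrative; "stop" is a hypothetical shared flag): a busy
 * wait on a flag that an interrupt handler sets:
 *
 *	while (!READ_ONCE(stop))
 *		cpu_relax();
 *
 * and, in the handler:
 *
 *	WRITE_ONCE(stop, 1);
 *
 * Without READ_ONCE() the compiler may hoist the load out of the loop
 * and spin forever on a stale register value.
 */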

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#ifdef __KERNEL__
/*
 * Allow us to mark functions as 'deprecated' and have gcc emit a nice
 * warning for each use, in hopes of speeding the functions' removal.
 * Usage is:
 * 		int __deprecated foo(void)
 */
#ifndef __deprecated
# define __deprecated		/* unimplemented */
#endif

#ifdef MODULE
#define __deprecated_for_modules __deprecated
#else
#define __deprecated_for_modules
#endif

#ifndef __must_check
#define __must_check
#endif

#ifndef CONFIG_ENABLE_MUST_CHECK
#undef __must_check
#define __must_check
#endif
#ifndef CONFIG_ENABLE_WARN_DEPRECATED
#undef __deprecated
#undef __deprecated_for_modules
#define __deprecated
#define __deprecated_for_modules
#endif

#ifndef __malloc
#define __malloc
#endif

/*
 * Allow us to avoid 'defined but not used' warnings on functions and data,
 * as well as force them to be emitted to the assembly file.
 *
 * As of gcc 3.4, static functions that are not marked with attribute((used))
 * may be elided from the assembly file.  As of gcc 3.4, static data not so
 * marked will not be elided, but this may change in a future gcc version.
 *
 * NOTE: Because distributions shipped with a backported unit-at-a-time
 * compiler in gcc 3.3, we must define __used to be __attribute__((used))
 * for gcc >=3.3 instead of 3.4.
 *
 * In prior versions of gcc, such functions and data would be emitted, but
 * would be warned about except with attribute((unused)).
 *
 * Mark functions that are referenced only in inline assembly as __used so
 * the code is emitted even though it appears to be unreferenced.
 */
#ifndef __used
# define __used			/* unimplemented */
#endif

#ifndef __maybe_unused
# define __maybe_unused		/* unimplemented */
#endif

#ifndef __always_unused
# define __always_unused	/* unimplemented */
#endif

#ifndef noinline
#define noinline
#endif

/*
 * Rather than using noinline to prevent stack consumption, use
 * noinline_for_stack instead, for documentation reasons.
 */
#define noinline_for_stack noinline

#ifndef __always_inline
#define __always_inline inline
#endif

#endif /* __KERNEL__ */

/*
 * From the GCC manual:
 *
 * Many functions do not examine any values except their arguments,
 * and have no effects except the return value.  Basically this is
 * just slightly more strict class than the `pure' attribute above,
 * since function is not allowed to read global memory.
 *
 * Note that a function that has pointer arguments and examines the
 * data pointed to must _not_ be declared `const'.  Likewise, a
 * function that calls a non-`const' function usually must not be
 * `const'.  It does not make sense for a `const' function to return
 * `void'.
 */
#ifndef __attribute_const__
# define __attribute_const__	/* unimplemented */
#endif

#ifndef __designated_init
# define __designated_init
#endif

#ifndef __latent_entropy
# define __latent_entropy
#endif

#ifndef __randomize_layout
# define __randomize_layout __designated_init
#endif

#ifndef __no_randomize_layout
# define __no_randomize_layout
#endif

#ifndef randomized_struct_fields_start
# define randomized_struct_fields_start
# define randomized_struct_fields_end
#endif

/*
 * Tell gcc if a function is cold. The compiler will assume any path
 * directly leading to the call is unlikely.
 */

#ifndef __cold
#define __cold
#endif

/* Simple shorthand for a section definition */
#ifndef __section
# define __section(S) __attribute__ ((__section__(#S)))
#endif

#ifndef __visible
#define __visible
#endif

/*
 * Assume alignment of return value.
 */
#ifndef __assume_aligned
#define __assume_aligned(a, ...)
#endif


/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif
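
/*
 * Example: array-vs-pointer checks are a classic use.  An ARRAY_SIZE()
 * style macro can reject plain pointers with
 *
 *	BUILD_BUG_ON(__same_type((arr), &(arr)[0]));
 *
 * since an array and a pointer to its first element are distinct types,
 * while a pointer passed by mistake compares equal to &ptr[0].
 */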

/* Is this type a native word size -- useful for atomic operations */
#ifndef __native_word
# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#endif
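
/*
 * Example: on a 32-bit machine __native_word(u64) is false (8 bytes is
 * wider than sizeof(long) == 4), so compiletime_assert_atomic_type(u64)
 * below would break the build there while passing on 64-bit.
 */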

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
/*
 * Sparse complains of variable sized arrays due to the temporary variable in
 * __compiletime_assert. Unfortunately we can't just expand it out to make
 * sparse see a constant array size without breaking compiletime_assert on old
 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
 */
# ifndef __CHECKER__
#  define __compiletime_error_fallback(condition) \
	do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
# endif
#endif
#ifndef __compiletime_error_fallback
# define __compiletime_error_fallback(condition) do { } while (0)
#endif

#define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		bool __cond = !(condition);				\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (__cond)						\
			prefix ## suffix();				\
		__compiletime_error_fallback(__cond);			\
	} while (0)

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of the POSIX assert, this macro will break the build
 * if the supplied condition is *false*, emitting the supplied error
 * message if the compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")
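
/*
 * Example (struct foo stands in for any type of interest):
 *
 *	compiletime_assert(sizeof(struct foo) == 16,
 *			   "struct foo must stay 16 bytes");
 *
 * If the condition is compile-time false, the call to the never-defined
 * __compiletime_assert_<line>() survives optimization and triggers the
 * __compiletime_error attribute, failing the build with the message;
 * compilers without the attribute fall back to the negative-size array
 * trick in __compiletime_error_fallback().
 */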

/*
 * Prevent the compiler from merging or refetching accesses.  The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering.  One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
 * on a union member will work as long as the size of the member matches the
 * size of the union and the size is smaller than word size.
 *
 * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
 * between process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 *
 * If possible use READ_ONCE()/WRITE_ONCE() instead.
 */
#define __ACCESS_ONCE(x) ({ \
	 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
	(volatile typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
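
/*
 * The dummy __var above doubles as the scalar-type check: (typeof(x))0
 * does not compile when x is a struct or union, so ACCESS_ONCE() on an
 * aggregate fails at build time.  Example (illustrative):
 *
 *	seq = ACCESS_ONCE(jiffies);
 */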

/**
 * lockless_dereference() - safely load a pointer for later dereference
 * @p: The pointer to load
 *
 * Similar to rcu_dereference(), but for situations where the pointed-to
 * object's lifetime is managed by something other than RCU.  That
 * "something other" might be reference counting or simple immortality.
 *
 * The seemingly unused variable ___typecheck_p validates that @p is
 * indeed a pointer type by using a pointer to typeof(*p) as the type.
 * Taking a pointer to typeof(*p) again is needed in case p is void *.
 */
#define lockless_dereference(p) \
({ \
	typeof(p) _________p1 = READ_ONCE(p); \
	typeof(*(p)) *___typecheck_p __maybe_unused; \
	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
	(_________p1); \
})
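
/*
 * Example (illustrative; "global_cfg" is a hypothetical pointer that is
 * published once and never freed):
 *
 *	struct cfg *c = lockless_dereference(global_cfg);
 *	if (c)
 *		use(c->value);
 *
 * The smp_read_barrier_depends() orders the pointer load against the
 * later dereference on DEC Alpha; other architectures honor address
 * dependencies in hardware.
 */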

#endif /* __LINUX_COMPILER_H */