#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#ifndef __ASSEMBLY__

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __kernel	__attribute__((address_space(0)))
# define __safe		__attribute__((safe))
# define __force	__attribute__((force))
# define __nocast	__attribute__((nocast))
# define __iomem	__attribute__((noderef, address_space(2)))
# define __must_hold(x)	__attribute__((context(x,1,1)))
# define __acquires(x)	__attribute__((context(x,0,1)))
# define __releases(x)	__attribute__((context(x,1,0)))
# define __acquire(x)	__context__(x,1)
# define __release(x)	__context__(x,-1)
# define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu	__attribute__((noderef, address_space(3)))
# define __rcu		__attribute__((noderef, address_space(4)))
# define __private	__attribute__((noderef))
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
#else /* __CHECKER__ */
# ifdef STRUCTLEAK_PLUGIN
#  define __user __attribute__((user))
# else
#  define __user
# endif
# define __kernel
# define __safe
# define __force
# define __nocast
# define __iomem
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
# define __builtin_warning(x, y...) (1)
# define __must_hold(x)
# define __acquires(x)
# define __releases(x)
# define __acquire(x) (void)0
# define __release(x) (void)0
# define __cond_lock(x,c) (c)
# define __percpu
# define __rcu
# define __private
# define ACCESS_PRIVATE(p, member) ((p)->member)
#endif /* __CHECKER__ */
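
/*
 * Example (an illustrative sketch, not part of this header): under sparse
 * ("make C=1"), the context annotations above let the checker verify that
 * lock acquire/release pairs balance. The functions below are hypothetical.
 *
 *	static void foo_lock(spinlock_t *lock) __acquires(lock)
 *	{
 *		spin_lock(lock);
 *	}
 *
 *	static void foo_unlock(spinlock_t *lock) __releases(lock)
 *	{
 *		spin_unlock(lock);
 *	}
 */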

/* Indirect macros required for expanded argument pasting, e.g. __LINE__. */
#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)

#ifdef __KERNEL__

#ifdef __GNUC__
#include <linux/compiler-gcc.h>
#endif

#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
#define notrace __attribute__((hotpatch(0,0)))
#else
#define notrace __attribute__((no_instrument_function))
#endif

/* The Intel compiler also defines __GNUC__, so implementations coming
 * from the header files included above are overridden here.
 */
#ifdef __INTEL_COMPILER
# include <linux/compiler-intel.h>
#endif

/* The Clang compiler also defines __GNUC__, so implementations coming
 * from the header files included above are overridden here.
 */
#ifdef __clang__
#include <linux/compiler-clang.h>
#endif

/*
 * Generic compiler-dependent macros required for the kernel
 * build go below this comment. Actual compiler/compiler version
 * specific implementations come from the above header files.
 */

struct ftrace_branch_data {
	const char *func;
	const char *file;
	unsigned line;
	union {
		struct {
			unsigned long correct;
			unsigned long incorrect;
		};
		struct {
			unsigned long miss;
			unsigned long hit;
		};
		unsigned long miss_hit[2];
	};
};

struct ftrace_likely_data {
	struct ftrace_branch_data	data;
	unsigned long			constant;
};

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			int ______r;					\
			static struct ftrace_likely_data		\
				__attribute__((__aligned__(4)))		\
				__attribute__((section("_ftrace_annotated_branch"))) \
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
	({								\
		int ______r;						\
		static struct ftrace_branch_data			\
			__attribute__((__aligned__(4)))			\
			__attribute__((section("_ftrace_branch")))	\
			______f = {					\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
		______r = !!(cond);					\
		______f.miss_hit[______r]++;				\
		______r;						\
	}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif
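
/*
 * Example (an illustrative sketch, not part of this header): likely() and
 * unlikely() tell the compiler which way a branch usually goes, so it can
 * keep the hot path straight-line. The buffer check below is hypothetical.
 *
 *	if (unlikely(!buf))
 *		return -ENOMEM;
 *	if (likely(len > 0))
 *		consume(buf, len);
 */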

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif
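
/*
 * Example (an illustrative sketch, not part of this header): barrier()
 * stops the compiler from caching a value across the loop body, e.g. when
 * busy-waiting on a flag that an interrupt handler sets. The variable
 * "done" below is hypothetical.
 *
 *	while (!done)
 *		barrier();
 */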

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
#define annotate_reachable() ({						\
	asm("%c0:\n\t"							\
	    ".pushsection .discard.reachable\n\t"			\
	    ".long %c0b - .\n\t"					\
	    ".popsection\n\t" : : "i" (__LINE__));			\
})
#define annotate_unreachable() ({					\
	asm("%c0:\n\t"							\
	    ".pushsection .discard.unreachable\n\t"			\
	    ".long %c0b - .\n\t"					\
	    ".popsection\n\t" : : "i" (__LINE__));			\
})
#define ASM_UNREACHABLE							\
	"999:\n\t"							\
	".pushsection .discard.unreachable\n\t"				\
	".long 999b - .\n\t"						\
	".popsection\n\t"
#else
#define annotate_reachable()
#define annotate_unreachable()
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do { annotate_reachable(); do { } while (1); } while (0)
#endif
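
/*
 * Example (an illustrative sketch, not part of this header): unreachable()
 * tells the compiler that control cannot flow past this point, e.g. after
 * a switch that covers every value. The enum and helpers are hypothetical.
 *
 *	switch (state) {
 *	case STATE_A: return do_a();
 *	case STATE_B: return do_b();
 *	}
 *	unreachable();
 */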

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__attribute__((section("___kentry" "+" #sym ), used))	\
	= (unsigned long)&sym;
#endif
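
/*
 * Example (an illustrative sketch, not part of this header): keep an
 * exception-vector handler in the image even though no C code references
 * it by name. The handler below is hypothetical.
 *
 *	void my_vector_handler(void);
 *	KENTRY(my_vector_handler);
 */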

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif

#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
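
/*
 * Example (an illustrative sketch, not part of this header): the double
 * __PASTE indirection lets __LINE__ expand before pasting, so on line 42
 *
 *	static int __UNIQUE_ID(foo);
 *
 * expands to
 *
 *	static int __UNIQUE_ID_foo42;
 */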

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * This function is not 'inline' because __no_sanitize_address conflicts
 * with inlining. Attempting to inline it may cause a build failure:
 * 	https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
static __no_sanitize_address __maybe_unused
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}
#else
static __always_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}
#endif

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
 * compiler is aware of some particular ordering.  One way to make the
 * compiler aware of ordering is to put the two invocations of READ_ONCE,
 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
 *
 * In contrast to ACCESS_ONCE these two macros will also work on aggregate
 * data types like structs or unions. If the size of the accessed data
 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There are at
 * least two memcpy()s: one for the __builtin_memcpy() and then one for
 * the macro's copy of the variable '__u' allocated on the stack.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */

#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

#define WRITE_ONCE(x, val) \
({							\
	union { typeof(x) __val; char __c[1]; } __u =	\
		{ .__val = (__force typeof(x)) (val) }; \
	__write_once_size(&(x), __u.__c, sizeof(x));	\
	__u.__val;					\
})
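
/*
 * Example (an illustrative sketch, not part of this header): a flag shared
 * between an interrupt handler and process context, accessed without locks.
 * The variable and functions below are hypothetical.
 *
 *	static int flag;
 *
 *	void irq_producer(void)
 *	{
 *		WRITE_ONCE(flag, 1);
 *	}
 *
 *	void task_consumer(void)
 *	{
 *		while (!READ_ONCE(flag))
 *			cpu_relax();
 *	}
 */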

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#ifdef __KERNEL__
/*
 * Allow us to mark functions as 'deprecated' and have gcc emit a nice
 * warning for each use, in hopes of speeding the function's removal.
 * Usage is:
 * 		int __deprecated foo(void)
 */
#ifndef __deprecated
# define __deprecated		/* unimplemented */
#endif

#ifdef MODULE
#define __deprecated_for_modules __deprecated
#else
#define __deprecated_for_modules
#endif

#ifndef __must_check
#define __must_check
#endif

#ifndef CONFIG_ENABLE_MUST_CHECK
#undef __must_check
#define __must_check
#endif
#ifndef CONFIG_ENABLE_WARN_DEPRECATED
#undef __deprecated
#undef __deprecated_for_modules
#define __deprecated
#define __deprecated_for_modules
#endif

#ifndef __malloc
#define __malloc
#endif

/*
 * Allow us to avoid 'defined but not used' warnings on functions and data,
 * as well as force them to be emitted to the assembly file.
 *
 * As of gcc 3.4, static functions that are not marked with attribute((used))
 * may be elided from the assembly file.  As of gcc 3.4, static data not so
 * marked will not be elided, but this may change in a future gcc version.
 *
 * NOTE: Because distributions shipped with a backported unit-at-a-time
 * compiler in gcc 3.3, we must define __used to be __attribute__((used))
 * for gcc >=3.3 instead of 3.4.
 *
 * In prior versions of gcc, such functions and data would be emitted, but
 * would be warned about except with attribute((unused)).
 *
 * Mark functions that are referenced only in inline assembly as __used so
 * the code is emitted even though it appears to be unreferenced.
 */
#ifndef __used
# define __used			/* unimplemented */
#endif

#ifndef __maybe_unused
# define __maybe_unused		/* unimplemented */
#endif

#ifndef __always_unused
# define __always_unused	/* unimplemented */
#endif

#ifndef noinline
#define noinline
#endif

/*
 * Rather than using noinline to prevent stack consumption, use
 * noinline_for_stack instead.  For documentation reasons.
 */
#define noinline_for_stack noinline

#ifndef __always_inline
#define __always_inline inline
#endif

#endif /* __KERNEL__ */

/*
 * From the GCC manual:
 *
 * Many functions do not examine any values except their arguments,
 * and have no effects except the return value.  Basically this is
 * just slightly more strict class than the `pure' attribute above,
 * since function is not allowed to read global memory.
 *
 * Note that a function that has pointer arguments and examines the
 * data pointed to must _not_ be declared `const'.  Likewise, a
 * function that calls a non-`const' function usually must not be
 * `const'.  It does not make sense for a `const' function to return
 * `void'.
 */
#ifndef __attribute_const__
# define __attribute_const__	/* unimplemented */
#endif

#ifndef __designated_init
# define __designated_init
#endif

#ifndef __latent_entropy
# define __latent_entropy
#endif

#ifndef __randomize_layout
# define __randomize_layout __designated_init
#endif

#ifndef __no_randomize_layout
# define __no_randomize_layout
#endif

#ifndef randomized_struct_fields_start
# define randomized_struct_fields_start
# define randomized_struct_fields_end
#endif

/*
 * Tell gcc if a function is cold. The compiler will assume any path
 * directly leading to the call is unlikely.
 */

#ifndef __cold
#define __cold
#endif

/* Simple shorthand for a section definition */
#ifndef __section
# define __section(S) __attribute__ ((__section__(#S)))
#endif
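
/*
 * Example (an illustrative sketch, not part of this header): place a
 * variable in a named ELF section; the argument is stringized by the
 * macro. The section and variable names below are hypothetical.
 *
 *	static unsigned long my_table[8] __section(.my.table);
 */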

#ifndef __visible
#define __visible
#endif

#ifndef __nostackprotector
# define __nostackprotector
#endif

/*
 * Assume alignment of return value.
 */
#ifndef __assume_aligned
#define __assume_aligned(a, ...)
#endif


/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif

/* Is this type a native word size -- useful for atomic operations */
#ifndef __native_word
# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#endif
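
/*
 * Example (an illustrative sketch, not part of this header): __same_type()
 * can back build-time type checks, e.g. with BUILD_BUG_ON() from
 * <linux/bug.h>; __native_word() underlies compiletime_assert_atomic_type()
 * further below.
 *
 *	BUILD_BUG_ON(!__same_type(jiffies, unsigned long));
 */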

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
/*
 * Sparse complains of variable sized arrays due to the temporary variable in
 * __compiletime_assert. Unfortunately we can't just expand it out to make
 * sparse see a constant array size without breaking compiletime_assert on old
 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
 */
# ifndef __CHECKER__
#  define __compiletime_error_fallback(condition) \
	do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
# endif
#endif
#ifndef __compiletime_error_fallback
# define __compiletime_error_fallback(condition) do { } while (0)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		bool __cond = !(condition);				\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (__cond)						\
			prefix ## suffix();				\
		__compiletime_error_fallback(__cond);			\
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")

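/*
 * Example (an illustrative sketch, not part of this header): fail the
 * build with a readable message when a layout assumption breaks. The
 * struct below is hypothetical.
 *
 *	compiletime_assert(sizeof(struct foo_hdr) == 16,
 *			   "struct foo_hdr must stay 16 bytes");
 */
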
/*
 * Prevent the compiler from merging or refetching accesses.  The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering.  One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
 * on a union member will work as long as the size of the member matches the
 * size of the union and the size is smaller than word size.
 *
 * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
 * between process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 *
 * If possible use READ_ONCE()/WRITE_ONCE() instead.
 */
#define __ACCESS_ONCE(x) ({ \
	 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
	(volatile typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))

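/*
 * Example (an illustrative sketch, not part of this header): legacy code
 * re-reading a shared scalar once per iteration; new code should prefer
 * READ_ONCE(). The variables below are hypothetical.
 *
 *	while (ACCESS_ONCE(shared_seq) == old_seq)
 *		cpu_relax();
 */
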
/**
 * lockless_dereference() - safely load a pointer for later dereference
 * @p: The pointer to load
 *
 * Similar to rcu_dereference(), but for situations where the pointed-to
 * object's lifetime is managed by something other than RCU.  That
 * "something other" might be reference counting or simple immortality.
 *
 * The seemingly unused variable ___typecheck_p validates that @p is
 * indeed a pointer type by using a pointer to typeof(*p) as the type.
 * Taking a pointer to typeof(*p) again is needed in case p is void *.
 */
#define lockless_dereference(p) \
({ \
	typeof(p) _________p1 = READ_ONCE(p); \
	typeof(*(p)) *___typecheck_p __maybe_unused; \
	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
	(_________p1); \
})

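/*
 * Example (an illustrative sketch, not part of this header): load a
 * pointer whose lifetime is protected by a reference count, then use the
 * local copy. The structure and global below are hypothetical.
 *
 *	struct foo *f = lockless_dereference(global_foo);
 *	if (f)
 *		use(f->field);
 */
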
#endif /* __LINUX_COMPILER_H */