#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#ifndef __ASSEMBLY__

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __kernel	__attribute__((address_space(0)))
# define __safe		__attribute__((safe))
# define __force	__attribute__((force))
# define __nocast	__attribute__((nocast))
# define __iomem	__attribute__((noderef, address_space(2)))
# define __must_hold(x)	__attribute__((context(x,1,1)))
# define __acquires(x)	__attribute__((context(x,0,1)))
# define __releases(x)	__attribute__((context(x,1,0)))
# define __acquire(x)	__context__(x,1)
# define __release(x)	__context__(x,-1)
# define __cond_lock(x,c)	((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu	__attribute__((noderef, address_space(3)))
# define __pmem		__attribute__((noderef, address_space(5)))
#ifdef CONFIG_SPARSE_RCU_POINTER
# define __rcu		__attribute__((noderef, address_space(4)))
#else
# define __rcu
#endif
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
#else
# define __user
# define __kernel
# define __safe
# define __force
# define __nocast
# define __iomem
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
# define __builtin_warning(x, y...) (1)
# define __must_hold(x)
# define __acquires(x)
# define __releases(x)
# define __acquire(x) (void)0
# define __release(x) (void)0
# define __cond_lock(x,c) (c)
# define __percpu
# define __rcu
# define __pmem
#endif

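/*
 * Illustrative sketch, not part of this header: the context annotations
 * above let sparse check that lock acquire/release pairs balance.  A
 * hypothetical helper could be annotated as
 *
 *	void my_lock(spinlock_t *l) __acquires(l);
 *	void my_unlock(spinlock_t *l) __releases(l);
 *
 * so that sparse warns when a code path calls my_lock() but can return
 * without a matching my_unlock().
 */
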
/* Indirect macros required for expanded argument pasting, e.g. __LINE__. */
#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)

#ifdef __KERNEL__

#ifdef __GNUC__
#include <linux/compiler-gcc.h>
#endif

#ifdef CC_USING_HOTPATCH
#define notrace __attribute__((hotpatch(0,0)))
#else
#define notrace __attribute__((no_instrument_function))
#endif

/* The Intel compiler defines __GNUC__, so we overwrite here the
 * implementations coming from the above header files.
 */
#ifdef __INTEL_COMPILER
# include <linux/compiler-intel.h>
#endif

/* The Clang compiler defines __GNUC__, so we overwrite here the
 * implementations coming from the above header files.
 */
#ifdef __clang__
#include <linux/compiler-clang.h>
#endif

/*
 * Generic compiler-dependent macros required for the kernel build go
 * below this comment.  Actual compiler/compiler-version-specific
 * implementations come from the above header files.
 */

struct ftrace_branch_data {
	const char *func;
	const char *file;
	unsigned line;
	union {
		struct {
			unsigned long correct;
			unsigned long incorrect;
		};
		struct {
			unsigned long miss;
			unsigned long hit;
		};
		unsigned long miss_hit[2];
	};
};

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect) ({					\
			int ______r;					\
			static struct ftrace_branch_data		\
				__attribute__((__aligned__(4)))		\
				__attribute__((section("_ftrace_annotated_branch"))) \
				______f = {				\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
			______r = likely_notrace(x);			\
			ftrace_likely_update(&______f, ______r, expect); \
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same.  This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
# endif
# ifndef unlikely
#  define unlikely(x)	(__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
	if (__builtin_constant_p((cond)) ? !!(cond) :			\
	({								\
		int ______r;						\
		static struct ftrace_branch_data			\
			__attribute__((__aligned__(4)))			\
			__attribute__((section("_ftrace_branch")))	\
			______f = {					\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
		______r = !!(cond);					\
		______f.miss_hit[______r]++;				\
		______r;						\
	}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif

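/*
 * Illustrative sketch, not part of this header: annotate only branches
 * whose direction is heavily biased, e.g.
 *
 *	err = do_something();	(do_something() is a hypothetical call)
 *	if (unlikely(err))
 *		return err;
 *
 * so the compiler can lay the error path out of line.  With branch
 * profiling enabled, the correct/incorrect counters above expose
 * annotations whose prediction turns out to be wrong in practice.
 */
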
/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif

/* Unreachable code */
#ifndef unreachable
# define unreachable() do { } while (1)
#endif

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif

#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif

#include <uapi/linux/types.h>

static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
	default:
		barrier();
		__builtin_memcpy((void *)res, (const void *)p, size);
		barrier();
	}
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes.  The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
 * compiler is aware of some particular ordering.  One way to make the
 * compiler aware of ordering is to put the two invocations of READ_ONCE,
 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
 *
 * In contrast to ACCESS_ONCE, these two macros will also work on aggregate
 * data types like structs or unions.  If the size of the accessed data
 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits),
 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
 * compile-time warning.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */

#define READ_ONCE(x) \
	({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })

#define WRITE_ONCE(x, val) \
	({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })

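/*
 * Illustrative sketch, not part of this header: a flag shared between an
 * interrupt handler and process context on the same CPU (the names are
 * hypothetical).  The handler does
 *
 *	WRITE_ONCE(need_wakeup, 1);
 *
 * while process context polls
 *
 *	while (!READ_ONCE(need_wakeup))
 *		cpu_relax();
 *
 * and the compiler may neither tear the accesses nor cache the read
 * across loop iterations.
 */
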
/**
 * READ_ONCE_CTRL - Read a value heading a control dependency
 * @x: The value to be read, heading the control dependency
 *
 * Control dependencies are tricky.  See Documentation/memory-barriers.txt
 * for important information on how to use them.  Note that in many cases,
 * use of smp_load_acquire() will be much simpler.  Control dependencies
 * should be avoided except on the hottest of hotpaths.
 */
#define READ_ONCE_CTRL(x) \
({ \
	typeof(x) __val = READ_ONCE(x); \
	smp_read_barrier_depends(); /* Enforce control dependency. */ \
	__val; \
})

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#ifdef __KERNEL__
/*
 * Allow us to mark functions as 'deprecated' and have gcc emit a nice
 * warning for each use, in hopes of speeding the function's removal.
 * Usage is:
 *		int __deprecated foo(void)
 */
#ifndef __deprecated
# define __deprecated		/* unimplemented */
#endif

#ifdef MODULE
#define __deprecated_for_modules __deprecated
#else
#define __deprecated_for_modules
#endif

#ifndef __must_check
#define __must_check
#endif

#ifndef CONFIG_ENABLE_MUST_CHECK
#undef __must_check
#define __must_check
#endif
#ifndef CONFIG_ENABLE_WARN_DEPRECATED
#undef __deprecated
#undef __deprecated_for_modules
#define __deprecated
#define __deprecated_for_modules
#endif

/*
 * Allow us to avoid 'defined but not used' warnings on functions and data,
 * as well as force them to be emitted to the assembly file.
 *
 * As of gcc 3.4, static functions that are not marked with attribute((used))
 * may be elided from the assembly file.  As of gcc 3.4, static data not so
 * marked will not be elided, but this may change in a future gcc version.
 *
 * NOTE: Because distributions shipped with a backported unit-at-a-time
 * compiler in gcc 3.3, we must define __used to be __attribute__((used))
 * for gcc >= 3.3 instead of 3.4.
 *
 * In prior versions of gcc, such functions and data would be emitted, but
 * would be warned about except with attribute((unused)).
 *
 * Mark functions that are referenced only in inline assembly as __used so
 * the code is emitted even though it appears to be unreferenced.
 */
#ifndef __used
# define __used			/* unimplemented */
#endif

#ifndef __maybe_unused
# define __maybe_unused		/* unimplemented */
#endif

#ifndef __always_unused
# define __always_unused	/* unimplemented */
#endif

#ifndef noinline
#define noinline
#endif

/*
 * Rather than using noinline to prevent stack consumption, use
 * noinline_for_stack instead.  For documentation reasons.
 */
#define noinline_for_stack noinline

#ifndef __always_inline
#define __always_inline inline
#endif

#endif /* __KERNEL__ */

/*
 * From the GCC manual:
 *
 *  Many functions do not examine any values except their arguments,
 *  and have no effects except the return value.  Basically this is
 *  just slightly more strict class than the `pure' attribute above,
 *  since function is not allowed to read global memory.
 *
 *  Note that a function that has pointer arguments and examines the
 *  data pointed to must _not_ be declared `const'.  Likewise, a
 *  function that calls a non-`const' function usually must not be
 *  `const'.  It does not make sense for a `const' function to return
 *  `void'.
 */
#ifndef __attribute_const__
# define __attribute_const__	/* unimplemented */
#endif

/*
 * Tell gcc if a function is cold.  The compiler will assume any path
 * directly leading to the call is unlikely.
 */

#ifndef __cold
#define __cold
#endif

/* Simple shorthand for a section definition */
#ifndef __section
# define __section(S) __attribute__ ((__section__(#S)))
#endif

#ifndef __visible
#define __visible
#endif

/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif

/* Is this type a native word size -- useful for atomic operations */
#ifndef __native_word
# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
			   sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#endif

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
/*
 * Sparse complains of variable sized arrays due to the temporary variable in
 * __compiletime_assert.  Unfortunately we can't just expand it out to make
 * sparse see a constant array size without breaking compiletime_assert on old
 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
 */
# ifndef __CHECKER__
#  define __compiletime_error_fallback(condition) \
	do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
# endif
#endif
#ifndef __compiletime_error_fallback
# define __compiletime_error_fallback(condition) do { } while (0)
#endif

#define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		bool __cond = !(condition);				\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (__cond)						\
			prefix ## suffix();				\
		__compiletime_error_fallback(__cond);			\
	} while (0)

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg:       a message to emit if condition is false
 *
 * In the tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")

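/*
 * Illustrative sketch, not part of this header: typical use, with a
 * hypothetical structure whose layout other code depends on:
 *
 *	compiletime_assert(sizeof(struct foo) == 16,
 *			   "struct foo must stay 16 bytes");
 *
 * compiletime_assert_atomic_type() is used the same way by code that
 * requires plain loads and stores of a type to be single, non-torn
 * accesses.
 */
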
/*
 * Prevent the compiler from merging or refetching accesses.  The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering.  One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * ACCESS_ONCE will only work on scalar types.  For union types, ACCESS_ONCE
 * on a union member will work as long as the size of the member matches the
 * size of the union and the size is smaller than word size.
 *
 * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
 * between process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 *
 * If possible use READ_ONCE()/WRITE_ONCE() instead.
 */
#define __ACCESS_ONCE(x) ({ \
	 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
	(volatile typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))

/**
 * lockless_dereference() - safely load a pointer for later dereference
 * @p: The pointer to load
 *
 * Similar to rcu_dereference(), but for situations where the pointed-to
 * object's lifetime is managed by something other than RCU.  That
 * "something other" might be reference counting or simple immortality.
 */
#define lockless_dereference(p) \
({ \
	typeof(p) _________p1 = READ_ONCE(p); \
	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
	(_________p1); \
})

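/*
 * Illustrative sketch, not part of this header: loading a pointer that is
 * published by another CPU but whose lifetime is managed by reference
 * counting rather than RCU (the names are hypothetical):
 *
 *	struct foo *f = lockless_dereference(global_foo);
 *	if (f)
 *		use(f->bar);
 *
 * The dereference of f->bar is dependency-ordered after the load of
 * global_foo, without the cost of a full read barrier.
 */
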
/* Ignore/forbid kprobes attach on very low-level functions marked by this attribute: */
#ifdef CONFIG_KPROBES
# define __kprobes	__attribute__((__section__(".kprobes.text")))
# define nokprobe_inline	__always_inline
#else
# define __kprobes
# define nokprobe_inline	inline
#endif
#endif /* __LINUX_COMPILER_H */