#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
 * to disable branch tracing on a per file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
			int ______r;					\
			static struct ftrace_likely_data		\
				__attribute__((__aligned__(4)))		\
				__attribute__((section("_ftrace_annotated_branch"))) \
				______f = {				\
				.data.func = __func__,			\
				.data.file = __FILE__,			\
				.data.line = __LINE__,			\
			};						\
			______r = __builtin_expect(!!(x), expect);	\
			ftrace_likely_update(&______f, ______r,		\
					     expect, is_constant);	\
			______r;					\
		})

/*
 * Using __builtin_constant_p(x) to ignore cases where the return
 * value is always the same. This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond)						\
	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
	({								\
		int ______r;						\
		static struct ftrace_branch_data			\
			__attribute__((__aligned__(4)))			\
			__attribute__((section("_ftrace_branch")))	\
			______f = {					\
				.func = __func__,			\
				.file = __FILE__,			\
				.line = __LINE__,			\
			};						\
		______r = !!(cond);					\
		______f.miss_hit[______r]++;				\
		______r;						\
	}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif
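
/*
 * Example (illustrative only; the function names below are hypothetical):
 * likely() and unlikely() merely pass a branch-prediction hint to the
 * compiler, the condition is still evaluated normally.  A caller might
 * write:
 *
 *	err = setup_device();
 *	if (unlikely(err))
 *		return err;
 *
 *	if (likely(fast_path_ok))
 *		return do_fast_path();
 */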

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif

/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
#define annotate_reachable() ({						\
	asm("%c0:\n\t"							\
	    ".pushsection .discard.reachable\n\t"			\
	    ".long %c0b - .\n\t"					\
	    ".popsection\n\t" : : "i" (__LINE__));			\
})
#define annotate_unreachable() ({					\
	asm("%c0:\n\t"							\
	    ".pushsection .discard.unreachable\n\t"			\
	    ".long %c0b - .\n\t"					\
	    ".popsection\n\t" : : "i" (__LINE__));			\
})
#define ASM_UNREACHABLE							\
	"999:\n\t"							\
	".pushsection .discard.unreachable\n\t"				\
	".long 999b - .\n\t"						\
	".popsection\n\t"
#else
#define annotate_reachable()
#define annotate_unreachable()
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do { annotate_reachable(); do { } while (1); } while (0)
#endif

/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__attribute__((section("___kentry" "+" #sym ), used))	\
	= (unsigned long)&sym;
#endif

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif

#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * This function is not 'inline' because __no_sanitize_address conflicts
 * with inlining. Attempting to inline it may cause a build failure.
 *	https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
static __no_sanitize_address __maybe_unused
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}
#else
static __always_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}
#endif

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
 * compiler is aware of some particular ordering. One way to make the
 * compiler aware of ordering is to put the two invocations of READ_ONCE,
 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
 *
 * In contrast to ACCESS_ONCE these two macros will also work on aggregate
 * data types like structs or unions. If the size of the accessed data
 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There are at
 * least two memcpy()s: one for the __builtin_memcpy() and then one for
 * the macro doing the copy of variable - '__u' allocated on the stack.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
#include <asm/barrier.h>

#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

#define WRITE_ONCE(x, val)					\
({								\
	union { typeof(x) __val; char __c[1]; } __u =		\
		{ .__val = (__force typeof(x)) (val) };		\
	__write_once_size(&(x), __u.__c, sizeof(x));		\
	__u.__val;						\
})
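
/*
 * Example (illustrative only; the flag and functions below are hypothetical):
 * a flag shared between process-level code and an interrupt handler running
 * on the same CPU can be accessed with WRITE_ONCE()/READ_ONCE() so the
 * compiler neither merges, refetches, nor tears the accesses:
 *
 *	static int need_resync;
 *
 *	void request_resync(void)		// process context
 *	{
 *		WRITE_ONCE(need_resync, 1);
 *	}
 *
 *	void irq_handler_body(void)		// interrupt context
 *	{
 *		if (READ_ONCE(need_resync)) {
 *			WRITE_ONCE(need_resync, 0);
 *			resync_hardware();
 *		}
 *	}
 */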

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
/*
 * Sparse complains of variable sized arrays due to the temporary variable in
 * __compiletime_assert. Unfortunately we can't just expand it out to make
 * sparse see a constant array size without breaking compiletime_assert on old
 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
 */
# ifndef __CHECKER__
#  define __compiletime_error_fallback(condition) \
	do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
# endif
#endif
#ifndef __compiletime_error_fallback
# define __compiletime_error_fallback(condition) do { } while (0)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		bool __cond = !(condition);				\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (__cond)						\
			prefix ## suffix();				\
		__compiletime_error_fallback(__cond);			\
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg: a message to emit if condition is false
 *
 * In the tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")
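
/*
 * Example (illustrative only; 'struct sample' is hypothetical): the condition
 * must be a compile-time constant, and the build fails with the given message
 * when it evaluates to false:
 *
 *	compiletime_assert(sizeof(struct sample) <= 64,
 *			   "struct sample no longer fits in a cache line");
 */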

/*
 * Prevent the compiler from merging or refetching accesses. The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering. One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
 * on a union member will work as long as the size of the member matches the
 * size of the union and the size is smaller than word size.
 *
 * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
 * between process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 *
 * If possible use READ_ONCE()/WRITE_ONCE() instead.
 */
#define __ACCESS_ONCE(x) ({ \
	 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
	(volatile typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))

#endif /* __LINUX_COMPILER_H */