/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#include <linux/compiler_types.h>

#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
			  int expect, int is_constant);

#define likely_notrace(x)	__builtin_expect(!!(x), 1)
#define unlikely_notrace(x)	__builtin_expect(!!(x), 0)

#define __branch_check__(x, expect, is_constant) ({			\
	long ______r;							\
	static struct ftrace_likely_data				\
		__aligned(4)						\
		__section(_ftrace_annotated_branch)			\
		______f = {						\
			.data.func = __func__,				\
			.data.file = __FILE__,				\
			.data.line = __LINE__,				\
		};							\
	______r = __builtin_expect(!!(x), expect);			\
	ftrace_likely_update(&______f, ______r,				\
			     expect, is_constant);			\
	______r;							\
})

/*
 * Use __builtin_constant_p(x) to ignore cases where the return
 * value is always the same. This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )

#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))

#define __trace_if_value(cond) ({					\
	static struct ftrace_branch_data				\
		__aligned(4)						\
		__section(_ftrace_branch)				\
		__if_trace = {						\
			.func = __func__,				\
			.file = __FILE__,				\
			.line = __LINE__,				\
		};							\
	(cond) ?							\
		(__if_trace.miss_hit[1]++, 1) :				\
		(__if_trace.miss_hit[0]++, 0);				\
})

#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x)	__builtin_expect(!!(x), 1)
# define unlikely(x)	__builtin_expect(!!(x), 0)
#endif

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif

#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif

/* Workaround for GCC PR82365, if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif

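/*
 * Usage sketch (illustration only; wait_for_flag() and its variables are
 * hypothetical and not part of this header): likely()/unlikely() hint the
 * expected direction of a branch to the compiler (and, with branch profiling
 * enabled, record hit/miss statistics), while barrier() forces memory to be
 * re-read instead of being cached in a register across loop iterations:
 *
 *	static int wait_for_flag(const int *flag)
 *	{
 *		if (unlikely(!flag))		// cold error path
 *			return -1;
 *
 *		while (likely(*flag == 0))	// expected common case
 *			barrier();		// *flag is reloaded every pass
 *
 *		return 0;
 *	}
 */
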
/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
 * These macros help objtool understand GCC code flow for unreachable code.
 * The __COUNTER__ based labels are a hack to make each instance of the macros
 * unique, to convince GCC not to merge duplicate inline asm statements.
 */
#define annotate_reachable() ({						\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.reachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define annotate_unreachable() ({					\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.unreachable\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#define ASM_UNREACHABLE							\
	"999:\n\t"							\
	".pushsection .discard.unreachable\n\t"				\
	".long 999b - .\n\t"						\
	".popsection\n\t"

/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(.rodata..c_jump_table)

#ifdef CONFIG_DEBUG_ENTRY
/* Begin/end of an instrumentation-safe region */
#define instrumentation_begin() ({					\
	asm volatile("%c0:\n\t"						\
		     ".pushsection .discard.instr_begin\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})

/*
 * Because instrumentation_{begin,end}() can nest, objtool validation considers
 * _begin() a +1 and _end() a -1 and computes a sum over the instructions.
 * When the value is greater than 0, instrumentation is considered allowed.
 *
 * There is a problem with code like:
 *
 *	noinstr void foo()
 *	{
 *		instrumentation_begin();
 *		...
 *		if (cond) {
 *			instrumentation_begin();
 *			...
 *			instrumentation_end();
 *		}
 *		bar();
 *		instrumentation_end();
 *	}
 *
 * If instrumentation_end() were an empty label, like all the other
 * annotations, the inner _end(), which is at the end of a conditional block,
 * would land on the instruction after the block.
 *
 * If we then consider the sum over the !cond path, the call to bar() happens
 * with a value of 0, even though we meant it to happen with a positive value.
 *
 * To avoid this, make _end() a NOP instruction; this ensures it stays part of
 * the conditional block and does not escape it.
 */
#define instrumentation_end() ({					\
	asm volatile("%c0: nop\n\t"					\
		     ".pushsection .discard.instr_end\n\t"		\
		     ".long %c0b - .\n\t"				\
		     ".popsection\n\t" : : "i" (__COUNTER__));		\
})
#endif /* CONFIG_DEBUG_ENTRY */

#else
#define annotate_reachable()
#define annotate_unreachable()
#define __annotate_jump_table
#endif

#ifndef instrumentation_begin
#define instrumentation_begin()	do { } while (0)
#define instrumentation_end()	do { } while (0)
#endif

#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do {		\
	annotate_unreachable();		\
	__builtin_unreachable();	\
} while (0)
#endif

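/*
 * Usage sketch (illustration only; the halt instruction and function below
 * are hypothetical and not part of this header): unreachable() tells the
 * compiler, and via annotate_unreachable() also objtool, that control flow
 * can never reach the point after it, e.g. after an asm sequence that does
 * not return:
 *
 *	static void __noreturn example_halt(void)
 *	{
 *		asm volatile("halt-insn" ::: "memory");	// never returns
 *		unreachable();
 *	}
 */
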
/*
 * KENTRY - kernel entry point
 * This can be used to annotate symbols (functions or data) that are used
 * without their linker symbol being referenced explicitly. For example,
 * interrupt vector handlers, or functions in the kernel image that are found
 * programmatically.
 *
 * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
 * are handled in their own way (with KEEP() in linker scripts).
 *
 * KENTRY can be avoided if the symbols in question are marked as KEEP() in the
 * linker script. For example an architecture could KEEP() its entire
 * boot/exception vector code rather than annotate each function and data.
 */
#ifndef KENTRY
# define KENTRY(sym)						\
	extern typeof(sym) sym;					\
	static const unsigned long __kentry_##sym		\
	__used							\
	__section("___kentry" "+" #sym )			\
	= (unsigned long)&sym;
#endif

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off)					\
  ({ unsigned long __ptr;					\
     __ptr = (unsigned long) (ptr);				\
    (typeof(ptr)) (__ptr + (off)); })
#endif

#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var)					\
	__asm__ ("" : "=r" (var) : "0" (var))
#endif

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif

#include <uapi/linux/types.h>

#define __READ_ONCE_SIZE						\
({									\
	switch (size) {							\
	case 1: *(__u8 *)res = *(volatile __u8 *)p; break;		\
	case 2: *(__u16 *)res = *(volatile __u16 *)p; break;		\
	case 4: *(__u32 *)res = *(volatile __u32 *)p; break;		\
	case 8: *(__u64 *)res = *(volatile __u64 *)p; break;		\
	default:							\
		barrier();						\
		__builtin_memcpy((void *)res, (const void *)p, size);	\
		barrier();						\
	}								\
})

static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

#ifdef CONFIG_KASAN
/*
 * We can't declare the function 'inline' because __no_sanitize_address
 * conflicts with inlining. Attempting to inline it may cause a build failure.
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
 * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
 */
# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif

static __no_kasan_or_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
	__READ_ONCE_SIZE;
}

static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
	case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
	default:
		barrier();
		__builtin_memcpy((void *)p, (const void *)res, size);
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
 * particular ordering. One way to make the compiler aware of ordering is to
 * put the two invocations of READ_ONCE or WRITE_ONCE in different C
 * statements.
 *
 * These two macros will also work on aggregate data types like structs or
 * unions. If the size of the accessed data type exceeds the word size of
 * the machine (e.g., 32 bits or 64 bits), READ_ONCE() and WRITE_ONCE() will
 * fall back to memcpy(). There are at least two memcpy()s: one for the
 * __builtin_memcpy() and one for the macro's copy into the variable '__u'
 * allocated on the stack.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */
#include <asm/barrier.h>
#include <linux/kasan-checks.h>

#define __READ_ONCE(x, check)						\
({									\
	union { typeof(x) __val; char __c[1]; } __u;			\
	if (check)							\
		__read_once_size(&(x), __u.__c, sizeof(x));		\
	else								\
		__read_once_size_nocheck(&(x), __u.__c, sizeof(x));	\
	smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
	__u.__val;							\
})
#define READ_ONCE(x) __READ_ONCE(x, 1)

/*
 * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
 * to hide memory access from KASAN.
 */
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)

static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
	kasan_check_read(addr, 1);
	return *(unsigned long *)addr;
}

#define WRITE_ONCE(x, val)					\
({								\
	union { typeof(x) __val; char __c[1]; } __u =		\
		{ .__val = (__force typeof(x)) (val) };		\
	__write_once_size(&(x), __u.__c, sizeof(x));		\
	__u.__val;						\
})

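/*
 * Usage sketch (illustration only; 'shared_flag', producer() and consumer()
 * are hypothetical and not part of this header): a writer in interrupt
 * context publishes a flag that a reader polls from process context on the
 * same CPU. READ_ONCE()/WRITE_ONCE() stop the compiler from caching,
 * refetching or tearing the accesses; on their own they do not order the
 * accesses against other memory operations:
 *
 *	static int shared_flag;
 *
 *	static void producer(void)		// e.g. an irq handler
 *	{
 *		WRITE_ONCE(shared_flag, 1);
 *	}
 *
 *	static void consumer(void)		// process context
 *	{
 *		while (!READ_ONCE(shared_flag))
 *			cpu_relax();
 *	}
 */
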
#endif /* __KERNEL__ */

/*
 * Force the compiler to emit 'sym' as a symbol, so that we can reference
 * it from inline assembler. Necessary in case 'sym' could be inlined
 * otherwise, or eliminated entirely due to lack of references that are
 * visible to the compiler.
 */
#define __ADDRESSABLE(sym)						\
	static void * __section(.discard.addressable) __used		\
		__PASTE(__addressable_##sym, __LINE__) = (void *)&sym;

/**
 * offset_to_ptr - convert a relative memory offset to an absolute pointer
 * @off: the address of the 32-bit offset value
 */
static inline void *offset_to_ptr(const int *off)
{
	return (void *)((unsigned long)off + *off);
}

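/*
 * Usage sketch (illustration only; the names below are hypothetical and not
 * part of this header): given a 32-bit slot that holds a self-relative
 * offset, i.e. "target address minus the address of the slot itself" (as
 * emitted, for instance, by a ".long target - ." assembler directive),
 * offset_to_ptr() converts the slot back into an absolute pointer:
 *
 *	const int *slot = &some_offset_table[i];	// *slot == target - slot
 *	void *target = offset_to_ptr(slot);		// == (char *)slot + *slot
 */
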
#endif /* __ASSEMBLY__ */

/* Compile-time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
#endif

#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix)		\
	do {								\
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (!(condition))					\
			prefix ## suffix();				\
	} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg: a message to emit if condition is false
 *
 * In the tradition of the POSIX assert(), this macro will break the build if
 * the supplied condition is *false*, emitting the supplied error message if
 * the compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)

#define compiletime_assert_atomic_type(t)				\
	compiletime_assert(__native_word(t),				\
		"Need native word sized stores/loads for atomicity.")

/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a)	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))

/*
 * This is needed in functions which generate the stack canary, see
 * arch/x86/kernel/smpboot.c::start_secondary() for an example.
 */
#define prevent_tail_call_optimization()	mb()

#endif /* __LINUX_COMPILER_H */