/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
/*
 * rseq.h
 *
 * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef RSEQ_H
#define RSEQ_H

#include <stdint.h>
#include <stdbool.h>
#include <pthread.h>
#include <signal.h>
#include <sched.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/rseq.h>

/*
 * Code injection macros: empty by default, override them when testing.
 * Note that the ASM injection macros must be fully reentrant (e.g. they
 * must not modify the stack).
 */
#ifndef RSEQ_INJECT_ASM
#define RSEQ_INJECT_ASM(n)
#endif

#ifndef RSEQ_INJECT_C
#define RSEQ_INJECT_C(n)
#endif

#ifndef RSEQ_INJECT_INPUT
#define RSEQ_INJECT_INPUT
#endif

#ifndef RSEQ_INJECT_CLOBBER
#define RSEQ_INJECT_CLOBBER
#endif

#ifndef RSEQ_INJECT_FAILED
#define RSEQ_INJECT_FAILED
#endif
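
/*
 * Example override (a sketch; the counter name is hypothetical): a test
 * harness can count aborted critical sections by defining
 * RSEQ_INJECT_FAILED before including this header.
 *
 *   extern unsigned long rseq_abort_count;
 *   #define RSEQ_INJECT_FAILED rseq_abort_count++;
 *   #include "rseq.h"
 */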

extern __thread volatile struct rseq __rseq_abi;

#define rseq_likely(x)		__builtin_expect(!!(x), 1)
#define rseq_unlikely(x)	__builtin_expect(!!(x), 0)
#define rseq_barrier()		__asm__ __volatile__("" : : : "memory")

#define RSEQ_ACCESS_ONCE(x)	(*(__volatile__ __typeof__(x) *)&(x))
#define RSEQ_WRITE_ONCE(x, v)	__extension__ ({ RSEQ_ACCESS_ONCE(x) = (v); })
#define RSEQ_READ_ONCE(x)	RSEQ_ACCESS_ONCE(x)

#define __rseq_str_1(x)	#x
#define __rseq_str(x)		__rseq_str_1(x)

#define rseq_log(fmt, args...)						       \
	fprintf(stderr, fmt " (in %s() at " __FILE__ ":" __rseq_str(__LINE__) ")\n", \
		## args, __func__)

#define rseq_bug(fmt, args...)		\
	do {				\
		rseq_log(fmt, ##args);	\
		abort();		\
	} while (0)
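
/*
 * Example (illustrative): abort with a diagnostic naming the calling
 * function and source location.
 *
 *   if (rseq_unlikely(ret < 0))
 *           rseq_bug("unexpected rseq error %d", ret);
 */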

#if defined(__x86_64__) || defined(__i386__)
#include <rseq-x86.h>
#elif defined(__ARMEL__)
#include <rseq-arm.h>
#elif defined(__PPC__)
#include <rseq-ppc.h>
#else
#error unsupported target
#endif

/*
 * Register rseq for the current thread. This needs to be called once
 * by any thread which uses restartable sequences, before it starts
 * using them, to ensure restartable sequences succeed. A restartable
 * sequence executed from a non-registered thread will always fail.
 */
int rseq_register_current_thread(void);

/*
 * Unregister rseq for the current thread.
 */
int rseq_unregister_current_thread(void);
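
/*
 * Typical usage (a sketch; error handling is up to the caller):
 * register at thread start, unregister before thread exit.
 *
 *   static void *thread_fn(void *arg)
 *   {
 *           if (rseq_register_current_thread())
 *                   abort();
 *
 *           ... use restartable sequences ...
 *
 *           if (rseq_unregister_current_thread())
 *                   abort();
 *           return NULL;
 *   }
 */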

/*
 * Restartable sequence fallback for reading the current CPU number.
 */
int32_t rseq_fallback_current_cpu(void);

/*
 * Values returned can be either the current CPU number, -1 (rseq is
 * uninitialized), or -2 (rseq initialization has failed).
 */
static inline int32_t rseq_current_cpu_raw(void)
{
	return RSEQ_ACCESS_ONCE(__rseq_abi.cpu_id);
}

/*
 * Returns a possible CPU number, which is typically the current CPU.
 * The returned CPU number can be used to prepare for an rseq critical
 * section, which will confirm whether the cpu number is indeed the
 * current one, and whether rseq is initialized.
 *
 * The CPU number returned by rseq_cpu_start() should always be validated
 * by passing it to an rseq asm sequence, or by comparing it to the
 * return value of rseq_current_cpu_raw() if the rseq asm sequence
 * does not need to be invoked.
 */
static inline uint32_t rseq_cpu_start(void)
{
	return RSEQ_ACCESS_ONCE(__rseq_abi.cpu_id_start);
}
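
/*
 * Example validation without invoking an rseq asm sequence (a sketch):
 * the value from rseq_cpu_start() is only trusted when it matches the
 * registered current CPU.
 *
 *   uint32_t cpu = rseq_cpu_start();
 *
 *   if ((int32_t) cpu == rseq_current_cpu_raw()) {
 *           ... cpu is the current CPU and rseq is registered ...
 *   }
 */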

static inline uint32_t rseq_current_cpu(void)
{
	int32_t cpu;

	cpu = rseq_current_cpu_raw();
	if (rseq_unlikely(cpu < 0))
		cpu = rseq_fallback_current_cpu();
	return cpu;
}
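
/*
 * Example (a sketch; the per-CPU array is hypothetical): index per-CPU
 * data by the returned CPU number. The rseq critical section committing
 * the update must still validate that the CPU is current.
 *
 *   uint32_t cpu = rseq_current_cpu();
 *   struct percpu_entry *entry = &percpu_array[cpu];
 */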

/*
 * rseq_prepare_unload() should be invoked by each thread using rseq_finish*()
 * at least once between their last rseq_finish*() and the unload of the
 * library defining the rseq critical section (struct rseq_cs). This also
 * applies to use of rseq in code generated by a JIT: rseq_prepare_unload()
 * should be invoked at least once by each thread using rseq_finish*() before
 * reclaim of the memory holding the struct rseq_cs.
 */
static inline void rseq_prepare_unload(void)
{
	__rseq_abi.rseq_cs = 0;
}
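
/*
 * Example (a sketch, assuming the rseq critical sections live in a
 * dlopen()ed library): each thread which used rseq_finish*() calls
 * rseq_prepare_unload() before the library is closed.
 *
 *   rseq_prepare_unload();
 *   dlclose(handle);
 */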

#endif  /* RSEQ_H */