/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
/*
 * Select the instruction "csrw mhartid, x0" as the RSEQ_SIG. Unlike on other
 * architectures, the RISC-V ebreak instruction has no immediate field that
 * could be used to distinguish an rseq signature trap from an ordinary
 * breakpoint, so ebreak is not suitable as RSEQ_SIG. "csrw mhartid, x0"
 * satisfies the RSEQ requirements because it is an uncommon instruction and
 * raises an illegal-instruction exception when executed in any privilege
 * mode.
 */
#include <endian.h>

#if defined(__BYTE_ORDER) ? (__BYTE_ORDER == __LITTLE_ENDIAN) : defined(__LITTLE_ENDIAN)
#define RSEQ_SIG   0xf1401073  /* csrw mhartid, x0 */
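/*
 * For reference, 0xf1401073 is the encoding of "csrrw x0, mhartid, x0"
 * (assembler pseudo-instruction "csrw mhartid, x0"):
 * csr = 0xf14 (mhartid), rs1 = x0, funct3 = 001 (CSRRW), rd = x0,
 * opcode = 0x73 (SYSTEM).
 */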
#else
#error "Currently, RSEQ only supports little-endian builds"
#endif

#if __riscv_xlen == 64
#define __REG_SEL(a, b)	a
#elif __riscv_xlen == 32
#define __REG_SEL(a, b)	b
#endif

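/*
 * REG_L/REG_S expand to the XLEN-sized load/store mnemonics ("ld"/"sd" on
 * rv64, "lw"/"sw" on rv32) so that the helpers below operate on
 * pointer-sized (intptr_t) values.
 */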
#define REG_L	__REG_SEL("ld ", "lw ")
#define REG_S	__REG_SEL("sd ", "sw ")

#define RISCV_FENCE(p, s) \
	__asm__ __volatile__ ("fence " #p "," #s : : : "memory")
#define rseq_smp_mb()	RISCV_FENCE(rw, rw)
#define rseq_smp_rmb()	RISCV_FENCE(r, r)
#define rseq_smp_wmb()	RISCV_FENCE(w, w)
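
/*
 * Scratch registers used by the asm helpers below; t3-t6 (x28-x31) are
 * caller-saved temporaries in the RISC-V calling convention.
 */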
#define RSEQ_ASM_TMP_REG_1	"t6"
#define RSEQ_ASM_TMP_REG_2	"t5"
#define RSEQ_ASM_TMP_REG_3	"t4"
#define RSEQ_ASM_TMP_REG_4	"t3"

#define rseq_smp_load_acquire(p)					\
__extension__ ({							\
	rseq_unqual_scalar_typeof(*(p)) ____p1 = RSEQ_READ_ONCE(*(p));	\
	RISCV_FENCE(r, rw);						\
	____p1;								\
})

#define rseq_smp_acquire__after_ctrl_dep()	rseq_smp_rmb()

#define rseq_smp_store_release(p, v)					\
do {									\
	RISCV_FENCE(rw, w);						\
	RSEQ_WRITE_ONCE(*(p), v);					\
} while (0)
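
/*
 * The acquire/release helpers above use the usual RISC-V fence-based
 * mappings: load-acquire is a plain load followed by "fence r, rw", and
 * store-release is "fence rw, w" followed by a plain store. Illustrative
 * pairing (a sketch, not part of this file):
 *
 *	writer:					reader:
 *	data = 42;				while (!rseq_smp_load_acquire(&ready))
 *	rseq_smp_store_release(&ready, 1);		;
 *						r = data;  // observes 42
 */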

#define __RSEQ_ASM_DEFINE_TABLE(label, version, flags, start_ip,	\
				post_commit_offset, abort_ip)		\
	".pushsection	__rseq_cs, \"aw\"\n"				\
	".balign	32\n"						\
	__rseq_str(label) ":\n"						\
	".long	" __rseq_str(version) ", " __rseq_str(flags) "\n"	\
	".quad	" __rseq_str(start_ip) ", "				\
			  __rseq_str(post_commit_offset) ", "		\
			  __rseq_str(abort_ip) "\n"			\
	".popsection\n\t"						\
	".pushsection __rseq_cs_ptr_array, \"aw\"\n"			\
	".quad " __rseq_str(label) "b\n"				\
	".popsection\n"

#define RSEQ_ASM_DEFINE_TABLE(label, start_ip, post_commit_ip, abort_ip) \
	__RSEQ_ASM_DEFINE_TABLE(label, 0x0, 0x0, start_ip,		 \
				((post_commit_ip) - (start_ip)), abort_ip)
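
/*
 * The two macros above emit, for each critical section, a 32-byte aligned
 * struct rseq_cs descriptor into the __rseq_cs section: 32-bit version and
 * flags followed by the 64-bit start_ip, post_commit_offset and abort_ip
 * fields. The descriptor's address is also recorded in __rseq_cs_ptr_array
 * so that all critical section descriptors can be enumerated.
 */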

/*
 * Exit points of an rseq critical section consist of all instructions
 * outside of the critical section to which the critical section can either
 * branch or which it can reach through the normal course of its execution.
 * The abort IP and the post-commit IP are already part of the __rseq_cs
 * section and should not be explicitly defined as additional exit points.
 * Knowing all exit points is useful to assist debuggers stepping over the
 * critical section.
 */
#define RSEQ_ASM_DEFINE_EXIT_POINT(start_ip, exit_ip)			\
	".pushsection __rseq_exit_point_array, \"aw\"\n"		\
	".quad " __rseq_str(start_ip) ", " __rseq_str(exit_ip) "\n"	\
	".popsection\n"

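/*
 * Arm the critical section: load the address of its struct rseq_cs
 * descriptor (cs_label) and store it into the rseq_cs field of the thread's
 * struct rseq, passed via the "rseq_cs" asm operand. The numeric label
 * placed after the store marks the start IP of the sequence.
 */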
#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs)		\
	RSEQ_INJECT_ASM(1)						\
	"la	" RSEQ_ASM_TMP_REG_1 ", " __rseq_str(cs_label) "\n"	\
	REG_S	RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(rseq_cs) "]\n"	\
	__rseq_str(label) ":\n"

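/*
 * Abort trampoline: RSEQ_SIG must be placed immediately before the abort
 * label so that the kernel can check the signature before branching to the
 * abort handler; the leading jump skips over the signature and the
 * trampoline on the normal (non-abort) path.
 */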
#define RSEQ_ASM_DEFINE_ABORT(label, abort_label)			\
	"j	222f\n"							\
	".balign	4\n"						\
	".long "	__rseq_str(RSEQ_SIG) "\n"			\
	__rseq_str(label) ":\n"						\
	"j	%l[" __rseq_str(abort_label) "]\n"			\
	"222:\n"

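/*
 * Primitive operations composed by rseq-riscv-bits.h into complete critical
 * sections. The compare helpers load the memory operand into
 * RSEQ_ASM_TMP_REG_1 and branch to "label" when the tested condition does
 * not hold.
 */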
#define RSEQ_ASM_OP_STORE(value, var)					\
	REG_S	"%[" __rseq_str(value) "], %[" __rseq_str(var) "]\n"

#define RSEQ_ASM_OP_CMPEQ(var, expect, label)				\
	REG_L	RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(var) "]\n"		\
	"bne	" RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(expect) "], "	\
		  __rseq_str(label) "\n"

#define RSEQ_ASM_OP_CMPEQ32(var, expect, label)				\
	"lw	" RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(var) "]\n"	\
	"bne	" RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(expect) "], "	\
		  __rseq_str(label) "\n"

#define RSEQ_ASM_OP_CMPNE(var, expect, label)				\
	REG_L	RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(var) "]\n"		\
	"beq	" RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(expect) "], "	\
		  __rseq_str(label) "\n"

#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label)		\
	RSEQ_INJECT_ASM(2)						\
	RSEQ_ASM_OP_CMPEQ32(current_cpu_id, cpu_id, label)

#define RSEQ_ASM_OP_R_LOAD(var)						\
	REG_L	RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(var) "]\n"

#define RSEQ_ASM_OP_R_STORE(var)					\
	REG_S	RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(var) "]\n"

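/*
 * Add "offset" to the value already held in RSEQ_ASM_TMP_REG_1 (typically
 * loaded by RSEQ_ASM_OP_R_LOAD) and load from the resulting address.
 */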
#define RSEQ_ASM_OP_R_LOAD_OFF(offset)					\
	"add	" RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(offset) "], "	\
		 RSEQ_ASM_TMP_REG_1 "\n"				\
	REG_L	RSEQ_ASM_TMP_REG_1 ", (" RSEQ_ASM_TMP_REG_1 ")\n"

#define RSEQ_ASM_OP_R_ADD(count)					\
	"add	" RSEQ_ASM_TMP_REG_1 ", " RSEQ_ASM_TMP_REG_1		\
		", %[" __rseq_str(count) "]\n"

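/*
 * Final stores: the single store that commits the critical section.
 * post_commit_label marks the first instruction past the commit; together
 * with the start IP it bounds the region within which the kernel diverts
 * execution to the abort IP on preemption, migration or signal delivery.
 */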
#define RSEQ_ASM_OP_FINAL_STORE(value, var, post_commit_label)		\
	RSEQ_ASM_OP_STORE(value, var)					\
	__rseq_str(post_commit_label) ":\n"

#define RSEQ_ASM_OP_FINAL_STORE_RELEASE(value, var, post_commit_label)	\
	"fence	rw, w\n"						\
	RSEQ_ASM_OP_STORE(value, var)					\
	__rseq_str(post_commit_label) ":\n"

#define RSEQ_ASM_OP_R_FINAL_STORE(var, post_commit_label)		\
	REG_S	RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(var) "]\n"		\
	__rseq_str(post_commit_label) ":\n"

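/*
 * Byte-at-a-time copy loop executed inside the critical section; it clobbers
 * all four temporary registers. The 222/333 numeric labels are local to the
 * expanded asm.
 */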
#define RSEQ_ASM_OP_R_BAD_MEMCPY(dst, src, len)				\
	"beqz	%[" __rseq_str(len) "], 333f\n"				\
	"mv	" RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(len) "]\n"	\
	"mv	" RSEQ_ASM_TMP_REG_2 ", %[" __rseq_str(src) "]\n"	\
	"mv	" RSEQ_ASM_TMP_REG_3 ", %[" __rseq_str(dst) "]\n"	\
	"222:\n"							\
	"lb	" RSEQ_ASM_TMP_REG_4 ", 0(" RSEQ_ASM_TMP_REG_2 ")\n"	\
	"sb	" RSEQ_ASM_TMP_REG_4 ", 0(" RSEQ_ASM_TMP_REG_3 ")\n"	\
	"addi	" RSEQ_ASM_TMP_REG_1 ", " RSEQ_ASM_TMP_REG_1 ", -1\n"	\
	"addi	" RSEQ_ASM_TMP_REG_2 ", " RSEQ_ASM_TMP_REG_2 ", 1\n"	\
	"addi	" RSEQ_ASM_TMP_REG_3 ", " RSEQ_ASM_TMP_REG_3 ", 1\n"	\
	"bnez	" RSEQ_ASM_TMP_REG_1 ", 222b\n"				\
	"333:\n"

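/*
 * Load the value at (ptr + off) into RSEQ_ASM_TMP_REG_1 and add the "inc"
 * asm operand to it. Note that "inc" is not a macro parameter: the template
 * expanding this macro is expected to provide an input operand named [inc].
 */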
#define RSEQ_ASM_OP_R_DEREF_ADDV(ptr, off, post_commit_label)		\
	"mv	" RSEQ_ASM_TMP_REG_1 ", %[" __rseq_str(ptr) "]\n"	\
	RSEQ_ASM_OP_R_ADD(off)						\
	REG_L	  RSEQ_ASM_TMP_REG_1 ", 0(" RSEQ_ASM_TMP_REG_1 ")\n"	\
	RSEQ_ASM_OP_R_ADD(inc)						\
	__rseq_str(post_commit_label) ":\n"

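/*
 * rseq-riscv-bits.h is included several times below. The RSEQ_TEMPLATE_*
 * defines select which variant of each operation is generated from the
 * shared template: per-cpu-id, per-mm-cid or cpu-id-agnostic indexing, and
 * relaxed vs. release memory ordering for the commit store.
 */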
/* Per-cpu-id indexing. */

#define RSEQ_TEMPLATE_CPU_ID
#define RSEQ_TEMPLATE_MO_RELAXED
#include "rseq-riscv-bits.h"
#undef RSEQ_TEMPLATE_MO_RELAXED

#define RSEQ_TEMPLATE_MO_RELEASE
#include "rseq-riscv-bits.h"
#undef RSEQ_TEMPLATE_MO_RELEASE
#undef RSEQ_TEMPLATE_CPU_ID

/* Per-mm-cid indexing. */

#define RSEQ_TEMPLATE_MM_CID
#define RSEQ_TEMPLATE_MO_RELAXED
#include "rseq-riscv-bits.h"
#undef RSEQ_TEMPLATE_MO_RELAXED

#define RSEQ_TEMPLATE_MO_RELEASE
#include "rseq-riscv-bits.h"
#undef RSEQ_TEMPLATE_MO_RELEASE
#undef RSEQ_TEMPLATE_MM_CID

/* APIs which are not based on cpu ids. */

#define RSEQ_TEMPLATE_CPU_ID_NONE
#define RSEQ_TEMPLATE_MO_RELAXED
#include "rseq-riscv-bits.h"
#undef RSEQ_TEMPLATE_MO_RELAXED
#undef RSEQ_TEMPLATE_CPU_ID_NONE