{
	"atomic compare-and-exchange smoketest - 64bit",
	.insns = {
		/* val = 3; */
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 3),
		/* old = atomic_cmpxchg(&val, 2, 4); */
		BPF_MOV64_IMM(BPF_REG_1, 4),
		BPF_MOV64_IMM(BPF_REG_0, 2),
		BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
		/* if (old != 3) exit(2); */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
		BPF_MOV64_IMM(BPF_REG_0, 2),
		BPF_EXIT_INSN(),
		/* if (val != 3) exit(3); */
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
		BPF_MOV64_IMM(BPF_REG_0, 3),
		BPF_EXIT_INSN(),
		/* old = atomic_cmpxchg(&val, 3, 4); */
		BPF_MOV64_IMM(BPF_REG_1, 4),
		BPF_MOV64_IMM(BPF_REG_0, 3),
		BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
		/* if (old != 3) exit(4); */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
		BPF_MOV64_IMM(BPF_REG_0, 4),
		BPF_EXIT_INSN(),
		/* if (val != 4) exit(5); */
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 4, 2),
		BPF_MOV64_IMM(BPF_REG_0, 5),
		BPF_EXIT_INSN(),
		/* exit(0); */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
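/* A rough C equivalent of the smoketest above, as a hedged sketch only
 * (clang lowers __sync_val_compare_and_swap() to BPF_CMPXCHG when atomics
 * are available, e.g. -mcpu=v3; variable names here are illustrative and
 * not part of the suite):
 *
 *	__u64 val = 3;
 *	__u64 old = __sync_val_compare_and_swap(&val, 2, 4);
 *	if (old != 3)	// compare with 2 missed, val untouched
 *		return 2;
 *	if (val != 3)
 *		return 3;
 *	old = __sync_val_compare_and_swap(&val, 3, 4);
 *	if (old != 3)	// compare with 3 matched, 4 was stored
 *		return 4;
 *	if (val != 4)
 *		return 5;
 *	return 0;
 *
 * BPF_CMPXCHG implicitly uses R0 as the compare operand and always leaves
 * the old memory value in R0, which is why the tests load the expected
 * value into BPF_REG_0 before each BPF_ATOMIC_OP().
 */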
{
	"atomic compare-and-exchange smoketest - 32bit",
	.insns = {
		/* val = 3; */
		BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 3),
		/* old = atomic_cmpxchg(&val, 2, 4); */
		BPF_MOV32_IMM(BPF_REG_1, 4),
		BPF_MOV32_IMM(BPF_REG_0, 2),
		BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -4),
		/* if (old != 3) exit(2); */
		BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
		BPF_MOV32_IMM(BPF_REG_0, 2),
		BPF_EXIT_INSN(),
		/* if (val != 3) exit(3); */
		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -4),
		BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
		BPF_MOV32_IMM(BPF_REG_0, 3),
		BPF_EXIT_INSN(),
		/* old = atomic_cmpxchg(&val, 3, 4); */
		BPF_MOV32_IMM(BPF_REG_1, 4),
		BPF_MOV32_IMM(BPF_REG_0, 3),
		BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -4),
		/* if (old != 3) exit(4); */
		BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
		BPF_MOV32_IMM(BPF_REG_0, 4),
		BPF_EXIT_INSN(),
		/* if (val != 4) exit(5); */
		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -4),
		BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 4, 2),
		BPF_MOV32_IMM(BPF_REG_0, 5),
		BPF_EXIT_INSN(),
		/* exit(0); */
		BPF_MOV32_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
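/* The 32-bit variant above repeats the same sequence with BPF_W operands on
 * a 4-byte stack slot; only the low 32 bits of R0 participate in the
 * comparison, and the old value comes back zero-extended into R0.
 */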
{
	"Can't use cmpxchg on uninit src reg",
	.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 3),
		BPF_MOV64_IMM(BPF_REG_0, 3),
		BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_2, -8),
		BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "!read_ok",
},
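/* BPF_CMPXCHG reads its src register (the value to store on a successful
 * compare), so passing a never-written register such as R2 above trips the
 * verifier's uninitialized-register check at load time, producing the
 * "!read_ok" error.
 */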
{
	"BPF_W cmpxchg should zero top 32 bits",
	.insns = {
		/* r0 = U64_MAX; */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 1),
		/* u64 val = r0; */
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
		/* r0 = (u32)atomic_cmpxchg((u32 *)&val, r0, 1); */
		BPF_MOV32_IMM(BPF_REG_1, 1),
		BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
		/* r1 = 0x00000000FFFFFFFFull; */
		BPF_MOV64_IMM(BPF_REG_1, 1),
		BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
		BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
		/* if (r0 != r1) exit(1); */
		BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_1, 2),
		BPF_MOV32_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
		/* exit(0); */
		BPF_MOV32_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
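/* Before the exchange above, R0 is U64_MAX, so the low 32 bits of val match
 * it and the BPF_W cmpxchg succeeds. The old 32-bit value must land in R0
 * as exactly 0x00000000ffffffff: if a JIT or the interpreter left the upper
 * 32 bits of R0 intact instead of zero-extending the result, the JEQ against
 * r1 would miss and the program would exit with 1 instead of 0.
 */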
{
	"Dest pointer in r0 - fail",
	.insns = {
		/* val = 0; */
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		/* r0 = &val */
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
		/* r0 = atomic_cmpxchg(&val, r0, 1); */
		BPF_MOV64_IMM(BPF_REG_1, 1),
		BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
		/* if (r0 != 0) exit(1); */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
		/* exit(0); */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.errstr_unpriv = "R0 leaks addr into mem",
},
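/* "fail" here refers to the exchange itself: val is 0 while R0 holds &val,
 * so the compare misses and memory is left untouched. Privileged programs
 * may still do this, but for unprivileged ones the verifier refuses to let
 * a pointer act as the cmpxchg compare operand, reporting
 * "R0 leaks addr into mem".
 */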
{
	"Dest pointer in r0 - succeed",
	.insns = {
		/* r0 = &val */
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
		/* val = r0; */
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
		/* r0 = atomic_cmpxchg(&val, r0, 0); */
		BPF_MOV64_IMM(BPF_REG_1, 0),
		BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
		/* r1 = *r0 */
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
		/* exit(0); */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.errstr_unpriv = "R0 leaks addr into mem",
},
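/* Here the exchange succeeds: val holds &val, matching R0, so 0 is stored
 * and the old pointer is loaded back into R0. The verifier must track R0 as
 * a spilled-and-refilled stack pointer for the subsequent BPF_LDX_MEM
 * through it to be accepted.
 */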
{
	"Dest pointer in r0 - succeed, check 2",
	.insns = {
		/* r0 = &val */
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
		/* val = r0; */
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
		/* r5 = &val */
		BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
		/* r0 = atomic_cmpxchg(&val, r0, r5); */
		BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
		/* r1 = *r0 */
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
		/* exit(0); */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.errstr_unpriv = "R0 leaks addr into mem",
},
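/* Same as the previous test, but the new value is itself a pointer (r5),
 * exchanging one pointer for another; R0 again ends up holding &val and can
 * be dereferenced.
 */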
{
	"Dest pointer in r0 - succeed, check 3",
	.insns = {
		/* r0 = &val */
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
		/* val = r0; */
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
		/* r5 = &val */
		BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
		/* r0 = atomic_cmpxchg(&val, r0, r5); */
		BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
		/* exit(0); */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid size of register fill",
	.errstr_unpriv = "R0 leaks addr into mem",
},
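/* A BPF_W cmpxchg on a slot containing a 64-bit spilled pointer amounts to
 * a 32-bit fill of that spill, which the verifier rejects with
 * "invalid size of register fill"; despite the "succeed" in the inherited
 * test name, the expected result is REJECT.
 */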
{
	"Dest pointer in r0 - succeed, check 4",
	.insns = {
		/* r0 = &val */
		BPF_MOV32_REG(BPF_REG_0, BPF_REG_10),
		/* val = r0; */
		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
		/* r5 = &val */
		BPF_MOV32_REG(BPF_REG_5, BPF_REG_10),
		/* r0 = atomic_cmpxchg(&val, r0, r5); */
		BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
		/* r1 = *r10 */
		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -8),
		/* exit(0); */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.errstr_unpriv = "R10 partial copy of pointer",
},
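/* BPF_MOV32_REG truncates the frame pointer, so r0 and r5 are plain scalars
 * here and the BPF_W cmpxchg operates on scalar data; the final load goes
 * through r10 directly and is fine. Unprivileged programs are stopped
 * earlier, at the 32-bit pointer copy ("R10 partial copy of pointer").
 */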
{
	"Dest pointer in r0 - succeed, check 5",
	.insns = {
		/* r0 = &val */
		BPF_MOV32_REG(BPF_REG_0, BPF_REG_10),
		/* val = r0; */
		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
		/* r5 = &val */
		BPF_MOV32_REG(BPF_REG_5, BPF_REG_10),
		/* r0 = atomic_cmpxchg(&val, r0, r5); */
		BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
		/* r1 = *r0 */
		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -8),
		/* exit(0); */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "R0 invalid mem access",
	.errstr_unpriv = "R10 partial copy of pointer",
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
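/* Check 5 differs from check 4 only in loading through r0 after the
 * exchange; since r0 now holds a 32-bit scalar rather than a pointer, the
 * dereference is rejected with "R0 invalid mem access".
 */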