/* tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c */
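/*
 * Smoke and regression tests for the BPF_CMPXCHG atomic instruction.
 *
 * Rough semantics these tests rely on: BPF_ATOMIC_OP(size, BPF_CMPXCHG,
 * dst, src, off) atomically compares R0 with *(size *)(dst + off); if they
 * match, src is stored to that location. In either case the old memory
 * value is loaded back into R0, zero-extended for the BPF_W variant.
 */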
{
	"atomic compare-and-exchange smoketest - 64bit",
	.insns = {
		/* val = 3; */
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 3),
		/* old = atomic_cmpxchg(&val, 2, 4); */
		BPF_MOV64_IMM(BPF_REG_1, 4),
		BPF_MOV64_IMM(BPF_REG_0, 2),
		BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
		/* if (old != 3) exit(2); */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
		BPF_MOV64_IMM(BPF_REG_0, 2),
		BPF_EXIT_INSN(),
		/* if (val != 3) exit(3); */
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
		BPF_MOV64_IMM(BPF_REG_0, 3),
		BPF_EXIT_INSN(),
		/* old = atomic_cmpxchg(&val, 3, 4); */
		BPF_MOV64_IMM(BPF_REG_1, 4),
		BPF_MOV64_IMM(BPF_REG_0, 3),
		BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
		/* if (old != 3) exit(4); */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
		BPF_MOV64_IMM(BPF_REG_0, 4),
		BPF_EXIT_INSN(),
		/* if (val != 4) exit(5); */
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 4, 2),
		BPF_MOV64_IMM(BPF_REG_0, 5),
		BPF_EXIT_INSN(),
		/* exit(0); */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
{
	"atomic compare-and-exchange smoketest - 32bit",
	.insns = {
		/* val = 3; */
		BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 3),
		/* old = atomic_cmpxchg(&val, 2, 4); */
		BPF_MOV32_IMM(BPF_REG_1, 4),
		BPF_MOV32_IMM(BPF_REG_0, 2),
		BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -4),
		/* if (old != 3) exit(2); */
		BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
		BPF_MOV32_IMM(BPF_REG_0, 2),
		BPF_EXIT_INSN(),
		/* if (val != 3) exit(3); */
		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -4),
		BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
		BPF_MOV32_IMM(BPF_REG_0, 3),
		BPF_EXIT_INSN(),
		/* old = atomic_cmpxchg(&val, 3, 4); */
		BPF_MOV32_IMM(BPF_REG_1, 4),
		BPF_MOV32_IMM(BPF_REG_0, 3),
		BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -4),
		/* if (old != 3) exit(4); */
		BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 3, 2),
		BPF_MOV32_IMM(BPF_REG_0, 4),
		BPF_EXIT_INSN(),
		/* if (val != 4) exit(5); */
		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -4),
		BPF_JMP32_IMM(BPF_JEQ, BPF_REG_0, 4, 2),
		BPF_MOV32_IMM(BPF_REG_0, 5),
		BPF_EXIT_INSN(),
		/* exit(0); */
		BPF_MOV32_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
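/*
 * The source register of an atomic instruction must be initialized: R2 is
 * never written below, so the verifier is expected to reject the program
 * with "!read_ok".
 */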
{
	"Can't use cmpxchg on uninit src reg",
	.insns = {
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 3),
		BPF_MOV64_IMM(BPF_REG_0, 3),
		BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_2, -8),
		BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "!read_ok",
},
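/*
 * A BPF_W cmpxchg returns a 32-bit value: R0 must be zero-extended
 * afterwards even though it held U64_MAX before the operation.
 */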
{
	"BPF_W cmpxchg should zero top 32 bits",
	.insns = {
		/* r0 = U64_MAX; */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 1),
		/* u64 val = r0; */
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
		/* r0 = (u32)atomic_cmpxchg((u32 *)&val, r0, 1); */
		BPF_MOV32_IMM(BPF_REG_1, 1),
		BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
		/* r1 = 0x00000000FFFFFFFFull; */
		BPF_MOV64_IMM(BPF_REG_1, 1),
		BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
		BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
		/* if (r0 != r1) exit(1); */
		BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_1, 2),
		BPF_MOV32_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
		/* exit(0); */
		BPF_MOV32_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
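/*
 * The "Dest pointer in r0" tests put a stack pointer in R0 before the
 * cmpxchg. Privileged programs may do this; for unprivileged programs the
 * verifier treats a pointer-valued R0 compare operand as a potential
 * address leak, hence the "R0 leaks addr into mem" errstr below.
 */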
{
	"Dest pointer in r0 - fail",
	.insns = {
		/* val = 0; */
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
		/* r0 = &val */
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
		/* r0 = atomic_cmpxchg(&val, r0, 1); */
		BPF_MOV64_IMM(BPF_REG_1, 1),
		BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
		/* if (r0 != 0) exit(1); */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
		/* exit(0); */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.errstr_unpriv = "R0 leaks addr into mem",
},
{
	"Dest pointer in r0 - succeed",
	.insns = {
		/* r0 = &val */
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
		/* val = r0; */
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
		/* r0 = atomic_cmpxchg(&val, r0, 0); */
		BPF_MOV64_IMM(BPF_REG_1, 0),
		BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
		/* r1 = *r0 */
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
		/* exit(0); */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.errstr_unpriv = "R0 leaks addr into mem",
},
{
	"Dest pointer in r0 - succeed, check 2",
	.insns = {
		/* r0 = &val */
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
		/* val = r0; */
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
		/* r5 = &val */
		BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
		/* r0 = atomic_cmpxchg(&val, r0, r5); */
		BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
		/* r1 = *r0 */
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
		/* exit(0); */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.errstr_unpriv = "R0 leaks addr into mem",
},
{
	"Dest pointer in r0 - succeed, check 3",
	.insns = {
		/* r0 = &val */
		BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
		/* val = r0; */
		BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
		/* r5 = &val */
		BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
		/* r0 = atomic_cmpxchg((u32 *)&val, r0, r5); */
		BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
		/* exit(0); */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "invalid size of register fill",
	.errstr_unpriv = "R0 leaks addr into mem",
},
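/*
 * Checks 4 and 5 copy the frame pointer with BPF_MOV32_REG, which keeps
 * only the low 32 bits; unprivileged programs are rejected with "R10
 * partial copy of pointer". In check 5 the truncated value in R0 is a
 * scalar, so the load through it fails with "R0 invalid mem access".
 */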
{
	"Dest pointer in r0 - succeed, check 4",
	.insns = {
		/* r0 = &val */
		BPF_MOV32_REG(BPF_REG_0, BPF_REG_10),
		/* val = r0; */
		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
		/* r5 = &val */
		BPF_MOV32_REG(BPF_REG_5, BPF_REG_10),
		/* r0 = atomic_cmpxchg(&val, r0, r5); */
		BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
		/* r1 = *r10 */
		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -8),
		/* exit(0); */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.result_unpriv = REJECT,
	.errstr_unpriv = "R10 partial copy of pointer",
},
{
	"Dest pointer in r0 - succeed, check 5",
	.insns = {
		/* r0 = &val */
		BPF_MOV32_REG(BPF_REG_0, BPF_REG_10),
		/* val = r0; */
		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
		/* r5 = &val */
		BPF_MOV32_REG(BPF_REG_5, BPF_REG_10),
		/* r0 = atomic_cmpxchg(&val, r0, r5); */
		BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
		/* r1 = *r0 */
		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -8),
		/* exit(0); */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = REJECT,
	.errstr = "R0 invalid mem access",
	.errstr_unpriv = "R10 partial copy of pointer",
	.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},