// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/spin_lock.c */
3*f323a818SEduard Zingerman
4*f323a818SEduard Zingerman #include <linux/bpf.h>
5*f323a818SEduard Zingerman #include <bpf/bpf_helpers.h>
6*f323a818SEduard Zingerman #include "bpf_misc.h"
7*f323a818SEduard Zingerman
/* Map value layout shared by all tests below: a plain counter at offset 0
 * followed by the spin lock at offset 4 — which is why every asm body
 * computes the lock address as "map value pointer + 4".
 */
struct val {
	int cnt;
	struct bpf_spin_lock l;
};
12*f323a818SEduard Zingerman
/* Single-entry array map whose value embeds a bpf_spin_lock (struct val). */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct val);
} map_spin_lock SEC(".maps");
19*f323a818SEduard Zingerman
/* Well-formed lock usage: look up the map value, take the lock at
 * value + 4, read val->cnt (offset 0) while holding it, then unlock.
 * Accepted for privileged loads; expected to fail when loaded
 * unprivileged (__failure_unpriv).
 */
SEC("cgroup/skb")
__description("spin_lock: test1 success")
__success __failure_unpriv __msg_unpriv("")
__retval(0)
__naked void spin_lock_test1_success(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_spin_lock] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	r1 += 4;					\
	call %[bpf_spin_lock];				\
	r1 = r6;					\
	r1 += 4;					\
	r0 = *(u32*)(r6 + 0);				\
	call %[bpf_spin_unlock];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_lock),
	  __imm(bpf_spin_unlock),
	  __imm_addr(map_spin_lock)
	: __clobber_all);
}
52*f323a818SEduard Zingerman
/* Same shape as test1, but the load inside the locked region goes through
 * r1, which points at the spin lock field itself (value + 4). The verifier
 * must reject direct loads/stores touching the lock ("cannot be accessed
 * directly").
 */
SEC("cgroup/skb")
__description("spin_lock: test2 direct ld/st")
__failure __msg("cannot be accessed directly")
__failure_unpriv __msg_unpriv("")
__naked void lock_test2_direct_ld_st(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_spin_lock] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	r1 += 4;					\
	call %[bpf_spin_lock];				\
	r1 = r6;					\
	r1 += 4;					\
	r0 = *(u32*)(r1 + 0);				\
	call %[bpf_spin_unlock];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_lock),
	  __imm(bpf_spin_unlock),
	  __imm_addr(map_spin_lock)
	: __clobber_all);
}
85*f323a818SEduard Zingerman
/* Misaligned u32 load at value + 1: its 4 bytes overlap the first byte of
 * the spin lock at offset 4, so the verifier must still reject it even
 * with BPF_F_ANY_ALIGNMENT set (which is needed to get past the alignment
 * check and reach the lock-overlap check).
 */
SEC("cgroup/skb")
__description("spin_lock: test3 direct ld/st")
__failure __msg("cannot be accessed directly")
__failure_unpriv __msg_unpriv("")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void lock_test3_direct_ld_st(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_spin_lock] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	r1 += 4;					\
	call %[bpf_spin_lock];				\
	r1 = r6;					\
	r1 += 4;					\
	r0 = *(u32*)(r6 + 1);				\
	call %[bpf_spin_unlock];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_lock),
	  __imm(bpf_spin_unlock),
	  __imm_addr(map_spin_lock)
	: __clobber_all);
}
119*f323a818SEduard Zingerman
/* Variant of test3: a u16 load at value + 3 whose second byte overlaps the
 * spin lock at offset 4 — must be rejected as direct lock access.
 */
SEC("cgroup/skb")
__description("spin_lock: test4 direct ld/st")
__failure __msg("cannot be accessed directly")
__failure_unpriv __msg_unpriv("")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void lock_test4_direct_ld_st(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_spin_lock] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	r1 += 4;					\
	call %[bpf_spin_lock];				\
	r1 = r6;					\
	r1 += 4;					\
	r0 = *(u16*)(r6 + 3);				\
	call %[bpf_spin_unlock];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_lock),
	  __imm(bpf_spin_unlock),
	  __imm_addr(map_spin_lock)
	: __clobber_all);
}
153*f323a818SEduard Zingerman
/* Calls a helper (bpf_get_prandom_u32) between bpf_spin_lock and
 * bpf_spin_unlock; helper calls inside a locked region must be rejected
 * ("calls are not allowed").
 */
SEC("cgroup/skb")
__description("spin_lock: test5 call within a locked region")
__failure __msg("calls are not allowed")
__failure_unpriv __msg_unpriv("")
__naked void call_within_a_locked_region(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_spin_lock] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	r1 += 4;					\
	call %[bpf_spin_lock];				\
	call %[bpf_get_prandom_u32];			\
	r1 = r6;					\
	r1 += 4;					\
	call %[bpf_spin_unlock];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_lock),
	  __imm(bpf_spin_unlock),
	  __imm_addr(map_spin_lock)
	: __clobber_all);
}
187*f323a818SEduard Zingerman
/* The unlock is conditionally skipped: when val->cnt != 0 the program
 * jumps over bpf_spin_unlock and exits with the lock held, so the verifier
 * must report "unlock is missing" on that path.
 */
SEC("cgroup/skb")
__description("spin_lock: test6 missing unlock")
__failure __msg("unlock is missing")
__failure_unpriv __msg_unpriv("")
__naked void spin_lock_test6_missing_unlock(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_spin_lock] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	r1 += 4;					\
	call %[bpf_spin_lock];				\
	r1 = r6;					\
	r1 += 4;					\
	r0 = *(u32*)(r6 + 0);				\
	if r0 != 0 goto l1_%=;				\
	call %[bpf_spin_unlock];			\
l1_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_lock),
	  __imm(bpf_spin_unlock),
	  __imm_addr(map_spin_lock)
	: __clobber_all);
}
221*f323a818SEduard Zingerman
/* The bpf_spin_lock call is conditionally skipped (jumped over when
 * r1 != 0), so bpf_spin_unlock can be reached with no lock held — the
 * verifier must reject with "without taking a lock".
 */
SEC("cgroup/skb")
__description("spin_lock: test7 unlock without lock")
__failure __msg("without taking a lock")
__failure_unpriv __msg_unpriv("")
__naked void lock_test7_unlock_without_lock(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_spin_lock] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	r1 += 4;					\
	if r1 != 0 goto l1_%=;				\
	call %[bpf_spin_lock];				\
l1_%=:	r1 = r6;					\
	r1 += 4;					\
	r0 = *(u32*)(r6 + 0);				\
	call %[bpf_spin_unlock];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_lock),
	  __imm(bpf_spin_unlock),
	  __imm_addr(map_spin_lock)
	: __clobber_all);
}
255*f323a818SEduard Zingerman
/* Takes the same lock twice in a row. The second bpf_spin_lock is a call
 * made inside an already-locked region, which the verifier rejects with
 * "calls are not allowed".
 */
SEC("cgroup/skb")
__description("spin_lock: test8 double lock")
__failure __msg("calls are not allowed")
__failure_unpriv __msg_unpriv("")
__naked void spin_lock_test8_double_lock(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_spin_lock] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	r1 += 4;					\
	call %[bpf_spin_lock];				\
	r1 = r6;					\
	r1 += 4;					\
	call %[bpf_spin_lock];				\
	r1 = r6;					\
	r1 += 4;					\
	r0 = *(u32*)(r6 + 0);				\
	call %[bpf_spin_unlock];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_lock),
	  __imm(bpf_spin_unlock),
	  __imm_addr(map_spin_lock)
	: __clobber_all);
}
291*f323a818SEduard Zingerman
/* Performs two lookups of the same map (r6 and r7 get values with distinct
 * reg ids), locks via r6 but unlocks via r7 — the verifier must detect
 * "unlock of different lock".
 */
SEC("cgroup/skb")
__description("spin_lock: test9 different lock")
__failure __msg("unlock of different lock")
__failure_unpriv __msg_unpriv("")
__naked void spin_lock_test9_different_lock(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_spin_lock] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_spin_lock] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r7 = r0;					\
	r1 = r6;					\
	r1 += 4;					\
	call %[bpf_spin_lock];				\
	r1 = r7;					\
	r1 += 4;					\
	call %[bpf_spin_unlock];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_lock),
	  __imm(bpf_spin_unlock),
	  __imm_addr(map_spin_lock)
	: __clobber_all);
}
330*f323a818SEduard Zingerman
/* Passes the lock address to a subprog (defined below) that takes the lock
 * and returns without releasing it; the verifier must flag the subprog's
 * exit with "unlock is missing". The caller's own unlock never legitimizes
 * the lock taken inside the callee.
 */
SEC("cgroup/skb")
__description("spin_lock: test10 lock in subprog without unlock")
__failure __msg("unlock is missing")
__failure_unpriv __msg_unpriv("")
__naked void lock_in_subprog_without_unlock(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_spin_lock] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	r1 += 4;					\
	call lock_in_subprog_without_unlock__1;		\
	r1 = r6;					\
	r1 += 4;					\
	call %[bpf_spin_unlock];			\
	r0 = 1;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_unlock),
	  __imm_addr(map_spin_lock)
	: __clobber_all);
}
361*f323a818SEduard Zingerman
/* Subprog for test10: receives the lock address in r1, takes the lock and
 * returns while still holding it (deliberately no unlock).
 */
static __naked __noinline __attribute__((used))
void lock_in_subprog_without_unlock__1(void)
{
	asm volatile ("					\
	call %[bpf_spin_lock];				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_spin_lock)
	: __clobber_all);
}
373*f323a818SEduard Zingerman
/* Executes a legacy ld_abs packet load (r0 = *(u8*)skb[0]) while holding
 * the lock; ld_abs is forbidden "inside bpf_spin_lock"-ed regions.
 * Uses SEC("tc") since ld_abs needs a skb context.
 */
SEC("tc")
__description("spin_lock: test11 ld_abs under lock")
__failure __msg("inside bpf_spin_lock")
__naked void test11_ld_abs_under_lock(void)
{
	asm volatile ("					\
	r6 = r1;					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_spin_lock] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r7 = r0;					\
	r1 = r0;					\
	r1 += 4;					\
	call %[bpf_spin_lock];				\
	r0 = *(u8*)skb[0];				\
	r1 = r7;					\
	r1 += 4;					\
	call %[bpf_spin_unlock];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_lock),
	  __imm(bpf_spin_unlock),
	  __imm_addr(map_spin_lock)
	: __clobber_all);
}
406*f323a818SEduard Zingerman
/* State-pruning test: two lookups give r7 and r8 distinct reg ids; the lock
 * is taken through r7, then depending on skb->mark one branch overwrites
 * r7 = r8 before the unlock. regsafe() must compare reg->id for map value
 * pointers so the "unlock of different lock" path is not pruned away;
 * BPF_F_TEST_STATE_FREQ forces frequent state checkpoints to exercise it.
 */
SEC("tc")
__description("spin_lock: regsafe compare reg->id for map value")
__failure __msg("bpf_spin_unlock of different lock")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void reg_id_for_map_value(void)
{
	asm volatile ("					\
	r6 = r1;					\
	r6 = *(u32*)(r6 + %[__sk_buff_mark]);		\
	r1 = %[map_spin_lock] ll;			\
	r9 = r1;					\
	r2 = 0;						\
	*(u32*)(r10 - 4) = r2;				\
	r2 = r10;					\
	r2 += -4;					\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r7 = r0;					\
	r1 = r9;					\
	r2 = r10;					\
	r2 += -4;					\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r8 = r0;					\
	r1 = r7;					\
	r1 += 4;					\
	call %[bpf_spin_lock];				\
	if r6 == 0 goto l2_%=;				\
	goto l3_%=;					\
l2_%=:	r7 = r8;					\
l3_%=:	r1 = r7;					\
	r1 += 4;					\
	call %[bpf_spin_unlock];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_lock),
	  __imm(bpf_spin_unlock),
	  __imm_addr(map_spin_lock),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}
452*f323a818SEduard Zingerman
/* Make sure that regsafe() compares ids for spin lock records using
 * check_ids():
 * 1: r9 = map_lookup_elem(...) ; r9.id == 1
 * 2: r8 = map_lookup_elem(...) ; r8.id == 2
 * 3: r7 = ktime_get_ns()
 * 4: r6 = ktime_get_ns()
 * 5: if r6 > r7 goto <9>
 * 6: spin_lock(r8)
 * 7: r9 = r8
 * 8: goto <10>
 * 9: spin_lock(r9)
 * 10: spin_unlock(r9) ; r9.id == 1 || r9.id == 2 and lock is active,
 *                     ; second visit to (10) should be considered safe
 *                     ; if check_ids() is used.
 * 11: exit(0)
 */
469*f323a818SEduard Zingerman
/* Positive counterpart of the regsafe test: both branches end up holding a
 * lock whose id matches what r9 carries at the unlock, so the second visit
 * to the unlock instruction must be pruned as safe when check_ids() is used
 * ("29: safe" expected in the log-level-2 output).
 */
SEC("cgroup/skb")
__description("spin_lock: regsafe() check_ids() similar id mappings")
__success __msg("29: safe")
__failure_unpriv __msg_unpriv("")
__log_level(2) __retval(0) __flag(BPF_F_TEST_STATE_FREQ)
__naked void check_ids_similar_id_mappings(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u32*)(r10 - 4) = r1;				\
	/* r9 = map_lookup_elem(...) */			\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_spin_lock] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r9 = r0;					\
	/* r8 = map_lookup_elem(...) */			\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_spin_lock] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l1_%=;				\
	r8 = r0;					\
	/* r7 = ktime_get_ns() */			\
	call %[bpf_ktime_get_ns];			\
	r7 = r0;					\
	/* r6 = ktime_get_ns() */			\
	call %[bpf_ktime_get_ns];			\
	r6 = r0;					\
	/* if r6 > r7 goto +5 ; no new information about the state is derived from\
	 * ; this check, thus produced verifier states differ\
	 * ; only in 'insn_idx' \
	 * spin_lock(r8) \
	 * r9 = r8 \
	 * goto unlock \
	 */ \
	if r6 > r7 goto l2_%=;				\
	r1 = r8;					\
	r1 += 4;					\
	call %[bpf_spin_lock];				\
	r9 = r8;					\
	goto l3_%=;					\
l2_%=:	/* spin_lock(r9) */				\
	r1 = r9;					\
	r1 += 4;					\
	call %[bpf_spin_lock];				\
l3_%=:	/* spin_unlock(r9) */				\
	r1 = r9;					\
	r1 += 4;					\
	call %[bpf_spin_unlock];			\
l0_%=:	/* exit(0) */					\
	r0 = 0;						\
l1_%=:	exit;						\
"	:
	: __imm(bpf_ktime_get_ns),
	  __imm(bpf_map_lookup_elem),
	  __imm(bpf_spin_lock),
	  __imm(bpf_spin_unlock),
	  __imm_addr(map_spin_lock)
	: __clobber_all);
}
532*f323a818SEduard Zingerman
/* Program license, read by the loader from the "license" section. */
char _license[] SEC("license") = "GPL";
534