// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/ref_tracking.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "../../../include/linux/filter.h"
#include "bpf_misc.h"

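/* Zero a stack area for a struct bpf_sock_tuple and call the given
 * socket lookup helper on it; the helper's return value (the socket
 * pointer, or NULL) is left in r0 by the BPF calling convention.
 */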
#define BPF_SK_LOOKUP(func) \
	/* struct bpf_sock_tuple tuple = {} */ \
	"r2 = 0;"			\
	"*(u32*)(r10 - 8) = r2;"	\
	"*(u64*)(r10 - 16) = r2;"	\
	"*(u64*)(r10 - 24) = r2;"	\
	"*(u64*)(r10 - 32) = r2;"	\
	"*(u64*)(r10 - 40) = r2;"	\
	"*(u64*)(r10 - 48) = r2;"	\
	/* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */ \
	"r2 = r10;"			\
	"r2 += -48;"			\
	"r3 = %[sizeof_bpf_sock_tuple];"\
	"r4 = 0;"			\
	"r5 = 0;"			\
	"call %[" #func "];"

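/* Opaque stand-in for the kernel's struct bpf_key; it is only ever
 * used as a pointer type by the key kfunc prototypes below.
 */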
struct bpf_key {} __attribute__((preserve_access_index));

extern void bpf_key_put(struct bpf_key *key) __ksym;
extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym;
extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym;

/* BTF FUNC records are not generated for kfuncs referenced
 * from inline assembly. These records are necessary for
 * libbpf to link the program. The function below is a hack
 * to ensure that BTF FUNC records are generated.
 */
void __kfunc_btf_root(void)
{
	bpf_key_put(0);
	bpf_lookup_system_key(0);
	bpf_lookup_user_key(0, 0);
}

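/* Shared maps: a one-element array whose struct test_val value is
 * 48 bytes (hence the map name), and a small ring buffer, both kept
 * for reference-tracking tests in this file.
 */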
#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct test_val);
} map_array_48b SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} map_ringbuf SEC(".maps");

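/* Dummy tail-call targets and the program array that holds them;
 * the tail-call tests below jump through map_prog1_tc.
 */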
void dummy_prog_42_tc(void);
void dummy_prog_24_tc(void);
void dummy_prog_loop1_tc(void);

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 4);
	__uint(key_size, sizeof(int));
	__array(values, void (void));
} map_prog1_tc SEC(".maps") = {
	.values = {
		[0] = (void *)&dummy_prog_42_tc,
		[1] = (void *)&dummy_prog_loop1_tc,
		[2] = (void *)&dummy_prog_24_tc,
	},
};

SEC("tc")
__auxiliary
__naked void dummy_prog_42_tc(void)
{
	asm volatile ("r0 = 42; exit;");
}

SEC("tc")
__auxiliary
__naked void dummy_prog_24_tc(void)
{
	asm volatile ("r0 = 24; exit;");
}

SEC("tc")
__auxiliary
__naked void dummy_prog_loop1_tc(void)
{
	asm volatile ("			\
	r3 = 1;				\
	r2 = %[map_prog1_tc] ll;	\
	call %[bpf_tail_call];		\
	r0 = 41;			\
	exit;				\
"	:
	: __imm(bpf_tail_call),
	  __imm_addr(map_prog1_tc)
	: __clobber_all);
}

SEC("tc")
__description("reference tracking: leak potential reference")
__failure __msg("Unreleased reference")
__naked void reference_tracking_leak_potential_reference(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r6 = r0;		/* leak reference */	\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("tc")
__description("reference tracking: leak potential reference to sock_common")
__failure __msg("Unreleased reference")
__naked void potential_reference_to_sock_common_1(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_skc_lookup_tcp)
"	r6 = r0;		/* leak reference */	\
	exit;						\
"	:
	: __imm(bpf_skc_lookup_tcp),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("tc")
__description("reference tracking: leak potential reference on stack")
__failure __msg("Unreleased reference")
__naked void leak_potential_reference_on_stack(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r4 = r10;					\
	r4 += -8;					\
	*(u64*)(r4 + 0) = r0;				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("tc")
__description("reference tracking: leak potential reference on stack 2")
__failure __msg("Unreleased reference")
__naked void potential_reference_on_stack_2(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r4 = r10;					\
	r4 += -8;					\
	*(u64*)(r4 + 0) = r0;				\
	r0 = 0;						\
	r1 = 0;						\
	*(u64*)(r4 + 0) = r1;				\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("tc")
__description("reference tracking: zero potential reference")
__failure __msg("Unreleased reference")
__naked void reference_tracking_zero_potential_reference(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r0 = 0;			/* leak reference */	\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("tc")
__description("reference tracking: zero potential reference to sock_common")
__failure __msg("Unreleased reference")
__naked void potential_reference_to_sock_common_2(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_skc_lookup_tcp)
"	r0 = 0;			/* leak reference */	\
	exit;						\
"	:
	: __imm(bpf_skc_lookup_tcp),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("tc")
__description("reference tracking: copy and zero potential references")
__failure __msg("Unreleased reference")
__naked void copy_and_zero_potential_references(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r7 = r0;					\
	r0 = 0;						\
	r7 = 0;			/* leak reference */	\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("lsm.s/bpf")
__description("reference tracking: acquire/release user key reference")
__success
__naked void acquire_release_user_key_reference(void)
{
	asm volatile ("					\
	r1 = -3;					\
	r2 = 0;						\
	call %[bpf_lookup_user_key];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	call %[bpf_key_put];				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_key_put),
	  __imm(bpf_lookup_user_key)
	: __clobber_all);
}

SEC("lsm.s/bpf")
__description("reference tracking: acquire/release system key reference")
__success
__naked void acquire_release_system_key_reference(void)
{
	asm volatile ("					\
	r1 = 1;						\
	call %[bpf_lookup_system_key];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	call %[bpf_key_put];				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_key_put),
	  __imm(bpf_lookup_system_key)
	: __clobber_all);
}

SEC("lsm.s/bpf")
__description("reference tracking: release user key reference without check")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
__naked void user_key_reference_without_check(void)
{
	asm volatile ("					\
	r1 = -3;					\
	r2 = 0;						\
	call %[bpf_lookup_user_key];			\
	r1 = r0;					\
	call %[bpf_key_put];				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_key_put),
	  __imm(bpf_lookup_user_key)
	: __clobber_all);
}

SEC("lsm.s/bpf")
__description("reference tracking: release system key reference without check")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
__naked void system_key_reference_without_check(void)
{
	asm volatile ("					\
	r1 = 1;						\
	call %[bpf_lookup_system_key];			\
	r1 = r0;					\
	call %[bpf_key_put];				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_key_put),
	  __imm(bpf_lookup_system_key)
	: __clobber_all);
}

SEC("lsm.s/bpf")
__description("reference tracking: release with NULL key pointer")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
__naked void release_with_null_key_pointer(void)
{
	asm volatile ("					\
	r1 = 0;						\
	call %[bpf_key_put];				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_key_put)
	: __clobber_all);
}

SEC("lsm.s/bpf")
__description("reference tracking: leak potential reference to user key")
__failure __msg("Unreleased reference")
__naked void potential_reference_to_user_key(void)
{
	asm volatile ("					\
	r1 = -3;					\
	r2 = 0;						\
	call %[bpf_lookup_user_key];			\
	exit;						\
"	:
	: __imm(bpf_lookup_user_key)
	: __clobber_all);
}

SEC("lsm.s/bpf")
__description("reference tracking: leak potential reference to system key")
__failure __msg("Unreleased reference")
__naked void potential_reference_to_system_key(void)
{
	asm volatile ("					\
	r1 = 1;						\
	call %[bpf_lookup_system_key];			\
	exit;						\
"	:
	: __imm(bpf_lookup_system_key)
	: __clobber_all);
}

SEC("tc")
__description("reference tracking: release reference without check")
__failure __msg("type=sock_or_null expected=sock")
__naked void tracking_release_reference_without_check(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	/* reference in r0 may be NULL */		\
	r1 = r0;					\
	r2 = 0;						\
	call %[bpf_sk_release];				\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("tc")
__description("reference tracking: release reference to sock_common without check")
__failure __msg("type=sock_common_or_null expected=sock")
__naked void to_sock_common_without_check(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_skc_lookup_tcp)
"	/* reference in r0 may be NULL */		\
	r1 = r0;					\
	r2 = 0;						\
	call %[bpf_sk_release];				\
	exit;						\
"	:
	: __imm(bpf_sk_release),
	  __imm(bpf_skc_lookup_tcp),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("tc")
__description("reference tracking: release reference")
__success __retval(0)
__naked void reference_tracking_release_reference(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r1 = r0;					\
	if r0 == 0 goto l0_%=;				\
	call %[bpf_sk_release];				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("tc")
__description("reference tracking: release reference to sock_common")
__success __retval(0)
__naked void release_reference_to_sock_common(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_skc_lookup_tcp)
"	r1 = r0;					\
	if r0 == 0 goto l0_%=;				\
	call %[bpf_sk_release];				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_sk_release),
	  __imm(bpf_skc_lookup_tcp),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("tc")
__description("reference tracking: release reference 2")
__success __retval(0)
__naked void reference_tracking_release_reference_2(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r1 = r0;					\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	call %[bpf_sk_release];				\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("tc")
__description("reference tracking: release reference twice")
__failure __msg("type=scalar expected=sock")
__naked void reference_tracking_release_reference_twice(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r1 = r0;					\
	r6 = r0;					\
	if r0 == 0 goto l0_%=;				\
	call %[bpf_sk_release];				\
l0_%=:	r1 = r6;					\
	call %[bpf_sk_release];				\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("tc")
__description("reference tracking: release reference twice inside branch")
__failure __msg("type=scalar expected=sock")
__naked void release_reference_twice_inside_branch(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r1 = r0;					\
	r6 = r0;					\
	if r0 == 0 goto l0_%=;		/* goto end */	\
	call %[bpf_sk_release];				\
	r1 = r6;					\
	call %[bpf_sk_release];				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("tc")
__description("reference tracking: alloc, check, free in one subbranch")
__failure __msg("Unreleased reference")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void check_free_in_one_subbranch(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 16;					\
	/* if (offsetof(skb, mark) > data_len) exit; */	\
	if r0 <= r3 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = *(u32*)(r2 + %[__sk_buff_mark]);		\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	if r6 == 0 goto l1_%=;		/* mark == 0? */\
	/* Leak reference in R0 */			\
	exit;						\
l1_%=:	if r0 == 0 goto l2_%=;		/* sk NULL? */	\
	r1 = r0;					\
	call %[bpf_sk_release];				\
l2_%=:	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("tc")
__description("reference tracking: alloc, check, free in both subbranches")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void check_free_in_both_subbranches(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 16;					\
	/* if (offsetof(skb, mark) > data_len) exit; */	\
	if r0 <= r3 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = *(u32*)(r2 + %[__sk_buff_mark]);		\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	if r6 == 0 goto l1_%=;		/* mark == 0? */\
	if r0 == 0 goto l2_%=;		/* sk NULL? */	\
	r1 = r0;					\
	call %[bpf_sk_release];				\
l2_%=:	exit;						\
l1_%=:	if r0 == 0 goto l3_%=;		/* sk NULL? */	\
	r1 = r0;					\
	call %[bpf_sk_release];				\
l3_%=:	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("tc")
__description("reference tracking in call: free reference in subprog")
__success __retval(0)
__naked void call_free_reference_in_subprog(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r1 = r0;	/* unchecked reference */	\
	call call_free_reference_in_subprog__1;		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

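/* Subprogs below are called by name from inline assembly only, so
 * they are marked __attribute__((used)) to keep the compiler from
 * discarding them and __noinline so they remain separate functions
 * for the verifier to track calls across.
 */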
static __naked __noinline __attribute__((used))
void call_free_reference_in_subprog__1(void)
{
	asm volatile ("					\
	/* subprog 1 */					\
	r2 = r1;					\
	if r2 == 0 goto l0_%=;				\
	call %[bpf_sk_release];				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_sk_release)
	: __clobber_all);
}

SEC("tc")
__description("reference tracking in call: free reference in subprog and outside")
__failure __msg("type=scalar expected=sock")
__naked void reference_in_subprog_and_outside(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r1 = r0;	/* unchecked reference */	\
	r6 = r0;					\
	call reference_in_subprog_and_outside__1;	\
	r1 = r6;					\
	call %[bpf_sk_release];				\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

static __naked __noinline __attribute__((used))
void reference_in_subprog_and_outside__1(void)
{
	asm volatile ("					\
	/* subprog 1 */					\
	r2 = r1;					\
	if r2 == 0 goto l0_%=;				\
	call %[bpf_sk_release];				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_sk_release)
	: __clobber_all);
}

SEC("tc")
__description("reference tracking in call: alloc & leak reference in subprog")
__failure __msg("Unreleased reference")
__naked void alloc_leak_reference_in_subprog(void)
{
	asm volatile ("					\
	r4 = r10;					\
	r4 += -8;					\
	call alloc_leak_reference_in_subprog__1;	\
	r1 = r0;					\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}

static __naked __noinline __attribute__((used))
void alloc_leak_reference_in_subprog__1(void)
{
	asm volatile ("					\
	/* subprog 1 */					\
	r6 = r4;					\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	/* spill unchecked sk_ptr into stack of caller */\
	*(u64*)(r6 + 0) = r0;				\
	r1 = r0;					\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("tc")
__description("reference tracking in call: alloc in subprog, release outside")
__success __retval(POINTER_VALUE)
__naked void alloc_in_subprog_release_outside(void)
{
	asm volatile ("					\
	r4 = r10;					\
	call alloc_in_subprog_release_outside__1;	\
	r1 = r0;					\
	if r0 == 0 goto l0_%=;				\
	call %[bpf_sk_release];				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_sk_release)
	: __clobber_all);
}

static __naked __noinline __attribute__((used))
void alloc_in_subprog_release_outside__1(void)
{
	asm volatile ("					\
	/* subprog 1 */					\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	exit;				/* return sk */	\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("tc")
__description("reference tracking in call: sk_ptr leak into caller stack")
__failure __msg("Unreleased reference")
__naked void ptr_leak_into_caller_stack(void)
{
	asm volatile ("					\
	r4 = r10;					\
	r4 += -8;					\
	call ptr_leak_into_caller_stack__1;		\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}

static __naked __noinline __attribute__((used))
void ptr_leak_into_caller_stack__1(void)
{
	asm volatile ("					\
	/* subprog 1 */					\
	r5 = r10;					\
	r5 += -8;					\
	*(u64*)(r5 + 0) = r4;				\
	call ptr_leak_into_caller_stack__2;		\
	/* spill unchecked sk_ptr into stack of caller */\
	r5 = r10;					\
	r5 += -8;					\
	r4 = *(u64*)(r5 + 0);				\
	*(u64*)(r4 + 0) = r0;				\
	exit;						\
"	::: __clobber_all);
}

static __naked __noinline __attribute__((used))
void ptr_leak_into_caller_stack__2(void)
{
	asm volatile ("					\
	/* subprog 2 */					\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("tc")
__description("reference tracking in call: sk_ptr spill into caller stack")
__success __retval(0)
__naked void ptr_spill_into_caller_stack(void)
{
	asm volatile ("					\
	r4 = r10;					\
	r4 += -8;					\
	call ptr_spill_into_caller_stack__1;		\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}

static __naked __noinline __attribute__((used))
void ptr_spill_into_caller_stack__1(void)
{
	asm volatile ("					\
	/* subprog 1 */					\
	r5 = r10;					\
	r5 += -8;					\
	*(u64*)(r5 + 0) = r4;				\
	call ptr_spill_into_caller_stack__2;		\
	/* spill unchecked sk_ptr into stack of caller */\
	r5 = r10;					\
	r5 += -8;					\
	r4 = *(u64*)(r5 + 0);				\
	*(u64*)(r4 + 0) = r0;				\
	if r0 == 0 goto l0_%=;				\
	/* now the sk_ptr is verified, free the reference */\
	r1 = *(u64*)(r4 + 0);				\
	call %[bpf_sk_release];				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_sk_release)
	: __clobber_all);
}

static __naked __noinline __attribute__((used))
void ptr_spill_into_caller_stack__2(void)
{
	asm volatile ("					\
	/* subprog 2 */					\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("tc")
__description("reference tracking: allow LD_ABS")
__success __retval(0)
__naked void reference_tracking_allow_ld_abs(void)
{
	asm volatile ("					\
	r6 = r1;					\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r1 = r0;					\
	if r0 == 0 goto l0_%=;				\
	call %[bpf_sk_release];				\
l0_%=:	r0 = *(u8*)skb[0];				\
	r0 = *(u16*)skb[0];				\
	r0 = *(u32*)skb[0];				\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("tc")
__description("reference tracking: forbid LD_ABS while holding reference")
__failure __msg("BPF_LD_[ABS|IND] cannot be mixed with socket references")
__naked void ld_abs_while_holding_reference(void)
{
	asm volatile ("					\
	r6 = r1;					\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r0 = *(u8*)skb[0];				\
	r0 = *(u16*)skb[0];				\
	r0 = *(u32*)skb[0];				\
	r1 = r0;					\
	if r0 == 0 goto l0_%=;				\
	call %[bpf_sk_release];				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("tc")
__description("reference tracking: allow LD_IND")
__success __retval(1)
__naked void reference_tracking_allow_ld_ind(void)
{
	asm volatile ("					\
	r6 = r1;					\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r1 = r0;					\
	if r0 == 0 goto l0_%=;				\
	call %[bpf_sk_release];				\
l0_%=:	r7 = 1;						\
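	/* emit raw BPF_LD_IND insn, built with __imm_insn() below */ \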
	.8byte %[ld_ind];				\
	r0 = r7;					\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)),
	  __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000))
	: __clobber_all);
}

SEC("tc")
__description("reference tracking: forbid LD_IND while holding reference")
__failure __msg("BPF_LD_[ABS|IND] cannot be mixed with socket references")
__naked void ld_ind_while_holding_reference(void)
{
	asm volatile ("					\
	r6 = r1;					\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r4 = r0;					\
	r7 = 1;						\
	.8byte %[ld_ind];				\
	r0 = r7;					\
	r1 = r4;					\
	if r1 == 0 goto l0_%=;				\
	call %[bpf_sk_release];				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)),
	  __imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000))
	: __clobber_all);
}

SEC("tc")
__description("reference tracking: check reference or tail call")
__success __retval(0)
__naked void check_reference_or_tail_call(void)
{
	asm volatile ("					\
	r7 = r1;					\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	/* if (sk) bpf_sk_release() */			\
	r1 = r0;					\
	if r1 != 0 goto l0_%=;				\
	/* bpf_tail_call() */				\
	r3 = 3;						\
	r2 = %[map_prog1_tc] ll;			\
	r1 = r7;					\
	call %[bpf_tail_call];				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_release];				\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm(bpf_tail_call),
	  __imm_addr(map_prog1_tc),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("tc")
__description("reference tracking: release reference then tail call")
__success __retval(0)
__naked void release_reference_then_tail_call(void)
{
	asm volatile ("					\
	r7 = r1;					\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	/* if (sk) bpf_sk_release() */			\
	r1 = r0;					\
	if r1 == 0 goto l0_%=;				\
	call %[bpf_sk_release];				\
l0_%=:	/* bpf_tail_call() */				\
	r3 = 3;						\
	r2 = %[map_prog1_tc] ll;			\
	r1 = r7;					\
	call %[bpf_tail_call];				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm(bpf_tail_call),
	  __imm_addr(map_prog1_tc),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("tc")
__description("reference tracking: leak possible reference over tail call")
__failure __msg("tail_call would lead to reference leak")
__naked void possible_reference_over_tail_call(void)
{
	asm volatile ("					\
	r7 = r1;					\
	/* Look up socket and store in REG_6 */		\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	/* bpf_tail_call() */				\
	r6 = r0;					\
	r3 = 3;						\
	r2 = %[map_prog1_tc] ll;			\
	r1 = r7;					\
	call %[bpf_tail_call];				\
	r0 = 0;						\
	/* if (sk) bpf_sk_release() */			\
	r1 = r6;					\
	if r1 == 0 goto l0_%=;				\
	call %[bpf_sk_release];				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm(bpf_tail_call),
	  __imm_addr(map_prog1_tc),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
947*8be63279SEduard Zingerman 
948*8be63279SEduard Zingerman SEC("tc")
949*8be63279SEduard Zingerman __description("reference tracking: leak checked reference over tail call")
950*8be63279SEduard Zingerman __failure __msg("tail_call would lead to reference leak")
checked_reference_over_tail_call(void)951*8be63279SEduard Zingerman __naked void checked_reference_over_tail_call(void)
952*8be63279SEduard Zingerman {
953*8be63279SEduard Zingerman 	asm volatile ("					\
954*8be63279SEduard Zingerman 	r7 = r1;					\
955*8be63279SEduard Zingerman 	/* Look up socket and store in REG_6 */		\
956*8be63279SEduard Zingerman "	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
957*8be63279SEduard Zingerman "	r6 = r0;					\
958*8be63279SEduard Zingerman 	/* if (!sk) goto end */				\
959*8be63279SEduard Zingerman 	if r0 == 0 goto l0_%=;				\
960*8be63279SEduard Zingerman 	/* bpf_tail_call() */				\
961*8be63279SEduard Zingerman 	r3 = 0;						\
962*8be63279SEduard Zingerman 	r2 = %[map_prog1_tc] ll;			\
963*8be63279SEduard Zingerman 	r1 = r7;					\
964*8be63279SEduard Zingerman 	call %[bpf_tail_call];				\
965*8be63279SEduard Zingerman 	r0 = 0;						\
966*8be63279SEduard Zingerman 	r1 = r6;					\
967*8be63279SEduard Zingerman l0_%=:	call %[bpf_sk_release];				\
968*8be63279SEduard Zingerman 	exit;						\
969*8be63279SEduard Zingerman "	:
970*8be63279SEduard Zingerman 	: __imm(bpf_sk_lookup_tcp),
971*8be63279SEduard Zingerman 	  __imm(bpf_sk_release),
972*8be63279SEduard Zingerman 	  __imm(bpf_tail_call),
973*8be63279SEduard Zingerman 	  __imm_addr(map_prog1_tc),
974*8be63279SEduard Zingerman 	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
975*8be63279SEduard Zingerman 	: __clobber_all);
976*8be63279SEduard Zingerman }
977*8be63279SEduard Zingerman 
978*8be63279SEduard Zingerman SEC("tc")
979*8be63279SEduard Zingerman __description("reference tracking: mangle and release sock_or_null")
980*8be63279SEduard Zingerman __failure __msg("R1 pointer arithmetic on sock_or_null prohibited")
and_release_sock_or_null(void)981*8be63279SEduard Zingerman __naked void and_release_sock_or_null(void)
982*8be63279SEduard Zingerman {
983*8be63279SEduard Zingerman 	asm volatile (
984*8be63279SEduard Zingerman 	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
985*8be63279SEduard Zingerman "	r1 = r0;					\
986*8be63279SEduard Zingerman 	r1 += 5;					\
987*8be63279SEduard Zingerman 	if r0 == 0 goto l0_%=;				\
988*8be63279SEduard Zingerman 	call %[bpf_sk_release];				\
989*8be63279SEduard Zingerman l0_%=:	exit;						\
990*8be63279SEduard Zingerman "	:
991*8be63279SEduard Zingerman 	: __imm(bpf_sk_lookup_tcp),
992*8be63279SEduard Zingerman 	  __imm(bpf_sk_release),
993*8be63279SEduard Zingerman 	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
994*8be63279SEduard Zingerman 	: __clobber_all);
995*8be63279SEduard Zingerman }
996*8be63279SEduard Zingerman 
SEC("tc")
__description("reference tracking: mangle and release sock")
__failure __msg("R1 pointer arithmetic on sock prohibited")
__naked void tracking_mangle_and_release_sock(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r1 = r0;					\
	if r0 == 0 goto l0_%=;				\
	r1 += 5;					\
	call %[bpf_sk_release];				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

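/* A properly sized read of a struct bpf_sock field through the held
 * reference is allowed, and the reference is released on all paths.
 */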
SEC("tc")
__description("reference tracking: access member")
__success __retval(0)
__naked void reference_tracking_access_member(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r6 = r0;					\
	if r0 == 0 goto l0_%=;				\
	r2 = *(u32*)(r0 + 4);				\
	r1 = r6;					\
	call %[bpf_sk_release];				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

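/* Sockets obtained from bpf_sk_lookup_tcp() are read-only; storing
 * to a struct bpf_sock field is rejected even while the reference is
 * held.
 */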
SEC("tc")
__description("reference tracking: write to member")
__failure __msg("cannot write into sock")
__naked void reference_tracking_write_to_member(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r6 = r0;					\
	if r0 == 0 goto l0_%=;				\
	r1 = r6;					\
	r2 = 42 ll;					\
	*(u32*)(r1 + %[bpf_sock_mark]) = r2;		\
	r1 = r6;					\
l0_%=:	call %[bpf_sk_release];				\
	r0 = 0 ll;					\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

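/* Socket member accesses are size-checked: no struct bpf_sock field
 * at offset 0 is 8 bytes wide, so the u64 load is rejected.
 */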
SEC("tc")
__description("reference tracking: invalid 64-bit access of member")
__failure __msg("invalid sock access off=0 size=8")
__naked void _64_bit_access_of_member(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r6 = r0;					\
	if r0 == 0 goto l0_%=;				\
	r2 = *(u64*)(r0 + 0);				\
	r1 = r6;					\
	call %[bpf_sk_release];				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

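/* bpf_sk_release() invalidates every copy of the socket pointer, so
 * the load through r1 afterwards fails with "!read_ok".
 */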
SEC("tc")
__description("reference tracking: access after release")
__failure __msg("!read_ok")
__naked void reference_tracking_access_after_release(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r1 = r0;					\
	if r0 == 0 goto l0_%=;				\
	call %[bpf_sk_release];				\
	r2 = *(u32*)(r1 + 0);				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

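/* The tuple argument may point directly into packet data, provided
 * the packet bytes are first bounds-checked against data_end.
 */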
SEC("tc")
__description("reference tracking: direct access for lookup")
__success __retval(0)
__naked void tracking_direct_access_for_lookup(void)
{
	asm volatile ("					\
	/* Check that the packet is at least 64B long */\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 64;					\
	if r0 > r3 goto l0_%=;				\
	/* sk = sk_lookup_tcp(ctx, skb->data, ...) */	\
	r3 = %[sizeof_bpf_sock_tuple];			\
	r4 = 0;						\
	r5 = 0;						\
	call %[bpf_sk_lookup_tcp];			\
	r6 = r0;					\
	if r0 == 0 goto l0_%=;				\
	r2 = *(u32*)(r0 + 4);				\
	r1 = r6;					\
	call %[bpf_sk_release];				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

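/* A tcp_sock pointer derived from a referenced socket shares its
 * lifetime: releasing the socket invalidates the derived pointer,
 * and dereferencing it afterwards is an invalid memory access.
 */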
SEC("tc")
__description("reference tracking: use ptr from bpf_tcp_sock() after release")
__failure __msg("invalid mem access")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void bpf_tcp_sock_after_release(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	call %[bpf_tcp_sock];				\
	if r0 != 0 goto l1_%=;				\
	r1 = r6;					\
	call %[bpf_sk_release];				\
	exit;						\
l1_%=:	r7 = r0;					\
	r1 = r6;					\
	call %[bpf_sk_release];				\
	r0 = *(u32*)(r7 + %[bpf_tcp_sock_snd_cwnd]);	\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm(bpf_tcp_sock),
	  __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("tc")
__description("reference tracking: use ptr from bpf_sk_fullsock() after release")
__failure __msg("invalid mem access")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void bpf_sk_fullsock_after_release(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	call %[bpf_sk_fullsock];			\
	if r0 != 0 goto l1_%=;				\
	r1 = r6;					\
	call %[bpf_sk_release];				\
	exit;						\
l1_%=:	r7 = r0;					\
	r1 = r6;					\
	call %[bpf_sk_release];				\
	r0 = *(u32*)(r7 + %[bpf_sock_type]);		\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("tc")
__description("reference tracking: use ptr from bpf_sk_fullsock(tp) after release")
__failure __msg("invalid mem access")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void sk_fullsock_tp_after_release(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	call %[bpf_tcp_sock];				\
	if r0 != 0 goto l1_%=;				\
	r1 = r6;					\
	call %[bpf_sk_release];				\
	exit;						\
l1_%=:	r1 = r0;					\
	call %[bpf_sk_fullsock];			\
	r1 = r6;					\
	r6 = r0;					\
	call %[bpf_sk_release];				\
	if r6 != 0 goto l2_%=;				\
	exit;						\
l2_%=:	r0 = *(u32*)(r6 + %[bpf_sock_type]);		\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm(bpf_tcp_sock),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

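/* Since the tcp_sock shares the reference of the socket it was
 * derived from, releasing it through bpf_sk_release() also
 * invalidates the original sk pointer still held in r6.
 */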
SEC("tc")
__description("reference tracking: use sk after bpf_sk_release(tp)")
__failure __msg("invalid mem access")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void after_bpf_sk_release_tp(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	call %[bpf_tcp_sock];				\
	if r0 != 0 goto l1_%=;				\
	r1 = r6;					\
	call %[bpf_sk_release];				\
	exit;						\
l1_%=:	r1 = r0;					\
	call %[bpf_sk_release];				\
	r0 = *(u32*)(r6 + %[bpf_sock_type]);		\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm(bpf_tcp_sock),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

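/* The listener returned by bpf_get_listener_sock() is not itself
 * reference-counted (it stays valid under RCU for the rest of the
 * program), so it may still be read after sk has been released.
 */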
SEC("tc")
__description("reference tracking: use ptr from bpf_get_listener_sock() after bpf_sk_release(sk)")
__success __retval(0)
__naked void after_bpf_sk_release_sk(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	call %[bpf_get_listener_sock];			\
	if r0 != 0 goto l1_%=;				\
	r1 = r6;					\
	call %[bpf_sk_release];				\
	exit;						\
l1_%=:	r1 = r6;					\
	r6 = r0;					\
	call %[bpf_sk_release];				\
	r0 = *(u32*)(r6 + %[bpf_sock_src_port]);	\
	exit;						\
"	:
	: __imm(bpf_get_listener_sock),
	  __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(bpf_sock_src_port, offsetof(struct bpf_sock, src_port)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

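/* Conversely, the non-referenced listener pointer must not be passed
 * to bpf_sk_release(); only acquired references may be released.
 */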
SEC("tc")
__description("reference tracking: bpf_sk_release(listen_sk)")
__failure __msg("R1 must be referenced when passed to release function")
__naked void bpf_sk_release_listen_sk(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	call %[bpf_get_listener_sock];			\
	if r0 != 0 goto l1_%=;				\
	r1 = r6;					\
	call %[bpf_sk_release];				\
	exit;						\
l1_%=:	r1 = r0;					\
	call %[bpf_sk_release];				\
	r0 = *(u32*)(r6 + %[bpf_sock_type]);		\
	r1 = r6;					\
	call %[bpf_sk_release];				\
	exit;						\
"	:
	: __imm(bpf_get_listener_sock),
	  __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

/* !bpf_sk_fullsock(sk) is checked but !bpf_tcp_sock(sk) is not checked */
SEC("tc")
__description("reference tracking: tp->snd_cwnd after bpf_sk_fullsock(sk) and bpf_tcp_sock(sk)")
__failure __msg("invalid mem access")
__naked void and_bpf_tcp_sock_sk(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	call %[bpf_sk_fullsock];			\
	r7 = r0;					\
	r1 = r6;					\
	call %[bpf_tcp_sock];				\
	r8 = r0;					\
	if r7 != 0 goto l1_%=;				\
	r1 = r6;					\
	call %[bpf_sk_release];				\
	exit;						\
l1_%=:	r0 = *(u32*)(r8 + %[bpf_tcp_sock_snd_cwnd]);	\
	r1 = r6;					\
	call %[bpf_sk_release];				\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm(bpf_tcp_sock),
	  __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

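/* NULL-ness learned in the first branch is remembered, so the
 * verifier can prove the release runs exactly on the non-NULL paths
 * and no reference leaks.
 */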
SEC("tc")
__description("reference tracking: branch tracking valid pointer null comparison")
__success __retval(0)
__naked void tracking_valid_pointer_null_comparison(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r6 = r0;					\
	r3 = 1;						\
	if r6 != 0 goto l0_%=;				\
	r3 = 0;						\
l0_%=:	if r6 == 0 goto l1_%=;				\
	r1 = r6;					\
	call %[bpf_sk_release];				\
l1_%=:	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

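/* Comparing the pointer against an arbitrary scalar is not a NULL
 * check: the branch taken when r6 == 1234 exits with the reference
 * still held, which the verifier reports as a leak.
 */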
SEC("tc")
__description("reference tracking: branch tracking valid pointer value comparison")
__failure __msg("Unreleased reference")
__naked void tracking_valid_pointer_value_comparison(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r6 = r0;					\
	r3 = 1;						\
	if r6 == 0 goto l0_%=;				\
	r3 = 0;						\
	if r6 == 1234 goto l0_%=;			\
	r1 = r6;					\
	call %[bpf_sk_release];				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

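/* The tcp_sock returned by bpf_skc_to_tcp_sock() carries the same
 * reference as the looked-up socket, so releasing through either
 * pointer satisfies the reference tracker.
 */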
SEC("tc")
__description("reference tracking: bpf_sk_release(btf_tcp_sock)")
__success
__retval(0)
__naked void sk_release_btf_tcp_sock(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	call %[bpf_skc_to_tcp_sock];			\
	if r0 != 0 goto l1_%=;				\
	r1 = r6;					\
	call %[bpf_sk_release];				\
	exit;						\
l1_%=:	r1 = r0;					\
	call %[bpf_sk_release];				\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm(bpf_skc_to_tcp_sock),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

SEC("tc")
__description("reference tracking: use ptr from bpf_skc_to_tcp_sock() after release")
__failure __msg("invalid mem access")
__naked void to_tcp_sock_after_release(void)
{
	asm volatile (
	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r6 = r0;					\
	r1 = r0;					\
	call %[bpf_skc_to_tcp_sock];			\
	if r0 != 0 goto l1_%=;				\
	r1 = r6;					\
	call %[bpf_sk_release];				\
	exit;						\
l1_%=:	r7 = r0;					\
	r1 = r6;					\
	call %[bpf_sk_release];				\
	r0 = *(u8*)(r7 + 0);				\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm(bpf_skc_to_tcp_sock),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}

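/* After bpf_ringbuf_discard() the reserved-buffer pointer in r8 is
 * gone: privileged programs see it as an unknown scalar (the store
 * succeeds), while unprivileged ones see it as uninitialized and the
 * program is rejected with "R8 !read_ok".
 */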
SEC("socket")
__description("reference tracking: try to leak released ptr reg")
__success __failure_unpriv __msg_unpriv("R8 !read_ok")
__retval(0)
__naked void to_leak_released_ptr_reg(void)
{
	asm volatile ("					\
	r0 = 0;						\
	*(u32*)(r10 - 4) = r0;				\
	r2 = r10;					\
	r2 += -4;					\
	r1 = %[map_array_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r9 = r0;					\
	r0 = 0;						\
	r1 = %[map_ringbuf] ll;				\
	r2 = 8;						\
	r3 = 0;						\
	call %[bpf_ringbuf_reserve];			\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r8 = r0;					\
	r1 = r8;					\
	r2 = 0;						\
	call %[bpf_ringbuf_discard];			\
	r0 = 0;						\
	*(u64*)(r9 + 0) = r8;				\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_ringbuf_discard),
	  __imm(bpf_ringbuf_reserve),
	  __imm_addr(map_array_48b),
	  __imm_addr(map_ringbuf)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";