1*426fc0e3SEduard Zingerman // SPDX-License-Identifier: GPL-2.0
2*426fc0e3SEduard Zingerman /* Converted from tools/testing/selftests/bpf/verifier/sock.c */
3*426fc0e3SEduard Zingerman
4*426fc0e3SEduard Zingerman #include <linux/bpf.h>
5*426fc0e3SEduard Zingerman #include <bpf/bpf_helpers.h>
6*426fc0e3SEduard Zingerman #include "bpf_misc.h"
7*426fc0e3SEduard Zingerman
/* Local copies of the kernel's sizeof_field()/offsetofend() helpers
 * (not available from UAPI headers): size of a struct member, and the
 * byte offset one past the end of a member.  Used below to probe loads
 * just past a field's boundary.
 */
#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))
11*426fc0e3SEduard Zingerman
/* Single-slot REUSEPORT_SOCKARRAY; value is a socket FD (u64). */
struct {
	__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} map_reuseport_array SEC(".maps");
18*426fc0e3SEduard Zingerman
/* Single-slot SOCKHASH used by the socket-map update tests. */
struct {
	__uint(type, BPF_MAP_TYPE_SOCKHASH);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} map_sockhash SEC(".maps");
25*426fc0e3SEduard Zingerman
/* Single-slot SOCKMAP used by the socket-map update tests. */
struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} map_sockmap SEC(".maps");
32*426fc0e3SEduard Zingerman
/* Single-slot XSKMAP (AF_XDP socket map). */
struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} map_xskmap SEC(".maps");
39*426fc0e3SEduard Zingerman
/* sk_storage value: a counter protected by an embedded spin lock. */
struct val {
	int cnt;
	struct bpf_spin_lock l;
};
44*426fc0e3SEduard Zingerman
/* Per-socket storage map; SK_STORAGE requires BPF_F_NO_PREALLOC. */
struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(max_entries, 0);
	__type(key, int);
	__type(value, struct val);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} sk_storage_map SEC(".maps");
52*426fc0e3SEduard Zingerman
53*426fc0e3SEduard Zingerman SEC("cgroup/skb")
54*426fc0e3SEduard Zingerman __description("skb->sk: no NULL check")
55*426fc0e3SEduard Zingerman __failure __msg("invalid mem access 'sock_common_or_null'")
56*426fc0e3SEduard Zingerman __failure_unpriv
skb_sk_no_null_check(void)57*426fc0e3SEduard Zingerman __naked void skb_sk_no_null_check(void)
58*426fc0e3SEduard Zingerman {
59*426fc0e3SEduard Zingerman asm volatile (" \
60*426fc0e3SEduard Zingerman r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
61*426fc0e3SEduard Zingerman r0 = *(u32*)(r1 + 0); \
62*426fc0e3SEduard Zingerman r0 = 0; \
63*426fc0e3SEduard Zingerman exit; \
64*426fc0e3SEduard Zingerman " :
65*426fc0e3SEduard Zingerman : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
66*426fc0e3SEduard Zingerman : __clobber_all);
67*426fc0e3SEduard Zingerman }
68*426fc0e3SEduard Zingerman
69*426fc0e3SEduard Zingerman SEC("cgroup/skb")
70*426fc0e3SEduard Zingerman __description("skb->sk: sk->family [non fullsock field]")
71*426fc0e3SEduard Zingerman __success __success_unpriv __retval(0)
sk_family_non_fullsock_field_1(void)72*426fc0e3SEduard Zingerman __naked void sk_family_non_fullsock_field_1(void)
73*426fc0e3SEduard Zingerman {
74*426fc0e3SEduard Zingerman asm volatile (" \
75*426fc0e3SEduard Zingerman r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
76*426fc0e3SEduard Zingerman if r1 != 0 goto l0_%=; \
77*426fc0e3SEduard Zingerman r0 = 0; \
78*426fc0e3SEduard Zingerman exit; \
79*426fc0e3SEduard Zingerman l0_%=: r0 = *(u32*)(r1 + %[bpf_sock_family]); \
80*426fc0e3SEduard Zingerman r0 = 0; \
81*426fc0e3SEduard Zingerman exit; \
82*426fc0e3SEduard Zingerman " :
83*426fc0e3SEduard Zingerman : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
84*426fc0e3SEduard Zingerman __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family))
85*426fc0e3SEduard Zingerman : __clobber_all);
86*426fc0e3SEduard Zingerman }
87*426fc0e3SEduard Zingerman
88*426fc0e3SEduard Zingerman SEC("cgroup/skb")
89*426fc0e3SEduard Zingerman __description("skb->sk: sk->type [fullsock field]")
90*426fc0e3SEduard Zingerman __failure __msg("invalid sock_common access")
91*426fc0e3SEduard Zingerman __failure_unpriv
sk_sk_type_fullsock_field_1(void)92*426fc0e3SEduard Zingerman __naked void sk_sk_type_fullsock_field_1(void)
93*426fc0e3SEduard Zingerman {
94*426fc0e3SEduard Zingerman asm volatile (" \
95*426fc0e3SEduard Zingerman r1 = *(u64*)(r1 + %[__sk_buff_sk]); \
96*426fc0e3SEduard Zingerman if r1 != 0 goto l0_%=; \
97*426fc0e3SEduard Zingerman r0 = 0; \
98*426fc0e3SEduard Zingerman exit; \
99*426fc0e3SEduard Zingerman l0_%=: r0 = *(u32*)(r1 + %[bpf_sock_type]); \
100*426fc0e3SEduard Zingerman r0 = 0; \
101*426fc0e3SEduard Zingerman exit; \
102*426fc0e3SEduard Zingerman " :
103*426fc0e3SEduard Zingerman : __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
104*426fc0e3SEduard Zingerman __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
105*426fc0e3SEduard Zingerman : __clobber_all);
106*426fc0e3SEduard Zingerman }
107*426fc0e3SEduard Zingerman
SEC("cgroup/skb")
__description("bpf_sk_fullsock(skb->sk): no !skb->sk check")
__failure __msg("type=sock_common_or_null expected=sock_common")
__failure_unpriv
__naked void sk_no_skb_sk_check_1(void)
{
	/* Pass the possibly-NULL skb->sk straight to bpf_sk_fullsock():
	 * the helper expects a non-NULL sock_common, so this is rejected.
	 */
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	call %[bpf_sk_fullsock];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}
124*426fc0e3SEduard Zingerman
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): no NULL check on ret")
__failure __msg("invalid mem access 'sock_or_null'")
__failure_unpriv
__naked void no_null_check_on_ret_1(void)
{
	/* bpf_sk_fullsock() returns PTR_TO_SOCKET_OR_NULL; dereferencing
	 * the return value without a NULL check must be rejected.
	 */
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];		\
	r0 = *(u32*)(r0 + %[bpf_sock_type]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}
146*426fc0e3SEduard Zingerman
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->type [fullsock field]")
__success __success_unpriv __retval(0)
__naked void sk_sk_type_fullsock_field_2(void)
{
	/* With both skb->sk and the bpf_sk_fullsock() result NULL-checked,
	 * reading the fullsock-only sk->type field is permitted.
	 */
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u32*)(r0 + %[bpf_sock_type]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}
170*426fc0e3SEduard Zingerman
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->family [non fullsock field]")
__success __success_unpriv __retval(0)
__naked void sk_family_non_fullsock_field_2(void)
{
	/* A fullsock pointer may also read sock_common fields such as
	 * family.
	 */
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r0 = *(u32*)(r0 + %[bpf_sock_family]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_family, offsetof(struct bpf_sock, family))
	: __clobber_all);
}
193*426fc0e3SEduard Zingerman
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->state [narrow load]")
__success __success_unpriv __retval(0)
__naked void sk_sk_state_narrow_load(void)
{
	/* Narrow (u8) load of bpf_sock.state is a supported narrow access
	 * on a fullsock pointer.
	 */
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u8*)(r0 + %[bpf_sock_state]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_state, offsetof(struct bpf_sock, state))
	: __clobber_all);
}
217*426fc0e3SEduard Zingerman
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [word load] (backward compatibility)")
__success __success_unpriv __retval(0)
__naked void port_word_load_backward_compatibility(void)
{
	/* dst_port is 2 bytes, but a full u32 load at its offset is kept
	 * working for backward compatibility with older programs.
	 */
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u32*)(r0 + %[bpf_sock_dst_port]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
	: __clobber_all);
}
241*426fc0e3SEduard Zingerman
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [half load]")
__success __success_unpriv __retval(0)
__naked void sk_dst_port_half_load(void)
{
	/* u16 load exactly at the dst_port offset matches the field size
	 * and is permitted.
	 */
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u16*)(r0 + %[bpf_sock_dst_port]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
	: __clobber_all);
}
265*426fc0e3SEduard Zingerman
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [half load] (invalid)")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void dst_port_half_load_invalid_1(void)
{
	/* u16 load at dst_port + 2 lands in the zero-padding after the
	 * 2-byte field and must be rejected.
	 */
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u16*)(r0 + %[__imm_0]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}
290*426fc0e3SEduard Zingerman
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [byte load]")
__success __success_unpriv __retval(0)
__naked void sk_dst_port_byte_load(void)
{
	/* u8 loads at dst_port + 0 and + 1 stay inside the 2-byte field
	 * and are valid narrow accesses.
	 */
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r2 = *(u8*)(r0 + %[bpf_sock_dst_port]);	\
	r2 = *(u8*)(r0 + %[__imm_0]);			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 1),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_dst_port, offsetof(struct bpf_sock, dst_port))
	: __clobber_all);
}
316*426fc0e3SEduard Zingerman
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_port [byte load] (invalid)")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void dst_port_byte_load_invalid(void)
{
	/* u8 load at dst_port + 2 is past the field and must be rejected. */
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u8*)(r0 + %[__imm_0]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__imm_0, offsetof(struct bpf_sock, dst_port) + 2),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}
341*426fc0e3SEduard Zingerman
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): past sk->dst_port [half load] (invalid)")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void dst_port_half_load_invalid_2(void)
{
	/* u16 load at offsetofend(dst_port) starts exactly past the field
	 * boundary and must be rejected.
	 */
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u16*)(r0 + %[bpf_sock_dst_port__end]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_dst_port__end, offsetofend(struct bpf_sock, dst_port))
	: __clobber_all);
}
366*426fc0e3SEduard Zingerman
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->dst_ip6 [load 2nd byte]")
__success __success_unpriv __retval(0)
__naked void dst_ip6_load_2nd_byte(void)
{
	/* u8 load at a non-zero offset inside dst_ip6[0]: narrow loads at
	 * unaligned byte offsets within a field are supported.
	 */
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u8*)(r0 + %[__imm_0]);		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__imm_0, offsetof(struct bpf_sock, dst_ip6[0]) + 1),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}
390*426fc0e3SEduard Zingerman
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->type [narrow load]")
__success __success_unpriv __retval(0)
__naked void sk_sk_type_narrow_load(void)
{
	/* u8 narrow load of the 4-byte sk->type field is permitted. */
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u8*)(r0 + %[bpf_sock_type]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}
414*426fc0e3SEduard Zingerman
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): sk->protocol [narrow load]")
__success __success_unpriv __retval(0)
__naked void sk_sk_protocol_narrow_load(void)
{
	/* u8 narrow load of the 4-byte sk->protocol field is permitted. */
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u8*)(r0 + %[bpf_sock_protocol]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_protocol, offsetof(struct bpf_sock, protocol))
	: __clobber_all);
}
438*426fc0e3SEduard Zingerman
SEC("cgroup/skb")
__description("sk_fullsock(skb->sk): beyond last field")
__failure __msg("invalid sock access")
__failure_unpriv
__naked void skb_sk_beyond_last_field_1(void)
{
	/* Load starting at offsetofend(rx_queue_mapping) — one past the
	 * last field of struct bpf_sock — must be rejected.
	 */
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;				\
	r0 = 0;						\
	exit;						\
l1_%=:	r0 = *(u32*)(r0 + %[bpf_sock_rx_queue_mapping__end]);\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_rx_queue_mapping__end, offsetofend(struct bpf_sock, rx_queue_mapping))
	: __clobber_all);
}
463*426fc0e3SEduard Zingerman
SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): no !skb->sk check")
__failure __msg("type=sock_common_or_null expected=sock_common")
__failure_unpriv
__naked void sk_no_skb_sk_check_2(void)
{
	/* Pass the possibly-NULL skb->sk straight to bpf_tcp_sock():
	 * rejected for the same reason as the bpf_sk_fullsock() variant.
	 */
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	call %[bpf_tcp_sock];				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
	: __clobber_all);
}
480*426fc0e3SEduard Zingerman
SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): no NULL check on ret")
__failure __msg("invalid mem access 'tcp_sock_or_null'")
__failure_unpriv
__naked void no_null_check_on_ret_2(void)
{
	/* bpf_tcp_sock() returns PTR_TO_TCP_SOCK_OR_NULL; dereferencing
	 * without a NULL check must be rejected.
	 */
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_tcp_sock];			\
	r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
	: __clobber_all);
}
502*426fc0e3SEduard Zingerman
SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): tp->snd_cwnd")
__success __success_unpriv __retval(0)
__naked void skb_sk_tp_snd_cwnd_1(void)
{
	/* Fully NULL-checked bpf_tcp_sock() result: reading
	 * tp->snd_cwnd is permitted.
	 */
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_tcp_sock];			\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
	: __clobber_all);
}
525*426fc0e3SEduard Zingerman
SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): tp->bytes_acked")
__success __success_unpriv __retval(0)
__naked void skb_sk_tp_bytes_acked(void)
{
	/* u64 load of tp->bytes_acked (an 8-byte bpf_tcp_sock field) on a
	 * NULL-checked tcp_sock pointer is permitted.
	 */
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_tcp_sock];			\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_tcp_sock_bytes_acked, offsetof(struct bpf_tcp_sock, bytes_acked))
	: __clobber_all);
}
548*426fc0e3SEduard Zingerman
SEC("cgroup/skb")
__description("bpf_tcp_sock(skb->sk): beyond last field")
__failure __msg("invalid tcp_sock access")
__failure_unpriv
__naked void skb_sk_beyond_last_field_2(void)
{
	/* Load at offsetofend(bytes_acked) — one past the last field of
	 * struct bpf_tcp_sock — must be rejected.
	 */
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_tcp_sock];			\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r0 = *(u64*)(r0 + %[bpf_tcp_sock_bytes_acked__end]);\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_tcp_sock_bytes_acked__end, offsetofend(struct bpf_tcp_sock, bytes_acked))
	: __clobber_all);
}
572*426fc0e3SEduard Zingerman
SEC("cgroup/skb")
__description("bpf_tcp_sock(bpf_sk_fullsock(skb->sk)): tp->snd_cwnd")
__success __success_unpriv __retval(0)
__naked void skb_sk_tp_snd_cwnd_2(void)
{
	/* Chain skb->sk -> bpf_sk_fullsock() -> bpf_tcp_sock(), NULL-
	 * checking each step; reading tp->snd_cwnd is then permitted.
	 */
	asm volatile ("					\
	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	if r1 != 0 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	call %[bpf_sk_fullsock];		\
	if r0 != 0 goto l1_%=;				\
	exit;						\
l1_%=:	r1 = r0;				\
	call %[bpf_tcp_sock];				\
	if r0 != 0 goto l2_%=;				\
	exit;						\
l2_%=:	r0 = *(u32*)(r0 + %[bpf_tcp_sock_snd_cwnd]);	\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm(bpf_tcp_sock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd))
	: __clobber_all);
}
600*426fc0e3SEduard Zingerman
/*
 * Negative test: skb->sk is not an acquired (referenced) socket, so
 * passing it to the release function bpf_sk_release() must be rejected
 * with "R1 must be referenced when passed to release function".
 */
601*426fc0e3SEduard Zingerman SEC("tc")
602*426fc0e3SEduard Zingerman __description("bpf_sk_release(skb->sk)")
603*426fc0e3SEduard Zingerman __failure __msg("R1 must be referenced when passed to release function")
bpf_sk_release_skb_sk(void)604*426fc0e3SEduard Zingerman __naked void bpf_sk_release_skb_sk(void)
605*426fc0e3SEduard Zingerman {
606*426fc0e3SEduard Zingerman 	asm volatile ("					\
607*426fc0e3SEduard Zingerman 	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
608*426fc0e3SEduard Zingerman 	if r1 == 0 goto l0_%=;				\
609*426fc0e3SEduard Zingerman 	call %[bpf_sk_release];				\
610*426fc0e3SEduard Zingerman l0_%=:	r0 = 0;						\
611*426fc0e3SEduard Zingerman 	exit;						\
612*426fc0e3SEduard Zingerman "	:
613*426fc0e3SEduard Zingerman 	: __imm(bpf_sk_release),
614*426fc0e3SEduard Zingerman 	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
615*426fc0e3SEduard Zingerman 	: __clobber_all);
616*426fc0e3SEduard Zingerman }
617*426fc0e3SEduard Zingerman
/*
 * Negative test: the pointer returned by bpf_sk_fullsock(skb->sk) is
 * still unreferenced, so releasing it with bpf_sk_release() must be
 * rejected just like releasing skb->sk directly.
 */
618*426fc0e3SEduard Zingerman SEC("tc")
619*426fc0e3SEduard Zingerman __description("bpf_sk_release(bpf_sk_fullsock(skb->sk))")
620*426fc0e3SEduard Zingerman __failure __msg("R1 must be referenced when passed to release function")
bpf_sk_fullsock_skb_sk(void)621*426fc0e3SEduard Zingerman __naked void bpf_sk_fullsock_skb_sk(void)
622*426fc0e3SEduard Zingerman {
623*426fc0e3SEduard Zingerman 	asm volatile ("					\
624*426fc0e3SEduard Zingerman 	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
625*426fc0e3SEduard Zingerman 	if r1 != 0 goto l0_%=;				\
626*426fc0e3SEduard Zingerman 	r0 = 0;						\
627*426fc0e3SEduard Zingerman 	exit;						\
628*426fc0e3SEduard Zingerman l0_%=:	call %[bpf_sk_fullsock];			\
629*426fc0e3SEduard Zingerman 	if r0 != 0 goto l1_%=;				\
630*426fc0e3SEduard Zingerman 	exit;						\
631*426fc0e3SEduard Zingerman l1_%=:	r1 = r0;					\
632*426fc0e3SEduard Zingerman 	call %[bpf_sk_release];				\
633*426fc0e3SEduard Zingerman 	r0 = 1;						\
634*426fc0e3SEduard Zingerman 	exit;						\
635*426fc0e3SEduard Zingerman "	:
636*426fc0e3SEduard Zingerman 	: __imm(bpf_sk_fullsock),
637*426fc0e3SEduard Zingerman 	  __imm(bpf_sk_release),
638*426fc0e3SEduard Zingerman 	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
639*426fc0e3SEduard Zingerman 	: __clobber_all);
640*426fc0e3SEduard Zingerman }
641*426fc0e3SEduard Zingerman
/*
 * Negative test: the pointer returned by bpf_tcp_sock(skb->sk) is
 * unreferenced, so passing it to bpf_sk_release() must be rejected
 * with "R1 must be referenced when passed to release function".
 */
642*426fc0e3SEduard Zingerman SEC("tc")
643*426fc0e3SEduard Zingerman __description("bpf_sk_release(bpf_tcp_sock(skb->sk))")
644*426fc0e3SEduard Zingerman __failure __msg("R1 must be referenced when passed to release function")
bpf_tcp_sock_skb_sk(void)645*426fc0e3SEduard Zingerman __naked void bpf_tcp_sock_skb_sk(void)
646*426fc0e3SEduard Zingerman {
647*426fc0e3SEduard Zingerman 	asm volatile ("					\
648*426fc0e3SEduard Zingerman 	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
649*426fc0e3SEduard Zingerman 	if r1 != 0 goto l0_%=;				\
650*426fc0e3SEduard Zingerman 	r0 = 0;						\
651*426fc0e3SEduard Zingerman 	exit;						\
652*426fc0e3SEduard Zingerman l0_%=:	call %[bpf_tcp_sock];				\
653*426fc0e3SEduard Zingerman 	if r0 != 0 goto l1_%=;				\
654*426fc0e3SEduard Zingerman 	exit;						\
655*426fc0e3SEduard Zingerman l1_%=:	r1 = r0;					\
656*426fc0e3SEduard Zingerman 	call %[bpf_sk_release];				\
657*426fc0e3SEduard Zingerman 	r0 = 1;						\
658*426fc0e3SEduard Zingerman 	exit;						\
659*426fc0e3SEduard Zingerman "	:
660*426fc0e3SEduard Zingerman 	: __imm(bpf_sk_release),
661*426fc0e3SEduard Zingerman 	  __imm(bpf_tcp_sock),
662*426fc0e3SEduard Zingerman 	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
663*426fc0e3SEduard Zingerman 	: __clobber_all);
664*426fc0e3SEduard Zingerman }
665*426fc0e3SEduard Zingerman
/*
 * Positive test: bpf_sk_storage_get(map, sk, NULL, 0) — a NULL value
 * pointer (r3 = 0) together with flags 0 is a legal argument
 * combination, so the program must load.
 */
666*426fc0e3SEduard Zingerman SEC("tc")
667*426fc0e3SEduard Zingerman __description("sk_storage_get(map, skb->sk, NULL, 0): value == NULL")
668*426fc0e3SEduard Zingerman __success __retval(0)
sk_null_0_value_null(void)669*426fc0e3SEduard Zingerman __naked void sk_null_0_value_null(void)
670*426fc0e3SEduard Zingerman {
671*426fc0e3SEduard Zingerman 	asm volatile ("					\
672*426fc0e3SEduard Zingerman 	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
673*426fc0e3SEduard Zingerman 	if r1 != 0 goto l0_%=;				\
674*426fc0e3SEduard Zingerman 	r0 = 0;						\
675*426fc0e3SEduard Zingerman 	exit;						\
676*426fc0e3SEduard Zingerman l0_%=:	call %[bpf_sk_fullsock];			\
677*426fc0e3SEduard Zingerman 	if r0 != 0 goto l1_%=;				\
678*426fc0e3SEduard Zingerman 	r0 = 0;						\
679*426fc0e3SEduard Zingerman 	exit;						\
680*426fc0e3SEduard Zingerman l1_%=:	r4 = 0;						\
681*426fc0e3SEduard Zingerman 	r3 = 0;						\
682*426fc0e3SEduard Zingerman 	r2 = r0;					\
683*426fc0e3SEduard Zingerman 	r1 = %[sk_storage_map] ll;			\
684*426fc0e3SEduard Zingerman 	call %[bpf_sk_storage_get];			\
685*426fc0e3SEduard Zingerman 	r0 = 0;						\
686*426fc0e3SEduard Zingerman 	exit;						\
687*426fc0e3SEduard Zingerman "	:
688*426fc0e3SEduard Zingerman 	: __imm(bpf_sk_fullsock),
689*426fc0e3SEduard Zingerman 	  __imm(bpf_sk_storage_get),
690*426fc0e3SEduard Zingerman 	  __imm_addr(sk_storage_map),
691*426fc0e3SEduard Zingerman 	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
692*426fc0e3SEduard Zingerman 	: __clobber_all);
693*426fc0e3SEduard Zingerman }
694*426fc0e3SEduard Zingerman
/*
 * Negative test: bpf_sk_storage_get() with the scalar 1 as the value
 * argument (r3 = 1) — the helper expects a stack pointer there, so the
 * verifier must reject with "R3 type=scalar expected=fp".
 */
695*426fc0e3SEduard Zingerman SEC("tc")
696*426fc0e3SEduard Zingerman __description("sk_storage_get(map, skb->sk, 1, 1): value == 1")
697*426fc0e3SEduard Zingerman __failure __msg("R3 type=scalar expected=fp")
sk_1_1_value_1(void)698*426fc0e3SEduard Zingerman __naked void sk_1_1_value_1(void)
699*426fc0e3SEduard Zingerman {
700*426fc0e3SEduard Zingerman 	asm volatile ("					\
701*426fc0e3SEduard Zingerman 	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
702*426fc0e3SEduard Zingerman 	if r1 != 0 goto l0_%=;				\
703*426fc0e3SEduard Zingerman 	r0 = 0;						\
704*426fc0e3SEduard Zingerman 	exit;						\
705*426fc0e3SEduard Zingerman l0_%=:	call %[bpf_sk_fullsock];			\
706*426fc0e3SEduard Zingerman 	if r0 != 0 goto l1_%=;				\
707*426fc0e3SEduard Zingerman 	r0 = 0;						\
708*426fc0e3SEduard Zingerman 	exit;						\
709*426fc0e3SEduard Zingerman l1_%=:	r4 = 1;						\
710*426fc0e3SEduard Zingerman 	r3 = 1;						\
711*426fc0e3SEduard Zingerman 	r2 = r0;					\
712*426fc0e3SEduard Zingerman 	r1 = %[sk_storage_map] ll;			\
713*426fc0e3SEduard Zingerman 	call %[bpf_sk_storage_get];			\
714*426fc0e3SEduard Zingerman 	r0 = 0;						\
715*426fc0e3SEduard Zingerman 	exit;						\
716*426fc0e3SEduard Zingerman "	:
717*426fc0e3SEduard Zingerman 	: __imm(bpf_sk_fullsock),
718*426fc0e3SEduard Zingerman 	  __imm(bpf_sk_storage_get),
719*426fc0e3SEduard Zingerman 	  __imm_addr(sk_storage_map),
720*426fc0e3SEduard Zingerman 	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
721*426fc0e3SEduard Zingerman 	: __clobber_all);
722*426fc0e3SEduard Zingerman }
723*426fc0e3SEduard Zingerman
/*
 * Positive test: bpf_sk_storage_get() with a zero-initialized stack
 * slot (fp-8) as the value pointer and flags = 1 (create-if-missing)
 * is a valid call; the program must load.
 */
724*426fc0e3SEduard Zingerman SEC("tc")
725*426fc0e3SEduard Zingerman __description("sk_storage_get(map, skb->sk, &stack_value, 1): stack_value")
726*426fc0e3SEduard Zingerman __success __retval(0)
stack_value_1_stack_value(void)727*426fc0e3SEduard Zingerman __naked void stack_value_1_stack_value(void)
728*426fc0e3SEduard Zingerman {
729*426fc0e3SEduard Zingerman 	asm volatile ("					\
730*426fc0e3SEduard Zingerman 	r2 = 0;						\
731*426fc0e3SEduard Zingerman 	*(u64*)(r10 - 8) = r2;				\
732*426fc0e3SEduard Zingerman 	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
733*426fc0e3SEduard Zingerman 	if r1 != 0 goto l0_%=;				\
734*426fc0e3SEduard Zingerman 	r0 = 0;						\
735*426fc0e3SEduard Zingerman 	exit;						\
736*426fc0e3SEduard Zingerman l0_%=:	call %[bpf_sk_fullsock];			\
737*426fc0e3SEduard Zingerman 	if r0 != 0 goto l1_%=;				\
738*426fc0e3SEduard Zingerman 	r0 = 0;						\
739*426fc0e3SEduard Zingerman 	exit;						\
740*426fc0e3SEduard Zingerman l1_%=:	r4 = 1;						\
741*426fc0e3SEduard Zingerman 	r3 = r10;					\
742*426fc0e3SEduard Zingerman 	r3 += -8;					\
743*426fc0e3SEduard Zingerman 	r2 = r0;					\
744*426fc0e3SEduard Zingerman 	r1 = %[sk_storage_map] ll;			\
745*426fc0e3SEduard Zingerman 	call %[bpf_sk_storage_get];			\
746*426fc0e3SEduard Zingerman 	r0 = 0;						\
747*426fc0e3SEduard Zingerman 	exit;						\
748*426fc0e3SEduard Zingerman "	:
749*426fc0e3SEduard Zingerman 	: __imm(bpf_sk_fullsock),
750*426fc0e3SEduard Zingerman 	  __imm(bpf_sk_storage_get),
751*426fc0e3SEduard Zingerman 	  __imm_addr(sk_storage_map),
752*426fc0e3SEduard Zingerman 	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
753*426fc0e3SEduard Zingerman 	: __clobber_all);
754*426fc0e3SEduard Zingerman }
755*426fc0e3SEduard Zingerman
/*
 * Negative test: generic bpf_map_lookup_elem() is not permitted on a
 * socket-storage map; the verifier rejects with
 * "cannot pass map_type 24 into func bpf_map_lookup_elem".
 */
756*426fc0e3SEduard Zingerman SEC("tc")
757*426fc0e3SEduard Zingerman __description("bpf_map_lookup_elem(smap, &key)")
758*426fc0e3SEduard Zingerman __failure __msg("cannot pass map_type 24 into func bpf_map_lookup_elem")
map_lookup_elem_smap_key(void)759*426fc0e3SEduard Zingerman __naked void map_lookup_elem_smap_key(void)
760*426fc0e3SEduard Zingerman {
761*426fc0e3SEduard Zingerman 	asm volatile ("					\
762*426fc0e3SEduard Zingerman 	r1 = 0;						\
763*426fc0e3SEduard Zingerman 	*(u32*)(r10 - 4) = r1;				\
764*426fc0e3SEduard Zingerman 	r2 = r10;					\
765*426fc0e3SEduard Zingerman 	r2 += -4;					\
766*426fc0e3SEduard Zingerman 	r1 = %[sk_storage_map] ll;			\
767*426fc0e3SEduard Zingerman 	call %[bpf_map_lookup_elem];			\
768*426fc0e3SEduard Zingerman 	r0 = 0;						\
769*426fc0e3SEduard Zingerman 	exit;						\
770*426fc0e3SEduard Zingerman "	:
771*426fc0e3SEduard Zingerman 	: __imm(bpf_map_lookup_elem),
772*426fc0e3SEduard Zingerman 	  __imm_addr(sk_storage_map)
773*426fc0e3SEduard Zingerman 	: __clobber_all);
774*426fc0e3SEduard Zingerman }
775*426fc0e3SEduard Zingerman
/*
 * Positive test: an XDP program may look up an XSKMAP entry with
 * bpf_map_lookup_elem() and, after the NULL check, read the queue_id
 * field of the returned struct bpf_xdp_sock.
 */
776*426fc0e3SEduard Zingerman SEC("xdp")
777*426fc0e3SEduard Zingerman __description("bpf_map_lookup_elem(xskmap, &key); xs->queue_id")
778*426fc0e3SEduard Zingerman __success __retval(0)
xskmap_key_xs_queue_id(void)779*426fc0e3SEduard Zingerman __naked void xskmap_key_xs_queue_id(void)
780*426fc0e3SEduard Zingerman {
781*426fc0e3SEduard Zingerman 	asm volatile ("					\
782*426fc0e3SEduard Zingerman 	r1 = 0;						\
783*426fc0e3SEduard Zingerman 	*(u32*)(r10 - 8) = r1;				\
784*426fc0e3SEduard Zingerman 	r2 = r10;					\
785*426fc0e3SEduard Zingerman 	r2 += -8;					\
786*426fc0e3SEduard Zingerman 	r1 = %[map_xskmap] ll;				\
787*426fc0e3SEduard Zingerman 	call %[bpf_map_lookup_elem];			\
788*426fc0e3SEduard Zingerman 	if r0 != 0 goto l0_%=;				\
789*426fc0e3SEduard Zingerman 	exit;						\
790*426fc0e3SEduard Zingerman l0_%=:	r0 = *(u32*)(r0 + %[bpf_xdp_sock_queue_id]);	\
791*426fc0e3SEduard Zingerman 	r0 = 0;						\
792*426fc0e3SEduard Zingerman 	exit;						\
793*426fc0e3SEduard Zingerman "	:
794*426fc0e3SEduard Zingerman 	: __imm(bpf_map_lookup_elem),
795*426fc0e3SEduard Zingerman 	  __imm_addr(map_xskmap),
796*426fc0e3SEduard Zingerman 	  __imm_const(bpf_xdp_sock_queue_id, offsetof(struct bpf_xdp_sock, queue_id))
797*426fc0e3SEduard Zingerman 	: __clobber_all);
798*426fc0e3SEduard Zingerman }
799*426fc0e3SEduard Zingerman
/*
 * Negative test: a sockmap lookup acquires a reference on the returned
 * socket; exiting without bpf_sk_release() must fail with
 * "Unreleased reference".
 */
800*426fc0e3SEduard Zingerman SEC("sk_skb")
801*426fc0e3SEduard Zingerman __description("bpf_map_lookup_elem(sockmap, &key)")
802*426fc0e3SEduard Zingerman __failure __msg("Unreleased reference id=2 alloc_insn=6")
map_lookup_elem_sockmap_key(void)803*426fc0e3SEduard Zingerman __naked void map_lookup_elem_sockmap_key(void)
804*426fc0e3SEduard Zingerman {
805*426fc0e3SEduard Zingerman 	asm volatile ("					\
806*426fc0e3SEduard Zingerman 	r1 = 0;						\
807*426fc0e3SEduard Zingerman 	*(u32*)(r10 - 4) = r1;				\
808*426fc0e3SEduard Zingerman 	r2 = r10;					\
809*426fc0e3SEduard Zingerman 	r2 += -4;					\
810*426fc0e3SEduard Zingerman 	r1 = %[map_sockmap] ll;				\
811*426fc0e3SEduard Zingerman 	call %[bpf_map_lookup_elem];			\
812*426fc0e3SEduard Zingerman 	r0 = 0;						\
813*426fc0e3SEduard Zingerman 	exit;						\
814*426fc0e3SEduard Zingerman "	:
815*426fc0e3SEduard Zingerman 	: __imm(bpf_map_lookup_elem),
816*426fc0e3SEduard Zingerman 	  __imm_addr(map_sockmap)
817*426fc0e3SEduard Zingerman 	: __clobber_all);
818*426fc0e3SEduard Zingerman }
819*426fc0e3SEduard Zingerman
/*
 * Negative test: same as the sockmap case above but for a SOCKHASH map —
 * the looked-up socket reference is never released, so the verifier must
 * report "Unreleased reference".
 */
820*426fc0e3SEduard Zingerman SEC("sk_skb")
821*426fc0e3SEduard Zingerman __description("bpf_map_lookup_elem(sockhash, &key)")
822*426fc0e3SEduard Zingerman __failure __msg("Unreleased reference id=2 alloc_insn=6")
map_lookup_elem_sockhash_key(void)823*426fc0e3SEduard Zingerman __naked void map_lookup_elem_sockhash_key(void)
824*426fc0e3SEduard Zingerman {
825*426fc0e3SEduard Zingerman 	asm volatile ("					\
826*426fc0e3SEduard Zingerman 	r1 = 0;						\
827*426fc0e3SEduard Zingerman 	*(u32*)(r10 - 4) = r1;				\
828*426fc0e3SEduard Zingerman 	r2 = r10;					\
829*426fc0e3SEduard Zingerman 	r2 += -4;					\
830*426fc0e3SEduard Zingerman 	r1 = %[map_sockhash] ll;			\
831*426fc0e3SEduard Zingerman 	call %[bpf_map_lookup_elem];			\
832*426fc0e3SEduard Zingerman 	r0 = 0;						\
833*426fc0e3SEduard Zingerman 	exit;						\
834*426fc0e3SEduard Zingerman "	:
835*426fc0e3SEduard Zingerman 	: __imm(bpf_map_lookup_elem),
836*426fc0e3SEduard Zingerman 	  __imm_addr(map_sockhash)
837*426fc0e3SEduard Zingerman 	: __clobber_all);
838*426fc0e3SEduard Zingerman }
839*426fc0e3SEduard Zingerman
/*
 * Positive test: after a sockmap lookup, reading the fullsock field
 * sk->type and then releasing the acquired reference with
 * bpf_sk_release() is accepted.
 */
840*426fc0e3SEduard Zingerman SEC("sk_skb")
841*426fc0e3SEduard Zingerman __description("bpf_map_lookup_elem(sockmap, &key); sk->type [fullsock field]; bpf_sk_release(sk)")
842*426fc0e3SEduard Zingerman __success
field_bpf_sk_release_sk_1(void)843*426fc0e3SEduard Zingerman __naked void field_bpf_sk_release_sk_1(void)
844*426fc0e3SEduard Zingerman {
845*426fc0e3SEduard Zingerman 	asm volatile ("					\
846*426fc0e3SEduard Zingerman 	r1 = 0;						\
847*426fc0e3SEduard Zingerman 	*(u32*)(r10 - 4) = r1;				\
848*426fc0e3SEduard Zingerman 	r2 = r10;					\
849*426fc0e3SEduard Zingerman 	r2 += -4;					\
850*426fc0e3SEduard Zingerman 	r1 = %[map_sockmap] ll;				\
851*426fc0e3SEduard Zingerman 	call %[bpf_map_lookup_elem];			\
852*426fc0e3SEduard Zingerman 	if r0 != 0 goto l0_%=;				\
853*426fc0e3SEduard Zingerman 	exit;						\
854*426fc0e3SEduard Zingerman l0_%=:	r1 = r0;					\
855*426fc0e3SEduard Zingerman 	r0 = *(u32*)(r0 + %[bpf_sock_type]);		\
856*426fc0e3SEduard Zingerman 	call %[bpf_sk_release];				\
857*426fc0e3SEduard Zingerman 	exit;						\
858*426fc0e3SEduard Zingerman "	:
859*426fc0e3SEduard Zingerman 	: __imm(bpf_map_lookup_elem),
860*426fc0e3SEduard Zingerman 	  __imm(bpf_sk_release),
861*426fc0e3SEduard Zingerman 	  __imm_addr(map_sockmap),
862*426fc0e3SEduard Zingerman 	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
863*426fc0e3SEduard Zingerman 	: __clobber_all);
864*426fc0e3SEduard Zingerman }
865*426fc0e3SEduard Zingerman
/*
 * Positive test: SOCKHASH variant of the previous test — lookup, read
 * sk->type, then bpf_sk_release() the reference; must be accepted.
 */
866*426fc0e3SEduard Zingerman SEC("sk_skb")
867*426fc0e3SEduard Zingerman __description("bpf_map_lookup_elem(sockhash, &key); sk->type [fullsock field]; bpf_sk_release(sk)")
868*426fc0e3SEduard Zingerman __success
field_bpf_sk_release_sk_2(void)869*426fc0e3SEduard Zingerman __naked void field_bpf_sk_release_sk_2(void)
870*426fc0e3SEduard Zingerman {
871*426fc0e3SEduard Zingerman 	asm volatile ("					\
872*426fc0e3SEduard Zingerman 	r1 = 0;						\
873*426fc0e3SEduard Zingerman 	*(u32*)(r10 - 4) = r1;				\
874*426fc0e3SEduard Zingerman 	r2 = r10;					\
875*426fc0e3SEduard Zingerman 	r2 += -4;					\
876*426fc0e3SEduard Zingerman 	r1 = %[map_sockhash] ll;			\
877*426fc0e3SEduard Zingerman 	call %[bpf_map_lookup_elem];			\
878*426fc0e3SEduard Zingerman 	if r0 != 0 goto l0_%=;				\
879*426fc0e3SEduard Zingerman 	exit;						\
880*426fc0e3SEduard Zingerman l0_%=:	r1 = r0;					\
881*426fc0e3SEduard Zingerman 	r0 = *(u32*)(r0 + %[bpf_sock_type]);		\
882*426fc0e3SEduard Zingerman 	call %[bpf_sk_release];				\
883*426fc0e3SEduard Zingerman 	exit;						\
884*426fc0e3SEduard Zingerman "	:
885*426fc0e3SEduard Zingerman 	: __imm(bpf_map_lookup_elem),
886*426fc0e3SEduard Zingerman 	  __imm(bpf_sk_release),
887*426fc0e3SEduard Zingerman 	  __imm_addr(map_sockhash),
888*426fc0e3SEduard Zingerman 	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
889*426fc0e3SEduard Zingerman 	: __clobber_all);
890*426fc0e3SEduard Zingerman }
891*426fc0e3SEduard Zingerman
/*
 * Positive test: bpf_sk_select_reuseport(ctx, map, &key, 0) with a
 * REUSEPORT_SOCKARRAY map is accepted for sk_reuseport programs.
 * r1 (ctx) is left untouched from program entry.
 */
892*426fc0e3SEduard Zingerman SEC("sk_reuseport")
893*426fc0e3SEduard Zingerman __description("bpf_sk_select_reuseport(ctx, reuseport_array, &key, flags)")
894*426fc0e3SEduard Zingerman __success
ctx_reuseport_array_key_flags(void)895*426fc0e3SEduard Zingerman __naked void ctx_reuseport_array_key_flags(void)
896*426fc0e3SEduard Zingerman {
897*426fc0e3SEduard Zingerman 	asm volatile ("					\
898*426fc0e3SEduard Zingerman 	r4 = 0;						\
899*426fc0e3SEduard Zingerman 	r2 = 0;						\
900*426fc0e3SEduard Zingerman 	*(u32*)(r10 - 4) = r2;				\
901*426fc0e3SEduard Zingerman 	r3 = r10;					\
902*426fc0e3SEduard Zingerman 	r3 += -4;					\
903*426fc0e3SEduard Zingerman 	r2 = %[map_reuseport_array] ll;			\
904*426fc0e3SEduard Zingerman 	call %[bpf_sk_select_reuseport];		\
905*426fc0e3SEduard Zingerman 	exit;						\
906*426fc0e3SEduard Zingerman "	:
907*426fc0e3SEduard Zingerman 	: __imm(bpf_sk_select_reuseport),
908*426fc0e3SEduard Zingerman 	  __imm_addr(map_reuseport_array)
909*426fc0e3SEduard Zingerman 	: __clobber_all);
910*426fc0e3SEduard Zingerman }
911*426fc0e3SEduard Zingerman
/*
 * Positive test: bpf_sk_select_reuseport(ctx, map, &key, 0) with a
 * SOCKMAP map is accepted for sk_reuseport programs.
 */
912*426fc0e3SEduard Zingerman SEC("sk_reuseport")
913*426fc0e3SEduard Zingerman __description("bpf_sk_select_reuseport(ctx, sockmap, &key, flags)")
914*426fc0e3SEduard Zingerman __success
reuseport_ctx_sockmap_key_flags(void)915*426fc0e3SEduard Zingerman __naked void reuseport_ctx_sockmap_key_flags(void)
916*426fc0e3SEduard Zingerman {
917*426fc0e3SEduard Zingerman 	asm volatile ("					\
918*426fc0e3SEduard Zingerman 	r4 = 0;						\
919*426fc0e3SEduard Zingerman 	r2 = 0;						\
920*426fc0e3SEduard Zingerman 	*(u32*)(r10 - 4) = r2;				\
921*426fc0e3SEduard Zingerman 	r3 = r10;					\
922*426fc0e3SEduard Zingerman 	r3 += -4;					\
923*426fc0e3SEduard Zingerman 	r2 = %[map_sockmap] ll;				\
924*426fc0e3SEduard Zingerman 	call %[bpf_sk_select_reuseport];		\
925*426fc0e3SEduard Zingerman 	exit;						\
926*426fc0e3SEduard Zingerman "	:
927*426fc0e3SEduard Zingerman 	: __imm(bpf_sk_select_reuseport),
928*426fc0e3SEduard Zingerman 	  __imm_addr(map_sockmap)
929*426fc0e3SEduard Zingerman 	: __clobber_all);
930*426fc0e3SEduard Zingerman }
931*426fc0e3SEduard Zingerman
/*
 * Positive test: bpf_sk_select_reuseport(ctx, map, &key, 0) with a
 * SOCKHASH map is accepted for sk_reuseport programs.
 *
 * Fix: the __description names sockhash, but the body passed
 * map_sockmap, duplicating the sockmap test above instead of covering
 * BPF_MAP_TYPE_SOCKHASH.  Use map_sockhash so the test exercises the
 * map type it claims to.
 */
932*426fc0e3SEduard Zingerman SEC("sk_reuseport")
933*426fc0e3SEduard Zingerman __description("bpf_sk_select_reuseport(ctx, sockhash, &key, flags)")
934*426fc0e3SEduard Zingerman __success
reuseport_ctx_sockhash_key_flags(void)935*426fc0e3SEduard Zingerman __naked void reuseport_ctx_sockhash_key_flags(void)
936*426fc0e3SEduard Zingerman {
937*426fc0e3SEduard Zingerman 	asm volatile ("					\
938*426fc0e3SEduard Zingerman 	r4 = 0;						\
939*426fc0e3SEduard Zingerman 	r2 = 0;						\
940*426fc0e3SEduard Zingerman 	*(u32*)(r10 - 4) = r2;				\
941*426fc0e3SEduard Zingerman 	r3 = r10;					\
942*426fc0e3SEduard Zingerman 	r3 += -4;					\
943*426fc0e3SEduard Zingerman 	r2 = %[map_sockhash] ll;			\
944*426fc0e3SEduard Zingerman 	call %[bpf_sk_select_reuseport];		\
945*426fc0e3SEduard Zingerman 	exit;						\
946*426fc0e3SEduard Zingerman "	:
947*426fc0e3SEduard Zingerman 	: __imm(bpf_sk_select_reuseport),
948*426fc0e3SEduard Zingerman 	  __imm_addr(map_sockhash)
949*426fc0e3SEduard Zingerman 	: __clobber_all);
950*426fc0e3SEduard Zingerman }
951*426fc0e3SEduard Zingerman
/*
 * Negative test: only the bpf_skc_to_tcp_request_sock() result (r8) is
 * NULL-checked; the bpf_skc_to_tcp_sock() result saved in r7 is
 * dereferenced without a check, so the verifier must reject with
 * "invalid mem access" — null marking of one helper's return value must
 * not carry over to the other's.
 */
952*426fc0e3SEduard Zingerman SEC("tc")
953*426fc0e3SEduard Zingerman __description("mark null check on return value of bpf_skc_to helpers")
954*426fc0e3SEduard Zingerman __failure __msg("invalid mem access")
of_bpf_skc_to_helpers(void)955*426fc0e3SEduard Zingerman __naked void of_bpf_skc_to_helpers(void)
956*426fc0e3SEduard Zingerman {
957*426fc0e3SEduard Zingerman 	asm volatile ("					\
958*426fc0e3SEduard Zingerman 	r1 = *(u64*)(r1 + %[__sk_buff_sk]);		\
959*426fc0e3SEduard Zingerman 	if r1 != 0 goto l0_%=;				\
960*426fc0e3SEduard Zingerman 	r0 = 0;						\
961*426fc0e3SEduard Zingerman 	exit;						\
962*426fc0e3SEduard Zingerman l0_%=:	r6 = r1;					\
963*426fc0e3SEduard Zingerman 	call %[bpf_skc_to_tcp_sock];			\
964*426fc0e3SEduard Zingerman 	r7 = r0;					\
965*426fc0e3SEduard Zingerman 	r1 = r6;					\
966*426fc0e3SEduard Zingerman 	call %[bpf_skc_to_tcp_request_sock];		\
967*426fc0e3SEduard Zingerman 	r8 = r0;					\
968*426fc0e3SEduard Zingerman 	if r8 != 0 goto l1_%=;				\
969*426fc0e3SEduard Zingerman 	r0 = 0;						\
970*426fc0e3SEduard Zingerman 	exit;						\
971*426fc0e3SEduard Zingerman l1_%=:	r0 = *(u8*)(r7 + 0);				\
972*426fc0e3SEduard Zingerman 	exit;						\
973*426fc0e3SEduard Zingerman "	:
974*426fc0e3SEduard Zingerman 	: __imm(bpf_skc_to_tcp_request_sock),
975*426fc0e3SEduard Zingerman 	  __imm(bpf_skc_to_tcp_sock),
976*426fc0e3SEduard Zingerman 	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk))
977*426fc0e3SEduard Zingerman 	: __clobber_all);
978*426fc0e3SEduard Zingerman }
979*426fc0e3SEduard Zingerman
/* License string placed in the "license" section, checked at load time. */
980*426fc0e3SEduard Zingerman char _license[] SEC("license") = "GPL";
981