// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/jeq_infer_not_null.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
7*a5828e31SEduard Zingerman
/* Single-entry XSKMAP; used by the PTR_TO_MAP_VALUE test at the bottom
 * of this file as a lookup target for bpf_map_lookup_elem().
 */
struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} map_xskmap SEC(".maps");
14*a5828e31SEduard Zingerman
/* This is equivalent to the following program:
 *
 * r6 = skb->sk;
 * r7 = sk_fullsock(r6);
 * r0 = sk_fullsock(r6);
 * if (r0 == 0) return 0;    (a)
 * if (r0 != r7) return 0;   (b)
 * *r7->type;                (c)
 * return 0;
 *
 * It is safe to dereference r7 at point (c), because of (a) and (b).
 * The test verifies that relation r0 == r7 is propagated from (b) to (c).
 *
 * Privileged: expected to load and return 0 (__success/__retval).
 * Unprivileged: expected to be rejected with "R7 pointer comparison",
 * since unprivileged programs may not compare pointers (__msg_unpriv).
 */
SEC("cgroup/skb")
__description("jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET for JNE false branch")
__success __failure_unpriv __msg_unpriv("R7 pointer comparison")
__retval(0)
__naked void socket_for_jne_false_branch(void)
{
	asm volatile ("					\
	/* r6 = skb->sk; */				\
	r6 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	/* if (r6 == 0) return 0; */			\
	if r6 == 0 goto l0_%=;				\
	/* r7 = sk_fullsock(skb); */			\
	r1 = r6;					\
	call %[bpf_sk_fullsock];			\
	r7 = r0;					\
	/* r0 = sk_fullsock(skb); */			\
	r1 = r6;					\
	call %[bpf_sk_fullsock];			\
	/* if (r0 == null) return 0; */			\
	if r0 == 0 goto l0_%=;				\
	/* if (r0 == r7) r0 = *(r7->type); */		\
	if r0 != r7 goto l0_%=;	/* Use ! JNE ! */	\
	r0 = *(u32*)(r7 + %[bpf_sock_type]);		\
l0_%=:	/* return 0 */					\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}
60*a5828e31SEduard Zingerman
/* Same as above, but verify that the other (taken) branch of JNE still
 * prohibits access to PTR_MAYBE_NULL: when "if r0 != r7" is taken,
 * nothing is learned about r7, so it remains sock_or_null and the
 * dereference at l1 must be rejected even for privileged load
 * (__failure with "R7 invalid mem access 'sock_or_null'").
 */
SEC("cgroup/skb")
__description("jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL unchanged for JNE true branch")
__failure __msg("R7 invalid mem access 'sock_or_null'")
__failure_unpriv __msg_unpriv("R7 pointer comparison")
__naked void unchanged_for_jne_true_branch(void)
{
	asm volatile ("					\
	/* r6 = skb->sk */				\
	r6 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	/* if (r6 == 0) return 0; */			\
	if r6 == 0 goto l0_%=;				\
	/* r7 = sk_fullsock(skb); */			\
	r1 = r6;					\
	call %[bpf_sk_fullsock];			\
	r7 = r0;					\
	/* r0 = sk_fullsock(skb); */			\
	r1 = r6;					\
	call %[bpf_sk_fullsock];			\
	/* if (r0 == null) return 0; */			\
	if r0 != 0 goto l0_%=;				\
	/* if (r0 == r7) return 0; */			\
	if r0 != r7 goto l1_%=;	/* Use ! JNE ! */	\
	goto l0_%=;					\
l1_%=:	/* r0 = *(r7->type); */				\
	r0 = *(u32*)(r7 + %[bpf_sock_type]);		\
l0_%=:	/* return 0 */					\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}
98*a5828e31SEduard Zingerman
/* Same as the first test, but "not null" should be inferred for the
 * taken (true) branch of JEQ: after "if r0 == r7" is taken with r0
 * known non-null, r7 must be upgraded to PTR_TO_SOCKET so the
 * dereference at l1 is accepted.
 */
SEC("cgroup/skb")
__description("jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL -> PTR_TO_SOCKET for JEQ true branch")
__success __failure_unpriv __msg_unpriv("R7 pointer comparison")
__retval(0)
__naked void socket_for_jeq_true_branch(void)
{
	asm volatile ("					\
	/* r6 = skb->sk; */				\
	r6 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	/* if (r6 == null) return 0; */			\
	if r6 == 0 goto l0_%=;				\
	/* r7 = sk_fullsock(skb); */			\
	r1 = r6;					\
	call %[bpf_sk_fullsock];			\
	r7 = r0;					\
	/* r0 = sk_fullsock(skb); */			\
	r1 = r6;					\
	call %[bpf_sk_fullsock];			\
	/* if (r0 == null) return 0; */			\
	if r0 == 0 goto l0_%=;				\
	/* if (r0 != r7) return 0; */			\
	if r0 == r7 goto l1_%=;	/* Use ! JEQ ! */	\
	goto l0_%=;					\
l1_%=:	/* r0 = *(r7->type); */				\
	r0 = *(u32*)(r7 + %[bpf_sock_type]);		\
l0_%=:	/* return 0; */					\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}
134*a5828e31SEduard Zingerman
/* Same as above, but verify that the other (fall-through) branch of JEQ
 * still prohibits access to PTR_MAYBE_NULL: when "if r0 == r7" is not
 * taken, nothing is learned about r7, so the dereference must be
 * rejected (__failure with "R7 invalid mem access 'sock_or_null'").
 */
SEC("cgroup/skb")
__description("jne/jeq infer not null, PTR_TO_SOCKET_OR_NULL unchanged for JEQ false branch")
__failure __msg("R7 invalid mem access 'sock_or_null'")
__failure_unpriv __msg_unpriv("R7 pointer comparison")
__naked void unchanged_for_jeq_false_branch(void)
{
	asm volatile ("					\
	/* r6 = skb->sk; */				\
	r6 = *(u64*)(r1 + %[__sk_buff_sk]);		\
	/* if (r6 == null) return 0; */			\
	if r6 == 0 goto l0_%=;				\
	/* r7 = sk_fullsock(skb); */			\
	r1 = r6;					\
	call %[bpf_sk_fullsock];			\
	r7 = r0;					\
	/* r0 = sk_fullsock(skb); */			\
	r1 = r6;					\
	call %[bpf_sk_fullsock];			\
	/* if (r0 == null) return 0; */			\
	if r0 == 0 goto l0_%=;				\
	/* if (r0 != r7) r0 = *(r7->type); */		\
	if r0 == r7 goto l0_%=;	/* Use ! JEQ ! */	\
	r0 = *(u32*)(r7 + %[bpf_sock_type]);		\
l0_%=:	/* return 0; */					\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_fullsock),
	  __imm_const(__sk_buff_sk, offsetof(struct __sk_buff, sk)),
	  __imm_const(bpf_sock_type, offsetof(struct bpf_sock, type))
	: __clobber_all);
}
170*a5828e31SEduard Zingerman
/* Maps are treated in a different branch of `mark_ptr_not_null_reg`,
 * so separate test for maps case: two lookups of the same key give two
 * PTR_TO_MAP_VALUE_OR_NULL regs; after r6 is proven non-null and
 * r6 == r7 is established, r7 must also become PTR_TO_MAP_VALUE.
 */
SEC("xdp")
__description("jne/jeq infer not null, PTR_TO_MAP_VALUE_OR_NULL -> PTR_TO_MAP_VALUE")
__success __retval(0)
__naked void null_ptr_to_map_value(void)
{
	asm volatile ("					\
	/* r9 = &some stack to use as key */		\
	r1 = 0;						\
	*(u32*)(r10 - 8) = r1;				\
	r9 = r10;					\
	r9 += -8;					\
	/* r8 = process local map */			\
	r8 = %[map_xskmap] ll;				\
	/* r6 = map_lookup_elem(r8, r9); */		\
	r1 = r8;					\
	r2 = r9;					\
	call %[bpf_map_lookup_elem];			\
	r6 = r0;					\
	/* r7 = map_lookup_elem(r8, r9); */		\
	r1 = r8;					\
	r2 = r9;					\
	call %[bpf_map_lookup_elem];			\
	r7 = r0;					\
	/* if (r6 == 0) return 0; */			\
	if r6 == 0 goto l0_%=;				\
	/* if (r6 != r7) return 0; */			\
	if r6 != r7 goto l0_%=;				\
	/* read *r7; */					\
	r0 = *(u32*)(r7 + %[bpf_xdp_sock_queue_id]);	\
l0_%=:	/* return 0; */					\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_xskmap),
	  __imm_const(bpf_xdp_sock_queue_id, offsetof(struct bpf_xdp_sock, queue_id))
	: __clobber_all);
}
212*a5828e31SEduard Zingerman
/* License string, placed in the "license" ELF section via SEC(). */
char _license[] SEC("license") = "GPL";
214