// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/direct_packet_access.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
8*0a372c9cSEduard Zingerman SEC("tc")
9*0a372c9cSEduard Zingerman __description("pkt_end - pkt_start is allowed")
__retval(TEST_DATA_LEN)10*0a372c9cSEduard Zingerman __success __retval(TEST_DATA_LEN)
11*0a372c9cSEduard Zingerman __naked void end_pkt_start_is_allowed(void)
12*0a372c9cSEduard Zingerman {
13*0a372c9cSEduard Zingerman asm volatile (" \
14*0a372c9cSEduard Zingerman r0 = *(u32*)(r1 + %[__sk_buff_data_end]); \
15*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_data]); \
16*0a372c9cSEduard Zingerman r0 -= r2; \
17*0a372c9cSEduard Zingerman exit; \
18*0a372c9cSEduard Zingerman " :
19*0a372c9cSEduard Zingerman : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
20*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
21*0a372c9cSEduard Zingerman : __clobber_all);
22*0a372c9cSEduard Zingerman }
23*0a372c9cSEduard Zingerman
24*0a372c9cSEduard Zingerman SEC("tc")
25*0a372c9cSEduard Zingerman __description("direct packet access: test1")
26*0a372c9cSEduard Zingerman __success __retval(0)
direct_packet_access_test1(void)27*0a372c9cSEduard Zingerman __naked void direct_packet_access_test1(void)
28*0a372c9cSEduard Zingerman {
29*0a372c9cSEduard Zingerman asm volatile (" \
30*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_data]); \
31*0a372c9cSEduard Zingerman r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
32*0a372c9cSEduard Zingerman r0 = r2; \
33*0a372c9cSEduard Zingerman r0 += 8; \
34*0a372c9cSEduard Zingerman if r0 > r3 goto l0_%=; \
35*0a372c9cSEduard Zingerman r0 = *(u8*)(r2 + 0); \
36*0a372c9cSEduard Zingerman l0_%=: r0 = 0; \
37*0a372c9cSEduard Zingerman exit; \
38*0a372c9cSEduard Zingerman " :
39*0a372c9cSEduard Zingerman : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
40*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
41*0a372c9cSEduard Zingerman : __clobber_all);
42*0a372c9cSEduard Zingerman }
43*0a372c9cSEduard Zingerman
44*0a372c9cSEduard Zingerman SEC("tc")
45*0a372c9cSEduard Zingerman __description("direct packet access: test2")
46*0a372c9cSEduard Zingerman __success __retval(0)
direct_packet_access_test2(void)47*0a372c9cSEduard Zingerman __naked void direct_packet_access_test2(void)
48*0a372c9cSEduard Zingerman {
49*0a372c9cSEduard Zingerman asm volatile (" \
50*0a372c9cSEduard Zingerman r0 = 1; \
51*0a372c9cSEduard Zingerman r4 = *(u32*)(r1 + %[__sk_buff_data_end]); \
52*0a372c9cSEduard Zingerman r3 = *(u32*)(r1 + %[__sk_buff_data]); \
53*0a372c9cSEduard Zingerman r5 = r3; \
54*0a372c9cSEduard Zingerman r5 += 14; \
55*0a372c9cSEduard Zingerman if r5 > r4 goto l0_%=; \
56*0a372c9cSEduard Zingerman r0 = *(u8*)(r3 + 7); \
57*0a372c9cSEduard Zingerman r4 = *(u8*)(r3 + 12); \
58*0a372c9cSEduard Zingerman r4 *= 14; \
59*0a372c9cSEduard Zingerman r3 = *(u32*)(r1 + %[__sk_buff_data]); \
60*0a372c9cSEduard Zingerman r3 += r4; \
61*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_len]); \
62*0a372c9cSEduard Zingerman r2 <<= 49; \
63*0a372c9cSEduard Zingerman r2 >>= 49; \
64*0a372c9cSEduard Zingerman r3 += r2; \
65*0a372c9cSEduard Zingerman r2 = r3; \
66*0a372c9cSEduard Zingerman r2 += 8; \
67*0a372c9cSEduard Zingerman r1 = *(u32*)(r1 + %[__sk_buff_data_end]); \
68*0a372c9cSEduard Zingerman if r2 > r1 goto l1_%=; \
69*0a372c9cSEduard Zingerman r1 = *(u8*)(r3 + 4); \
70*0a372c9cSEduard Zingerman l1_%=: r0 = 0; \
71*0a372c9cSEduard Zingerman l0_%=: exit; \
72*0a372c9cSEduard Zingerman " :
73*0a372c9cSEduard Zingerman : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
74*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
75*0a372c9cSEduard Zingerman __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
76*0a372c9cSEduard Zingerman : __clobber_all);
77*0a372c9cSEduard Zingerman }
78*0a372c9cSEduard Zingerman
79*0a372c9cSEduard Zingerman SEC("socket")
80*0a372c9cSEduard Zingerman __description("direct packet access: test3")
81*0a372c9cSEduard Zingerman __failure __msg("invalid bpf_context access off=76")
82*0a372c9cSEduard Zingerman __failure_unpriv
direct_packet_access_test3(void)83*0a372c9cSEduard Zingerman __naked void direct_packet_access_test3(void)
84*0a372c9cSEduard Zingerman {
85*0a372c9cSEduard Zingerman asm volatile (" \
86*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_data]); \
87*0a372c9cSEduard Zingerman r0 = 0; \
88*0a372c9cSEduard Zingerman exit; \
89*0a372c9cSEduard Zingerman " :
90*0a372c9cSEduard Zingerman : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data))
91*0a372c9cSEduard Zingerman : __clobber_all);
92*0a372c9cSEduard Zingerman }
93*0a372c9cSEduard Zingerman
94*0a372c9cSEduard Zingerman SEC("tc")
95*0a372c9cSEduard Zingerman __description("direct packet access: test4 (write)")
96*0a372c9cSEduard Zingerman __success __retval(0)
direct_packet_access_test4_write(void)97*0a372c9cSEduard Zingerman __naked void direct_packet_access_test4_write(void)
98*0a372c9cSEduard Zingerman {
99*0a372c9cSEduard Zingerman asm volatile (" \
100*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_data]); \
101*0a372c9cSEduard Zingerman r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
102*0a372c9cSEduard Zingerman r0 = r2; \
103*0a372c9cSEduard Zingerman r0 += 8; \
104*0a372c9cSEduard Zingerman if r0 > r3 goto l0_%=; \
105*0a372c9cSEduard Zingerman *(u8*)(r2 + 0) = r2; \
106*0a372c9cSEduard Zingerman l0_%=: r0 = 0; \
107*0a372c9cSEduard Zingerman exit; \
108*0a372c9cSEduard Zingerman " :
109*0a372c9cSEduard Zingerman : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
110*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
111*0a372c9cSEduard Zingerman : __clobber_all);
112*0a372c9cSEduard Zingerman }
113*0a372c9cSEduard Zingerman
114*0a372c9cSEduard Zingerman SEC("tc")
115*0a372c9cSEduard Zingerman __description("direct packet access: test5 (pkt_end >= reg, good access)")
116*0a372c9cSEduard Zingerman __success __retval(0)
pkt_end_reg_good_access(void)117*0a372c9cSEduard Zingerman __naked void pkt_end_reg_good_access(void)
118*0a372c9cSEduard Zingerman {
119*0a372c9cSEduard Zingerman asm volatile (" \
120*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_data]); \
121*0a372c9cSEduard Zingerman r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
122*0a372c9cSEduard Zingerman r0 = r2; \
123*0a372c9cSEduard Zingerman r0 += 8; \
124*0a372c9cSEduard Zingerman if r3 >= r0 goto l0_%=; \
125*0a372c9cSEduard Zingerman r0 = 1; \
126*0a372c9cSEduard Zingerman exit; \
127*0a372c9cSEduard Zingerman l0_%=: r0 = *(u8*)(r2 + 0); \
128*0a372c9cSEduard Zingerman r0 = 0; \
129*0a372c9cSEduard Zingerman exit; \
130*0a372c9cSEduard Zingerman " :
131*0a372c9cSEduard Zingerman : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
132*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
133*0a372c9cSEduard Zingerman : __clobber_all);
134*0a372c9cSEduard Zingerman }
135*0a372c9cSEduard Zingerman
136*0a372c9cSEduard Zingerman SEC("tc")
137*0a372c9cSEduard Zingerman __description("direct packet access: test6 (pkt_end >= reg, bad access)")
138*0a372c9cSEduard Zingerman __failure __msg("invalid access to packet")
pkt_end_reg_bad_access(void)139*0a372c9cSEduard Zingerman __naked void pkt_end_reg_bad_access(void)
140*0a372c9cSEduard Zingerman {
141*0a372c9cSEduard Zingerman asm volatile (" \
142*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_data]); \
143*0a372c9cSEduard Zingerman r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
144*0a372c9cSEduard Zingerman r0 = r2; \
145*0a372c9cSEduard Zingerman r0 += 8; \
146*0a372c9cSEduard Zingerman if r3 >= r0 goto l0_%=; \
147*0a372c9cSEduard Zingerman r0 = *(u8*)(r2 + 0); \
148*0a372c9cSEduard Zingerman r0 = 1; \
149*0a372c9cSEduard Zingerman exit; \
150*0a372c9cSEduard Zingerman l0_%=: r0 = 0; \
151*0a372c9cSEduard Zingerman exit; \
152*0a372c9cSEduard Zingerman " :
153*0a372c9cSEduard Zingerman : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
154*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
155*0a372c9cSEduard Zingerman : __clobber_all);
156*0a372c9cSEduard Zingerman }
157*0a372c9cSEduard Zingerman
158*0a372c9cSEduard Zingerman SEC("tc")
159*0a372c9cSEduard Zingerman __description("direct packet access: test7 (pkt_end >= reg, both accesses)")
160*0a372c9cSEduard Zingerman __failure __msg("invalid access to packet")
pkt_end_reg_both_accesses(void)161*0a372c9cSEduard Zingerman __naked void pkt_end_reg_both_accesses(void)
162*0a372c9cSEduard Zingerman {
163*0a372c9cSEduard Zingerman asm volatile (" \
164*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_data]); \
165*0a372c9cSEduard Zingerman r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
166*0a372c9cSEduard Zingerman r0 = r2; \
167*0a372c9cSEduard Zingerman r0 += 8; \
168*0a372c9cSEduard Zingerman if r3 >= r0 goto l0_%=; \
169*0a372c9cSEduard Zingerman r0 = *(u8*)(r2 + 0); \
170*0a372c9cSEduard Zingerman r0 = 1; \
171*0a372c9cSEduard Zingerman exit; \
172*0a372c9cSEduard Zingerman l0_%=: r0 = *(u8*)(r2 + 0); \
173*0a372c9cSEduard Zingerman r0 = 0; \
174*0a372c9cSEduard Zingerman exit; \
175*0a372c9cSEduard Zingerman " :
176*0a372c9cSEduard Zingerman : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
177*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
178*0a372c9cSEduard Zingerman : __clobber_all);
179*0a372c9cSEduard Zingerman }
180*0a372c9cSEduard Zingerman
181*0a372c9cSEduard Zingerman SEC("tc")
182*0a372c9cSEduard Zingerman __description("direct packet access: test8 (double test, variant 1)")
183*0a372c9cSEduard Zingerman __success __retval(0)
test8_double_test_variant_1(void)184*0a372c9cSEduard Zingerman __naked void test8_double_test_variant_1(void)
185*0a372c9cSEduard Zingerman {
186*0a372c9cSEduard Zingerman asm volatile (" \
187*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_data]); \
188*0a372c9cSEduard Zingerman r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
189*0a372c9cSEduard Zingerman r0 = r2; \
190*0a372c9cSEduard Zingerman r0 += 8; \
191*0a372c9cSEduard Zingerman if r3 >= r0 goto l0_%=; \
192*0a372c9cSEduard Zingerman if r0 > r3 goto l1_%=; \
193*0a372c9cSEduard Zingerman r0 = *(u8*)(r2 + 0); \
194*0a372c9cSEduard Zingerman l1_%=: r0 = 1; \
195*0a372c9cSEduard Zingerman exit; \
196*0a372c9cSEduard Zingerman l0_%=: r0 = *(u8*)(r2 + 0); \
197*0a372c9cSEduard Zingerman r0 = 0; \
198*0a372c9cSEduard Zingerman exit; \
199*0a372c9cSEduard Zingerman " :
200*0a372c9cSEduard Zingerman : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
201*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
202*0a372c9cSEduard Zingerman : __clobber_all);
203*0a372c9cSEduard Zingerman }
204*0a372c9cSEduard Zingerman
205*0a372c9cSEduard Zingerman SEC("tc")
206*0a372c9cSEduard Zingerman __description("direct packet access: test9 (double test, variant 2)")
207*0a372c9cSEduard Zingerman __success __retval(0)
test9_double_test_variant_2(void)208*0a372c9cSEduard Zingerman __naked void test9_double_test_variant_2(void)
209*0a372c9cSEduard Zingerman {
210*0a372c9cSEduard Zingerman asm volatile (" \
211*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_data]); \
212*0a372c9cSEduard Zingerman r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
213*0a372c9cSEduard Zingerman r0 = r2; \
214*0a372c9cSEduard Zingerman r0 += 8; \
215*0a372c9cSEduard Zingerman if r3 >= r0 goto l0_%=; \
216*0a372c9cSEduard Zingerman r0 = 1; \
217*0a372c9cSEduard Zingerman exit; \
218*0a372c9cSEduard Zingerman l0_%=: if r0 > r3 goto l1_%=; \
219*0a372c9cSEduard Zingerman r0 = *(u8*)(r2 + 0); \
220*0a372c9cSEduard Zingerman l1_%=: r0 = *(u8*)(r2 + 0); \
221*0a372c9cSEduard Zingerman r0 = 0; \
222*0a372c9cSEduard Zingerman exit; \
223*0a372c9cSEduard Zingerman " :
224*0a372c9cSEduard Zingerman : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
225*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
226*0a372c9cSEduard Zingerman : __clobber_all);
227*0a372c9cSEduard Zingerman }
228*0a372c9cSEduard Zingerman
229*0a372c9cSEduard Zingerman SEC("tc")
230*0a372c9cSEduard Zingerman __description("direct packet access: test10 (write invalid)")
231*0a372c9cSEduard Zingerman __failure __msg("invalid access to packet")
packet_access_test10_write_invalid(void)232*0a372c9cSEduard Zingerman __naked void packet_access_test10_write_invalid(void)
233*0a372c9cSEduard Zingerman {
234*0a372c9cSEduard Zingerman asm volatile (" \
235*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_data]); \
236*0a372c9cSEduard Zingerman r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
237*0a372c9cSEduard Zingerman r0 = r2; \
238*0a372c9cSEduard Zingerman r0 += 8; \
239*0a372c9cSEduard Zingerman if r0 > r3 goto l0_%=; \
240*0a372c9cSEduard Zingerman r0 = 0; \
241*0a372c9cSEduard Zingerman exit; \
242*0a372c9cSEduard Zingerman l0_%=: *(u8*)(r2 + 0) = r2; \
243*0a372c9cSEduard Zingerman r0 = 0; \
244*0a372c9cSEduard Zingerman exit; \
245*0a372c9cSEduard Zingerman " :
246*0a372c9cSEduard Zingerman : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
247*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
248*0a372c9cSEduard Zingerman : __clobber_all);
249*0a372c9cSEduard Zingerman }
250*0a372c9cSEduard Zingerman
251*0a372c9cSEduard Zingerman SEC("tc")
252*0a372c9cSEduard Zingerman __description("direct packet access: test11 (shift, good access)")
253*0a372c9cSEduard Zingerman __success __retval(1)
access_test11_shift_good_access(void)254*0a372c9cSEduard Zingerman __naked void access_test11_shift_good_access(void)
255*0a372c9cSEduard Zingerman {
256*0a372c9cSEduard Zingerman asm volatile (" \
257*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_data]); \
258*0a372c9cSEduard Zingerman r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
259*0a372c9cSEduard Zingerman r0 = r2; \
260*0a372c9cSEduard Zingerman r0 += 22; \
261*0a372c9cSEduard Zingerman if r0 > r3 goto l0_%=; \
262*0a372c9cSEduard Zingerman r3 = 144; \
263*0a372c9cSEduard Zingerman r5 = r3; \
264*0a372c9cSEduard Zingerman r5 += 23; \
265*0a372c9cSEduard Zingerman r5 >>= 3; \
266*0a372c9cSEduard Zingerman r6 = r2; \
267*0a372c9cSEduard Zingerman r6 += r5; \
268*0a372c9cSEduard Zingerman r0 = 1; \
269*0a372c9cSEduard Zingerman exit; \
270*0a372c9cSEduard Zingerman l0_%=: r0 = 0; \
271*0a372c9cSEduard Zingerman exit; \
272*0a372c9cSEduard Zingerman " :
273*0a372c9cSEduard Zingerman : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
274*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
275*0a372c9cSEduard Zingerman : __clobber_all);
276*0a372c9cSEduard Zingerman }
277*0a372c9cSEduard Zingerman
278*0a372c9cSEduard Zingerman SEC("tc")
279*0a372c9cSEduard Zingerman __description("direct packet access: test12 (and, good access)")
280*0a372c9cSEduard Zingerman __success __retval(1)
access_test12_and_good_access(void)281*0a372c9cSEduard Zingerman __naked void access_test12_and_good_access(void)
282*0a372c9cSEduard Zingerman {
283*0a372c9cSEduard Zingerman asm volatile (" \
284*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_data]); \
285*0a372c9cSEduard Zingerman r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
286*0a372c9cSEduard Zingerman r0 = r2; \
287*0a372c9cSEduard Zingerman r0 += 22; \
288*0a372c9cSEduard Zingerman if r0 > r3 goto l0_%=; \
289*0a372c9cSEduard Zingerman r3 = 144; \
290*0a372c9cSEduard Zingerman r5 = r3; \
291*0a372c9cSEduard Zingerman r5 += 23; \
292*0a372c9cSEduard Zingerman r5 &= 15; \
293*0a372c9cSEduard Zingerman r6 = r2; \
294*0a372c9cSEduard Zingerman r6 += r5; \
295*0a372c9cSEduard Zingerman r0 = 1; \
296*0a372c9cSEduard Zingerman exit; \
297*0a372c9cSEduard Zingerman l0_%=: r0 = 0; \
298*0a372c9cSEduard Zingerman exit; \
299*0a372c9cSEduard Zingerman " :
300*0a372c9cSEduard Zingerman : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
301*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
302*0a372c9cSEduard Zingerman : __clobber_all);
303*0a372c9cSEduard Zingerman }
304*0a372c9cSEduard Zingerman
305*0a372c9cSEduard Zingerman SEC("tc")
306*0a372c9cSEduard Zingerman __description("direct packet access: test13 (branches, good access)")
307*0a372c9cSEduard Zingerman __success __retval(1)
access_test13_branches_good_access(void)308*0a372c9cSEduard Zingerman __naked void access_test13_branches_good_access(void)
309*0a372c9cSEduard Zingerman {
310*0a372c9cSEduard Zingerman asm volatile (" \
311*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_data]); \
312*0a372c9cSEduard Zingerman r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
313*0a372c9cSEduard Zingerman r0 = r2; \
314*0a372c9cSEduard Zingerman r0 += 22; \
315*0a372c9cSEduard Zingerman if r0 > r3 goto l0_%=; \
316*0a372c9cSEduard Zingerman r3 = *(u32*)(r1 + %[__sk_buff_mark]); \
317*0a372c9cSEduard Zingerman r4 = 1; \
318*0a372c9cSEduard Zingerman if r3 > r4 goto l1_%=; \
319*0a372c9cSEduard Zingerman r3 = 14; \
320*0a372c9cSEduard Zingerman goto l2_%=; \
321*0a372c9cSEduard Zingerman l1_%=: r3 = 24; \
322*0a372c9cSEduard Zingerman l2_%=: r5 = r3; \
323*0a372c9cSEduard Zingerman r5 += 23; \
324*0a372c9cSEduard Zingerman r5 &= 15; \
325*0a372c9cSEduard Zingerman r6 = r2; \
326*0a372c9cSEduard Zingerman r6 += r5; \
327*0a372c9cSEduard Zingerman r0 = 1; \
328*0a372c9cSEduard Zingerman exit; \
329*0a372c9cSEduard Zingerman l0_%=: r0 = 0; \
330*0a372c9cSEduard Zingerman exit; \
331*0a372c9cSEduard Zingerman " :
332*0a372c9cSEduard Zingerman : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
333*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
334*0a372c9cSEduard Zingerman __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
335*0a372c9cSEduard Zingerman : __clobber_all);
336*0a372c9cSEduard Zingerman }
337*0a372c9cSEduard Zingerman
338*0a372c9cSEduard Zingerman SEC("tc")
339*0a372c9cSEduard Zingerman __description("direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)")
340*0a372c9cSEduard Zingerman __success __retval(1)
_0_const_imm_good_access(void)341*0a372c9cSEduard Zingerman __naked void _0_const_imm_good_access(void)
342*0a372c9cSEduard Zingerman {
343*0a372c9cSEduard Zingerman asm volatile (" \
344*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_data]); \
345*0a372c9cSEduard Zingerman r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
346*0a372c9cSEduard Zingerman r0 = r2; \
347*0a372c9cSEduard Zingerman r0 += 22; \
348*0a372c9cSEduard Zingerman if r0 > r3 goto l0_%=; \
349*0a372c9cSEduard Zingerman r5 = 12; \
350*0a372c9cSEduard Zingerman r5 >>= 4; \
351*0a372c9cSEduard Zingerman r6 = r2; \
352*0a372c9cSEduard Zingerman r6 += r5; \
353*0a372c9cSEduard Zingerman r0 = *(u8*)(r6 + 0); \
354*0a372c9cSEduard Zingerman r0 = 1; \
355*0a372c9cSEduard Zingerman exit; \
356*0a372c9cSEduard Zingerman l0_%=: r0 = 0; \
357*0a372c9cSEduard Zingerman exit; \
358*0a372c9cSEduard Zingerman " :
359*0a372c9cSEduard Zingerman : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
360*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
361*0a372c9cSEduard Zingerman : __clobber_all);
362*0a372c9cSEduard Zingerman }
363*0a372c9cSEduard Zingerman
364*0a372c9cSEduard Zingerman SEC("tc")
365*0a372c9cSEduard Zingerman __description("direct packet access: test15 (spill with xadd)")
366*0a372c9cSEduard Zingerman __failure __msg("R2 invalid mem access 'scalar'")
__flag(BPF_F_ANY_ALIGNMENT)367*0a372c9cSEduard Zingerman __flag(BPF_F_ANY_ALIGNMENT)
368*0a372c9cSEduard Zingerman __naked void access_test15_spill_with_xadd(void)
369*0a372c9cSEduard Zingerman {
370*0a372c9cSEduard Zingerman asm volatile (" \
371*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_data]); \
372*0a372c9cSEduard Zingerman r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
373*0a372c9cSEduard Zingerman r0 = r2; \
374*0a372c9cSEduard Zingerman r0 += 8; \
375*0a372c9cSEduard Zingerman if r0 > r3 goto l0_%=; \
376*0a372c9cSEduard Zingerman r5 = 4096; \
377*0a372c9cSEduard Zingerman r4 = r10; \
378*0a372c9cSEduard Zingerman r4 += -8; \
379*0a372c9cSEduard Zingerman *(u64*)(r4 + 0) = r2; \
380*0a372c9cSEduard Zingerman lock *(u64 *)(r4 + 0) += r5; \
381*0a372c9cSEduard Zingerman r2 = *(u64*)(r4 + 0); \
382*0a372c9cSEduard Zingerman *(u32*)(r2 + 0) = r5; \
383*0a372c9cSEduard Zingerman r0 = 0; \
384*0a372c9cSEduard Zingerman l0_%=: exit; \
385*0a372c9cSEduard Zingerman " :
386*0a372c9cSEduard Zingerman : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
387*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
388*0a372c9cSEduard Zingerman : __clobber_all);
389*0a372c9cSEduard Zingerman }
390*0a372c9cSEduard Zingerman
391*0a372c9cSEduard Zingerman SEC("tc")
392*0a372c9cSEduard Zingerman __description("direct packet access: test16 (arith on data_end)")
393*0a372c9cSEduard Zingerman __failure __msg("R3 pointer arithmetic on pkt_end")
test16_arith_on_data_end(void)394*0a372c9cSEduard Zingerman __naked void test16_arith_on_data_end(void)
395*0a372c9cSEduard Zingerman {
396*0a372c9cSEduard Zingerman asm volatile (" \
397*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_data]); \
398*0a372c9cSEduard Zingerman r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
399*0a372c9cSEduard Zingerman r0 = r2; \
400*0a372c9cSEduard Zingerman r0 += 8; \
401*0a372c9cSEduard Zingerman r3 += 16; \
402*0a372c9cSEduard Zingerman if r0 > r3 goto l0_%=; \
403*0a372c9cSEduard Zingerman *(u8*)(r2 + 0) = r2; \
404*0a372c9cSEduard Zingerman l0_%=: r0 = 0; \
405*0a372c9cSEduard Zingerman exit; \
406*0a372c9cSEduard Zingerman " :
407*0a372c9cSEduard Zingerman : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
408*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
409*0a372c9cSEduard Zingerman : __clobber_all);
410*0a372c9cSEduard Zingerman }
411*0a372c9cSEduard Zingerman
412*0a372c9cSEduard Zingerman SEC("tc")
413*0a372c9cSEduard Zingerman __description("direct packet access: test17 (pruning, alignment)")
414*0a372c9cSEduard Zingerman __failure __msg("misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4")
__flag(BPF_F_STRICT_ALIGNMENT)415*0a372c9cSEduard Zingerman __flag(BPF_F_STRICT_ALIGNMENT)
416*0a372c9cSEduard Zingerman __naked void packet_access_test17_pruning_alignment(void)
417*0a372c9cSEduard Zingerman {
418*0a372c9cSEduard Zingerman asm volatile (" \
419*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_data]); \
420*0a372c9cSEduard Zingerman r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
421*0a372c9cSEduard Zingerman r7 = *(u32*)(r1 + %[__sk_buff_mark]); \
422*0a372c9cSEduard Zingerman r0 = r2; \
423*0a372c9cSEduard Zingerman r0 += 14; \
424*0a372c9cSEduard Zingerman if r7 > 1 goto l0_%=; \
425*0a372c9cSEduard Zingerman l2_%=: if r0 > r3 goto l1_%=; \
426*0a372c9cSEduard Zingerman *(u32*)(r0 - 4) = r0; \
427*0a372c9cSEduard Zingerman l1_%=: r0 = 0; \
428*0a372c9cSEduard Zingerman exit; \
429*0a372c9cSEduard Zingerman l0_%=: r0 += 1; \
430*0a372c9cSEduard Zingerman goto l2_%=; \
431*0a372c9cSEduard Zingerman " :
432*0a372c9cSEduard Zingerman : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
433*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
434*0a372c9cSEduard Zingerman __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
435*0a372c9cSEduard Zingerman : __clobber_all);
436*0a372c9cSEduard Zingerman }
437*0a372c9cSEduard Zingerman
438*0a372c9cSEduard Zingerman SEC("tc")
439*0a372c9cSEduard Zingerman __description("direct packet access: test18 (imm += pkt_ptr, 1)")
440*0a372c9cSEduard Zingerman __success __retval(0)
test18_imm_pkt_ptr_1(void)441*0a372c9cSEduard Zingerman __naked void test18_imm_pkt_ptr_1(void)
442*0a372c9cSEduard Zingerman {
443*0a372c9cSEduard Zingerman asm volatile (" \
444*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_data]); \
445*0a372c9cSEduard Zingerman r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
446*0a372c9cSEduard Zingerman r0 = 8; \
447*0a372c9cSEduard Zingerman r0 += r2; \
448*0a372c9cSEduard Zingerman if r0 > r3 goto l0_%=; \
449*0a372c9cSEduard Zingerman *(u8*)(r2 + 0) = r2; \
450*0a372c9cSEduard Zingerman l0_%=: r0 = 0; \
451*0a372c9cSEduard Zingerman exit; \
452*0a372c9cSEduard Zingerman " :
453*0a372c9cSEduard Zingerman : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
454*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
455*0a372c9cSEduard Zingerman : __clobber_all);
456*0a372c9cSEduard Zingerman }
457*0a372c9cSEduard Zingerman
458*0a372c9cSEduard Zingerman SEC("tc")
459*0a372c9cSEduard Zingerman __description("direct packet access: test19 (imm += pkt_ptr, 2)")
460*0a372c9cSEduard Zingerman __success __retval(0)
test19_imm_pkt_ptr_2(void)461*0a372c9cSEduard Zingerman __naked void test19_imm_pkt_ptr_2(void)
462*0a372c9cSEduard Zingerman {
463*0a372c9cSEduard Zingerman asm volatile (" \
464*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_data]); \
465*0a372c9cSEduard Zingerman r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
466*0a372c9cSEduard Zingerman r0 = r2; \
467*0a372c9cSEduard Zingerman r0 += 8; \
468*0a372c9cSEduard Zingerman if r0 > r3 goto l0_%=; \
469*0a372c9cSEduard Zingerman r4 = 4; \
470*0a372c9cSEduard Zingerman r4 += r2; \
471*0a372c9cSEduard Zingerman *(u8*)(r4 + 0) = r4; \
472*0a372c9cSEduard Zingerman l0_%=: r0 = 0; \
473*0a372c9cSEduard Zingerman exit; \
474*0a372c9cSEduard Zingerman " :
475*0a372c9cSEduard Zingerman : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
476*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
477*0a372c9cSEduard Zingerman : __clobber_all);
478*0a372c9cSEduard Zingerman }
479*0a372c9cSEduard Zingerman
480*0a372c9cSEduard Zingerman SEC("tc")
481*0a372c9cSEduard Zingerman __description("direct packet access: test20 (x += pkt_ptr, 1)")
__flag(BPF_F_ANY_ALIGNMENT)482*0a372c9cSEduard Zingerman __success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
483*0a372c9cSEduard Zingerman __naked void test20_x_pkt_ptr_1(void)
484*0a372c9cSEduard Zingerman {
485*0a372c9cSEduard Zingerman asm volatile (" \
486*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_data]); \
487*0a372c9cSEduard Zingerman r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
488*0a372c9cSEduard Zingerman r0 = 0xffffffff; \
489*0a372c9cSEduard Zingerman *(u64*)(r10 - 8) = r0; \
490*0a372c9cSEduard Zingerman r0 = *(u64*)(r10 - 8); \
491*0a372c9cSEduard Zingerman r0 &= 0x7fff; \
492*0a372c9cSEduard Zingerman r4 = r0; \
493*0a372c9cSEduard Zingerman r4 += r2; \
494*0a372c9cSEduard Zingerman r5 = r4; \
495*0a372c9cSEduard Zingerman r4 += %[__imm_0]; \
496*0a372c9cSEduard Zingerman if r4 > r3 goto l0_%=; \
497*0a372c9cSEduard Zingerman *(u64*)(r5 + 0) = r4; \
498*0a372c9cSEduard Zingerman l0_%=: r0 = 0; \
499*0a372c9cSEduard Zingerman exit; \
500*0a372c9cSEduard Zingerman " :
501*0a372c9cSEduard Zingerman : __imm_const(__imm_0, 0x7fff - 1),
502*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
503*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
504*0a372c9cSEduard Zingerman : __clobber_all);
505*0a372c9cSEduard Zingerman }
506*0a372c9cSEduard Zingerman
507*0a372c9cSEduard Zingerman SEC("tc")
508*0a372c9cSEduard Zingerman __description("direct packet access: test21 (x += pkt_ptr, 2)")
__flag(BPF_F_ANY_ALIGNMENT)509*0a372c9cSEduard Zingerman __success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
510*0a372c9cSEduard Zingerman __naked void test21_x_pkt_ptr_2(void)
511*0a372c9cSEduard Zingerman {
512*0a372c9cSEduard Zingerman asm volatile (" \
513*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_data]); \
514*0a372c9cSEduard Zingerman r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
515*0a372c9cSEduard Zingerman r0 = r2; \
516*0a372c9cSEduard Zingerman r0 += 8; \
517*0a372c9cSEduard Zingerman if r0 > r3 goto l0_%=; \
518*0a372c9cSEduard Zingerman r4 = 0xffffffff; \
519*0a372c9cSEduard Zingerman *(u64*)(r10 - 8) = r4; \
520*0a372c9cSEduard Zingerman r4 = *(u64*)(r10 - 8); \
521*0a372c9cSEduard Zingerman r4 &= 0x7fff; \
522*0a372c9cSEduard Zingerman r4 += r2; \
523*0a372c9cSEduard Zingerman r5 = r4; \
524*0a372c9cSEduard Zingerman r4 += %[__imm_0]; \
525*0a372c9cSEduard Zingerman if r4 > r3 goto l0_%=; \
526*0a372c9cSEduard Zingerman *(u64*)(r5 + 0) = r4; \
527*0a372c9cSEduard Zingerman l0_%=: r0 = 0; \
528*0a372c9cSEduard Zingerman exit; \
529*0a372c9cSEduard Zingerman " :
530*0a372c9cSEduard Zingerman : __imm_const(__imm_0, 0x7fff - 1),
531*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
532*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
533*0a372c9cSEduard Zingerman : __clobber_all);
534*0a372c9cSEduard Zingerman }
535*0a372c9cSEduard Zingerman
536*0a372c9cSEduard Zingerman SEC("tc")
537*0a372c9cSEduard Zingerman __description("direct packet access: test22 (x += pkt_ptr, 3)")
__flag(BPF_F_ANY_ALIGNMENT)538*0a372c9cSEduard Zingerman __success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
539*0a372c9cSEduard Zingerman __naked void test22_x_pkt_ptr_3(void)
540*0a372c9cSEduard Zingerman {
541*0a372c9cSEduard Zingerman asm volatile (" \
542*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_data]); \
543*0a372c9cSEduard Zingerman r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
544*0a372c9cSEduard Zingerman r0 = r2; \
545*0a372c9cSEduard Zingerman r0 += 8; \
546*0a372c9cSEduard Zingerman *(u64*)(r10 - 8) = r2; \
547*0a372c9cSEduard Zingerman *(u64*)(r10 - 16) = r3; \
548*0a372c9cSEduard Zingerman r3 = *(u64*)(r10 - 16); \
549*0a372c9cSEduard Zingerman if r0 > r3 goto l0_%=; \
550*0a372c9cSEduard Zingerman r2 = *(u64*)(r10 - 8); \
551*0a372c9cSEduard Zingerman r4 = 0xffffffff; \
552*0a372c9cSEduard Zingerman lock *(u64 *)(r10 - 8) += r4; \
553*0a372c9cSEduard Zingerman r4 = *(u64*)(r10 - 8); \
554*0a372c9cSEduard Zingerman r4 >>= 49; \
555*0a372c9cSEduard Zingerman r4 += r2; \
556*0a372c9cSEduard Zingerman r0 = r4; \
557*0a372c9cSEduard Zingerman r0 += 2; \
558*0a372c9cSEduard Zingerman if r0 > r3 goto l0_%=; \
559*0a372c9cSEduard Zingerman r2 = 1; \
560*0a372c9cSEduard Zingerman *(u16*)(r4 + 0) = r2; \
561*0a372c9cSEduard Zingerman l0_%=: r0 = 0; \
562*0a372c9cSEduard Zingerman exit; \
563*0a372c9cSEduard Zingerman " :
564*0a372c9cSEduard Zingerman : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
565*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
566*0a372c9cSEduard Zingerman : __clobber_all);
567*0a372c9cSEduard Zingerman }
568*0a372c9cSEduard Zingerman
569*0a372c9cSEduard Zingerman SEC("tc")
570*0a372c9cSEduard Zingerman __description("direct packet access: test23 (x += pkt_ptr, 4)")
571*0a372c9cSEduard Zingerman __failure __msg("invalid access to packet, off=0 size=8, R5(id=2,off=0,r=0)")
__flag(BPF_F_ANY_ALIGNMENT)572*0a372c9cSEduard Zingerman __flag(BPF_F_ANY_ALIGNMENT)
573*0a372c9cSEduard Zingerman __naked void test23_x_pkt_ptr_4(void)
574*0a372c9cSEduard Zingerman {
575*0a372c9cSEduard Zingerman asm volatile (" \
576*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_data]); \
577*0a372c9cSEduard Zingerman r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
578*0a372c9cSEduard Zingerman r0 = *(u32*)(r1 + %[__sk_buff_mark]); \
579*0a372c9cSEduard Zingerman *(u64*)(r10 - 8) = r0; \
580*0a372c9cSEduard Zingerman r0 = *(u64*)(r10 - 8); \
581*0a372c9cSEduard Zingerman r0 &= 0xffff; \
582*0a372c9cSEduard Zingerman r4 = r0; \
583*0a372c9cSEduard Zingerman r0 = 31; \
584*0a372c9cSEduard Zingerman r0 += r4; \
585*0a372c9cSEduard Zingerman r0 += r2; \
586*0a372c9cSEduard Zingerman r5 = r0; \
587*0a372c9cSEduard Zingerman r0 += %[__imm_0]; \
588*0a372c9cSEduard Zingerman if r0 > r3 goto l0_%=; \
589*0a372c9cSEduard Zingerman *(u64*)(r5 + 0) = r0; \
590*0a372c9cSEduard Zingerman l0_%=: r0 = 0; \
591*0a372c9cSEduard Zingerman exit; \
592*0a372c9cSEduard Zingerman " :
593*0a372c9cSEduard Zingerman : __imm_const(__imm_0, 0xffff - 1),
594*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
595*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
596*0a372c9cSEduard Zingerman __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
597*0a372c9cSEduard Zingerman : __clobber_all);
598*0a372c9cSEduard Zingerman }
599*0a372c9cSEduard Zingerman
600*0a372c9cSEduard Zingerman SEC("tc")
601*0a372c9cSEduard Zingerman __description("direct packet access: test24 (x += pkt_ptr, 5)")
__flag(BPF_F_ANY_ALIGNMENT)602*0a372c9cSEduard Zingerman __success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
603*0a372c9cSEduard Zingerman __naked void test24_x_pkt_ptr_5(void)
604*0a372c9cSEduard Zingerman {
605*0a372c9cSEduard Zingerman asm volatile (" \
606*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_data]); \
607*0a372c9cSEduard Zingerman r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
608*0a372c9cSEduard Zingerman r0 = 0xffffffff; \
609*0a372c9cSEduard Zingerman *(u64*)(r10 - 8) = r0; \
610*0a372c9cSEduard Zingerman r0 = *(u64*)(r10 - 8); \
611*0a372c9cSEduard Zingerman r0 &= 0xff; \
612*0a372c9cSEduard Zingerman r4 = r0; \
613*0a372c9cSEduard Zingerman r0 = 64; \
614*0a372c9cSEduard Zingerman r0 += r4; \
615*0a372c9cSEduard Zingerman r0 += r2; \
616*0a372c9cSEduard Zingerman r5 = r0; \
617*0a372c9cSEduard Zingerman r0 += %[__imm_0]; \
618*0a372c9cSEduard Zingerman if r0 > r3 goto l0_%=; \
619*0a372c9cSEduard Zingerman *(u64*)(r5 + 0) = r0; \
620*0a372c9cSEduard Zingerman l0_%=: r0 = 0; \
621*0a372c9cSEduard Zingerman exit; \
622*0a372c9cSEduard Zingerman " :
623*0a372c9cSEduard Zingerman : __imm_const(__imm_0, 0x7fff - 1),
624*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
625*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
626*0a372c9cSEduard Zingerman : __clobber_all);
627*0a372c9cSEduard Zingerman }
628*0a372c9cSEduard Zingerman
629*0a372c9cSEduard Zingerman SEC("tc")
630*0a372c9cSEduard Zingerman __description("direct packet access: test25 (marking on <, good access)")
631*0a372c9cSEduard Zingerman __success __retval(0)
test25_marking_on_good_access(void)632*0a372c9cSEduard Zingerman __naked void test25_marking_on_good_access(void)
633*0a372c9cSEduard Zingerman {
634*0a372c9cSEduard Zingerman asm volatile (" \
635*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_data]); \
636*0a372c9cSEduard Zingerman r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
637*0a372c9cSEduard Zingerman r0 = r2; \
638*0a372c9cSEduard Zingerman r0 += 8; \
639*0a372c9cSEduard Zingerman if r0 < r3 goto l0_%=; \
640*0a372c9cSEduard Zingerman l1_%=: r0 = 0; \
641*0a372c9cSEduard Zingerman exit; \
642*0a372c9cSEduard Zingerman l0_%=: r0 = *(u8*)(r2 + 0); \
643*0a372c9cSEduard Zingerman goto l1_%=; \
644*0a372c9cSEduard Zingerman " :
645*0a372c9cSEduard Zingerman : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
646*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
647*0a372c9cSEduard Zingerman : __clobber_all);
648*0a372c9cSEduard Zingerman }
649*0a372c9cSEduard Zingerman
650*0a372c9cSEduard Zingerman SEC("tc")
651*0a372c9cSEduard Zingerman __description("direct packet access: test26 (marking on <, bad access)")
652*0a372c9cSEduard Zingerman __failure __msg("invalid access to packet")
test26_marking_on_bad_access(void)653*0a372c9cSEduard Zingerman __naked void test26_marking_on_bad_access(void)
654*0a372c9cSEduard Zingerman {
655*0a372c9cSEduard Zingerman asm volatile (" \
656*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_data]); \
657*0a372c9cSEduard Zingerman r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
658*0a372c9cSEduard Zingerman r0 = r2; \
659*0a372c9cSEduard Zingerman r0 += 8; \
660*0a372c9cSEduard Zingerman if r0 < r3 goto l0_%=; \
661*0a372c9cSEduard Zingerman r0 = *(u8*)(r2 + 0); \
662*0a372c9cSEduard Zingerman l1_%=: r0 = 0; \
663*0a372c9cSEduard Zingerman exit; \
664*0a372c9cSEduard Zingerman l0_%=: goto l1_%=; \
665*0a372c9cSEduard Zingerman " :
666*0a372c9cSEduard Zingerman : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
667*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
668*0a372c9cSEduard Zingerman : __clobber_all);
669*0a372c9cSEduard Zingerman }
670*0a372c9cSEduard Zingerman
671*0a372c9cSEduard Zingerman SEC("tc")
672*0a372c9cSEduard Zingerman __description("direct packet access: test27 (marking on <=, good access)")
673*0a372c9cSEduard Zingerman __success __retval(1)
test27_marking_on_good_access(void)674*0a372c9cSEduard Zingerman __naked void test27_marking_on_good_access(void)
675*0a372c9cSEduard Zingerman {
676*0a372c9cSEduard Zingerman asm volatile (" \
677*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_data]); \
678*0a372c9cSEduard Zingerman r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
679*0a372c9cSEduard Zingerman r0 = r2; \
680*0a372c9cSEduard Zingerman r0 += 8; \
681*0a372c9cSEduard Zingerman if r3 <= r0 goto l0_%=; \
682*0a372c9cSEduard Zingerman r0 = *(u8*)(r2 + 0); \
683*0a372c9cSEduard Zingerman l0_%=: r0 = 1; \
684*0a372c9cSEduard Zingerman exit; \
685*0a372c9cSEduard Zingerman " :
686*0a372c9cSEduard Zingerman : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
687*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
688*0a372c9cSEduard Zingerman : __clobber_all);
689*0a372c9cSEduard Zingerman }
690*0a372c9cSEduard Zingerman
691*0a372c9cSEduard Zingerman SEC("tc")
692*0a372c9cSEduard Zingerman __description("direct packet access: test28 (marking on <=, bad access)")
693*0a372c9cSEduard Zingerman __failure __msg("invalid access to packet")
test28_marking_on_bad_access(void)694*0a372c9cSEduard Zingerman __naked void test28_marking_on_bad_access(void)
695*0a372c9cSEduard Zingerman {
696*0a372c9cSEduard Zingerman asm volatile (" \
697*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_data]); \
698*0a372c9cSEduard Zingerman r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
699*0a372c9cSEduard Zingerman r0 = r2; \
700*0a372c9cSEduard Zingerman r0 += 8; \
701*0a372c9cSEduard Zingerman if r3 <= r0 goto l0_%=; \
702*0a372c9cSEduard Zingerman l1_%=: r0 = 1; \
703*0a372c9cSEduard Zingerman exit; \
704*0a372c9cSEduard Zingerman l0_%=: r0 = *(u8*)(r2 + 0); \
705*0a372c9cSEduard Zingerman goto l1_%=; \
706*0a372c9cSEduard Zingerman " :
707*0a372c9cSEduard Zingerman : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
708*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
709*0a372c9cSEduard Zingerman : __clobber_all);
710*0a372c9cSEduard Zingerman }
711*0a372c9cSEduard Zingerman
712*0a372c9cSEduard Zingerman SEC("tc")
713*0a372c9cSEduard Zingerman __description("direct packet access: test29 (reg > pkt_end in subprog)")
714*0a372c9cSEduard Zingerman __success __retval(0)
reg_pkt_end_in_subprog(void)715*0a372c9cSEduard Zingerman __naked void reg_pkt_end_in_subprog(void)
716*0a372c9cSEduard Zingerman {
717*0a372c9cSEduard Zingerman asm volatile (" \
718*0a372c9cSEduard Zingerman r6 = *(u32*)(r1 + %[__sk_buff_data]); \
719*0a372c9cSEduard Zingerman r2 = *(u32*)(r1 + %[__sk_buff_data_end]); \
720*0a372c9cSEduard Zingerman r3 = r6; \
721*0a372c9cSEduard Zingerman r3 += 8; \
722*0a372c9cSEduard Zingerman call reg_pkt_end_in_subprog__1; \
723*0a372c9cSEduard Zingerman if r0 == 0 goto l0_%=; \
724*0a372c9cSEduard Zingerman r0 = *(u8*)(r6 + 0); \
725*0a372c9cSEduard Zingerman l0_%=: r0 = 0; \
726*0a372c9cSEduard Zingerman exit; \
727*0a372c9cSEduard Zingerman " :
728*0a372c9cSEduard Zingerman : __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
729*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
730*0a372c9cSEduard Zingerman : __clobber_all);
731*0a372c9cSEduard Zingerman }
732*0a372c9cSEduard Zingerman
733*0a372c9cSEduard Zingerman static __naked __noinline __attribute__((used))
reg_pkt_end_in_subprog__1(void)734*0a372c9cSEduard Zingerman void reg_pkt_end_in_subprog__1(void)
735*0a372c9cSEduard Zingerman {
736*0a372c9cSEduard Zingerman asm volatile (" \
737*0a372c9cSEduard Zingerman r0 = 0; \
738*0a372c9cSEduard Zingerman if r3 > r2 goto l0_%=; \
739*0a372c9cSEduard Zingerman r0 = 1; \
740*0a372c9cSEduard Zingerman l0_%=: exit; \
741*0a372c9cSEduard Zingerman " ::: __clobber_all);
742*0a372c9cSEduard Zingerman }
743*0a372c9cSEduard Zingerman
744*0a372c9cSEduard Zingerman SEC("tc")
745*0a372c9cSEduard Zingerman __description("direct packet access: test30 (check_id() in regsafe(), bad access)")
746*0a372c9cSEduard Zingerman __failure __msg("invalid access to packet, off=0 size=1, R2")
__flag(BPF_F_TEST_STATE_FREQ)747*0a372c9cSEduard Zingerman __flag(BPF_F_TEST_STATE_FREQ)
748*0a372c9cSEduard Zingerman __naked void id_in_regsafe_bad_access(void)
749*0a372c9cSEduard Zingerman {
750*0a372c9cSEduard Zingerman asm volatile (" \
751*0a372c9cSEduard Zingerman /* r9 = ctx */ \
752*0a372c9cSEduard Zingerman r9 = r1; \
753*0a372c9cSEduard Zingerman /* r7 = ktime_get_ns() */ \
754*0a372c9cSEduard Zingerman call %[bpf_ktime_get_ns]; \
755*0a372c9cSEduard Zingerman r7 = r0; \
756*0a372c9cSEduard Zingerman /* r6 = ktime_get_ns() */ \
757*0a372c9cSEduard Zingerman call %[bpf_ktime_get_ns]; \
758*0a372c9cSEduard Zingerman r6 = r0; \
759*0a372c9cSEduard Zingerman /* r2 = ctx->data \
760*0a372c9cSEduard Zingerman * r3 = ctx->data \
761*0a372c9cSEduard Zingerman * r4 = ctx->data_end \
762*0a372c9cSEduard Zingerman */ \
763*0a372c9cSEduard Zingerman r2 = *(u32*)(r9 + %[__sk_buff_data]); \
764*0a372c9cSEduard Zingerman r3 = *(u32*)(r9 + %[__sk_buff_data]); \
765*0a372c9cSEduard Zingerman r4 = *(u32*)(r9 + %[__sk_buff_data_end]); \
766*0a372c9cSEduard Zingerman /* if r6 > 100 goto exit \
767*0a372c9cSEduard Zingerman * if r7 > 100 goto exit \
768*0a372c9cSEduard Zingerman */ \
769*0a372c9cSEduard Zingerman if r6 > 100 goto l0_%=; \
770*0a372c9cSEduard Zingerman if r7 > 100 goto l0_%=; \
771*0a372c9cSEduard Zingerman /* r2 += r6 ; this forces assignment of ID to r2\
772*0a372c9cSEduard Zingerman * r2 += 1 ; get some fixed off for r2\
773*0a372c9cSEduard Zingerman * r3 += r7 ; this forces assignment of ID to r3\
774*0a372c9cSEduard Zingerman * r3 += 1 ; get some fixed off for r3\
775*0a372c9cSEduard Zingerman */ \
776*0a372c9cSEduard Zingerman r2 += r6; \
777*0a372c9cSEduard Zingerman r2 += 1; \
778*0a372c9cSEduard Zingerman r3 += r7; \
779*0a372c9cSEduard Zingerman r3 += 1; \
780*0a372c9cSEduard Zingerman /* if r6 > r7 goto +1 ; no new information about the state is derived from\
781*0a372c9cSEduard Zingerman * ; this check, thus produced verifier states differ\
782*0a372c9cSEduard Zingerman * ; only in 'insn_idx' \
783*0a372c9cSEduard Zingerman * r2 = r3 ; optionally share ID between r2 and r3\
784*0a372c9cSEduard Zingerman */ \
785*0a372c9cSEduard Zingerman if r6 != r7 goto l1_%=; \
786*0a372c9cSEduard Zingerman r2 = r3; \
787*0a372c9cSEduard Zingerman l1_%=: /* if r3 > ctx->data_end goto exit */ \
788*0a372c9cSEduard Zingerman if r3 > r4 goto l0_%=; \
789*0a372c9cSEduard Zingerman /* r5 = *(u8 *) (r2 - 1) ; access packet memory using r2,\
790*0a372c9cSEduard Zingerman * ; this is not always safe\
791*0a372c9cSEduard Zingerman */ \
792*0a372c9cSEduard Zingerman r5 = *(u8*)(r2 - 1); \
793*0a372c9cSEduard Zingerman l0_%=: /* exit(0) */ \
794*0a372c9cSEduard Zingerman r0 = 0; \
795*0a372c9cSEduard Zingerman exit; \
796*0a372c9cSEduard Zingerman " :
797*0a372c9cSEduard Zingerman : __imm(bpf_ktime_get_ns),
798*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
799*0a372c9cSEduard Zingerman __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
800*0a372c9cSEduard Zingerman : __clobber_all);
801*0a372c9cSEduard Zingerman }
802*0a372c9cSEduard Zingerman
803*0a372c9cSEduard Zingerman char _license[] SEC("license") = "GPL";
804