1*b427ca57SEduard Zingerman // SPDX-License-Identifier: GPL-2.0
2*b427ca57SEduard Zingerman /* Converted from tools/testing/selftests/bpf/verifier/lwt.c */
3*b427ca57SEduard Zingerman
4*b427ca57SEduard Zingerman #include <linux/bpf.h>
5*b427ca57SEduard Zingerman #include <bpf/bpf_helpers.h>
6*b427ca57SEduard Zingerman #include "bpf_misc.h"
7*b427ca57SEduard Zingerman
SEC("lwt_in")
__description("invalid direct packet write for LWT_IN")
__failure __msg("cannot write into packet")
__naked void packet_write_for_lwt_in(void)
{
	/* Load data/data_end, do a correct 8-byte bounds check, then attempt
	 * a 1-byte store into the packet.  Even with a valid bounds check the
	 * verifier must reject the write: lwt_in programs get read-only
	 * packet access (__failure + "cannot write into packet" above).
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	*(u8*)(r2 + 0) = r2;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
27*b427ca57SEduard Zingerman
SEC("lwt_out")
__description("invalid direct packet write for LWT_OUT")
__failure __msg("cannot write into packet")
__naked void packet_write_for_lwt_out(void)
{
	/* Same program as the LWT_IN case: bounds-checked 1-byte packet
	 * store.  lwt_out programs are also read-only, so the verifier
	 * must reject the write with "cannot write into packet".
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	*(u8*)(r2 + 0) = r2;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
47*b427ca57SEduard Zingerman
SEC("lwt_xmit")
__description("direct packet write for LWT_XMIT")
__success __retval(0)
__naked void packet_write_for_lwt_xmit(void)
{
	/* Identical bounds-checked 1-byte packet store, but attached as
	 * lwt_xmit: this program type permits packet writes, so the same
	 * instruction sequence must now pass verification (__success).
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	*(u8*)(r2 + 0) = r2;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
67*b427ca57SEduard Zingerman
SEC("lwt_in")
__description("direct packet read for LWT_IN")
__success __retval(0)
__naked void packet_read_for_lwt_in(void)
{
	/* Bounds-checked 1-byte packet *read*.  Reads are allowed for
	 * lwt_in (only writes are rejected), so this must verify and
	 * return 0.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r0 = *(u8*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
87*b427ca57SEduard Zingerman
SEC("lwt_out")
__description("direct packet read for LWT_OUT")
__success __retval(0)
__naked void packet_read_for_lwt_out(void)
{
	/* Same bounds-checked 1-byte packet read as the LWT_IN case;
	 * reads are permitted for lwt_out as well, so verification must
	 * succeed.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r0 = *(u8*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
107*b427ca57SEduard Zingerman
SEC("lwt_xmit")
__description("direct packet read for LWT_XMIT")
__success __retval(0)
__naked void packet_read_for_lwt_xmit(void)
{
	/* Bounds-checked 1-byte packet read on lwt_xmit, the most
	 * permissive of the three LWT hooks here; must verify.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r0 = *(u8*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
127*b427ca57SEduard Zingerman
SEC("lwt_xmit")
__description("overlapping checks for direct packet access")
__success __retval(0)
__naked void checks_for_direct_packet_access(void)
{
	/* Two overlapping range checks: first data+8 <= data_end, then the
	 * narrower data+6 <= data_end.  The subsequent 2-byte read at
	 * data+6 is covered by the first (wider) check; the verifier must
	 * track both bounds and accept the u16 load.
	 */
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r2;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r1 = r2;					\
	r1 += 6;					\
	if r1 > r3 goto l0_%=;				\
	r0 = *(u16*)(r2 + 6);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
150*b427ca57SEduard Zingerman
SEC("lwt_xmit")
__description("make headroom for LWT_XMIT")
__success __retval(0)
__naked void make_headroom_for_lwt_xmit(void)
{
	/* Call bpf_skb_change_head() twice (34 then 42 bytes of headroom,
	 * flags = 0).  The skb pointer is preserved in r6 across the first
	 * helper call since helpers clobber r1-r5.  Must verify and return
	 * 0; the helper is available to lwt_xmit programs.
	 */
	asm volatile ("					\
	r6 = r1;					\
	r2 = 34;					\
	r3 = 0;						\
	call %[bpf_skb_change_head];			\
	/* split for s390 to succeed */			\
	r1 = r6;					\
	r2 = 42;					\
	r3 = 0;						\
	call %[bpf_skb_change_head];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_skb_change_head)
	: __clobber_all);
}
172*b427ca57SEduard Zingerman
SEC("socket")
__description("invalid access of tc_classid for LWT_IN")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void tc_classid_for_lwt_in(void)
{
	/* 4-byte read of __sk_buff.tc_classid must be rejected as an
	 * invalid context access.  NOTE(review): attached as SEC("socket")
	 * although the description says LWT_IN — presumably so the
	 * __failure_unpriv variant can run unprivileged; confirm against
	 * the original verifier/lwt.c test.
	 */
	asm volatile ("					\
	r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]);	\
	exit;						\
"	:
	: __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid))
	: __clobber_all);
}
186*b427ca57SEduard Zingerman
SEC("socket")
__description("invalid access of tc_classid for LWT_OUT")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void tc_classid_for_lwt_out(void)
{
	/* Same as the LWT_IN variant: a 4-byte tc_classid read must fail
	 * with "invalid bpf_context access", both priv and unpriv.
	 */
	asm volatile ("					\
	r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]);	\
	exit;						\
"	:
	: __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid))
	: __clobber_all);
}
200*b427ca57SEduard Zingerman
SEC("socket")
__description("invalid access of tc_classid for LWT_XMIT")
__failure __msg("invalid bpf_context access")
__failure_unpriv
__naked void tc_classid_for_lwt_xmit(void)
{
	/* Same as the LWT_IN/LWT_OUT variants: 4-byte tc_classid read must
	 * be rejected as an invalid context access.
	 */
	asm volatile ("					\
	r0 = *(u32*)(r1 + %[__sk_buff_tc_classid]);	\
	exit;						\
"	:
	: __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid))
	: __clobber_all);
}
214*b427ca57SEduard Zingerman
SEC("lwt_in")
__description("check skb->tc_classid half load not permitted for lwt prog")
__failure __msg("invalid bpf_context access")
__naked void not_permitted_for_lwt_prog(void)
{
	/* A narrow (u16) load of tc_classid must also be rejected for an
	 * lwt program.  The byte-order #if picks the offset of the same
	 * half-word on both endiannesses: base offset on little endian,
	 * base + 2 on big endian (__imm_0 below).
	 */
	asm volatile (
	"r0 = 0;"
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	"r0 = *(u16*)(r1 + %[__sk_buff_tc_classid]);"
#else
	"r0 = *(u16*)(r1 + %[__imm_0]);"
#endif
	"exit;"
	:
	: __imm_const(__imm_0, offsetof(struct __sk_buff, tc_classid) + 2),
	  __imm_const(__sk_buff_tc_classid, offsetof(struct __sk_buff, tc_classid))
	: __clobber_all);
}
233*b427ca57SEduard Zingerman
234*b427ca57SEduard Zingerman char _license[] SEC("license") = "GPL";
235