1 // SPDX-License-Identifier: GPL-2.0
2 /* Converted from tools/testing/selftests/bpf/verifier/spill_fill.c */
3 
4 #include <linux/bpf.h>
5 #include <bpf/bpf_helpers.h>
6 #include "bpf_misc.h"
7 
/* Ring buffer map used by the ringbuf reserve/submit spill tests below. */
struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} map_ringbuf SEC(".maps");
12 
SEC("socket")
__description("check valid spill/fill")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(POINTER_VALUE)
/* Spill the ctx pointer (R1) to the stack and fill it back into R2;
 * returning it in R0 is accepted for privileged users (return value is
 * the pointer itself, matched by __retval(POINTER_VALUE)) but must be
 * rejected as a pointer leak ("R0 leaks addr") for unprivileged users.
 */
__naked void check_valid_spill_fill(void)
{
	asm volatile ("					\
	/* spill R1(ctx) into stack */			\
	*(u64*)(r10 - 8) = r1;				\
	/* fill it back into R2 */			\
	r2 = *(u64*)(r10 - 8);				\
	/* should be able to access R0 = *(R2 + 8) */	\
	/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */\
	r0 = r2;					\
	exit;						\
"	::: __clobber_all);
}
30 
SEC("socket")
__description("check valid spill/fill, skb mark")
__success __success_unpriv __retval(0)
/* A full-width (u64) spill/fill round trip must preserve the pointer's
 * type: after filling the spilled ctx pointer back into R0, loading
 * skb->mark through it is still accepted, for both privileged and
 * unprivileged users.
 */
__naked void valid_spill_fill_skb_mark(void)
{
	asm volatile ("					\
	r6 = r1;					\
	*(u64*)(r10 - 8) = r6;				\
	r0 = *(u64*)(r10 - 8);				\
	r0 = *(u32*)(r0 + %[__sk_buff_mark]);		\
	exit;						\
"	:
	: __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}
46 
SEC("socket")
__description("check valid spill/fill, ptr to mem")
__success __success_unpriv __retval(0)
/* A PTR_TO_MEM obtained from bpf_ringbuf_reserve() survives a spill/fill
 * round trip through the stack: after the NULL check, the pointer filled
 * into R7 is still writable and can be passed to bpf_ringbuf_submit().
 */
__naked void spill_fill_ptr_to_mem(void)
{
	asm volatile ("					\
	/* reserve 8 byte ringbuf memory */		\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r1 = %[map_ringbuf] ll;				\
	r2 = 8;						\
	r3 = 0;						\
	call %[bpf_ringbuf_reserve];			\
	/* store a pointer to the reserved memory in R6 */\
	r6 = r0;					\
	/* check whether the reservation was successful */\
	if r0 == 0 goto l0_%=;				\
	/* spill R6(mem) into the stack */		\
	*(u64*)(r10 - 8) = r6;				\
	/* fill it back in R7 */			\
	r7 = *(u64*)(r10 - 8);				\
	/* should be able to access *(R7) = 0 */	\
	r1 = 0;						\
	*(u64*)(r7 + 0) = r1;				\
	/* submit the reserved ringbuf memory */	\
	r1 = r7;					\
	r2 = 0;						\
	call %[bpf_ringbuf_submit];			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_ringbuf_reserve),
	  __imm(bpf_ringbuf_submit),
	  __imm_addr(map_ringbuf)
	: __clobber_all);
}
83 
SEC("socket")
__description("check with invalid reg offset 0")
__failure __msg("R0 pointer arithmetic on ringbuf_mem_or_null prohibited")
__failure_unpriv
/* Adding an offset to the bpf_ringbuf_reserve() return value BEFORE the
 * NULL check means R0 is still ringbuf_mem_or_null; pointer arithmetic
 * on a maybe-NULL pointer must be rejected with the exact message above.
 */
__naked void with_invalid_reg_offset_0(void)
{
	asm volatile ("					\
	/* reserve 8 byte ringbuf memory */		\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r1 = %[map_ringbuf] ll;				\
	r2 = 8;						\
	r3 = 0;						\
	call %[bpf_ringbuf_reserve];			\
	/* store a pointer to the reserved memory in R6 */\
	r6 = r0;					\
	/* add invalid offset to memory or NULL */	\
	r0 += 1;					\
	/* check whether the reservation was successful */\
	if r0 == 0 goto l0_%=;				\
	/* should not be able to access *(R7) = 0 */	\
	r1 = 0;						\
	*(u32*)(r6 + 0) = r1;				\
	/* submit the reserved ringbuf memory */	\
	r1 = r6;					\
	r2 = 0;						\
	call %[bpf_ringbuf_submit];			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_ringbuf_reserve),
	  __imm(bpf_ringbuf_submit),
	  __imm_addr(map_ringbuf)
	: __clobber_all);
}
119 
SEC("socket")
__description("check corrupted spill/fill")
__failure __msg("R0 invalid mem access 'scalar'")
__msg_unpriv("attempt to corrupt spilled")
__flag(BPF_F_ANY_ALIGNMENT)
/* Overwriting one byte of a spilled pointer destroys its tracked type:
 * privileged mode allows the fill but R0 becomes a scalar, so the
 * subsequent dereference fails; unprivileged mode rejects the partial
 * overwrite of the spilled pointer itself.
 */
__naked void check_corrupted_spill_fill(void)
{
	asm volatile ("					\
	/* spill R1(ctx) into stack */			\
	*(u64*)(r10 - 8) = r1;				\
	/* mess up with R1 pointer on stack */		\
	r0 = 0x23;					\
	*(u8*)(r10 - 7) = r0;				\
	/* fill back into R0 is fine for priv.		\
	 * R0 now becomes SCALAR_VALUE.			\
	 */						\
	r0 = *(u64*)(r10 - 8);				\
	/* Load from R0 should fail. */			\
	r0 = *(u64*)(r0 + 8);				\
	exit;						\
"	::: __clobber_all);
}
142 
SEC("socket")
__description("check corrupted spill/fill, LSB")
__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled")
__retval(POINTER_VALUE)
/* Overwrite the low 16 bits of a spilled ctx pointer: accepted for
 * privileged users (the fill and return still succeed), but rejected
 * for unprivileged users as corruption of a spilled pointer.
 */
__naked void check_corrupted_spill_fill_lsb(void)
{
	asm volatile ("					\
	*(u64*)(r10 - 8) = r1;				\
	r0 = 0xcafe;					\
	*(u16*)(r10 - 8) = r0;				\
	r0 = *(u64*)(r10 - 8);				\
	exit;						\
"	::: __clobber_all);
}
157 
SEC("socket")
__description("check corrupted spill/fill, MSB")
__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled")
__retval(POINTER_VALUE)
/* Same as the LSB test above, but clobber the high 32 bits of the
 * spilled ctx pointer instead: privileged users succeed, unprivileged
 * users get "attempt to corrupt spilled".
 */
__naked void check_corrupted_spill_fill_msb(void)
{
	asm volatile ("					\
	*(u64*)(r10 - 8) = r1;				\
	r0 = 0x12345678;				\
	*(u32*)(r10 - 4) = r0;				\
	r0 = *(u64*)(r10 - 8);				\
	exit;						\
"	::: __clobber_all);
}
172 
SEC("tc")
__description("Spill and refill a u32 const scalar.  Offset to skb->data")
__success __retval(0)
/* A u32 spill/fill of the constant 20 keeps its precise value, so the
 * verifier can prove the packet bounds check (r0 = data + 20 <= data_end)
 * makes the 4-byte read at data legal; the program is accepted.
 */
__naked void scalar_offset_to_skb_data_1(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	w4 = 20;					\
	*(u32*)(r10 - 8) = r4;				\
	r4 = *(u32*)(r10 - 8);				\
	r0 = r2;					\
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */	\
	r0 += r4;					\
	/* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */\
	if r0 > r3 goto l0_%=;				\
	/* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=20 */\
	r0 = *(u32*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
198 
SEC("socket")
__description("Spill a u32 const, refill from another half of the uninit u32 from the stack")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
__msg_unpriv("invalid read from stack off -4+0 size 4")
__retval(0)
/* Only fp-8..fp-5 is initialized by the u32 spill; reading fp-4..fp-1 is
 * an uninitialized stack read, which unprivileged programs may not do.
 */
__naked void uninit_u32_from_the_stack(void)
{
	asm volatile ("					\
	w4 = 20;					\
	*(u32*)(r10 - 8) = r4;				\
	/* r4 = *(u32 *)(r10 -4) fp-8=????rrrr*/	\
	r4 = *(u32*)(r10 - 4);				\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
216 
SEC("tc")
__description("Spill a u32 const scalar.  Refill as u16.  Offset to skb->data")
__failure __msg("invalid access to packet")
/* Refilling a u32 spill with a narrower u16 load loses the known value:
 * r4 becomes an unknown scalar with umax=65535, so the bounds check only
 * proves 0 readable bytes at r2 and the 4-byte packet read is rejected.
 */
__naked void u16_offset_to_skb_data(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	w4 = 20;					\
	*(u32*)(r10 - 8) = r4;				\
	r4 = *(u16*)(r10 - 8);				\
	r0 = r2;					\
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
	r0 += r4;					\
	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
	if r0 > r3 goto l0_%=;				\
	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\
	r0 = *(u32*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
242 
243 SEC("tc")
244 __description("Spill u32 const scalars.  Refill as u64.  Offset to skb->data")
245 __failure __msg("invalid access to packet")
246 __naked void u64_offset_to_skb_data(void)
247 {
248 	asm volatile ("					\
249 	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
250 	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
251 	w6 = 0;						\
252 	w7 = 20;					\
253 	*(u32*)(r10 - 4) = r6;				\
254 	*(u32*)(r10 - 8) = r7;				\
255 	r4 = *(u16*)(r10 - 8);				\
256 	r0 = r2;					\
257 	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
258 	r0 += r4;					\
259 	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
260 	if r0 > r3 goto l0_%=;				\
261 	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\
262 	r0 = *(u32*)(r2 + 0);				\
263 l0_%=:	r0 = 0;						\
264 	exit;						\
265 "	:
266 	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
267 	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
268 	: __clobber_all);
269 }
270 
SEC("tc")
__description("Spill a u32 const scalar.  Refill as u16 from fp-6.  Offset to skb->data")
__failure __msg("invalid access to packet")
/* A u16 refill from fp-6 reads only the upper half of the u32 spilled at
 * fp-8, so the known constant is lost: r4 is unknown (umax=65535) and
 * the 4-byte packet read cannot be proven safe -- rejected.
 */
__naked void _6_offset_to_skb_data(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	w4 = 20;					\
	*(u32*)(r10 - 8) = r4;				\
	r4 = *(u16*)(r10 - 6);				\
	r0 = r2;					\
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
	r0 += r4;					\
	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
	if r0 > r3 goto l0_%=;				\
	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\
	r0 = *(u32*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
296 
SEC("tc")
__description("Spill and refill a u32 const scalar at non 8byte aligned stack addr.  Offset to skb->data")
__failure __msg("invalid access to packet")
/* Spilling/refilling through the non-8-byte-aligned slot fp-4 does not
 * preserve the constant: the refilled r4 is unknown (umax=U32_MAX per
 * the inline state comments), so the packet bounds check proves nothing
 * and the 4-byte read is rejected.
 */
__naked void addr_offset_to_skb_data(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	w4 = 20;					\
	*(u32*)(r10 - 8) = r4;				\
	*(u32*)(r10 - 4) = r4;				\
	r4 = *(u32*)(r10 - 4);				\
	r0 = r2;					\
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=U32_MAX */\
	r0 += r4;					\
	/* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */\
	if r0 > r3 goto l0_%=;				\
	/* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */\
	r0 = *(u32*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
323 
SEC("tc")
__description("Spill and refill a umax=40 bounded scalar.  Offset to skb->data")
__success __retval(0)
/* A range-bounded scalar (skb->tstamp clamped to umax=40 by the branch)
 * keeps its bounds across an aligned u32 spill/fill, so the verifier can
 * prove data + r4 + 20 <= data_end covers the 4-byte read -- accepted.
 */
__naked void scalar_offset_to_skb_data_2(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r4 = *(u64*)(r1 + %[__sk_buff_tstamp]);		\
	if r4 <= 40 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	/* *(u32 *)(r10 -8) = r4 R4=umax=40 */		\
	*(u32*)(r10 - 8) = r4;				\
	/* r4 = (*u32 *)(r10 - 8) */			\
	r4 = *(u32*)(r10 - 8);				\
	/* r2 += r4 R2=pkt R4=umax=40 */		\
	r2 += r4;					\
	/* r0 = r2 R2=pkt,umax=40 R4=umax=40 */		\
	r0 = r2;					\
	/* r2 += 20 R0=pkt,umax=40 R2=pkt,umax=40 */	\
	r2 += 20;					\
	/* if (r2 > r3) R0=pkt,umax=40 R2=pkt,off=20,umax=40 */\
	if r2 > r3 goto l1_%=;				\
	/* r0 = *(u32 *)r0 R0=pkt,r=20,umax=40 R2=pkt,off=20,r=20,umax=40 */\
	r0 = *(u32*)(r0 + 0);				\
l1_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
	  __imm_const(__sk_buff_tstamp, offsetof(struct __sk_buff, tstamp))
	: __clobber_all);
}
358 
SEC("tc")
__description("Spill a u32 scalar at fp-4 and then at fp-8")
__success __retval(0)
/* Both halves of the fp-8..fp-1 region are written by the two u32
 * spills, so the subsequent u64 fill from fp-8 reads fully-initialized
 * stack and the program is accepted.
 */
__naked void and_then_at_fp_8(void)
{
	asm volatile ("					\
	w4 = 4321;					\
	*(u32*)(r10 - 4) = r4;				\
	*(u32*)(r10 - 8) = r4;				\
	r4 = *(u64*)(r10 - 8);				\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
373 
/* Required license declaration; GPL makes all bpf helpers available. */
char _license[] SEC("license") = "GPL";
375