// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/spill_fill.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

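/* Ring buffer map backing the bpf_ringbuf_reserve()/bpf_ringbuf_submit()
 * tests below, which spill and fill the pointer returned by the reserve
 * call.
 */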
struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} map_ringbuf SEC(".maps");

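/* A full-width 64-bit spill and fill preserves the register type: the ctx
 * pointer spilled from r1 is refilled into r2 as a pointer. Returning it is
 * fine when privileged, but is flagged as a pointer leak ("R0 leaks addr")
 * for unprivileged loaders.
 */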
SEC("socket")
__description("check valid spill/fill")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(POINTER_VALUE)
__naked void check_valid_spill_fill(void)
{
	asm volatile ("				\
	/* spill R1(ctx) into stack */		\
	*(u64*)(r10 - 8) = r1;			\
	/* fill it back into R2 */		\
	r2 = *(u64*)(r10 - 8);			\
	/* should be able to access R0 = *(R2 + 8) */\
	/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */\
	r0 = r2;				\
	exit;					\
"	::: __clobber_all);
}

SEC("socket")
__description("check valid spill/fill, skb mark")
__success __success_unpriv __retval(0)
__naked void valid_spill_fill_skb_mark(void)
{
	asm volatile ("				\
	r6 = r1;				\
	*(u64*)(r10 - 8) = r6;			\
	r0 = *(u64*)(r10 - 8);			\
	r0 = *(u32*)(r0 + %[__sk_buff_mark]);	\
	exit;					\
"	:
	: __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}

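/* bpf_ringbuf_reserve() returns a mem-or-null pointer. After the NULL check,
 * the 8-byte spill and fill below must preserve the PTR_TO_MEM type so that
 * both the write through the refilled r7 and the submit call verify.
 */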
SEC("socket")
__description("check valid spill/fill, ptr to mem")
__success __success_unpriv __retval(0)
__naked void spill_fill_ptr_to_mem(void)
{
	asm volatile ("				\
	/* reserve 8 byte ringbuf memory */	\
	r1 = 0;					\
	*(u64*)(r10 - 8) = r1;			\
	r1 = %[map_ringbuf] ll;			\
	r2 = 8;					\
	r3 = 0;					\
	call %[bpf_ringbuf_reserve];		\
	/* store a pointer to the reserved memory in R6 */\
	r6 = r0;				\
	/* check whether the reservation was successful */\
	if r0 == 0 goto l0_%=;			\
	/* spill R6(mem) into the stack */	\
	*(u64*)(r10 - 8) = r6;			\
	/* fill it back in R7 */		\
	r7 = *(u64*)(r10 - 8);			\
	/* should be able to access *(R7) = 0 */\
	r1 = 0;					\
	*(u64*)(r7 + 0) = r1;			\
	/* submit the reserved ringbuf memory */\
	r1 = r7;				\
	r2 = 0;					\
	call %[bpf_ringbuf_submit];		\
l0_%=:	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_ringbuf_reserve),
	  __imm(bpf_ringbuf_submit),
	  __imm_addr(map_ringbuf)
	: __clobber_all);
}

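/* Unlike above, r0 gets an offset added before the NULL check: pointer
 * arithmetic on a mem-or-null value is rejected outright.
 */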
SEC("socket")
__description("check with invalid reg offset 0")
__failure __msg("R0 pointer arithmetic on ringbuf_mem_or_null prohibited")
__failure_unpriv
__naked void with_invalid_reg_offset_0(void)
{
	asm volatile ("				\
	/* reserve 8 byte ringbuf memory */	\
	r1 = 0;					\
	*(u64*)(r10 - 8) = r1;			\
	r1 = %[map_ringbuf] ll;			\
	r2 = 8;					\
	r3 = 0;					\
	call %[bpf_ringbuf_reserve];		\
	/* store a pointer to the reserved memory in R6 */\
	r6 = r0;				\
	/* add invalid offset to memory or NULL */\
	r0 += 1;				\
	/* check whether the reservation was successful */\
	if r0 == 0 goto l0_%=;			\
	/* should not be able to access *(R6) = 0 */\
	r1 = 0;					\
	*(u32*)(r6 + 0) = r1;			\
	/* submit the reserved ringbuf memory */\
	r1 = r6;				\
	r2 = 0;					\
	call %[bpf_ringbuf_submit];		\
l0_%=:	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_ringbuf_reserve),
	  __imm(bpf_ringbuf_submit),
	  __imm_addr(map_ringbuf)
	: __clobber_all);
}

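/* Overwriting a single byte of a spilled pointer invalidates the slot: a
 * privileged loader may still fill it back, but the result is a plain
 * scalar, so the dereference fails; unprivileged loaders are rejected at
 * the corrupting store itself.
 */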
SEC("socket")
__description("check corrupted spill/fill")
__failure __msg("R0 invalid mem access 'scalar'")
__msg_unpriv("attempt to corrupt spilled")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void check_corrupted_spill_fill(void)
{
	asm volatile ("				\
	/* spill R1(ctx) into stack */		\
	*(u64*)(r10 - 8) = r1;			\
	/* mess up with R1 pointer on stack */	\
	r0 = 0x23;				\
	*(u8*)(r10 - 7) = r0;			\
	/* fill back into R0 is fine for priv.	\
	 * R0 now becomes SCALAR_VALUE.		\
	 */					\
	r0 = *(u64*)(r10 - 8);			\
	/* Load from R0 should fail. */		\
	r0 = *(u64*)(r0 + 8);			\
	exit;					\
"	::: __clobber_all);
}

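/* The next two tests corrupt the low (LSB) and high (MSB) bytes of a
 * spilled pointer with narrower u16/u32 stores. As above, privileged mode
 * tolerates the scalar fill while unprivileged mode rejects the partial
 * overwrite.
 */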
SEC("socket")
__description("check corrupted spill/fill, LSB")
__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled")
__retval(POINTER_VALUE)
__naked void check_corrupted_spill_fill_lsb(void)
{
	asm volatile ("				\
	*(u64*)(r10 - 8) = r1;			\
	r0 = 0xcafe;				\
	*(u16*)(r10 - 8) = r0;			\
	r0 = *(u64*)(r10 - 8);			\
	exit;					\
"	::: __clobber_all);
}

SEC("socket")
__description("check corrupted spill/fill, MSB")
__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled")
__retval(POINTER_VALUE)
__naked void check_corrupted_spill_fill_msb(void)
{
	asm volatile ("				\
	*(u64*)(r10 - 8) = r1;			\
	r0 = 0x12345678;			\
	*(u32*)(r10 - 4) = r0;			\
	r0 = *(u64*)(r10 - 8);			\
	exit;					\
"	::: __clobber_all);
}

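/* The verifier also tracks spills narrower than 8 bytes: a u32 spill and
 * matching u32 fill of the constant 20 keeps the precise value, so the
 * packet bounds check below is sufficient for the packet read.
 */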
SEC("tc")
__description("Spill and refill a u32 const scalar. Offset to skb->data")
__success __retval(0)
__naked void scalar_offset_to_skb_data_1(void)
{
	asm volatile ("				\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);	\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);\
	w4 = 20;				\
	*(u32*)(r10 - 8) = r4;			\
	r4 = *(u32*)(r10 - 8);			\
	r0 = r2;				\
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */\
	r0 += r4;				\
	/* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */\
	if r0 > r3 goto l0_%=;			\
	/* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=20 */\
	r0 = *(u32*)(r2 + 0);			\
l0_%=:	r0 = 0;					\
	exit;					\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("socket")
__description("Spill a u32 const, refill from another half of the uninit u32 from the stack")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
__msg_unpriv("invalid read from stack off -4+0 size 4")
__retval(0)
__naked void uninit_u32_from_the_stack(void)
{
	asm volatile ("				\
	w4 = 20;				\
	*(u32*)(r10 - 8) = r4;			\
	/* r4 = *(u32 *)(r10 - 4) fp-8=????rrrr */\
	r4 = *(u32*)(r10 - 4);			\
	r0 = 0;					\
	exit;					\
"	::: __clobber_all);
}

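/* A fill narrower than the spill is not tracked precisely: the u16 fill of
 * the spilled u32 constant only yields umax=65535, which is not enough to
 * prove the packet read safe, so verification fails.
 */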
SEC("tc")
__description("Spill a u32 const scalar. Refill as u16. Offset to skb->data")
__failure __msg("invalid access to packet")
__naked void u16_offset_to_skb_data(void)
{
	asm volatile ("				\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);	\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);\
	w4 = 20;				\
	*(u32*)(r10 - 8) = r4;			\
	r4 = *(u16*)(r10 - 8);			\
	r0 = r2;				\
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
	r0 += r4;				\
	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
	if r0 > r3 goto l0_%=;			\
	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
	r0 = *(u32*)(r2 + 0);			\
l0_%=:	r0 = 0;					\
	exit;					\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

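/* Reading two independent u32 spills back as a single u64 likewise yields
 * an unknown scalar rather than the constant 20, so the packet access
 * cannot be proven safe.
 */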
SEC("tc")
__description("Spill u32 const scalars. Refill as u64. Offset to skb->data")
__failure __msg("invalid access to packet")
__naked void u64_offset_to_skb_data(void)
{
	asm volatile ("				\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);	\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);\
	w6 = 0;					\
	w7 = 20;				\
	*(u32*)(r10 - 4) = r6;			\
	*(u32*)(r10 - 8) = r7;			\
	r4 = *(u64*)(r10 - 8);			\
	r0 = r2;				\
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=unknown */\
	r0 += r4;				\
	/* if (r0 > r3) R0=pkt,unknown R2=pkt R3=pkt_end R4=unknown */\
	if r0 > r3 goto l0_%=;			\
	/* r0 = *(u32 *)r2 R0=pkt,unknown R2=pkt R3=pkt_end R4=unknown */\
	r0 = *(u32*)(r2 + 0);			\
l0_%=:	r0 = 0;					\
	exit;					\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

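/* A u16 fill from fp-6 reads only the upper half of the u32 spilled at
 * fp-8; the verifier cannot connect it to the spilled constant, so the
 * packet access again fails to verify.
 */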
SEC("tc")
__description("Spill a u32 const scalar. Refill as u16 from fp-6. Offset to skb->data")
__failure __msg("invalid access to packet")
__naked void _6_offset_to_skb_data(void)
{
	asm volatile ("				\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);	\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);\
	w4 = 20;				\
	*(u32*)(r10 - 8) = r4;			\
	r4 = *(u16*)(r10 - 6);			\
	r0 = r2;				\
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
	r0 += r4;				\
	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
	if r0 > r3 goto l0_%=;			\
	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
	r0 = *(u32*)(r2 + 0);			\
l0_%=:	r0 = 0;					\
	exit;					\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

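/* Only spills at 8-byte-aligned stack offsets are tracked: the copy written
 * at fp-4 is refilled as an unknown scalar with umax=U32_MAX, so the packet
 * access is rejected.
 */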
SEC("tc")
__description("Spill and refill a u32 const scalar at non 8byte aligned stack addr. Offset to skb->data")
__failure __msg("invalid access to packet")
__naked void addr_offset_to_skb_data(void)
{
	asm volatile ("				\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);	\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);\
	w4 = 20;				\
	*(u32*)(r10 - 8) = r4;			\
	*(u32*)(r10 - 4) = r4;			\
	r4 = *(u32*)(r10 - 4);			\
	r0 = r2;				\
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=U32_MAX */\
	r0 += r4;				\
	/* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4=umax=U32_MAX */\
	if r0 > r3 goto l0_%=;			\
	/* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4=umax=U32_MAX */\
	r0 = *(u32*)(r2 + 0);			\
l0_%=:	r0 = 0;					\
	exit;					\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

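/* Not just constants: range knowledge survives a matching u32 spill/fill.
 * r4 is known to be at most 40 from the tstamp check, the range is kept
 * across the round trip, and the bounds check verifies the packet read.
 */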
SEC("tc")
__description("Spill and refill a umax=40 bounded scalar. Offset to skb->data")
__success __retval(0)
__naked void scalar_offset_to_skb_data_2(void)
{
	asm volatile ("				\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);	\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);\
	r4 = *(u64*)(r1 + %[__sk_buff_tstamp]);	\
	if r4 <= 40 goto l0_%=;			\
	r0 = 0;					\
	exit;					\
l0_%=:	/* *(u32 *)(r10 - 8) = r4 R4=umax=40 */	\
	*(u32*)(r10 - 8) = r4;			\
	/* r4 = *(u32 *)(r10 - 8) */		\
	r4 = *(u32*)(r10 - 8);			\
	/* r2 += r4 R2=pkt R4=umax=40 */	\
	r2 += r4;				\
	/* r0 = r2 R2=pkt,umax=40 R4=umax=40 */	\
	r0 = r2;				\
	/* r2 += 20 R0=pkt,umax=40 R2=pkt,umax=40 */\
	r2 += 20;				\
	/* if (r2 > r3) R0=pkt,umax=40 R2=pkt,off=20,umax=40 */\
	if r2 > r3 goto l1_%=;			\
	/* r0 = *(u32 *)r0 R0=pkt,r=20,umax=40 R2=pkt,off=20,r=20,umax=40 */\
	r0 = *(u32*)(r0 + 0);			\
l1_%=:	r0 = 0;					\
	exit;					\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
	  __imm_const(__sk_buff_tstamp, offsetof(struct __sk_buff, tstamp))
	: __clobber_all);
}

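/* Two u32 spills at fp-4 and fp-8 leave all eight bytes of the slot
 * initialized, so the privileged u64 fill from fp-8 is a valid, if
 * imprecise, stack read.
 */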
SEC("tc")
__description("Spill a u32 scalar at fp-4 and then at fp-8")
__success __retval(0)
__naked void and_then_at_fp_8(void)
{
	asm volatile ("				\
	w4 = 4321;				\
	*(u32*)(r10 - 4) = r4;			\
	*(u32*)(r10 - 8) = r4;			\
	r4 = *(u64*)(r10 - 8);			\
	r0 = 0;					\
	exit;					\
"	::: __clobber_all);
}

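/* The two tests below check that a spill narrower than the register width
 * clears the scalar ID. The fill truncates the value, so if the ID survived,
 * find_equal_scalars() would wrongly copy the bounds learned for r2 in the
 * r2 == 0 branch back to the untruncated r1, hiding its upper bits and
 * letting an out-of-bounds access past the verifier.
 */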
SEC("xdp")
__description("32-bit spill of 64-bit reg should clear ID")
__failure __msg("math between ctx pointer and 4294967295 is not allowed")
__naked void spill_32bit_of_64bit_fail(void)
{
	asm volatile ("				\
	r6 = r1;				\
	/* Roll one bit to force the verifier to track both branches. */\
	call %[bpf_get_prandom_u32];		\
	r0 &= 0x8;				\
	/* Put a large number into r1. */	\
	r1 = 0xffffffff;			\
	r1 <<= 32;				\
	r1 += r0;				\
	/* Assign an ID to r1. */		\
	r2 = r1;				\
	/* 32-bit spill r1 to stack - should clear the ID! */\
	*(u32*)(r10 - 8) = r1;			\
	/* 32-bit fill r2 from stack. */	\
	r2 = *(u32*)(r10 - 8);			\
	/* Compare r2 with another register to trigger find_equal_scalars.\
	 * Having one random bit is important here, otherwise the verifier cuts\
	 * the corners. If the ID was mistakenly preserved on spill, this would\
	 * cause the verifier to think that r1 is also equal to zero in one of\
	 * the branches, and equal to eight on the other branch.\
	 */					\
	r3 = 0;					\
	if r2 != r3 goto l0_%=;			\
l0_%=:	r1 >>= 32;				\
	/* At this point, if the verifier thinks that r1 is 0, an out-of-bounds\
	 * read will happen, because it actually contains 0xffffffff.\
	 */					\
	r6 += r1;				\
	r0 = *(u32*)(r6 + 0);			\
	exit;					\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}

SEC("xdp")
__description("16-bit spill of 32-bit reg should clear ID")
__failure __msg("dereference of modified ctx ptr R6 off=65535 disallowed")
__naked void spill_16bit_of_32bit_fail(void)
{
	asm volatile ("				\
	r6 = r1;				\
	/* Roll one bit to force the verifier to track both branches. */\
	call %[bpf_get_prandom_u32];		\
	r0 &= 0x8;				\
	/* Put a large number into r1. */	\
	w1 = 0xffff0000;			\
	r1 += r0;				\
	/* Assign an ID to r1. */		\
	r2 = r1;				\
	/* 16-bit spill r1 to stack - should clear the ID! */\
	*(u16*)(r10 - 8) = r1;			\
	/* 16-bit fill r2 from stack. */	\
	r2 = *(u16*)(r10 - 8);			\
	/* Compare r2 with another register to trigger find_equal_scalars.\
	 * Having one random bit is important here, otherwise the verifier cuts\
	 * the corners. If the ID was mistakenly preserved on spill, this would\
	 * cause the verifier to think that r1 is also equal to zero in one of\
	 * the branches, and equal to eight on the other branch.\
	 */					\
	r3 = 0;					\
	if r2 != r3 goto l0_%=;			\
l0_%=:	r1 >>= 16;				\
	/* At this point, if the verifier thinks that r1 is 0, an out-of-bounds\
	 * read will happen, because it actually contains 0xffff.\
	 */					\
	r6 += r1;				\
	r0 = *(u32*)(r6 + 0);			\
	exit;					\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";