// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/ringbuf.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

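/* 4 KiB ring buffer map shared by all tests below. */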
struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} map_ringbuf SEC(".maps");

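/*
 * The store through R7 at zero offset is fine, but adding 0xcafe to R1
 * right before bpf_ringbuf_submit() must be rejected: release helpers
 * require the original, zero-offset reservation pointer.
 */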
SEC("socket")
__description("ringbuf: invalid reservation offset 1")
__failure __msg("R1 must have zero offset when passed to release func")
__failure_unpriv
__naked void ringbuf_invalid_reservation_offset_1(void)
{
	asm volatile ("					\
	/* reserve 8 byte ringbuf memory */		\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r1 = %[map_ringbuf] ll;				\
	r2 = 8;						\
	r3 = 0;						\
	call %[bpf_ringbuf_reserve];			\
	/* store a pointer to the reserved memory in R6 */\
	r6 = r0;					\
	/* check whether the reservation was successful */\
	if r0 == 0 goto l0_%=;				\
	/* spill R6(mem) into the stack */		\
	*(u64*)(r10 - 8) = r6;				\
	/* fill it back in R7 */			\
	r7 = *(u64*)(r10 - 8);				\
	/* should be able to access *(R7) = 0 */	\
	r1 = 0;						\
	*(u64*)(r7 + 0) = r1;				\
	/* submit the reserved ringbuf memory */	\
	r1 = r7;					\
	/* add invalid offset to reserved ringbuf memory */\
	r1 += 0xcafe;					\
	r2 = 0;						\
	call %[bpf_ringbuf_submit];			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_ringbuf_reserve),
	  __imm(bpf_ringbuf_submit),
	  __imm_addr(map_ringbuf)
	: __clobber_all);
}

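/*
 * Here the offset is added to R7 before the 8-byte reservation is
 * written to, so the verifier rejects the store itself as an
 * out-of-range memory access rather than the later submit call.
 */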
SEC("socket")
__description("ringbuf: invalid reservation offset 2")
__failure __msg("R7 min value is outside of the allowed memory range")
__failure_unpriv
__naked void ringbuf_invalid_reservation_offset_2(void)
{
	asm volatile ("					\
	/* reserve 8 byte ringbuf memory */		\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r1 = %[map_ringbuf] ll;				\
	r2 = 8;						\
	r3 = 0;						\
	call %[bpf_ringbuf_reserve];			\
	/* store a pointer to the reserved memory in R6 */\
	r6 = r0;					\
	/* check whether the reservation was successful */\
	if r0 == 0 goto l0_%=;				\
	/* spill R6(mem) into the stack */		\
	*(u64*)(r10 - 8) = r6;				\
	/* fill it back in R7 */			\
	r7 = *(u64*)(r10 - 8);				\
	/* add invalid offset to reserved ringbuf memory */\
	r7 += 0xcafe;					\
	/* should be able to access *(R7) = 0 */	\
	r1 = 0;						\
	*(u64*)(r7 + 0) = r1;				\
	/* submit the reserved ringbuf memory */	\
	r1 = r7;					\
	r2 = 0;						\
	call %[bpf_ringbuf_submit];			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_ringbuf_reserve),
	  __imm(bpf_ringbuf_submit),
	  __imm_addr(map_ringbuf)
	: __clobber_all);
}

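/*
 * Reserved ring buffer memory is valid helper memory: the 8-byte
 * reservation is passed to bpf_fib_lookup() as its params buffer
 * before being submitted.
 */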
SEC("xdp")
__description("ringbuf: check passing rb mem to helpers")
__success __retval(0)
__naked void passing_rb_mem_to_helpers(void)
{
	asm volatile ("					\
	r6 = r1;					\
	/* reserve 8 byte ringbuf memory */		\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r1 = %[map_ringbuf] ll;				\
	r2 = 8;						\
	r3 = 0;						\
	call %[bpf_ringbuf_reserve];			\
	r7 = r0;					\
	/* check whether the reservation was successful */\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	/* pass allocated ring buffer memory to fib lookup */\
	r1 = r6;					\
	r2 = r0;					\
	r3 = 8;						\
	r4 = 0;						\
	call %[bpf_fib_lookup];				\
	/* submit the ringbuf memory */			\
	r1 = r7;					\
	r2 = 0;						\
	call %[bpf_ringbuf_submit];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_fib_lookup),
	  __imm(bpf_ringbuf_reserve),
	  __imm(bpf_ringbuf_submit),
	  __imm_addr(map_ringbuf)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";