// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/helper_access_var_len.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

#define MAX_ENTRIES 11
9*b37d776bSEduard Zingerman 
/* Map value layout: 4-byte index + 11 * 4-byte ints = 48 bytes total,
 * which is why the map below is named map_hash_48b and several tests
 * expect "value_size=48" in the verifier log.
 */
struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};
14*b37d776bSEduard Zingerman 
/* Single-entry hash map with 48-byte values (struct test_val); used by the
 * "map, JMP" and "map adjusted, JMP" variable-length access tests.
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, struct test_val);
} map_hash_48b SEC(".maps");
21*b37d776bSEduard Zingerman 
/* Single-entry hash map with 8-byte (long long) values; used by the
 * ARG_PTR_TO_MEM_OR_NULL tests that pass map-value pointers to
 * bpf_csum_diff().
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, long long);
} map_hash_8b SEC(".maps");
28*b37d776bSEduard Zingerman 
/* Ring buffer for the bpf_ringbuf_output() tests; that helper takes an
 * ARG_CONST_SIZE_OR_ZERO argument and is callable in unprivileged mode,
 * which the "zero included" / "no min check" socket tests rely on.
 */
struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} map_ringbuf SEC(".maps");
33*b37d776bSEduard Zingerman 
/* Accept case: size r2 is masked with '&= 64' (upper bound 64) and the
 * 'r4 >= r2' branch skips the helper call when the size is 0, so the call
 * only happens with size in (0, 64].  The 64 bytes at fp[-64] are zeroed
 * first, so the read of initialized stack is valid.
 */
SEC("tracepoint")
__description("helper access to variable memory: stack, bitwise AND + JMP, correct bounds")
__success
__naked void bitwise_and_jmp_correct_bounds(void)
{
	asm volatile ("					\
	r1 = r10;					\
	r1 += -64;					\
	r0 = 0;						\
	*(u64*)(r10 - 64) = r0;				\
	*(u64*)(r10 - 56) = r0;				\
	*(u64*)(r10 - 48) = r0;				\
	*(u64*)(r10 - 40) = r0;				\
	*(u64*)(r10 - 32) = r0;				\
	*(u64*)(r10 - 24) = r0;				\
	*(u64*)(r10 - 16) = r0;				\
	*(u64*)(r10 - 8) = r0;				\
	r2 = 16;					\
	*(u64*)(r1 - 128) = r2;				\
	r2 = *(u64*)(r1 - 128);				\
	r2 &= 64;					\
	r4 = 0;						\
	if r4 >= r2 goto l0_%=;				\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}
65*b37d776bSEduard Zingerman 
/* Size is bounded to [0, 64] by the AND but zero is NOT excluded, and the
 * 64 bytes at fp[-64] are never initialized.  Privileged mode permits the
 * read of uninitialized stack (__success); unprivileged mode must reject
 * it with the "invalid indirect read" message.
 */
SEC("socket")
__description("helper access to variable memory: stack, bitwise AND, zero included")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
__msg_unpriv("invalid indirect read from stack R2 off -64+0 size 64")
__retval(0)
__naked void stack_bitwise_and_zero_included(void)
{
	asm volatile ("					\
	/* set max stack size */			\
	r6 = 0;						\
	*(u64*)(r10 - 128) = r6;			\
	/* set r3 to a random value */			\
	call %[bpf_get_prandom_u32];			\
	r3 = r0;					\
	/* use bitwise AND to limit r3 range to [0, 64] */\
	r3 &= 64;					\
	r1 = %[map_ringbuf] ll;				\
	r2 = r10;					\
	r2 += -64;					\
	r4 = 0;						\
	/* Call bpf_ringbuf_output(), it is one of a few helper functions with\
	 * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.\
	 * For unpriv this should signal an error, because memory at &fp[-64] is\
	 * not initialized.				\
	 */						\
	call %[bpf_ringbuf_output];			\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_ringbuf_output),
	  __imm_addr(map_ringbuf)
	: __clobber_all);
}
100*b37d776bSEduard Zingerman 
/* Reject case: 'r2 &= 65' leaves 65 as a possible size (65 = 64|1, so the
 * mask does not cap it at 64), but only 64 bytes are addressable below
 * r1 = fp - 64 — hence "size=65" in the expected error.
 */
SEC("tracepoint")
__description("helper access to variable memory: stack, bitwise AND + JMP, wrong max")
__failure __msg("invalid indirect access to stack R1 off=-64 size=65")
__naked void bitwise_and_jmp_wrong_max(void)
{
	asm volatile ("					\
	r2 = *(u64*)(r1 + 8);				\
	r1 = r10;					\
	r1 += -64;					\
	*(u64*)(r1 - 128) = r2;				\
	r2 = *(u64*)(r1 - 128);				\
	r2 &= 65;					\
	r4 = 0;						\
	if r4 >= r2 goto l0_%=;				\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}
123*b37d776bSEduard Zingerman 
/* Accept case: same as the AND variant, but the size is bounded by two
 * unsigned conditional jumps instead of a mask — 'r2 > 64' caps the max
 * and 'r4 >= r2' (with r4 = 0) excludes zero before the helper call.
 */
SEC("tracepoint")
__description("helper access to variable memory: stack, JMP, correct bounds")
__success
__naked void memory_stack_jmp_correct_bounds(void)
{
	asm volatile ("					\
	r1 = r10;					\
	r1 += -64;					\
	r0 = 0;						\
	*(u64*)(r10 - 64) = r0;				\
	*(u64*)(r10 - 56) = r0;				\
	*(u64*)(r10 - 48) = r0;				\
	*(u64*)(r10 - 40) = r0;				\
	*(u64*)(r10 - 32) = r0;				\
	*(u64*)(r10 - 24) = r0;				\
	*(u64*)(r10 - 16) = r0;				\
	*(u64*)(r10 - 8) = r0;				\
	r2 = 16;					\
	*(u64*)(r1 - 128) = r2;				\
	r2 = *(u64*)(r1 - 128);				\
	if r2 > 64 goto l0_%=;				\
	r4 = 0;						\
	if r4 >= r2 goto l0_%=;				\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}
155*b37d776bSEduard Zingerman 
/* Accept case: identical to the unsigned-JMP test above, but the bounds are
 * established with signed comparisons ('s>' and 's>='); together they pin
 * r2 to (0, 64] and the helper call is valid.
 */
SEC("tracepoint")
__description("helper access to variable memory: stack, JMP (signed), correct bounds")
__success
__naked void stack_jmp_signed_correct_bounds(void)
{
	asm volatile ("					\
	r1 = r10;					\
	r1 += -64;					\
	r0 = 0;						\
	*(u64*)(r10 - 64) = r0;				\
	*(u64*)(r10 - 56) = r0;				\
	*(u64*)(r10 - 48) = r0;				\
	*(u64*)(r10 - 40) = r0;				\
	*(u64*)(r10 - 32) = r0;				\
	*(u64*)(r10 - 24) = r0;				\
	*(u64*)(r10 - 16) = r0;				\
	*(u64*)(r10 - 8) = r0;				\
	r2 = 16;					\
	*(u64*)(r1 - 128) = r2;				\
	r2 = *(u64*)(r1 - 128);				\
	if r2 s> 64 goto l0_%=;				\
	r4 = 0;						\
	if r4 s>= r2 goto l0_%=;			\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}
187*b37d776bSEduard Zingerman 
/* Reject case: the size is correctly bounded to (0, 64], but 'r2 += 1'
 * after the checks pushes the maximum to 65, exceeding the 64-byte stack
 * buffer — expected error reports size=65.
 */
SEC("tracepoint")
__description("helper access to variable memory: stack, JMP, bounds + offset")
__failure __msg("invalid indirect access to stack R1 off=-64 size=65")
__naked void memory_stack_jmp_bounds_offset(void)
{
	asm volatile ("					\
	r2 = *(u64*)(r1 + 8);				\
	r1 = r10;					\
	r1 += -64;					\
	*(u64*)(r1 - 128) = r2;				\
	r2 = *(u64*)(r1 - 128);				\
	if r2 > 64 goto l0_%=;				\
	r4 = 0;						\
	if r4 >= r2 goto l0_%=;				\
	r2 += 1;					\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}
211*b37d776bSEduard Zingerman 
/* Reject case: the upper-bound check uses 65 instead of 64 ('r2 > 65'),
 * so a size of 65 survives the checks and overruns the 64-byte buffer.
 */
SEC("tracepoint")
__description("helper access to variable memory: stack, JMP, wrong max")
__failure __msg("invalid indirect access to stack R1 off=-64 size=65")
__naked void memory_stack_jmp_wrong_max(void)
{
	asm volatile ("					\
	r2 = *(u64*)(r1 + 8);				\
	r1 = r10;					\
	r1 += -64;					\
	*(u64*)(r1 - 128) = r2;				\
	r2 = *(u64*)(r1 - 128);				\
	if r2 > 65 goto l0_%=;				\
	r4 = 0;						\
	if r4 >= r2 goto l0_%=;				\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}
234*b37d776bSEduard Zingerman 
/* Reject case: only the zero-exclusion check is present; with no upper
 * bound the verifier still considers r2's signed minimum negative, which
 * is not allowed for a variable size argument.
 */
SEC("tracepoint")
__description("helper access to variable memory: stack, JMP, no max check")
__failure
/* because max wasn't checked, signed min is negative */
__msg("R2 min value is negative, either use unsigned or 'var &= const'")
__naked void stack_jmp_no_max_check(void)
{
	asm volatile ("					\
	r2 = *(u64*)(r1 + 8);				\
	r1 = r10;					\
	r1 += -64;					\
	*(u64*)(r1 - 128) = r2;				\
	r2 = *(u64*)(r1 - 128);				\
	r4 = 0;						\
	if r4 >= r2 goto l0_%=;				\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}
258*b37d776bSEduard Zingerman 
/* Only a max check ('r3 > 64') is performed, so size 0 remains possible
 * and fp[-64] is never initialized.  Privileged mode tolerates the
 * uninitialized-stack read (__success); unprivileged must fail with the
 * "invalid indirect read" message.
 */
SEC("socket")
__description("helper access to variable memory: stack, JMP, no min check")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
__msg_unpriv("invalid indirect read from stack R2 off -64+0 size 64")
__retval(0)
__naked void stack_jmp_no_min_check(void)
{
	asm volatile ("					\
	/* set max stack size */			\
	r6 = 0;						\
	*(u64*)(r10 - 128) = r6;			\
	/* set r3 to a random value */			\
	call %[bpf_get_prandom_u32];			\
	r3 = r0;					\
	/* use JMP to limit r3 range to [0, 64] */	\
	if r3 > 64 goto l0_%=;				\
	r1 = %[map_ringbuf] ll;				\
	r2 = r10;					\
	r2 += -64;					\
	r4 = 0;						\
	/* Call bpf_ringbuf_output(), it is one of a few helper functions with\
	 * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.\
	 * For unpriv this should signal an error, because memory at &fp[-64] is\
	 * not initialized.				\
	 */						\
	call %[bpf_ringbuf_output];			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_ringbuf_output),
	  __imm_addr(map_ringbuf)
	: __clobber_all);
}
294*b37d776bSEduard Zingerman 
/* Reject case: only a signed upper bound ('r2 s> 64') is checked; with no
 * lower bound r2's signed minimum stays negative, which the verifier
 * rejects for the size argument.
 */
SEC("tracepoint")
__description("helper access to variable memory: stack, JMP (signed), no min check")
__failure __msg("R2 min value is negative")
__naked void jmp_signed_no_min_check(void)
{
	asm volatile ("					\
	r2 = *(u64*)(r1 + 8);				\
	r1 = r10;					\
	r1 += -64;					\
	*(u64*)(r1 - 128) = r2;				\
	r2 = *(u64*)(r1 - 128);				\
	if r2 s> 64 goto l0_%=;				\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
	r0 = 0;						\
l0_%=:	exit;						\
"	:
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}
315*b37d776bSEduard Zingerman 
/* Accept case: the destination is a 48-byte map value rather than stack.
 * Size r2 is round-tripped through the stack and then bounded with signed
 * jumps to (0, sizeof(struct test_val)], which fits the value exactly.
 */
SEC("tracepoint")
__description("helper access to variable memory: map, JMP, correct bounds")
__success
__naked void memory_map_jmp_correct_bounds(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r2 = %[sizeof_test_val];			\
	*(u64*)(r10 - 128) = r2;			\
	r2 = *(u64*)(r10 - 128);			\
	if r2 s> %[sizeof_test_val] goto l1_%=;		\
	r4 = 0;						\
	if r4 s>= r2 goto l1_%=;			\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l1_%=:	r0 = 0;						\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(sizeof_test_val, sizeof(struct test_val))
	: __clobber_all);
}
347*b37d776bSEduard Zingerman 
/* Reject case: the bound allows sizes up to sizeof(struct test_val) + 1
 * (49 bytes), one byte past the 48-byte map value — expected error is
 * "value_size=48 off=0 size=49".  r6 carries the attacker-controlled size
 * across the map lookup (r1-r5 are clobbered by the call).
 */
SEC("tracepoint")
__description("helper access to variable memory: map, JMP, wrong max")
__failure __msg("invalid access to map value, value_size=48 off=0 size=49")
__naked void memory_map_jmp_wrong_max(void)
{
	asm volatile ("					\
	r6 = *(u64*)(r1 + 8);				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r2 = r6;					\
	*(u64*)(r10 - 128) = r2;			\
	r2 = *(u64*)(r10 - 128);			\
	if r2 s> %[__imm_0] goto l1_%=;			\
	r4 = 0;						\
	if r4 s>= r2 goto l1_%=;			\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l1_%=:	r0 = 0;						\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, sizeof(struct test_val) + 1)
	: __clobber_all);
}
380*b37d776bSEduard Zingerman 
/* Accept case: the map-value pointer is advanced by 20 bytes, so only
 * sizeof(struct test_val) - 20 = 28 bytes remain; the size bound matches
 * that remaining space exactly.
 */
SEC("tracepoint")
__description("helper access to variable memory: map adjusted, JMP, correct bounds")
__success
__naked void map_adjusted_jmp_correct_bounds(void)
{
	asm volatile ("					\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r1 += 20;					\
	r2 = %[sizeof_test_val];			\
	*(u64*)(r10 - 128) = r2;			\
	r2 = *(u64*)(r10 - 128);			\
	if r2 s> %[__imm_0] goto l1_%=;			\
	r4 = 0;						\
	if r4 s>= r2 goto l1_%=;			\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l1_%=:	r0 = 0;						\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, sizeof(struct test_val) - 20),
	  __imm_const(sizeof_test_val, sizeof(struct test_val))
	: __clobber_all);
}
414*b37d776bSEduard Zingerman 
/* Reject case: with the pointer advanced by 20 bytes, only 28 remain, but
 * the bound permits sizeof(struct test_val) - 19 = 29 bytes — one past the
 * end of the value, so R1's range is outside the allowed memory.
 */
SEC("tracepoint")
__description("helper access to variable memory: map adjusted, JMP, wrong max")
__failure __msg("R1 min value is outside of the allowed memory range")
__naked void map_adjusted_jmp_wrong_max(void)
{
	asm volatile ("					\
	r6 = *(u64*)(r1 + 8);				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = 0;						\
	*(u64*)(r2 + 0) = r1;				\
	r1 = %[map_hash_48b] ll;			\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r1 += 20;					\
	r2 = r6;					\
	*(u64*)(r10 - 128) = r2;			\
	r2 = *(u64*)(r10 - 128);			\
	if r2 s> %[__imm_0] goto l1_%=;			\
	r4 = 0;						\
	if r4 s>= r2 goto l1_%=;			\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l1_%=:	r0 = 0;						\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_48b),
	  __imm_const(__imm_0, sizeof(struct test_val) - 19)
	: __clobber_all);
}
448*b37d776bSEduard Zingerman 
/* Accept case: bpf_csum_diff() takes ARG_PTR_TO_MEM_OR_NULL arguments;
 * a NULL pointer is valid when the paired size is a constant 0.
 */
SEC("tc")
__description("helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)")
__success __retval(0)
__naked void ptr_to_mem_or_null_1(void)
{
	asm volatile ("					\
	r1 = 0;						\
	r2 = 0;						\
	r3 = 0;						\
	r4 = 0;						\
	r5 = 0;						\
	call %[bpf_csum_diff];				\
	exit;						\
"	:
	: __imm(bpf_csum_diff)
	: __clobber_all);
}
466*b37d776bSEduard Zingerman 
/* Reject case: r1 is NULL but the size r2 (masked to [0, 64]) may be
 * non-zero, so the verifier demands a real memory pointer — hence
 * "R1 type=scalar expected=fp".
 */
SEC("tc")
__description("helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)")
__failure __msg("R1 type=scalar expected=fp")
__naked void ptr_to_mem_or_null_2(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + 0);				\
	r1 = 0;						\
	*(u64*)(r10 - 128) = r2;			\
	r2 = *(u64*)(r10 - 128);			\
	r2 &= 64;					\
	r3 = 0;						\
	r4 = 0;						\
	r5 = 0;						\
	call %[bpf_csum_diff];				\
	exit;						\
"	:
	: __imm(bpf_csum_diff)
	: __clobber_all);
}
487*b37d776bSEduard Zingerman 
/* Accept case: a valid (non-NULL) stack pointer with size 0 is permitted;
 * r2 is set to 0 and '&= 8' keeps it 0 before the call.
 */
SEC("tc")
__description("helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)")
__success __retval(0)
__naked void ptr_to_mem_or_null_3(void)
{
	asm volatile ("					\
	r1 = r10;					\
	r1 += -8;					\
	r2 = 0;						\
	*(u64*)(r1 + 0) = r2;				\
	r2 &= 8;					\
	r3 = 0;						\
	r4 = 0;						\
	r5 = 0;						\
	call %[bpf_csum_diff];				\
	exit;						\
"	:
	: __imm(bpf_csum_diff)
	: __clobber_all);
}
508*b37d776bSEduard Zingerman 
/* Accept case: a non-NULL map-value pointer (lookup result checked against
 * NULL) with a constant size of 0 is permitted for bpf_csum_diff().
 */
SEC("tc")
__description("helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)")
__success __retval(0)
__naked void ptr_to_mem_or_null_4(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r2 = 0;						\
	r3 = 0;						\
	r4 = 0;						\
	r5 = 0;						\
	call %[bpf_csum_diff];				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_csum_diff),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
535*b37d776bSEduard Zingerman 
/* Accept case: size r2 comes from a map value and is only bounded above
 * ('r2 > 8'), so 0 remains possible — allowed because r1 is a valid
 * initialized stack pointer (the value was just written through r1).
 */
SEC("tc")
__description("helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)")
__success __retval(0)
__naked void ptr_to_mem_or_null_5(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r2 = *(u64*)(r0 + 0);				\
	if r2 > 8 goto l0_%=;				\
	r1 = r10;					\
	r1 += -8;					\
	*(u64*)(r1 + 0) = r2;				\
	r3 = 0;						\
	r4 = 0;						\
	r5 = 0;						\
	call %[bpf_csum_diff];				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_csum_diff),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
565*b37d776bSEduard Zingerman 
/* Accept case: same as the previous test, but the memory argument is the
 * non-NULL map-value pointer itself; a possibly-zero size in [0, 8] is
 * allowed against it.
 */
SEC("tc")
__description("helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)")
__success __retval(0)
__naked void ptr_to_mem_or_null_6(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r2 = *(u64*)(r0 + 0);				\
	if r2 > 8 goto l0_%=;				\
	r3 = 0;						\
	r4 = 0;						\
	r5 = 0;						\
	call %[bpf_csum_diff];				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_csum_diff),
	  __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
593*b37d776bSEduard Zingerman 
/* Variant with a packet pointer: after the data/data_end bounds check
 * guarantees 8 readable bytes, r1 points into the packet (non-NULL) and
 * the size r2 is read from the packet and bounded to [0, 8] — possibly
 * zero.  The verifier must accept this for the ARG_PTR_TO_MEM_OR_NULL
 * "from" argument of bpf_csum_diff().
 */
SEC("tc")
__description("helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)")
__success __retval(0)
/* csum_diff of 64-byte packet */
__flag(BPF_F_ANY_ALIGNMENT)
__naked void ptr_to_mem_or_null_7(void)
{
	asm volatile ("					\
	r6 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r0 = r6;					\
	r0 += 8;					\
	if r0 > r3 goto l0_%=;				\
	r1 = r6;					\
	r2 = *(u64*)(r6 + 0);				\
	if r2 > 8 goto l0_%=;				\
	r3 = 0;						\
	r4 = 0;						\
	r5 = 0;						\
	call %[bpf_csum_diff];				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_csum_diff),
	  __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}
621*b37d776bSEduard Zingerman 
/* Negative test: bpf_probe_read_kernel()'s dst is NOT an OR_NULL argument,
 * so passing r1 = 0 (a plain scalar, i.e. NULL) must be rejected even when
 * the size is 0.  Expected verifier error: "R1 type=scalar expected=fp".
 */
SEC("tracepoint")
__description("helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)")
__failure __msg("R1 type=scalar expected=fp")
__naked void ptr_to_mem_or_null_8(void)
{
	asm volatile ("					\
	r1 = 0;						\
	r2 = 0;						\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
	exit;						\
"	:
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}
637*b37d776bSEduard Zingerman 
/* Same as ptr_to_mem_or_null_8 but with a non-zero size (r2 = 1): a NULL
 * destination for a !ARG_PTR_TO_MEM_OR_NULL argument is rejected regardless
 * of size.  Expected verifier error: "R1 type=scalar expected=fp".
 */
SEC("tracepoint")
__description("helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)")
__failure __msg("R1 type=scalar expected=fp")
__naked void ptr_to_mem_or_null_9(void)
{
	asm volatile ("					\
	r1 = 0;						\
	r2 = 1;						\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
	exit;						\
"	:
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}
653*b37d776bSEduard Zingerman 
/* Positive counterpart: a constant size of 0 paired with a valid stack
 * pointer (r1 = fp-8) is accepted for bpf_probe_read_kernel() even though
 * the argument type is not OR_NULL — zero size means no memory is touched.
 */
SEC("tracepoint")
__description("helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)")
__success
__naked void ptr_to_mem_or_null_10(void)
{
	asm volatile ("					\
	r1 = r10;					\
	r1 += -8;					\
	r2 = 0;						\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
	exit;						\
"	:
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}
670*b37d776bSEduard Zingerman 
/* Constant size 0 with a non-NULL map value pointer (r1 = r0 inside the
 * lookup-succeeded branch) is accepted for bpf_probe_read_kernel(), the
 * map-pointer analogue of ptr_to_mem_or_null_10.
 */
SEC("tracepoint")
__description("helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)")
__success
__naked void ptr_to_mem_or_null_11(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r2 = 0;						\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
695*b37d776bSEduard Zingerman 
/* Variable size that may be zero: r2 is loaded from the map value and only
 * bounded to [0, 8].  Paired with a known non-NULL stack pointer (r1 =
 * fp-8, whose 8 bytes were zero-initialized at the top), the verifier must
 * accept the bpf_probe_read_kernel() call even though the argument is not
 * OR_NULL — ARG_CONST_SIZE_OR_ZERO permits the zero case.
 */
SEC("tracepoint")
__description("helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)")
__success
__naked void ptr_to_mem_or_null_12(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r2 = *(u64*)(r0 + 0);				\
	if r2 > 8 goto l0_%=;				\
	r1 = r10;					\
	r1 += -8;					\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
722*b37d776bSEduard Zingerman 
/* Same as ptr_to_mem_or_null_12 but the destination is the map value itself
 * (r1 = r0): a possibly-zero size in [0, 8] with a provably non-NULL map
 * pointer must be accepted for bpf_probe_read_kernel().
 */
SEC("tracepoint")
__description("helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)")
__success
__naked void ptr_to_mem_or_null_13(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	r1 = r0;					\
	r2 = *(u64*)(r0 + 0);				\
	if r2 > 8 goto l0_%=;				\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_probe_read_kernel),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
748*b37d776bSEduard Zingerman 
/* Stack-leak test: fp[-64..-1] is initialized except for the 8 bytes at
 * fp[-32].  bpf_ringbuf_output() is then called with a variable size in
 * [1, 64] starting at fp[-64], which can cover the uninitialized hole.
 * Privileged mode permits reading uninitialized stack, so the program
 * loads (__success); unprivileged mode must reject it with the
 * "invalid indirect read" message checked by __msg_unpriv.
 */
SEC("socket")
__description("helper access to variable memory: 8 bytes leak")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
__msg_unpriv("invalid indirect read from stack R2 off -64+32 size 64")
__retval(0)
__naked void variable_memory_8_bytes_leak(void)
{
	asm volatile ("					\
	/* set max stack size */			\
	r6 = 0;						\
	*(u64*)(r10 - 128) = r6;			\
	/* set r3 to a random value */			\
	call %[bpf_get_prandom_u32];			\
	r3 = r0;					\
	r1 = %[map_ringbuf] ll;				\
	r2 = r10;					\
	r2 += -64;					\
	r0 = 0;						\
	*(u64*)(r10 - 64) = r0;				\
	*(u64*)(r10 - 56) = r0;				\
	*(u64*)(r10 - 48) = r0;				\
	*(u64*)(r10 - 40) = r0;				\
	/* Note: fp[-32] left uninitialized */		\
	*(u64*)(r10 - 24) = r0;				\
	*(u64*)(r10 - 16) = r0;				\
	*(u64*)(r10 - 8) = r0;				\
	/* Limit r3 range to [1, 64] */			\
	r3 &= 63;					\
	r3 += 1;					\
	r4 = 0;						\
	/* Call bpf_ringbuf_output(), it is one of a few helper functions with\
	 * ARG_CONST_SIZE_OR_ZERO parameter allowed in unpriv mode.\
	 * For unpriv this should signal an error, because memory region [1, 64]\
	 * at &fp[-64] is not fully initialized.	\
	 */						\
	call %[bpf_ringbuf_output];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_ringbuf_output),
	  __imm_addr(map_ringbuf)
	: __clobber_all);
}
794*b37d776bSEduard Zingerman 
/* No-leak counterpart: all 64 bytes at fp[-64..-1] are zero-initialized
 * before bpf_probe_read_kernel() overwrites 32 of them (r2 is masked to
 * {0, 32} then offset by 32, yielding exactly 32), so the trailing read of
 * fp[-16] never observes uninitialized stack and the program is accepted.
 * NOTE(review): the duplicated "r0 = 0;" mirrors the original hand-written
 * BPF instruction stream this test was converted from; it is kept verbatim
 * so the verified program is unchanged.
 */
SEC("tracepoint")
__description("helper access to variable memory: 8 bytes no leak (init memory)")
__success
__naked void bytes_no_leak_init_memory(void)
{
	asm volatile ("					\
	r1 = r10;					\
	r0 = 0;						\
	r0 = 0;						\
	*(u64*)(r10 - 64) = r0;				\
	*(u64*)(r10 - 56) = r0;				\
	*(u64*)(r10 - 48) = r0;				\
	*(u64*)(r10 - 40) = r0;				\
	*(u64*)(r10 - 32) = r0;				\
	*(u64*)(r10 - 24) = r0;				\
	*(u64*)(r10 - 16) = r0;				\
	*(u64*)(r10 - 8) = r0;				\
	r1 += -64;					\
	r2 = 0;						\
	r2 &= 32;					\
	r2 += 32;					\
	r3 = 0;						\
	call %[bpf_probe_read_kernel];			\
	r1 = *(u64*)(r10 - 16);				\
	exit;						\
"	:
	: __imm(bpf_probe_read_kernel)
	: __clobber_all);
}
824*b37d776bSEduard Zingerman 
825*b37d776bSEduard Zingerman char _license[] SEC("license") = "GPL";
826