// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/search_pruning.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

#define MAX_ENTRIES 11
10*034d9ad2SEduard Zingerman struct test_val {
11*034d9ad2SEduard Zingerman 	unsigned int index;
12*034d9ad2SEduard Zingerman 	int foo[MAX_ENTRIES];
13*034d9ad2SEduard Zingerman };
14*034d9ad2SEduard Zingerman 
15*034d9ad2SEduard Zingerman struct {
16*034d9ad2SEduard Zingerman 	__uint(type, BPF_MAP_TYPE_HASH);
17*034d9ad2SEduard Zingerman 	__uint(max_entries, 1);
18*034d9ad2SEduard Zingerman 	__type(key, long long);
19*034d9ad2SEduard Zingerman 	__type(value, struct test_val);
20*034d9ad2SEduard Zingerman } map_hash_48b SEC(".maps");
21*034d9ad2SEduard Zingerman 
22*034d9ad2SEduard Zingerman struct {
23*034d9ad2SEduard Zingerman 	__uint(type, BPF_MAP_TYPE_HASH);
24*034d9ad2SEduard Zingerman 	__uint(max_entries, 1);
25*034d9ad2SEduard Zingerman 	__type(key, long long);
26*034d9ad2SEduard Zingerman 	__type(value, long long);
27*034d9ad2SEduard Zingerman } map_hash_8b SEC(".maps");
28*034d9ad2SEduard Zingerman 
29*034d9ad2SEduard Zingerman SEC("socket")
30*034d9ad2SEduard Zingerman __description("pointer/scalar confusion in state equality check (way 1)")
31*034d9ad2SEduard Zingerman __success __failure_unpriv __msg_unpriv("R0 leaks addr as return value")
__retval(POINTER_VALUE)32*034d9ad2SEduard Zingerman __retval(POINTER_VALUE)
33*034d9ad2SEduard Zingerman __naked void state_equality_check_way_1(void)
34*034d9ad2SEduard Zingerman {
35*034d9ad2SEduard Zingerman 	asm volatile ("					\
36*034d9ad2SEduard Zingerman 	r1 = 0;						\
37*034d9ad2SEduard Zingerman 	*(u64*)(r10 - 8) = r1;				\
38*034d9ad2SEduard Zingerman 	r2 = r10;					\
39*034d9ad2SEduard Zingerman 	r2 += -8;					\
40*034d9ad2SEduard Zingerman 	r1 = %[map_hash_8b] ll;				\
41*034d9ad2SEduard Zingerman 	call %[bpf_map_lookup_elem];			\
42*034d9ad2SEduard Zingerman 	if r0 == 0 goto l0_%=;				\
43*034d9ad2SEduard Zingerman 	r0 = *(u64*)(r0 + 0);				\
44*034d9ad2SEduard Zingerman 	goto l1_%=;					\
45*034d9ad2SEduard Zingerman l0_%=:	r0 = r10;					\
46*034d9ad2SEduard Zingerman l1_%=:	goto l2_%=;					\
47*034d9ad2SEduard Zingerman l2_%=:	exit;						\
48*034d9ad2SEduard Zingerman "	:
49*034d9ad2SEduard Zingerman 	: __imm(bpf_map_lookup_elem),
50*034d9ad2SEduard Zingerman 	  __imm_addr(map_hash_8b)
51*034d9ad2SEduard Zingerman 	: __clobber_all);
52*034d9ad2SEduard Zingerman }
53*034d9ad2SEduard Zingerman 
54*034d9ad2SEduard Zingerman SEC("socket")
55*034d9ad2SEduard Zingerman __description("pointer/scalar confusion in state equality check (way 2)")
56*034d9ad2SEduard Zingerman __success __failure_unpriv __msg_unpriv("R0 leaks addr as return value")
__retval(POINTER_VALUE)57*034d9ad2SEduard Zingerman __retval(POINTER_VALUE)
58*034d9ad2SEduard Zingerman __naked void state_equality_check_way_2(void)
59*034d9ad2SEduard Zingerman {
60*034d9ad2SEduard Zingerman 	asm volatile ("					\
61*034d9ad2SEduard Zingerman 	r1 = 0;						\
62*034d9ad2SEduard Zingerman 	*(u64*)(r10 - 8) = r1;				\
63*034d9ad2SEduard Zingerman 	r2 = r10;					\
64*034d9ad2SEduard Zingerman 	r2 += -8;					\
65*034d9ad2SEduard Zingerman 	r1 = %[map_hash_8b] ll;				\
66*034d9ad2SEduard Zingerman 	call %[bpf_map_lookup_elem];			\
67*034d9ad2SEduard Zingerman 	if r0 != 0 goto l0_%=;				\
68*034d9ad2SEduard Zingerman 	r0 = r10;					\
69*034d9ad2SEduard Zingerman 	goto l1_%=;					\
70*034d9ad2SEduard Zingerman l0_%=:	r0 = *(u64*)(r0 + 0);				\
71*034d9ad2SEduard Zingerman l1_%=:	exit;						\
72*034d9ad2SEduard Zingerman "	:
73*034d9ad2SEduard Zingerman 	: __imm(bpf_map_lookup_elem),
74*034d9ad2SEduard Zingerman 	  __imm_addr(map_hash_8b)
75*034d9ad2SEduard Zingerman 	: __clobber_all);
76*034d9ad2SEduard Zingerman }
77*034d9ad2SEduard Zingerman 
78*034d9ad2SEduard Zingerman SEC("lwt_in")
79*034d9ad2SEduard Zingerman __description("liveness pruning and write screening")
80*034d9ad2SEduard Zingerman __failure __msg("R0 !read_ok")
liveness_pruning_and_write_screening(void)81*034d9ad2SEduard Zingerman __naked void liveness_pruning_and_write_screening(void)
82*034d9ad2SEduard Zingerman {
83*034d9ad2SEduard Zingerman 	asm volatile ("					\
84*034d9ad2SEduard Zingerman 	/* Get an unknown value */			\
85*034d9ad2SEduard Zingerman 	r2 = *(u32*)(r1 + 0);				\
86*034d9ad2SEduard Zingerman 	/* branch conditions teach us nothing about R2 */\
87*034d9ad2SEduard Zingerman 	if r2 >= 0 goto l0_%=;				\
88*034d9ad2SEduard Zingerman 	r0 = 0;						\
89*034d9ad2SEduard Zingerman l0_%=:	if r2 >= 0 goto l1_%=;				\
90*034d9ad2SEduard Zingerman 	r0 = 0;						\
91*034d9ad2SEduard Zingerman l1_%=:	exit;						\
92*034d9ad2SEduard Zingerman "	::: __clobber_all);
93*034d9ad2SEduard Zingerman }
94*034d9ad2SEduard Zingerman 
95*034d9ad2SEduard Zingerman SEC("socket")
96*034d9ad2SEduard Zingerman __description("varlen_map_value_access pruning")
97*034d9ad2SEduard Zingerman __failure __msg("R0 unbounded memory access")
98*034d9ad2SEduard Zingerman __failure_unpriv __msg_unpriv("R0 leaks addr")
__flag(BPF_F_ANY_ALIGNMENT)99*034d9ad2SEduard Zingerman __flag(BPF_F_ANY_ALIGNMENT)
100*034d9ad2SEduard Zingerman __naked void varlen_map_value_access_pruning(void)
101*034d9ad2SEduard Zingerman {
102*034d9ad2SEduard Zingerman 	asm volatile ("					\
103*034d9ad2SEduard Zingerman 	r1 = 0;						\
104*034d9ad2SEduard Zingerman 	*(u64*)(r10 - 8) = r1;				\
105*034d9ad2SEduard Zingerman 	r2 = r10;					\
106*034d9ad2SEduard Zingerman 	r2 += -8;					\
107*034d9ad2SEduard Zingerman 	r1 = %[map_hash_48b] ll;			\
108*034d9ad2SEduard Zingerman 	call %[bpf_map_lookup_elem];			\
109*034d9ad2SEduard Zingerman 	if r0 == 0 goto l0_%=;				\
110*034d9ad2SEduard Zingerman 	r1 = *(u64*)(r0 + 0);				\
111*034d9ad2SEduard Zingerman 	w2 = %[max_entries];				\
112*034d9ad2SEduard Zingerman 	if r2 s> r1 goto l1_%=;				\
113*034d9ad2SEduard Zingerman 	w1 = 0;						\
114*034d9ad2SEduard Zingerman l1_%=:	w1 <<= 2;					\
115*034d9ad2SEduard Zingerman 	r0 += r1;					\
116*034d9ad2SEduard Zingerman 	goto l2_%=;					\
117*034d9ad2SEduard Zingerman l2_%=:	r1 = %[test_val_foo];				\
118*034d9ad2SEduard Zingerman 	*(u64*)(r0 + 0) = r1;				\
119*034d9ad2SEduard Zingerman l0_%=:	exit;						\
120*034d9ad2SEduard Zingerman "	:
121*034d9ad2SEduard Zingerman 	: __imm(bpf_map_lookup_elem),
122*034d9ad2SEduard Zingerman 	  __imm_addr(map_hash_48b),
123*034d9ad2SEduard Zingerman 	  __imm_const(max_entries, MAX_ENTRIES),
124*034d9ad2SEduard Zingerman 	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
125*034d9ad2SEduard Zingerman 	: __clobber_all);
126*034d9ad2SEduard Zingerman }
127*034d9ad2SEduard Zingerman 
128*034d9ad2SEduard Zingerman SEC("tracepoint")
129*034d9ad2SEduard Zingerman __description("search pruning: all branches should be verified (nop operation)")
130*034d9ad2SEduard Zingerman __failure __msg("R6 invalid mem access 'scalar'")
should_be_verified_nop_operation(void)131*034d9ad2SEduard Zingerman __naked void should_be_verified_nop_operation(void)
132*034d9ad2SEduard Zingerman {
133*034d9ad2SEduard Zingerman 	asm volatile ("					\
134*034d9ad2SEduard Zingerman 	r2 = r10;					\
135*034d9ad2SEduard Zingerman 	r2 += -8;					\
136*034d9ad2SEduard Zingerman 	r1 = 0;						\
137*034d9ad2SEduard Zingerman 	*(u64*)(r2 + 0) = r1;				\
138*034d9ad2SEduard Zingerman 	r1 = %[map_hash_8b] ll;				\
139*034d9ad2SEduard Zingerman 	call %[bpf_map_lookup_elem];			\
140*034d9ad2SEduard Zingerman 	if r0 == 0 goto l0_%=;				\
141*034d9ad2SEduard Zingerman 	r3 = *(u64*)(r0 + 0);				\
142*034d9ad2SEduard Zingerman 	if r3 == 0xbeef goto l1_%=;			\
143*034d9ad2SEduard Zingerman 	r4 = 0;						\
144*034d9ad2SEduard Zingerman 	goto l2_%=;					\
145*034d9ad2SEduard Zingerman l1_%=:	r4 = 1;						\
146*034d9ad2SEduard Zingerman l2_%=:	*(u64*)(r10 - 16) = r4;				\
147*034d9ad2SEduard Zingerman 	call %[bpf_ktime_get_ns];			\
148*034d9ad2SEduard Zingerman 	r5 = *(u64*)(r10 - 16);				\
149*034d9ad2SEduard Zingerman 	if r5 == 0 goto l0_%=;				\
150*034d9ad2SEduard Zingerman 	r6 = 0;						\
151*034d9ad2SEduard Zingerman 	r1 = 0xdead;					\
152*034d9ad2SEduard Zingerman 	*(u64*)(r6 + 0) = r1;				\
153*034d9ad2SEduard Zingerman l0_%=:	exit;						\
154*034d9ad2SEduard Zingerman "	:
155*034d9ad2SEduard Zingerman 	: __imm(bpf_ktime_get_ns),
156*034d9ad2SEduard Zingerman 	  __imm(bpf_map_lookup_elem),
157*034d9ad2SEduard Zingerman 	  __imm_addr(map_hash_8b)
158*034d9ad2SEduard Zingerman 	: __clobber_all);
159*034d9ad2SEduard Zingerman }
160*034d9ad2SEduard Zingerman 
161*034d9ad2SEduard Zingerman SEC("socket")
162*034d9ad2SEduard Zingerman __description("search pruning: all branches should be verified (invalid stack access)")
163*034d9ad2SEduard Zingerman /* in privileged mode reads from uninitialized stack locations are permitted */
164*034d9ad2SEduard Zingerman __success __failure_unpriv
165*034d9ad2SEduard Zingerman __msg_unpriv("invalid read from stack off -16+0 size 8")
166*034d9ad2SEduard Zingerman __retval(0)
be_verified_invalid_stack_access(void)167*034d9ad2SEduard Zingerman __naked void be_verified_invalid_stack_access(void)
168*034d9ad2SEduard Zingerman {
169*034d9ad2SEduard Zingerman 	asm volatile ("					\
170*034d9ad2SEduard Zingerman 	r2 = r10;					\
171*034d9ad2SEduard Zingerman 	r2 += -8;					\
172*034d9ad2SEduard Zingerman 	r1 = 0;						\
173*034d9ad2SEduard Zingerman 	*(u64*)(r2 + 0) = r1;				\
174*034d9ad2SEduard Zingerman 	r1 = %[map_hash_8b] ll;				\
175*034d9ad2SEduard Zingerman 	call %[bpf_map_lookup_elem];			\
176*034d9ad2SEduard Zingerman 	if r0 == 0 goto l0_%=;				\
177*034d9ad2SEduard Zingerman 	r3 = *(u64*)(r0 + 0);				\
178*034d9ad2SEduard Zingerman 	r4 = 0;						\
179*034d9ad2SEduard Zingerman 	if r3 == 0xbeef goto l1_%=;			\
180*034d9ad2SEduard Zingerman 	*(u64*)(r10 - 16) = r4;				\
181*034d9ad2SEduard Zingerman 	goto l2_%=;					\
182*034d9ad2SEduard Zingerman l1_%=:	*(u64*)(r10 - 24) = r4;				\
183*034d9ad2SEduard Zingerman l2_%=:	call %[bpf_ktime_get_ns];			\
184*034d9ad2SEduard Zingerman 	r5 = *(u64*)(r10 - 16);				\
185*034d9ad2SEduard Zingerman l0_%=:	exit;						\
186*034d9ad2SEduard Zingerman "	:
187*034d9ad2SEduard Zingerman 	: __imm(bpf_ktime_get_ns),
188*034d9ad2SEduard Zingerman 	  __imm(bpf_map_lookup_elem),
189*034d9ad2SEduard Zingerman 	  __imm_addr(map_hash_8b)
190*034d9ad2SEduard Zingerman 	: __clobber_all);
191*034d9ad2SEduard Zingerman }
192*034d9ad2SEduard Zingerman 
193*034d9ad2SEduard Zingerman SEC("tracepoint")
194*034d9ad2SEduard Zingerman __description("precision tracking for u32 spill/fill")
195*034d9ad2SEduard Zingerman __failure __msg("R0 min value is outside of the allowed memory range")
tracking_for_u32_spill_fill(void)196*034d9ad2SEduard Zingerman __naked void tracking_for_u32_spill_fill(void)
197*034d9ad2SEduard Zingerman {
198*034d9ad2SEduard Zingerman 	asm volatile ("					\
199*034d9ad2SEduard Zingerman 	r7 = r1;					\
200*034d9ad2SEduard Zingerman 	call %[bpf_get_prandom_u32];			\
201*034d9ad2SEduard Zingerman 	w6 = 32;					\
202*034d9ad2SEduard Zingerman 	if r0 == 0 goto l0_%=;				\
203*034d9ad2SEduard Zingerman 	w6 = 4;						\
204*034d9ad2SEduard Zingerman l0_%=:	/* Additional insns to introduce a pruning point. */\
205*034d9ad2SEduard Zingerman 	call %[bpf_get_prandom_u32];			\
206*034d9ad2SEduard Zingerman 	r3 = 0;						\
207*034d9ad2SEduard Zingerman 	r3 = 0;						\
208*034d9ad2SEduard Zingerman 	if r0 == 0 goto l1_%=;				\
209*034d9ad2SEduard Zingerman 	r3 = 0;						\
210*034d9ad2SEduard Zingerman l1_%=:	/* u32 spill/fill */				\
211*034d9ad2SEduard Zingerman 	*(u32*)(r10 - 8) = r6;				\
212*034d9ad2SEduard Zingerman 	r8 = *(u32*)(r10 - 8);				\
213*034d9ad2SEduard Zingerman 	/* out-of-bound map value access for r6=32 */	\
214*034d9ad2SEduard Zingerman 	r1 = 0;						\
215*034d9ad2SEduard Zingerman 	*(u64*)(r10 - 16) = r1;				\
216*034d9ad2SEduard Zingerman 	r2 = r10;					\
217*034d9ad2SEduard Zingerman 	r2 += -16;					\
218*034d9ad2SEduard Zingerman 	r1 = %[map_hash_8b] ll;				\
219*034d9ad2SEduard Zingerman 	call %[bpf_map_lookup_elem];			\
220*034d9ad2SEduard Zingerman 	if r0 == 0 goto l2_%=;				\
221*034d9ad2SEduard Zingerman 	r0 += r8;					\
222*034d9ad2SEduard Zingerman 	r1 = *(u32*)(r0 + 0);				\
223*034d9ad2SEduard Zingerman l2_%=:	r0 = 0;						\
224*034d9ad2SEduard Zingerman 	exit;						\
225*034d9ad2SEduard Zingerman "	:
226*034d9ad2SEduard Zingerman 	: __imm(bpf_get_prandom_u32),
227*034d9ad2SEduard Zingerman 	  __imm(bpf_map_lookup_elem),
228*034d9ad2SEduard Zingerman 	  __imm_addr(map_hash_8b)
229*034d9ad2SEduard Zingerman 	: __clobber_all);
230*034d9ad2SEduard Zingerman }
231*034d9ad2SEduard Zingerman 
232*034d9ad2SEduard Zingerman SEC("tracepoint")
233*034d9ad2SEduard Zingerman __description("precision tracking for u32 spills, u64 fill")
234*034d9ad2SEduard Zingerman __failure __msg("div by zero")
for_u32_spills_u64_fill(void)235*034d9ad2SEduard Zingerman __naked void for_u32_spills_u64_fill(void)
236*034d9ad2SEduard Zingerman {
237*034d9ad2SEduard Zingerman 	asm volatile ("					\
238*034d9ad2SEduard Zingerman 	call %[bpf_get_prandom_u32];			\
239*034d9ad2SEduard Zingerman 	r6 = r0;					\
240*034d9ad2SEduard Zingerman 	w7 = 0xffffffff;				\
241*034d9ad2SEduard Zingerman 	/* Additional insns to introduce a pruning point. */\
242*034d9ad2SEduard Zingerman 	r3 = 1;						\
243*034d9ad2SEduard Zingerman 	r3 = 1;						\
244*034d9ad2SEduard Zingerman 	r3 = 1;						\
245*034d9ad2SEduard Zingerman 	r3 = 1;						\
246*034d9ad2SEduard Zingerman 	call %[bpf_get_prandom_u32];			\
247*034d9ad2SEduard Zingerman 	if r0 == 0 goto l0_%=;				\
248*034d9ad2SEduard Zingerman 	r3 = 1;						\
249*034d9ad2SEduard Zingerman l0_%=:	w3 /= 0;					\
250*034d9ad2SEduard Zingerman 	/* u32 spills, u64 fill */			\
251*034d9ad2SEduard Zingerman 	*(u32*)(r10 - 4) = r6;				\
252*034d9ad2SEduard Zingerman 	*(u32*)(r10 - 8) = r7;				\
253*034d9ad2SEduard Zingerman 	r8 = *(u64*)(r10 - 8);				\
254*034d9ad2SEduard Zingerman 	/* if r8 != X goto pc+1  r8 known in fallthrough branch */\
255*034d9ad2SEduard Zingerman 	if r8 != 0xffffffff goto l1_%=;			\
256*034d9ad2SEduard Zingerman 	r3 = 1;						\
257*034d9ad2SEduard Zingerman l1_%=:	/* if r8 == X goto pc+1  condition always true on first\
258*034d9ad2SEduard Zingerman 	 * traversal, so starts backtracking to mark r8 as requiring\
259*034d9ad2SEduard Zingerman 	 * precision. r7 marked as needing precision. r6 not marked\
260*034d9ad2SEduard Zingerman 	 * since it's not tracked.			\
261*034d9ad2SEduard Zingerman 	 */						\
262*034d9ad2SEduard Zingerman 	if r8 == 0xffffffff goto l2_%=;			\
263*034d9ad2SEduard Zingerman 	/* fails if r8 correctly marked unknown after fill. */\
264*034d9ad2SEduard Zingerman 	w3 /= 0;					\
265*034d9ad2SEduard Zingerman l2_%=:	r0 = 0;						\
266*034d9ad2SEduard Zingerman 	exit;						\
267*034d9ad2SEduard Zingerman "	:
268*034d9ad2SEduard Zingerman 	: __imm(bpf_get_prandom_u32)
269*034d9ad2SEduard Zingerman 	: __clobber_all);
270*034d9ad2SEduard Zingerman }
271*034d9ad2SEduard Zingerman 
272*034d9ad2SEduard Zingerman SEC("socket")
273*034d9ad2SEduard Zingerman __description("allocated_stack")
274*034d9ad2SEduard Zingerman __success __msg("processed 15 insns")
275*034d9ad2SEduard Zingerman __success_unpriv __msg_unpriv("") __log_level(1) __retval(0)
allocated_stack(void)276*034d9ad2SEduard Zingerman __naked void allocated_stack(void)
277*034d9ad2SEduard Zingerman {
278*034d9ad2SEduard Zingerman 	asm volatile ("					\
279*034d9ad2SEduard Zingerman 	r6 = r1;					\
280*034d9ad2SEduard Zingerman 	call %[bpf_get_prandom_u32];			\
281*034d9ad2SEduard Zingerman 	r7 = r0;					\
282*034d9ad2SEduard Zingerman 	if r0 == 0 goto l0_%=;				\
283*034d9ad2SEduard Zingerman 	r0 = 0;						\
284*034d9ad2SEduard Zingerman 	*(u64*)(r10 - 8) = r6;				\
285*034d9ad2SEduard Zingerman 	r6 = *(u64*)(r10 - 8);				\
286*034d9ad2SEduard Zingerman 	*(u8*)(r10 - 9) = r7;				\
287*034d9ad2SEduard Zingerman 	r7 = *(u8*)(r10 - 9);				\
288*034d9ad2SEduard Zingerman l0_%=:	if r0 != 0 goto l1_%=;				\
289*034d9ad2SEduard Zingerman l1_%=:	if r0 != 0 goto l2_%=;				\
290*034d9ad2SEduard Zingerman l2_%=:	if r0 != 0 goto l3_%=;				\
291*034d9ad2SEduard Zingerman l3_%=:	if r0 != 0 goto l4_%=;				\
292*034d9ad2SEduard Zingerman l4_%=:	exit;						\
293*034d9ad2SEduard Zingerman "	:
294*034d9ad2SEduard Zingerman 	: __imm(bpf_get_prandom_u32)
295*034d9ad2SEduard Zingerman 	: __clobber_all);
296*034d9ad2SEduard Zingerman }
297*034d9ad2SEduard Zingerman 
298*034d9ad2SEduard Zingerman /* The test performs a conditional 64-bit write to a stack location
299*034d9ad2SEduard Zingerman  * fp[-8], this is followed by an unconditional 8-bit write to fp[-8],
300*034d9ad2SEduard Zingerman  * then data is read from fp[-8]. This sequence is unsafe.
301*034d9ad2SEduard Zingerman  *
302*034d9ad2SEduard Zingerman  * The test would be mistakenly marked as safe w/o dst register parent
303*034d9ad2SEduard Zingerman  * preservation in verifier.c:copy_register_state() function.
304*034d9ad2SEduard Zingerman  *
305*034d9ad2SEduard Zingerman  * Note the usage of BPF_F_TEST_STATE_FREQ to force creation of the
306*034d9ad2SEduard Zingerman  * checkpoint state after conditional 64-bit assignment.
307*034d9ad2SEduard Zingerman  */
308*034d9ad2SEduard Zingerman 
309*034d9ad2SEduard Zingerman SEC("socket")
310*034d9ad2SEduard Zingerman __description("write tracking and register parent chain bug")
311*034d9ad2SEduard Zingerman /* in privileged mode reads from uninitialized stack locations are permitted */
312*034d9ad2SEduard Zingerman __success __failure_unpriv
313*034d9ad2SEduard Zingerman __msg_unpriv("invalid read from stack off -8+1 size 8")
__flag(BPF_F_TEST_STATE_FREQ)314*034d9ad2SEduard Zingerman __retval(0) __flag(BPF_F_TEST_STATE_FREQ)
315*034d9ad2SEduard Zingerman __naked void and_register_parent_chain_bug(void)
316*034d9ad2SEduard Zingerman {
317*034d9ad2SEduard Zingerman 	asm volatile ("					\
318*034d9ad2SEduard Zingerman 	/* r6 = ktime_get_ns() */			\
319*034d9ad2SEduard Zingerman 	call %[bpf_ktime_get_ns];			\
320*034d9ad2SEduard Zingerman 	r6 = r0;					\
321*034d9ad2SEduard Zingerman 	/* r0 = ktime_get_ns() */			\
322*034d9ad2SEduard Zingerman 	call %[bpf_ktime_get_ns];			\
323*034d9ad2SEduard Zingerman 	/* if r0 > r6 goto +1 */			\
324*034d9ad2SEduard Zingerman 	if r0 > r6 goto l0_%=;				\
325*034d9ad2SEduard Zingerman 	/* *(u64 *)(r10 - 8) = 0xdeadbeef */		\
326*034d9ad2SEduard Zingerman 	r0 = 0xdeadbeef;				\
327*034d9ad2SEduard Zingerman 	*(u64*)(r10 - 8) = r0;				\
328*034d9ad2SEduard Zingerman l0_%=:	r1 = 42;					\
329*034d9ad2SEduard Zingerman 	*(u8*)(r10 - 8) = r1;				\
330*034d9ad2SEduard Zingerman 	r2 = *(u64*)(r10 - 8);				\
331*034d9ad2SEduard Zingerman 	/* exit(0) */					\
332*034d9ad2SEduard Zingerman 	r0 = 0;						\
333*034d9ad2SEduard Zingerman 	exit;						\
334*034d9ad2SEduard Zingerman "	:
335*034d9ad2SEduard Zingerman 	: __imm(bpf_ktime_get_ns)
336*034d9ad2SEduard Zingerman 	: __clobber_all);
337*034d9ad2SEduard Zingerman }
338*034d9ad2SEduard Zingerman 
char _license[] SEC("license") = "GPL";