// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/xadd.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

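/* Single-entry hash map with an 8-byte key and 8-byte value; used as
 * the target of the unaligned map value test below.
 */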
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, long long);
} map_hash_8b SEC(".maps");

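/* The 64-bit store at r10-8 is aligned, but the 32-bit atomic add at
 * r10-7 is not 4-byte aligned and must be rejected as a misaligned
 * stack access.
 */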
SEC("tc")
__description("xadd/w check unaligned stack")
__failure __msg("misaligned stack access off")
__naked void xadd_w_check_unaligned_stack(void)
{
	asm volatile ("					\
	r0 = 1;						\
	*(u64*)(r10 - 8) = r0;				\
	lock *(u32 *)(r10 - 7) += w0;			\
	r0 = *(u64*)(r10 - 8);				\
	exit;						\
"	::: __clobber_all);
}

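/* The value returned by bpf_map_lookup_elem() is 8-byte aligned, so a
 * 32-bit atomic add at r0+3 is misaligned and must be rejected with a
 * misaligned value access error.
 */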
SEC("tc")
__description("xadd/w check unaligned map")
__failure __msg("misaligned value access off")
__naked void xadd_w_check_unaligned_map(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 != 0 goto l0_%=;				\
	exit;						\
l0_%=:	r1 = 1;						\
	lock *(u32 *)(r0 + 3) += w1;			\
	r0 = *(u32*)(r0 + 3);				\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

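/* BPF_F_ANY_ALIGNMENT relaxes alignment checks for plain loads and
 * stores, but atomic instructions on packet memory are rejected
 * outright, so the program fails with "BPF_ATOMIC stores into R2 pkt
 * is not allowed" rather than a misalignment error.
 */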
SEC("xdp")
__description("xadd/w check unaligned pkt")
__failure __msg("BPF_ATOMIC stores into R2 pkt is not allowed")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void xadd_w_check_unaligned_pkt(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[xdp_md_data]);		\
	r3 = *(u32*)(r1 + %[xdp_md_data_end]);		\
	r1 = r2;					\
	r1 += 8;					\
	if r1 < r3 goto l0_%=;				\
	r0 = 99;					\
	goto l1_%=;					\
l0_%=:	r0 = 1;						\
	r1 = 0;						\
	*(u32*)(r2 + 0) = r1;				\
	r1 = 0;						\
	*(u32*)(r2 + 3) = r1;				\
	lock *(u32 *)(r2 + 1) += w0;			\
	lock *(u32 *)(r2 + 2) += w0;			\
	r0 = *(u32*)(r2 + 1);				\
l1_%=:	exit;						\
"	:
	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
	  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
	: __clobber_all);
}

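/* A BPF_ADD atomic without BPF_FETCH must not write back into its
 * source register: after two 64-bit xadds the source (r0) and the
 * frame pointer copy (r7) are unchanged and the slot reads back
 * 1 + 1 + 1 = 3.  Any mangling takes the l0 branch and returns 42.
 */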
SEC("tc")
__description("xadd/w check whether src/dst got mangled, 1")
__success __retval(3)
__naked void src_dst_got_mangled_1(void)
{
	asm volatile ("					\
	r0 = 1;						\
	r6 = r0;					\
	r7 = r10;					\
	*(u64*)(r10 - 8) = r0;				\
	lock *(u64 *)(r10 - 8) += r0;			\
	lock *(u64 *)(r10 - 8) += r0;			\
	if r6 != r0 goto l0_%=;				\
	if r7 != r10 goto l0_%=;			\
	r0 = *(u64*)(r10 - 8);				\
	exit;						\
l0_%=:	r0 = 42;					\
	exit;						\
"	::: __clobber_all);
}

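/* Same check as above with 32-bit xadds on w0: the source register and
 * the frame pointer must survive, and the 32-bit slot reads back as 3.
 */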
SEC("tc")
__description("xadd/w check whether src/dst got mangled, 2")
__success __retval(3)
__naked void src_dst_got_mangled_2(void)
{
	asm volatile ("					\
	r0 = 1;						\
	r6 = r0;					\
	r7 = r10;					\
	*(u32*)(r10 - 8) = r0;				\
	lock *(u32 *)(r10 - 8) += w0;			\
	lock *(u32 *)(r10 - 8) += w0;			\
	if r6 != r0 goto l0_%=;				\
	if r7 != r10 goto l0_%=;			\
	r0 = *(u32*)(r10 - 8);				\
	exit;						\
l0_%=:	r0 = 42;					\
	exit;						\
"	::: __clobber_all);
}

char _license[] SEC("license") = "GPL";