// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/map_ptr_mixing.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

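/* sizeof(struct test_val) is 48 bytes (4 + 11 * 4), hence the "_48b"
 * suffix on the two value maps below.
 */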
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct test_val);
} map_array_48b SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, struct test_val);
} map_hash_48b SEC(".maps");

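/* Array-of-maps used by the "(hash, map in map)" lookup test below;
 * a bpf_map_lookup_elem() on it yields an inner map pointer rather
 * than a plain value pointer.
 */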
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
	__array(values, struct {
		__uint(type, BPF_MAP_TYPE_ARRAY);
		__uint(max_entries, 1);
		__type(key, int);
		__type(value, int);
	});
} map_in_map SEC(".maps");

void dummy_prog_42_socket(void);
void dummy_prog_24_socket(void);
void dummy_prog_loop1_socket(void);
void dummy_prog_loop2_socket(void);

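/* Prog arrays used as tail-call targets.  Index 7 exists only in
 * map_prog2_socket and holds dummy_prog_42_socket, which is what the
 * tail-call tests at the end of this file jump to (r3 = 7), giving the
 * expected return value of 42.
 */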
struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 4);
	__uint(key_size, sizeof(int));
	__array(values, void (void));
} map_prog1_socket SEC(".maps") = {
	.values = {
		[0] = (void *)&dummy_prog_42_socket,
		[1] = (void *)&dummy_prog_loop1_socket,
		[2] = (void *)&dummy_prog_24_socket,
	},
};

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 8);
	__uint(key_size, sizeof(int));
	__array(values, void (void));
} map_prog2_socket SEC(".maps") = {
	.values = {
		[1] = (void *)&dummy_prog_loop2_socket,
		[2] = (void *)&dummy_prog_24_socket,
		[7] = (void *)&dummy_prog_42_socket,
	},
};

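/* Auxiliary socket programs referenced by the prog arrays above. */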
SEC("socket")
__auxiliary __auxiliary_unpriv
__naked void dummy_prog_42_socket(void)
{
	asm volatile ("r0 = 42; exit;");
}

SEC("socket")
__auxiliary __auxiliary_unpriv
__naked void dummy_prog_24_socket(void)
{
	asm volatile ("r0 = 24; exit;");
}

SEC("socket")
__auxiliary __auxiliary_unpriv
__naked void dummy_prog_loop1_socket(void)
{
	asm volatile ("					\
	r3 = 1;						\
	r2 = %[map_prog1_socket] ll;			\
	call %[bpf_tail_call];				\
	r0 = 41;					\
	exit;						\
"	:
	: __imm(bpf_tail_call),
	  __imm_addr(map_prog1_socket)
	: __clobber_all);
}

SEC("socket")
__auxiliary __auxiliary_unpriv
__naked void dummy_prog_loop2_socket(void)
{
	asm volatile ("					\
	r3 = 1;						\
	r2 = %[map_prog2_socket] ll;			\
	call %[bpf_tail_call];				\
	r0 = 41;					\
	exit;						\
"	:
	: __imm(bpf_tail_call),
	  __imm_addr(map_prog2_socket)
	: __clobber_all);
}

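/* Both lookup tests share the same shape: the main program calls one of
 * two subprograms depending on r1, each returning a different map pointer
 * in r0, then performs a lookup and a write through the returned value
 * pointer.  Mixing hash and array map pointers is accepted; mixing in the
 * array-of-maps pointer is rejected (see the second test's __msg).
 */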
SEC("tc")
__description("calls: two calls returning different map pointers for lookup (hash, array)")
__success __retval(1)
__naked void pointers_for_lookup_hash_array(void)
{
	asm volatile ("					\
	/* main prog */					\
	if r1 != 0 goto l0_%=;				\
	call pointers_for_lookup_hash_array__1;		\
	goto l1_%=;					\
l0_%=:	call pointers_for_lookup_hash_array__2;		\
l1_%=:	r1 = r0;					\
	r2 = 0;						\
	*(u64*)(r10 - 8) = r2;				\
	r2 = r10;					\
	r2 += -8;					\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l2_%=;				\
	r1 = %[test_val_foo];				\
	*(u64*)(r0 + 0) = r1;				\
	r0 = 1;						\
l2_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

static __naked __noinline __attribute__((used))
void pointers_for_lookup_hash_array__1(void)
{
	asm volatile ("					\
	r0 = %[map_hash_48b] ll;			\
	exit;						\
"	:
	: __imm_addr(map_hash_48b)
	: __clobber_all);
}

static __naked __noinline __attribute__((used))
void pointers_for_lookup_hash_array__2(void)
{
	asm volatile ("					\
	r0 = %[map_array_48b] ll;			\
	exit;						\
"	:
	: __imm_addr(map_array_48b)
	: __clobber_all);
}

SEC("tc")
__description("calls: two calls returning different map pointers for lookup (hash, map in map)")
__failure __msg("only read from bpf_array is supported")
__naked void lookup_hash_map_in_map(void)
{
	asm volatile ("					\
	/* main prog */					\
	if r1 != 0 goto l0_%=;				\
	call lookup_hash_map_in_map__1;			\
	goto l1_%=;					\
l0_%=:	call lookup_hash_map_in_map__2;			\
l1_%=:	r1 = r0;					\
	r2 = 0;						\
	*(u64*)(r10 - 8) = r2;				\
	r2 = r10;					\
	r2 += -8;					\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l2_%=;				\
	r1 = %[test_val_foo];				\
	*(u64*)(r0 + 0) = r1;				\
	r0 = 1;						\
l2_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_const(test_val_foo, offsetof(struct test_val, foo))
	: __clobber_all);
}

static __naked __noinline __attribute__((used))
void lookup_hash_map_in_map__1(void)
{
	asm volatile ("					\
	r0 = %[map_array_48b] ll;			\
	exit;						\
"	:
	: __imm_addr(map_array_48b)
	: __clobber_all);
}

static __naked __noinline __attribute__((used))
void lookup_hash_map_in_map__2(void)
{
	asm volatile ("					\
	r0 = %[map_in_map] ll;				\
	exit;						\
"	:
	: __imm_addr(map_in_map)
	: __clobber_all);
}

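/* The tail-call tests pick a prog array based on skb->mark and tail-call
 * into index 7.  With mark == 0 that is dummy_prog_42_socket in
 * map_prog2_socket, hence __retval(42).  Selecting between two different
 * prog arrays is rejected for unprivileged users ("tail_call abusing
 * map_ptr"), while selecting the same array on both branches is allowed.
 */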
SEC("socket")
__description("cond: two branches returning different map pointers for lookup (tail, tail)")
__success __failure_unpriv __msg_unpriv("tail_call abusing map_ptr")
__retval(42)
__naked void pointers_for_lookup_tail_tail_1(void)
{
	asm volatile ("					\
	r6 = *(u32*)(r1 + %[__sk_buff_mark]);		\
	if r6 != 0 goto l0_%=;				\
	r2 = %[map_prog2_socket] ll;			\
	goto l1_%=;					\
l0_%=:	r2 = %[map_prog1_socket] ll;			\
l1_%=:	r3 = 7;						\
	call %[bpf_tail_call];				\
	r0 = 1;						\
	exit;						\
"	:
	: __imm(bpf_tail_call),
	  __imm_addr(map_prog1_socket),
	  __imm_addr(map_prog2_socket),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}

SEC("socket")
__description("cond: two branches returning same map pointers for lookup (tail, tail)")
__success __success_unpriv __retval(42)
__naked void pointers_for_lookup_tail_tail_2(void)
{
	asm volatile ("					\
	r6 = *(u32*)(r1 + %[__sk_buff_mark]);		\
	if r6 == 0 goto l0_%=;				\
	r2 = %[map_prog2_socket] ll;			\
	goto l1_%=;					\
l0_%=:	r2 = %[map_prog2_socket] ll;			\
l1_%=:	r3 = 7;						\
	call %[bpf_tail_call];				\
	r0 = 1;						\
	exit;						\
"	:
	: __imm(bpf_tail_call),
	  __imm_addr(map_prog2_socket),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";