// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
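/*
 * Check that BPF programs can read kernel-internal map structures
 * (struct bpf_map and its per-type containers) straight through map
 * pointers. The struct definitions below are local mirrors of the
 * kernel's internal layouts; __attribute__((preserve_access_index))
 * turns every field access into a CO-RE relocation, so libbpf patches
 * the offsets to match the running kernel at load time.
 */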

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define LOOP_BOUND 0xf
#define MAX_ENTRIES 8
#define HALF_ENTRIES (MAX_ENTRIES >> 1)

_Static_assert(MAX_ENTRIES < LOOP_BOUND, "MAX_ENTRIES must be < LOOP_BOUND");

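/*
 * Written back to userspace so the other half of the test can report
 * which map type and which VERIFY() line tripped on failure.
 */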
enum bpf_map_type g_map_type = BPF_MAP_TYPE_UNSPEC;
__u32 g_line = 0;

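/*
 * Run one checker with g_map_type set for reporting, and bail out of
 * the program on the first failure. VERIFY() records __LINE__ before
 * evaluating its expression so the exact failing check is visible
 * from userspace.
 */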
#define VERIFY_TYPE(type, func) ({	\
	g_map_type = type;		\
	if (!func())			\
		return 0;		\
})

#define VERIFY(expr) ({		\
	g_line = __LINE__;	\
	if (!(expr))		\
		return 0;	\
})

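/*
 * Local mirror of the kernel's struct bpf_map. Only the fields this
 * test reads are declared; preserve_access_index lets CO-RE find them
 * at their real offsets in the running kernel.
 */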
struct bpf_map {
	enum bpf_map_type map_type;
	__u32 key_size;
	__u32 value_size;
	__u32 max_entries;
	__u32 id;
} __attribute__((preserve_access_index));

static inline int check_bpf_map_fields(struct bpf_map *map, __u32 key_size,
				       __u32 value_size, __u32 max_entries)
{
	VERIFY(map->map_type == g_map_type);
	VERIFY(map->key_size == key_size);
	VERIFY(map->value_size == value_size);
	VERIFY(map->max_entries == max_entries);
	VERIFY(map->id > 0);

	return 1;
}

static inline int check_bpf_map_ptr(struct bpf_map *indirect,
				    struct bpf_map *direct)
{
	VERIFY(indirect->map_type == direct->map_type);
	VERIFY(indirect->key_size == direct->key_size);
	VERIFY(indirect->value_size == direct->value_size);
	VERIFY(indirect->max_entries == direct->max_entries);
	VERIFY(indirect->id == direct->id);

	return 1;
}

static inline int check(struct bpf_map *indirect, struct bpf_map *direct,
			__u32 key_size, __u32 value_size, __u32 max_entries)
{
	VERIFY(check_bpf_map_ptr(indirect, direct));
	VERIFY(check_bpf_map_fields(indirect, key_size, value_size,
				    max_entries));
	return 1;
}

static inline int check_default(struct bpf_map *indirect,
				struct bpf_map *direct)
{
	VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
		     MAX_ENTRIES));
	return 1;
}

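/*
 * Same as check_default(), but __noinline forces a real BPF-to-BPF
 * call, so map pointers are also exercised as subprogram arguments.
 */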
static __noinline int
check_default_noinline(struct bpf_map *indirect, struct bpf_map *direct)
{
	VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
		     MAX_ENTRIES));
	return 1;
}

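/*
 * Mirror of the kernel's struct bpf_htab and the atomic_t it embeds.
 * The count field tracks allocated elements; it is only maintained for
 * maps created with BPF_F_NO_PREALLOC, hence the flag on m_hash below.
 */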
typedef struct {
	int counter;
} atomic_t;

struct bpf_htab {
	struct bpf_map map;
	atomic_t count;
	__u32 n_buckets;
	__u32 elem_size;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(map_flags, BPF_F_NO_PREALLOC); /* to test bpf_htab.count */
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_hash SEC(".maps");

static inline int check_hash(void)
{
	struct bpf_htab *hash = (struct bpf_htab *)&m_hash;
	struct bpf_map *map = (struct bpf_map *)&m_hash;
	int i;

	VERIFY(check_default_noinline(&hash->map, map));

	VERIFY(hash->n_buckets == MAX_ENTRIES);
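	/*
	 * 64 == sizeof(struct htab_elem) (48 bytes on x86-64 when this
	 * test was written) plus the key and the value, each rounded up
	 * to 8 bytes.
	 */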
	VERIFY(hash->elem_size == 64);

	VERIFY(hash->count.counter == 0);
	for (i = 0; i < HALF_ENTRIES; ++i) {
		const __u32 key = i;
		const __u32 val = 1;

		if (bpf_map_update_elem(hash, &key, &val, 0))
			return 0;
	}
	VERIFY(hash->count.counter == HALF_ENTRIES);

	return 1;
}

struct bpf_array {
	struct bpf_map map;
	__u32 elem_size;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_array SEC(".maps");

static inline int check_array(void)
{
	struct bpf_array *array = (struct bpf_array *)&m_array;
	struct bpf_map *map = (struct bpf_map *)&m_array;
	int i, n_lookups = 0, n_keys = 0;

	VERIFY(check_default(&array->map, map));

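	/* Array values are padded: elem_size == round_up(sizeof(__u32), 8). */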
	VERIFY(array->elem_size == 8);

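	/*
	 * Iterate using the map's own max_entries field; the LOOP_BOUND
	 * cap (asserted > MAX_ENTRIES above) keeps the loop provably
	 * bounded for the verifier without ever truncating it.
	 */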
	for (i = 0; i < array->map.max_entries && i < LOOP_BOUND; ++i) {
		const __u32 key = i;
		__u32 *val = bpf_map_lookup_elem(array, &key);

		++n_lookups;
		if (val)
			++n_keys;
	}

	VERIFY(n_lookups == MAX_ENTRIES);
	VERIFY(n_keys == MAX_ENTRIES);

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_prog_array SEC(".maps");

static inline int check_prog_array(void)
{
	struct bpf_array *prog_array = (struct bpf_array *)&m_prog_array;
	struct bpf_map *map = (struct bpf_map *)&m_prog_array;

	VERIFY(check_default(&prog_array->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_perf_event_array SEC(".maps");

static inline int check_perf_event_array(void)
{
	struct bpf_array *perf_event_array = (struct bpf_array *)&m_perf_event_array;
	struct bpf_map *map = (struct bpf_map *)&m_perf_event_array;

	VERIFY(check_default(&perf_event_array->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_percpu_hash SEC(".maps");

static inline int check_percpu_hash(void)
{
	struct bpf_htab *percpu_hash = (struct bpf_htab *)&m_percpu_hash;
	struct bpf_map *map = (struct bpf_map *)&m_percpu_hash;

	VERIFY(check_default(&percpu_hash->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_percpu_array SEC(".maps");

static inline int check_percpu_array(void)
{
	struct bpf_array *percpu_array = (struct bpf_array *)&m_percpu_array;
	struct bpf_map *map = (struct bpf_map *)&m_percpu_array;

	VERIFY(check_default(&percpu_array->map, map));

	return 1;
}

struct bpf_stack_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u64);
} m_stack_trace SEC(".maps");

static inline int check_stack_trace(void)
{
	struct bpf_stack_map *stack_trace =
		(struct bpf_stack_map *)&m_stack_trace;
	struct bpf_map *map = (struct bpf_map *)&m_stack_trace;

	VERIFY(check(&stack_trace->map, map, sizeof(__u32), sizeof(__u64),
		     MAX_ENTRIES));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_cgroup_array SEC(".maps");

static inline int check_cgroup_array(void)
{
	struct bpf_array *cgroup_array = (struct bpf_array *)&m_cgroup_array;
	struct bpf_map *map = (struct bpf_map *)&m_cgroup_array;

	VERIFY(check_default(&cgroup_array->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_lru_hash SEC(".maps");

static inline int check_lru_hash(void)
{
	struct bpf_htab *lru_hash = (struct bpf_htab *)&m_lru_hash;
	struct bpf_map *map = (struct bpf_map *)&m_lru_hash;

	VERIFY(check_default(&lru_hash->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_lru_percpu_hash SEC(".maps");

static inline int check_lru_percpu_hash(void)
{
	struct bpf_htab *lru_percpu_hash = (struct bpf_htab *)&m_lru_percpu_hash;
	struct bpf_map *map = (struct bpf_map *)&m_lru_percpu_hash;

	VERIFY(check_default(&lru_percpu_hash->map, map));

	return 1;
}

struct lpm_trie {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct lpm_key {
	struct bpf_lpm_trie_key trie_key;
	__u32 data;
};

struct {
	__uint(type, BPF_MAP_TYPE_LPM_TRIE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, struct lpm_key);
	__type(value, __u32);
} m_lpm_trie SEC(".maps");

static inline int check_lpm_trie(void)
{
	struct lpm_trie *lpm_trie = (struct lpm_trie *)&m_lpm_trie;
	struct bpf_map *map = (struct bpf_map *)&m_lpm_trie;

	VERIFY(check(&lpm_trie->map, map, sizeof(struct lpm_key), sizeof(__u32),
		     MAX_ENTRIES));

	return 1;
}

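/*
 * Map-in-map: inner_map serves both as the inner-map type template and
 * as the map the outer maps below point at from selected slots.
 */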
struct inner_map {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} inner_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
	__array(values, struct {
		__uint(type, BPF_MAP_TYPE_ARRAY);
		__uint(max_entries, 1);
		__type(key, __u32);
		__type(value, __u32);
	});
} m_array_of_maps SEC(".maps") = {
	.values = { (void *)&inner_map, 0, 0, 0, 0, 0, 0, 0, 0 },
};

static inline int check_array_of_maps(void)
{
	struct bpf_array *array_of_maps = (struct bpf_array *)&m_array_of_maps;
	struct bpf_map *map = (struct bpf_map *)&m_array_of_maps;

	VERIFY(check_default(&array_of_maps->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
	__array(values, struct inner_map);
} m_hash_of_maps SEC(".maps") = {
	.values = {
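		/* For hash-of-maps, libbpf uses the slot index as the key. */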
		[2] = &inner_map,
	},
};

static inline int check_hash_of_maps(void)
{
	struct bpf_htab *hash_of_maps = (struct bpf_htab *)&m_hash_of_maps;
	struct bpf_map *map = (struct bpf_map *)&m_hash_of_maps;

	VERIFY(check_default(&hash_of_maps->map, map));

	return 1;
}

struct bpf_dtab {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_devmap SEC(".maps");

static inline int check_devmap(void)
{
	struct bpf_dtab *devmap = (struct bpf_dtab *)&m_devmap;
	struct bpf_map *map = (struct bpf_map *)&m_devmap;

	VERIFY(check_default(&devmap->map, map));

	return 1;
}

struct bpf_stab {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_sockmap SEC(".maps");

static inline int check_sockmap(void)
{
	struct bpf_stab *sockmap = (struct bpf_stab *)&m_sockmap;
	struct bpf_map *map = (struct bpf_map *)&m_sockmap;

	VERIFY(check_default(&sockmap->map, map));

	return 1;
}

struct bpf_cpu_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_CPUMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_cpumap SEC(".maps");

static inline int check_cpumap(void)
{
	struct bpf_cpu_map *cpumap = (struct bpf_cpu_map *)&m_cpumap;
	struct bpf_map *map = (struct bpf_map *)&m_cpumap;

	VERIFY(check_default(&cpumap->map, map));

	return 1;
}

struct xsk_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_xskmap SEC(".maps");

static inline int check_xskmap(void)
{
	struct xsk_map *xskmap = (struct xsk_map *)&m_xskmap;
	struct bpf_map *map = (struct bpf_map *)&m_xskmap;

	VERIFY(check_default(&xskmap->map, map));

	return 1;
}

struct bpf_shtab {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_SOCKHASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_sockhash SEC(".maps");

static inline int check_sockhash(void)
{
	struct bpf_shtab *sockhash = (struct bpf_shtab *)&m_sockhash;
	struct bpf_map *map = (struct bpf_map *)&m_sockhash;

	VERIFY(check_default(&sockhash->map, map));

	return 1;
}

struct bpf_cgroup_storage_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u32);
} m_cgroup_storage SEC(".maps");

static inline int check_cgroup_storage(void)
{
	struct bpf_cgroup_storage_map *cgroup_storage =
		(struct bpf_cgroup_storage_map *)&m_cgroup_storage;
	struct bpf_map *map = (struct bpf_map *)&m_cgroup_storage;

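	/* Cgroup storage allocates per attached cgroup, so max_entries is 0. */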
	VERIFY(check(&cgroup_storage->map, map,
		     sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));

	return 1;
}

struct reuseport_array {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_reuseport_sockarray SEC(".maps");

static inline int check_reuseport_sockarray(void)
{
	struct reuseport_array *reuseport_sockarray =
		(struct reuseport_array *)&m_reuseport_sockarray;
	struct bpf_map *map = (struct bpf_map *)&m_reuseport_sockarray;

	VERIFY(check_default(&reuseport_sockarray->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u32);
} m_percpu_cgroup_storage SEC(".maps");

static inline int check_percpu_cgroup_storage(void)
{
	struct bpf_cgroup_storage_map *percpu_cgroup_storage =
		(struct bpf_cgroup_storage_map *)&m_percpu_cgroup_storage;
	struct bpf_map *map = (struct bpf_map *)&m_percpu_cgroup_storage;

	VERIFY(check(&percpu_cgroup_storage->map, map,
		     sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));

	return 1;
}

struct bpf_queue_stack {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_QUEUE);
	__uint(max_entries, MAX_ENTRIES);
	__type(value, __u32);
} m_queue SEC(".maps");

static inline int check_queue(void)
{
	struct bpf_queue_stack *queue = (struct bpf_queue_stack *)&m_queue;
	struct bpf_map *map = (struct bpf_map *)&m_queue;

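	/* Queues and stacks are keyless, so key_size is 0. */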
	VERIFY(check(&queue->map, map, 0, sizeof(__u32), MAX_ENTRIES));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_STACK);
	__uint(max_entries, MAX_ENTRIES);
	__type(value, __u32);
} m_stack SEC(".maps");

static inline int check_stack(void)
{
	struct bpf_queue_stack *stack = (struct bpf_queue_stack *)&m_stack;
	struct bpf_map *map = (struct bpf_map *)&m_stack;

	VERIFY(check(&stack->map, map, 0, sizeof(__u32), MAX_ENTRIES));

	return 1;
}

struct bpf_local_storage_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, __u32);
	__type(value, __u32);
} m_sk_storage SEC(".maps");

static inline int check_sk_storage(void)
{
	struct bpf_local_storage_map *sk_storage =
		(struct bpf_local_storage_map *)&m_sk_storage;
	struct bpf_map *map = (struct bpf_map *)&m_sk_storage;

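	/* Socket-local storage is allocated per socket, so max_entries is 0. */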
	VERIFY(check(&sk_storage->map, map, sizeof(__u32), sizeof(__u32), 0));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_devmap_hash SEC(".maps");

static inline int check_devmap_hash(void)
{
	struct bpf_dtab *devmap_hash = (struct bpf_dtab *)&m_devmap_hash;
	struct bpf_map *map = (struct bpf_map *)&m_devmap_hash;

	VERIFY(check_default(&devmap_hash->map, map));

	return 1;
}

struct bpf_ringbuf_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

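/*
 * For ring buffers, max_entries is the buffer size in bytes and must be
 * a page-aligned power of two; 1 << 12 is one 4 KiB page.
 */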
struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 1 << 12);
} m_ringbuf SEC(".maps");

static inline int check_ringbuf(void)
{
	struct bpf_ringbuf_map *ringbuf = (struct bpf_ringbuf_map *)&m_ringbuf;
	struct bpf_map *map = (struct bpf_map *)&m_ringbuf;

	VERIFY(check(&ringbuf->map, map, 0, 0, 1 << 12));

	return 1;
}

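/*
 * Entry point: run every per-type checker in turn. Returning 1 lets
 * the egress skb pass; a failed VERIFY() returns 0, with g_map_type
 * and g_line identifying the failure for userspace.
 */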
SEC("cgroup_skb/egress")
int cg_skb(void *ctx)
{
	VERIFY_TYPE(BPF_MAP_TYPE_HASH, check_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_ARRAY, check_array);
	VERIFY_TYPE(BPF_MAP_TYPE_PROG_ARRAY, check_prog_array);
	VERIFY_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, check_perf_event_array);
	VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_HASH, check_percpu_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, check_percpu_array);
	VERIFY_TYPE(BPF_MAP_TYPE_STACK_TRACE, check_stack_trace);
	VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, check_cgroup_array);
	VERIFY_TYPE(BPF_MAP_TYPE_LRU_HASH, check_lru_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, check_lru_percpu_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_LPM_TRIE, check_lpm_trie);
	VERIFY_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, check_array_of_maps);
	VERIFY_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, check_hash_of_maps);
	VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP, check_devmap);
	VERIFY_TYPE(BPF_MAP_TYPE_SOCKMAP, check_sockmap);
	VERIFY_TYPE(BPF_MAP_TYPE_CPUMAP, check_cpumap);
	VERIFY_TYPE(BPF_MAP_TYPE_XSKMAP, check_xskmap);
	VERIFY_TYPE(BPF_MAP_TYPE_SOCKHASH, check_sockhash);
	VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, check_cgroup_storage);
	VERIFY_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
		    check_reuseport_sockarray);
	VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
		    check_percpu_cgroup_storage);
	VERIFY_TYPE(BPF_MAP_TYPE_QUEUE, check_queue);
	VERIFY_TYPE(BPF_MAP_TYPE_STACK, check_stack);
	VERIFY_TYPE(BPF_MAP_TYPE_SK_STORAGE, check_sk_storage);
	VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, check_devmap_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_RINGBUF, check_ringbuf);

	return 1;
}

__u32 _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";