// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define LOOP_BOUND 0xf
#define MAX_ENTRIES 8
#define HALF_ENTRIES (MAX_ENTRIES >> 1)
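
/*
 * The BPF verifier requires provably bounded loops, so every map walk
 * below is capped at LOOP_BOUND iterations.  The assert that follows
 * guarantees the cap never cuts a full MAX_ENTRIES walk short.
 */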

_Static_assert(MAX_ENTRIES < LOOP_BOUND, "MAX_ENTRIES must be < LOOP_BOUND");

enum bpf_map_type g_map_type = BPF_MAP_TYPE_UNSPEC;
__u32 g_line = 0;
int page_size = 0; /* userspace should set it */
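
/*
 * A minimal userspace sketch for seeding page_size before the test runs,
 * assuming a libbpf skeleton (the skeleton name is illustrative):
 *
 *	struct map_ptr_kern *skel = map_ptr_kern__open_and_load();
 *
 *	skel->bss->page_size = getpagesize();
 *
 * check_ringbuf() compares the ring buffer size against this value.
 */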
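/*
 * On a failed check, VERIFY() records the source line in g_line and makes
 * the enclosing function return 0; VERIFY_TYPE() additionally records the
 * map type under test in g_map_type.  Userspace can read both globals
 * from .bss to report exactly which check failed.
 */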
#define VERIFY_TYPE(type, func) ({	\
	g_map_type = type;		\
	if (!func())			\
		return 0;		\
})

#define VERIFY(expr) ({		\
	g_line = __LINE__;	\
	if (!(expr))		\
		return 0;	\
})

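/*
 * Partial mirror of the kernel-internal struct bpf_map.  With
 * preserve_access_index, clang emits CO-RE relocations for each field
 * access and libbpf rewrites the offsets at load time against the
 * running kernel's BTF, so only the fields read here need declaring.
 */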
struct bpf_map {
	enum bpf_map_type map_type;
	__u32 key_size;
	__u32 value_size;
	__u32 max_entries;
	__u32 id;
} __attribute__((preserve_access_index));

static inline int check_bpf_map_fields(struct bpf_map *map, __u32 key_size,
				       __u32 value_size, __u32 max_entries)
{
	VERIFY(map->map_type == g_map_type);
	VERIFY(map->key_size == key_size);
	VERIFY(map->value_size == value_size);
	VERIFY(map->max_entries == max_entries);
	VERIFY(map->id > 0);

	return 1;
}

static inline int check_bpf_map_ptr(struct bpf_map *indirect,
				    struct bpf_map *direct)
{
	VERIFY(indirect->map_type == direct->map_type);
	VERIFY(indirect->key_size == direct->key_size);
	VERIFY(indirect->value_size == direct->value_size);
	VERIFY(indirect->max_entries == direct->max_entries);
	VERIFY(indirect->id == direct->id);

	return 1;
}

static inline int check(struct bpf_map *indirect, struct bpf_map *direct,
			__u32 key_size, __u32 value_size, __u32 max_entries)
{
	VERIFY(check_bpf_map_ptr(indirect, direct));
	VERIFY(check_bpf_map_fields(indirect, key_size, value_size,
				    max_entries));
	return 1;
}

static inline int check_default(struct bpf_map *indirect,
				struct bpf_map *direct)
{
	VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
		     MAX_ENTRIES));
	return 1;
}

static __noinline int
check_default_noinline(struct bpf_map *indirect, struct bpf_map *direct)
{
	VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
		     MAX_ENTRIES));
	return 1;
}

typedef struct {
	int counter;
} atomic_t;

struct bpf_htab {
	struct bpf_map map;
	atomic_t count;
	__u32 n_buckets;
	__u32 elem_size;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(map_flags, BPF_F_NO_PREALLOC); /* to test bpf_htab.count */
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_hash SEC(".maps");

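/*
 * The verifier tracks &m_hash as a map pointer, so it may be cast to the
 * matching kernel-internal type and its fields read directly.  The
 * expected elem_size of 64 assumes the kernel's htab_elem layout for
 * 4-byte keys and values on a 64-bit build.
 */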
static inline int check_hash(void)
{
	struct bpf_htab *hash = (struct bpf_htab *)&m_hash;
	struct bpf_map *map = (struct bpf_map *)&m_hash;
	int i;

	VERIFY(check_default_noinline(&hash->map, map));

	VERIFY(hash->n_buckets == MAX_ENTRIES);
	VERIFY(hash->elem_size == 64);

	VERIFY(hash->count.counter == 0);
	for (i = 0; i < HALF_ENTRIES; ++i) {
		const __u32 key = i;
		const __u32 val = 1;

		if (bpf_map_update_elem(hash, &key, &val, 0))
			return 0;
	}
	VERIFY(hash->count.counter == HALF_ENTRIES);

	return 1;
}

struct bpf_array {
	struct bpf_map map;
	__u32 elem_size;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_array SEC(".maps");

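/*
 * For arrays, elem_size is the value size rounded up to 8 bytes, hence
 * the expected 8 for a __u32 value.  The lookup loop below is bounded by
 * LOOP_BOUND to satisfy the verifier; the static assert up top ensures
 * all MAX_ENTRIES slots are still visited.
 */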
static inline int check_array(void)
{
	struct bpf_array *array = (struct bpf_array *)&m_array;
	struct bpf_map *map = (struct bpf_map *)&m_array;
	int i, n_lookups = 0, n_keys = 0;

	VERIFY(check_default(&array->map, map));

	VERIFY(array->elem_size == 8);

	for (i = 0; i < array->map.max_entries && i < LOOP_BOUND; ++i) {
		const __u32 key = i;
		__u32 *val = bpf_map_lookup_elem(array, &key);

		++n_lookups;
		if (val)
			++n_keys;
	}

	VERIFY(n_lookups == MAX_ENTRIES);
	VERIFY(n_keys == MAX_ENTRIES);

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_prog_array SEC(".maps");

static inline int check_prog_array(void)
{
	struct bpf_array *prog_array = (struct bpf_array *)&m_prog_array;
	struct bpf_map *map = (struct bpf_map *)&m_prog_array;

	VERIFY(check_default(&prog_array->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_perf_event_array SEC(".maps");

static inline int check_perf_event_array(void)
{
	struct bpf_array *perf_event_array = (struct bpf_array *)&m_perf_event_array;
	struct bpf_map *map = (struct bpf_map *)&m_perf_event_array;

	VERIFY(check_default(&perf_event_array->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_percpu_hash SEC(".maps");

static inline int check_percpu_hash(void)
{
	struct bpf_htab *percpu_hash = (struct bpf_htab *)&m_percpu_hash;
	struct bpf_map *map = (struct bpf_map *)&m_percpu_hash;

	VERIFY(check_default(&percpu_hash->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_percpu_array SEC(".maps");

static inline int check_percpu_array(void)
{
	struct bpf_array *percpu_array = (struct bpf_array *)&m_percpu_array;
	struct bpf_map *map = (struct bpf_map *)&m_percpu_array;

	VERIFY(check_default(&percpu_array->map, map));

	return 1;
}

struct bpf_stack_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u64);
} m_stack_trace SEC(".maps");

static inline int check_stack_trace(void)
{
	struct bpf_stack_map *stack_trace =
		(struct bpf_stack_map *)&m_stack_trace;
	struct bpf_map *map = (struct bpf_map *)&m_stack_trace;

	VERIFY(check(&stack_trace->map, map, sizeof(__u32), sizeof(__u64),
		     MAX_ENTRIES));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_cgroup_array SEC(".maps");

static inline int check_cgroup_array(void)
{
	struct bpf_array *cgroup_array = (struct bpf_array *)&m_cgroup_array;
	struct bpf_map *map = (struct bpf_map *)&m_cgroup_array;

	VERIFY(check_default(&cgroup_array->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_lru_hash SEC(".maps");

static inline int check_lru_hash(void)
{
	struct bpf_htab *lru_hash = (struct bpf_htab *)&m_lru_hash;
	struct bpf_map *map = (struct bpf_map *)&m_lru_hash;

	VERIFY(check_default(&lru_hash->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_lru_percpu_hash SEC(".maps");

static inline int check_lru_percpu_hash(void)
{
	struct bpf_htab *lru_percpu_hash = (struct bpf_htab *)&m_lru_percpu_hash;
	struct bpf_map *map = (struct bpf_map *)&m_lru_percpu_hash;

	VERIFY(check_default(&lru_percpu_hash->map, map));

	return 1;
}

struct lpm_trie {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct lpm_key {
	struct bpf_lpm_trie_key trie_key;
	__u32 data;
};

struct {
	__uint(type, BPF_MAP_TYPE_LPM_TRIE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, struct lpm_key);
	__type(value, __u32);
} m_lpm_trie SEC(".maps");

static inline int check_lpm_trie(void)
{
	struct lpm_trie *lpm_trie = (struct lpm_trie *)&m_lpm_trie;
	struct bpf_map *map = (struct bpf_map *)&m_lpm_trie;

	VERIFY(check(&lpm_trie->map, map, sizeof(struct lpm_key), sizeof(__u32),
		     MAX_ENTRIES));

	return 1;
}

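/*
 * Declarative map-in-map: __array(values, ...) describes the inner map
 * type, and the initializers pre-populate inner-map slots at load time;
 * slot 0 of the array-of-maps and slot 2 of the hash-of-maps below are
 * both seeded with inner_map.
 */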
struct inner_map {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} inner_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
	__array(values, struct {
		__uint(type, BPF_MAP_TYPE_ARRAY);
		__uint(max_entries, 1);
		__type(key, __u32);
		__type(value, __u32);
	});
} m_array_of_maps SEC(".maps") = {
	.values = { (void *)&inner_map, 0, 0, 0, 0, 0, 0, 0, 0 },
};

static inline int check_array_of_maps(void)
{
	struct bpf_array *array_of_maps = (struct bpf_array *)&m_array_of_maps;
	struct bpf_map *map = (struct bpf_map *)&m_array_of_maps;

	VERIFY(check_default(&array_of_maps->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
	__array(values, struct inner_map);
} m_hash_of_maps SEC(".maps") = {
	.values = {
		[2] = &inner_map,
	},
};

static inline int check_hash_of_maps(void)
{
	struct bpf_htab *hash_of_maps = (struct bpf_htab *)&m_hash_of_maps;
	struct bpf_map *map = (struct bpf_map *)&m_hash_of_maps;

	VERIFY(check_default(&hash_of_maps->map, map));

	return 1;
}

struct bpf_dtab {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_devmap SEC(".maps");

static inline int check_devmap(void)
{
	struct bpf_dtab *devmap = (struct bpf_dtab *)&m_devmap;
	struct bpf_map *map = (struct bpf_map *)&m_devmap;

	VERIFY(check_default(&devmap->map, map));

	return 1;
}

struct bpf_stab {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_sockmap SEC(".maps");

static inline int check_sockmap(void)
{
	struct bpf_stab *sockmap = (struct bpf_stab *)&m_sockmap;
	struct bpf_map *map = (struct bpf_map *)&m_sockmap;

	VERIFY(check_default(&sockmap->map, map));

	return 1;
}

struct bpf_cpu_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_CPUMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_cpumap SEC(".maps");

static inline int check_cpumap(void)
{
	struct bpf_cpu_map *cpumap = (struct bpf_cpu_map *)&m_cpumap;
	struct bpf_map *map = (struct bpf_map *)&m_cpumap;

	VERIFY(check_default(&cpumap->map, map));

	return 1;
}

struct xsk_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_xskmap SEC(".maps");

static inline int check_xskmap(void)
{
	struct xsk_map *xskmap = (struct xsk_map *)&m_xskmap;
	struct bpf_map *map = (struct bpf_map *)&m_xskmap;

	VERIFY(check_default(&xskmap->map, map));

	return 1;
}

struct bpf_shtab {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_SOCKHASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_sockhash SEC(".maps");

static inline int check_sockhash(void)
{
	struct bpf_shtab *sockhash = (struct bpf_shtab *)&m_sockhash;
	struct bpf_map *map = (struct bpf_map *)&m_sockhash;

	VERIFY(check_default(&sockhash->map, map));

	return 1;
}

struct bpf_cgroup_storage_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u32);
} m_cgroup_storage SEC(".maps");

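/*
 * Cgroup storage allocates an entry per attached cgroup rather than a
 * fixed table, so the kernel reports max_entries as 0; check() is called
 * with 0 here and for the per-CPU flavor below.
 */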
static inline int check_cgroup_storage(void)
{
	struct bpf_cgroup_storage_map *cgroup_storage =
		(struct bpf_cgroup_storage_map *)&m_cgroup_storage;
	struct bpf_map *map = (struct bpf_map *)&m_cgroup_storage;

	VERIFY(check(&cgroup_storage->map, map,
		     sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));

	return 1;
}

struct reuseport_array {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_reuseport_sockarray SEC(".maps");

static inline int check_reuseport_sockarray(void)
{
	struct reuseport_array *reuseport_sockarray =
		(struct reuseport_array *)&m_reuseport_sockarray;
	struct bpf_map *map = (struct bpf_map *)&m_reuseport_sockarray;

	VERIFY(check_default(&reuseport_sockarray->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u32);
} m_percpu_cgroup_storage SEC(".maps");

static inline int check_percpu_cgroup_storage(void)
{
	struct bpf_cgroup_storage_map *percpu_cgroup_storage =
		(struct bpf_cgroup_storage_map *)&m_percpu_cgroup_storage;
	struct bpf_map *map = (struct bpf_map *)&m_percpu_cgroup_storage;

	VERIFY(check(&percpu_cgroup_storage->map, map,
		     sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));

	return 1;
}

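/*
 * Queue and stack maps are keyless (elements are pushed and popped), so
 * both checks below expect a key_size of 0.
 */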
struct bpf_queue_stack {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_QUEUE);
	__uint(max_entries, MAX_ENTRIES);
	__type(value, __u32);
} m_queue SEC(".maps");

static inline int check_queue(void)
{
	struct bpf_queue_stack *queue = (struct bpf_queue_stack *)&m_queue;
	struct bpf_map *map = (struct bpf_map *)&m_queue;

	VERIFY(check(&queue->map, map, 0, sizeof(__u32), MAX_ENTRIES));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_STACK);
	__uint(max_entries, MAX_ENTRIES);
	__type(value, __u32);
} m_stack SEC(".maps");

static inline int check_stack(void)
{
	struct bpf_queue_stack *stack = (struct bpf_queue_stack *)&m_stack;
	struct bpf_map *map = (struct bpf_map *)&m_stack;

	VERIFY(check(&stack->map, map, 0, sizeof(__u32), MAX_ENTRIES));

	return 1;
}

struct bpf_local_storage_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, __u32);
	__type(value, __u32);
} m_sk_storage SEC(".maps");

static inline int check_sk_storage(void)
{
	struct bpf_local_storage_map *sk_storage =
		(struct bpf_local_storage_map *)&m_sk_storage;
	struct bpf_map *map = (struct bpf_map *)&m_sk_storage;

	VERIFY(check(&sk_storage->map, map, sizeof(__u32), sizeof(__u32), 0));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_devmap_hash SEC(".maps");

static inline int check_devmap_hash(void)
{
	struct bpf_dtab *devmap_hash = (struct bpf_dtab *)&m_devmap_hash;
	struct bpf_map *map = (struct bpf_map *)&m_devmap_hash;

	VERIFY(check_default(&devmap_hash->map, map));

	return 1;
}

struct bpf_ringbuf_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

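/*
 * For ring buffers, max_entries is the buffer size in bytes and must be
 * a page-aligned power of two.  It is left unset here; userspace is
 * expected to size the map to one page (e.g. via
 * bpf_map__set_max_entries()) to match the page_size check below.
 */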
struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
} m_ringbuf SEC(".maps");

static inline int check_ringbuf(void)
{
	struct bpf_ringbuf_map *ringbuf = (struct bpf_ringbuf_map *)&m_ringbuf;
	struct bpf_map *map = (struct bpf_map *)&m_ringbuf;

	VERIFY(check(&ringbuf->map, map, 0, 0, page_size));

	return 1;
}

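/*
 * Entry point, attached at cgroup egress.  Each VERIFY_TYPE() records the
 * map type under test and returns early on failure, so a return value of
 * 1 (allow the packet) means every map passed.
 */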
SEC("cgroup_skb/egress")
int cg_skb(void *ctx)
{
	VERIFY_TYPE(BPF_MAP_TYPE_HASH, check_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_ARRAY, check_array);
	VERIFY_TYPE(BPF_MAP_TYPE_PROG_ARRAY, check_prog_array);
	VERIFY_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, check_perf_event_array);
	VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_HASH, check_percpu_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, check_percpu_array);
	VERIFY_TYPE(BPF_MAP_TYPE_STACK_TRACE, check_stack_trace);
	VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, check_cgroup_array);
	VERIFY_TYPE(BPF_MAP_TYPE_LRU_HASH, check_lru_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, check_lru_percpu_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_LPM_TRIE, check_lpm_trie);
	VERIFY_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, check_array_of_maps);
	VERIFY_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, check_hash_of_maps);
	VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP, check_devmap);
	VERIFY_TYPE(BPF_MAP_TYPE_SOCKMAP, check_sockmap);
	VERIFY_TYPE(BPF_MAP_TYPE_CPUMAP, check_cpumap);
	VERIFY_TYPE(BPF_MAP_TYPE_XSKMAP, check_xskmap);
	VERIFY_TYPE(BPF_MAP_TYPE_SOCKHASH, check_sockhash);
	VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, check_cgroup_storage);
	VERIFY_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
		    check_reuseport_sockarray);
	VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
		    check_percpu_cgroup_storage);
	VERIFY_TYPE(BPF_MAP_TYPE_QUEUE, check_queue);
	VERIFY_TYPE(BPF_MAP_TYPE_STACK, check_stack);
	VERIFY_TYPE(BPF_MAP_TYPE_SK_STORAGE, check_sk_storage);
	VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, check_devmap_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_RINGBUF, check_ringbuf);

	return 1;
}

__u32 _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";