// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include <network_helpers.h>
#include "for_each_hash_map_elem.skel.h"
#include "for_each_array_map_elem.skel.h"

static unsigned int duration;

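/* Exercise bpf_for_each_map_elem() over a hashmap and a per-CPU map:
 * seed both maps from user space, run the BPF program once, then check
 * the values its callbacks recorded in the skeleton's global data.
 */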
static void test_hash_map(void)
{
	int i, err, hashmap_fd, max_entries, percpu_map_fd;
	struct for_each_hash_map_elem *skel;
	__u64 *percpu_valbuf = NULL;
	__u32 key, num_cpus;
	__u64 val;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	skel = for_each_hash_map_elem__open_and_load();
	if (!ASSERT_OK_PTR(skel, "for_each_hash_map_elem__open_and_load"))
		return;

	hashmap_fd = bpf_map__fd(skel->maps.hashmap);
	max_entries = bpf_map__max_entries(skel->maps.hashmap);
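	/* populate the hashmap: key i maps to value i + 1 */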
	for (i = 0; i < max_entries; i++) {
		key = i;
		val = i + 1;
		err = bpf_map_update_elem(hashmap_fd, &key, &val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	num_cpus = bpf_num_possible_cpus();
	percpu_map_fd = bpf_map__fd(skel->maps.percpu_map);
	percpu_valbuf = malloc(sizeof(__u64) * num_cpus);
	if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
		goto out;

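	/* seed the per-CPU map at key 1 with a distinct value (i + 1) per CPU */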
	key = 1;
	for (i = 0; i < num_cpus; i++)
		percpu_valbuf[i] = i + 1;
	err = bpf_map_update_elem(percpu_map_fd, &key, percpu_valbuf, BPF_ANY);
	if (!ASSERT_OK(err, "percpu_map_update"))
		goto out;

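	/* trigger the program; it walks the maps with bpf_for_each_map_elem() */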
	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
	duration = topts.duration;
	if (CHECK(err || topts.retval, "ipv4", "err %d errno %d retval %d\n",
		  err, errno, topts.retval))
		goto out;

	ASSERT_EQ(skel->bss->hashmap_output, 4, "hashmap_output");
	ASSERT_EQ(skel->bss->hashmap_elems, max_entries, "hashmap_elems");

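	/* the BPF program is expected to have deleted key 1, so this lookup fails */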
	key = 1;
	err = bpf_map_lookup_elem(hashmap_fd, &key, &val);
	ASSERT_ERR(err, "hashmap_lookup");

	ASSERT_EQ(skel->bss->percpu_called, 1, "percpu_called");
	ASSERT_LT(skel->bss->cpu, num_cpus, "num_cpus");
	ASSERT_EQ(skel->bss->percpu_map_elems, 1, "percpu_map_elems");
	ASSERT_EQ(skel->bss->percpu_key, 1, "percpu_key");
	ASSERT_EQ(skel->bss->percpu_val, skel->bss->cpu + 1, "percpu_val");
	ASSERT_EQ(skel->bss->percpu_output, 100, "percpu_output");
out:
	free(percpu_valbuf);
	for_each_hash_map_elem__destroy(skel);
}

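/* Same flow for an array map plus a per-CPU map: seed the maps, run the
 * program once, then compare arraymap_output against the expected total
 * computed from the seeded values.
 */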
static void test_array_map(void)
{
	__u32 key, num_cpus, max_entries;
	int i, arraymap_fd, percpu_map_fd, err;
	struct for_each_array_map_elem *skel;
	__u64 *percpu_valbuf = NULL;
	__u64 val, expected_total;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	skel = for_each_array_map_elem__open_and_load();
	if (!ASSERT_OK_PTR(skel, "for_each_array_map_elem__open_and_load"))
		return;

	arraymap_fd = bpf_map__fd(skel->maps.arraymap);
	expected_total = 0;
	max_entries = bpf_map__max_entries(skel->maps.arraymap);
	for (i = 0; i < max_entries; i++) {
		key = i;
		val = i + 1;
		/* skip the last element when computing the expected total */
		if (i != max_entries - 1)
			expected_total += val;
		err = bpf_map_update_elem(arraymap_fd, &key, &val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	num_cpus = bpf_num_possible_cpus();
	percpu_map_fd = bpf_map__fd(skel->maps.percpu_map);
	percpu_valbuf = malloc(sizeof(__u64) * num_cpus);
	if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
		goto out;

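	/* seed the per-CPU map at key 0 with a distinct value (i + 1) per CPU */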
	key = 0;
	for (i = 0; i < num_cpus; i++)
		percpu_valbuf[i] = i + 1;
	err = bpf_map_update_elem(percpu_map_fd, &key, percpu_valbuf, BPF_ANY);
	if (!ASSERT_OK(err, "percpu_map_update"))
		goto out;

	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
	duration = topts.duration;
	if (CHECK(err || topts.retval, "ipv4", "err %d errno %d retval %d\n",
		  err, errno, topts.retval))
		goto out;

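	/* arraymap_output should match the expected sum; percpu_val should be
	 * the value seeded for the CPU the callback ran on.
	 */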
	ASSERT_EQ(skel->bss->arraymap_output, expected_total, "array_output");
	ASSERT_EQ(skel->bss->cpu + 1, skel->bss->percpu_val, "percpu_val");

out:
	free(percpu_valbuf);
	for_each_array_map_elem__destroy(skel);
}

void test_for_each(void)
{
	if (test__start_subtest("hash_map"))
		test_hash_map();
	if (test__start_subtest("array_map"))
		test_array_map();
}