// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include <network_helpers.h>
#include "for_each_hash_map_elem.skel.h"
#include "for_each_array_map_elem.skel.h"
#include "for_each_map_elem_write_key.skel.h"

static unsigned int duration;

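/* Run a program that walks a hash map and a per-CPU hash map with
 * bpf_for_each_map_elem() and check the counters it reports back.
 */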
static void test_hash_map(void)
{
	int i, err, max_entries;
	struct for_each_hash_map_elem *skel;
	__u64 *percpu_valbuf = NULL;
	size_t percpu_val_sz;
	__u32 key, num_cpus;
	__u64 val;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);

	skel = for_each_hash_map_elem__open_and_load();
	if (!ASSERT_OK_PTR(skel, "for_each_hash_map_elem__open_and_load"))
		return;

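	/* Populate the hash map: key i maps to value i + 1. */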
	max_entries = bpf_map__max_entries(skel->maps.hashmap);
	for (i = 0; i < max_entries; i++) {
		key = i;
		val = i + 1;
		err = bpf_map__update_elem(skel->maps.hashmap, &key, sizeof(key),
					   &val, sizeof(val), BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	num_cpus = bpf_num_possible_cpus();
	percpu_val_sz = sizeof(__u64) * num_cpus;
	percpu_valbuf = malloc(percpu_val_sz);
	if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
		goto out;

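	/* Seed key 1 of the per-CPU map so that CPU i sees value i + 1. */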
	key = 1;
	for (i = 0; i < num_cpus; i++)
		percpu_valbuf[i] = i + 1;
	err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key),
				   percpu_valbuf, percpu_val_sz, BPF_ANY);
	if (!ASSERT_OK(err, "percpu_map_update"))
		goto out;

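	/* Run the program once; it iterates over both maps. */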
	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
	duration = topts.duration;
	if (CHECK(err || topts.retval, "ipv4", "err %d errno %d retval %d\n",
		  err, errno, topts.retval))
		goto out;

	ASSERT_EQ(skel->bss->hashmap_output, 4, "hashmap_output");
	ASSERT_EQ(skel->bss->hashmap_elems, max_entries, "hashmap_elems");

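	/* The BPF callback should have deleted key 1, so this lookup must fail. */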
	key = 1;
	err = bpf_map__lookup_elem(skel->maps.hashmap, &key, sizeof(key), &val, sizeof(val), 0);
	ASSERT_ERR(err, "hashmap_lookup");

	ASSERT_EQ(skel->bss->percpu_called, 1, "percpu_called");
	ASSERT_LT(skel->bss->cpu, num_cpus, "num_cpus");
	ASSERT_EQ(skel->bss->percpu_map_elems, 1, "percpu_map_elems");
	ASSERT_EQ(skel->bss->percpu_key, 1, "percpu_key");
	ASSERT_EQ(skel->bss->percpu_val, skel->bss->cpu + 1, "percpu_val");
	ASSERT_EQ(skel->bss->percpu_output, 100, "percpu_output");
out:
	free(percpu_valbuf);
	for_each_hash_map_elem__destroy(skel);
}

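/* Same exercise for an array map and a per-CPU array map: the BPF callback
 * sums the array values, skipping the last element.
 */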
static void test_array_map(void)
{
	__u32 key, num_cpus, max_entries;
	int i, err;
	struct for_each_array_map_elem *skel;
	__u64 *percpu_valbuf = NULL;
	size_t percpu_val_sz;
	__u64 val, expected_total;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);

	skel = for_each_array_map_elem__open_and_load();
	if (!ASSERT_OK_PTR(skel, "for_each_array_map_elem__open_and_load"))
		return;

	expected_total = 0;
	max_entries = bpf_map__max_entries(skel->maps.arraymap);
	for (i = 0; i < max_entries; i++) {
		key = i;
		val = i + 1;
		/* skip the last iteration for expected total */
		if (i != max_entries - 1)
			expected_total += val;
		err = bpf_map__update_elem(skel->maps.arraymap, &key, sizeof(key),
					   &val, sizeof(val), BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	num_cpus = bpf_num_possible_cpus();
	percpu_val_sz = sizeof(__u64) * num_cpus;
	percpu_valbuf = malloc(percpu_val_sz);
	if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
		goto out;

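	/* Seed key 0 of the per-CPU map so that CPU i sees value i + 1. */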
	key = 0;
	for (i = 0; i < num_cpus; i++)
		percpu_valbuf[i] = i + 1;
	err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key),
				   percpu_valbuf, percpu_val_sz, BPF_ANY);
	if (!ASSERT_OK(err, "percpu_map_update"))
		goto out;

122
123 err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
124 duration = topts.duration;
125 if (CHECK(err || topts.retval, "ipv4", "err %d errno %d retval %d\n",
126 err, errno, topts.retval))
127 goto out;

	ASSERT_EQ(skel->bss->arraymap_output, expected_total, "array_output");
	ASSERT_EQ(skel->bss->cpu + 1, skel->bss->percpu_val, "percpu_val");

out:
	free(percpu_valbuf);
	for_each_array_map_elem__destroy(skel);
}

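/* A callback that writes to the map key must be rejected by the verifier,
 * i.e. the skeleton must fail to load.
 */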
static void test_write_map_key(void)
{
	struct for_each_map_elem_write_key *skel;

	skel = for_each_map_elem_write_key__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "for_each_map_elem_write_key__open_and_load"))
		for_each_map_elem_write_key__destroy(skel);
}

void test_for_each(void)
{
	if (test__start_subtest("hash_map"))
		test_hash_map();
	if (test__start_subtest("array_map"))
		test_array_map();
	if (test__start_subtest("write_map_key"))
		test_write_map_key();
}