// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include <network_helpers.h>
#include "for_each_hash_map_elem.skel.h"
#include "for_each_array_map_elem.skel.h"
#include "for_each_map_elem_write_key.skel.h"

static unsigned int duration;

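/* Exercise bpf_for_each_map_elem() on a hash map and a per-CPU hash map:
 * populate both from user space, run the BPF program once, then check
 * the values its callbacks wrote into the skeleton's bss.
 */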
static void test_hash_map(void)
{
	int i, err, hashmap_fd, max_entries, percpu_map_fd;
	struct for_each_hash_map_elem *skel;
	__u64 *percpu_valbuf = NULL;
	__u32 key, num_cpus;
	__u64 val;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	skel = for_each_hash_map_elem__open_and_load();
	if (!ASSERT_OK_PTR(skel, "for_each_hash_map_elem__open_and_load"))
		return;

	hashmap_fd = bpf_map__fd(skel->maps.hashmap);
	max_entries = bpf_map__max_entries(skel->maps.hashmap);
	for (i = 0; i < max_entries; i++) {
		key = i;
		val = i + 1;
		err = bpf_map_update_elem(hashmap_fd, &key, &val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

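	/* seed key 1 of the per-CPU map with a distinct value (cpu + 1)
	 * for each possible CPU
	 */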
	num_cpus = bpf_num_possible_cpus();
	percpu_map_fd = bpf_map__fd(skel->maps.percpu_map);
	percpu_valbuf = malloc(sizeof(__u64) * num_cpus);
	if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
		goto out;

	key = 1;
	for (i = 0; i < num_cpus; i++)
		percpu_valbuf[i] = i + 1;
	err = bpf_map_update_elem(percpu_map_fd, &key, percpu_valbuf, BPF_ANY);
	if (!ASSERT_OK(err, "percpu_map_update"))
		goto out;

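	/* run the BPF program once against a dummy IPv4 packet */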
	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
	duration = topts.duration;
	if (CHECK(err || topts.retval, "ipv4", "err %d errno %d retval %d\n",
		  err, errno, topts.retval))
		goto out;

	ASSERT_EQ(skel->bss->hashmap_output, 4, "hashmap_output");
	ASSERT_EQ(skel->bss->hashmap_elems, max_entries, "hashmap_elems");

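	/* the BPF-side callback is expected to delete the elements it
	 * visits, so a lookup after the run must fail
	 */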
	key = 1;
	err = bpf_map_lookup_elem(hashmap_fd, &key, &val);
	ASSERT_ERR(err, "hashmap_lookup");

	ASSERT_EQ(skel->bss->percpu_called, 1, "percpu_called");
	ASSERT_LT(skel->bss->cpu, num_cpus, "num_cpus");
	ASSERT_EQ(skel->bss->percpu_map_elems, 1, "percpu_map_elems");
	ASSERT_EQ(skel->bss->percpu_key, 1, "percpu_key");
	ASSERT_EQ(skel->bss->percpu_val, skel->bss->cpu + 1, "percpu_val");
	ASSERT_EQ(skel->bss->percpu_output, 100, "percpu_output");
out:
	free(percpu_valbuf);
	for_each_hash_map_elem__destroy(skel);
}

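/* Same flow for an array map and a per-CPU array map: fill both maps,
 * run the BPF program once, and compare the sum computed by its callback
 * against the total expected from the values written here.
 */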
static void test_array_map(void)
{
	__u32 key, num_cpus, max_entries;
	int i, arraymap_fd, percpu_map_fd, err;
	struct for_each_array_map_elem *skel;
	__u64 *percpu_valbuf = NULL;
	__u64 val, expected_total;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);

	skel = for_each_array_map_elem__open_and_load();
	if (!ASSERT_OK_PTR(skel, "for_each_array_map_elem__open_and_load"))
		return;

	arraymap_fd = bpf_map__fd(skel->maps.arraymap);
	expected_total = 0;
	max_entries = bpf_map__max_entries(skel->maps.arraymap);
	for (i = 0; i < max_entries; i++) {
		key = i;
		val = i + 1;
		/* the BPF program is expected to stop iterating before the
		 * last element, so leave it out of the expected total
		 */
		if (i != max_entries - 1)
			expected_total += val;
		err = bpf_map_update_elem(arraymap_fd, &key, &val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

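	/* seed key 0 of the per-CPU array map with a distinct value per CPU */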
	num_cpus = bpf_num_possible_cpus();
	percpu_map_fd = bpf_map__fd(skel->maps.percpu_map);
	percpu_valbuf = malloc(sizeof(__u64) * num_cpus);
	if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
		goto out;

	key = 0;
	for (i = 0; i < num_cpus; i++)
		percpu_valbuf[i] = i + 1;
	err = bpf_map_update_elem(percpu_map_fd, &key, percpu_valbuf, BPF_ANY);
	if (!ASSERT_OK(err, "percpu_map_update"))
		goto out;

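	/* single run against the same dummy IPv4 packet as the hash-map test */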
	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
	duration = topts.duration;
	if (CHECK(err || topts.retval, "ipv4", "err %d errno %d retval %d\n",
		  err, errno, topts.retval))
		goto out;

	ASSERT_EQ(skel->bss->arraymap_output, expected_total, "array_output");
	ASSERT_EQ(skel->bss->cpu + 1, skel->bss->percpu_val, "percpu_val");

out:
	free(percpu_valbuf);
	for_each_array_map_elem__destroy(skel);
}

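/* A callback passed to bpf_for_each_map_elem() must not write through
 * its key pointer, so the verifier should reject this program at load
 * time; a successful load is the failure case, and the skeleton is
 * destroyed only to clean up after such an unexpected success.
 */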
static void test_write_map_key(void)
{
	struct for_each_map_elem_write_key *skel;

	skel = for_each_map_elem_write_key__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "for_each_map_elem_write_key__open_and_load"))
		for_each_map_elem_write_key__destroy(skel);
}

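/* entry point: run each scenario as an independent subtest */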
void test_for_each(void)
{
	if (test__start_subtest("hash_map"))
		test_hash_map();
	if (test__start_subtest("array_map"))
		test_array_map();
	if (test__start_subtest("write_map_key"))
		test_write_map_key();
}