// SPDX-License-Identifier: GPL-2.0-only

#include <test_progs.h>
#include "test_lookup_and_delete.skel.h"

#define START_VALUE 1234
#define NEW_VALUE 4321
#define MAX_ENTRIES 2

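/* duration is referenced by the CHECK() macro; nr_cpus is filled in by
 * test_lookup_and_delete() before any subtest runs.
 */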
static int duration;
static int nr_cpus;

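/* Populate the map from user space with keys 1..MAX_ENTRIES, each mapped to
 * START_VALUE.
 */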
static int fill_values(int map_fd)
{
	__u64 key, value = START_VALUE;
	int err;

	for (key = 1; key < MAX_ENTRIES + 1; key++) {
		err = bpf_map_update_elem(map_fd, &key, &value, BPF_NOEXIST);
		if (!ASSERT_OK(err, "bpf_map_update_elem"))
			return -1;
	}

	return 0;
}

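/* Per-CPU maps take an array with one value slot per possible CPU, so every
 * slot is initialized to START_VALUE before the element is added.
 */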
static int fill_values_percpu(int map_fd)
{
	__u64 key, value[nr_cpus];
	int i, err;

	for (i = 0; i < nr_cpus; i++)
		value[i] = START_VALUE;

	for (key = 1; key < MAX_ENTRIES + 1; key++) {
		err = bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST);
		if (!ASSERT_OK(err, "bpf_map_update_elem"))
			return -1;
	}

	return 0;
}

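/* Open the skeleton, switch hash_map to the requested map type and resize it
 * before loading, then return the loaded skeleton and the map fd.
 */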
static struct test_lookup_and_delete *setup_prog(enum bpf_map_type map_type,
						 int *map_fd)
{
	struct test_lookup_and_delete *skel;
	int err;

	skel = test_lookup_and_delete__open();
	if (!ASSERT_OK_PTR(skel, "test_lookup_and_delete__open"))
		return NULL;

	err = bpf_map__set_type(skel->maps.hash_map, map_type);
	if (!ASSERT_OK(err, "bpf_map__set_type"))
		goto cleanup;

	err = bpf_map__set_max_entries(skel->maps.hash_map, MAX_ENTRIES);
	if (!ASSERT_OK(err, "bpf_map__set_max_entries"))
		goto cleanup;

	err = test_lookup_and_delete__load(skel);
	if (!ASSERT_OK(err, "test_lookup_and_delete__load"))
		goto cleanup;

	*map_fd = bpf_map__fd(skel->maps.hash_map);
	if (!ASSERT_GE(*map_fd, 0, "bpf_map__fd"))
		goto cleanup;

	return skel;

cleanup:
	test_lookup_and_delete__destroy(skel);
	return NULL;
}

/* Triggers BPF program that updates map with given key and value */
static int trigger_tp(struct test_lookup_and_delete *skel, __u64 key,
		      __u64 value)
{
	int err;

	skel->bss->set_pid = getpid();
	skel->bss->set_key = key;
	skel->bss->set_value = value;

	err = test_lookup_and_delete__attach(skel);
	if (!ASSERT_OK(err, "test_lookup_and_delete__attach"))
		return -1;

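	/* The BPF program compares set_pid against the calling task, so the
	 * getpgid() syscall below triggers the map update from this process.
	 */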
	syscall(__NR_getpgid);

	test_lookup_and_delete__detach(skel);

	return 0;
}

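/* Plain hash map: lookup-and-delete must return the stored value and remove
 * the element in a single operation.
 */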
static void test_lookup_and_delete_hash(void)
{
	struct test_lookup_and_delete *skel;
	__u64 key, value;
	int map_fd, err;

	/* Set up the program and fill the map. */
	skel = setup_prog(BPF_MAP_TYPE_HASH, &map_fd);
	if (!ASSERT_OK_PTR(skel, "setup_prog"))
		return;

	err = fill_values(map_fd);
	if (!ASSERT_OK(err, "fill_values"))
		goto cleanup;

	/* Lookup and delete element. */
	key = 1;
	err = bpf_map_lookup_and_delete_elem(map_fd, &key, &value);
	if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
		goto cleanup;

	/* Fetched value should match the initially set value. */
	if (CHECK(value != START_VALUE, "bpf_map_lookup_and_delete_elem",
		  "unexpected value=%lld\n", value))
		goto cleanup;

	/* Check that the entry is non-existent. */
	err = bpf_map_lookup_elem(map_fd, &key, &value);
	if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
		goto cleanup;

cleanup:
	test_lookup_and_delete__destroy(skel);
}

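/* Per-CPU hash map: the fetched value is an array holding one copy of
 * START_VALUE per possible CPU.
 */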
static void test_lookup_and_delete_percpu_hash(void)
{
	struct test_lookup_and_delete *skel;
	__u64 key, val, value[nr_cpus];
	int map_fd, err, i;

	/* Set up the program and fill the map. */
	skel = setup_prog(BPF_MAP_TYPE_PERCPU_HASH, &map_fd);
	if (!ASSERT_OK_PTR(skel, "setup_prog"))
		return;

	err = fill_values_percpu(map_fd);
	if (!ASSERT_OK(err, "fill_values_percpu"))
		goto cleanup;

	/* Lookup and delete element. */
	key = 1;
	err = bpf_map_lookup_and_delete_elem(map_fd, &key, value);
	if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
		goto cleanup;

	for (i = 0; i < nr_cpus; i++) {
		val = value[i];

		/* Fetched value should match the initially set value. */
		if (CHECK(val != START_VALUE, "map value",
			  "unexpected for cpu %d: %lld\n", i, val))
			goto cleanup;
	}

	/* Check that the entry is non-existent. */
	err = bpf_map_lookup_elem(map_fd, &key, value);
	if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
		goto cleanup;

cleanup:
	test_lookup_and_delete__destroy(skel);
}

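/* LRU hash map: the map is already full, so the update triggered from the
 * BPF program must evict the least recently used entry (key=1).
 */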
static void test_lookup_and_delete_lru_hash(void)
{
	struct test_lookup_and_delete *skel;
	__u64 key, value;
	int map_fd, err;

	/* Set up the program and fill the LRU map. */
	skel = setup_prog(BPF_MAP_TYPE_LRU_HASH, &map_fd);
	if (!ASSERT_OK_PTR(skel, "setup_prog"))
		return;

	err = fill_values(map_fd);
	if (!ASSERT_OK(err, "fill_values"))
		goto cleanup;

	/* Insert a new element at key=3; the full map should reuse the LRU
	 * element (key=1).
	 */
	key = 3;
	err = trigger_tp(skel, key, NEW_VALUE);
	if (!ASSERT_OK(err, "trigger_tp"))
		goto cleanup;

	/* Lookup and delete element 3. */
	err = bpf_map_lookup_and_delete_elem(map_fd, &key, &value);
	if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
		goto cleanup;

	/* Value should match the new value. */
	if (CHECK(value != NEW_VALUE, "bpf_map_lookup_and_delete_elem",
		  "unexpected value=%lld\n", value))
		goto cleanup;

	/* Check that entries 3 and 1 are non-existent. */
	err = bpf_map_lookup_elem(map_fd, &key, &value);
	if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
		goto cleanup;

	key = 1;
	err = bpf_map_lookup_elem(map_fd, &key, &value);
	if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
		goto cleanup;

cleanup:
	test_lookup_and_delete__destroy(skel);
}

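/* LRU per-CPU hash map: the triggered update runs on a single CPU, so only
 * one per-CPU slot of the new element should hold NEW_VALUE.
 */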
static void test_lookup_and_delete_lru_percpu_hash(void)
{
	struct test_lookup_and_delete *skel;
	__u64 key, val, value[nr_cpus];
	int map_fd, err, i, cpucnt = 0;

	/* Set up the program and fill the LRU map. */
	skel = setup_prog(BPF_MAP_TYPE_LRU_PERCPU_HASH, &map_fd);
	if (!ASSERT_OK_PTR(skel, "setup_prog"))
		return;

	err = fill_values_percpu(map_fd);
	if (!ASSERT_OK(err, "fill_values_percpu"))
		goto cleanup;

	/* Insert a new element at key=3; the full map should reuse the LRU
	 * element (key=1).
	 */
	key = 3;
	err = trigger_tp(skel, key, NEW_VALUE);
	if (!ASSERT_OK(err, "trigger_tp"))
		goto cleanup;

	/* Zero the value array so we can tell which CPU slot was written. */
	for (i = 0; i < nr_cpus; i++)
		value[i] = 0;

	/* Lookup and delete element 3. */
	err = bpf_map_lookup_and_delete_elem(map_fd, &key, value);
	if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
		goto cleanup;

	/* Check if only one CPU has set the value. */
	for (i = 0; i < nr_cpus; i++) {
		val = value[i];
		if (val) {
			if (CHECK(val != NEW_VALUE, "map value",
				  "unexpected for cpu %d: %lld\n", i, val))
				goto cleanup;
			cpucnt++;
		}
	}
	if (CHECK(cpucnt != 1, "map value", "set for %d CPUs instead of 1!\n",
		  cpucnt))
		goto cleanup;

	/* Check that entries 3 and 1 are non-existent. */
	err = bpf_map_lookup_elem(map_fd, &key, value);
	if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
		goto cleanup;

	key = 1;
	err = bpf_map_lookup_elem(map_fd, &key, value);
	if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
		goto cleanup;

cleanup:
	test_lookup_and_delete__destroy(skel);
}

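/* Entry point: run each supported hash map flavor as its own subtest. */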
void test_lookup_and_delete(void)
{
	nr_cpus = bpf_num_possible_cpus();

	if (test__start_subtest("lookup_and_delete"))
		test_lookup_and_delete_hash();
	if (test__start_subtest("lookup_and_delete_percpu"))
		test_lookup_and_delete_percpu_hash();
	if (test__start_subtest("lookup_and_delete_lru"))
		test_lookup_and_delete_lru_hash();
	if (test__start_subtest("lookup_and_delete_lru_percpu"))
		test_lookup_and_delete_lru_percpu_hash();
}