/* Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
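
/* Userspace driver for the map_perf_test BPF benchmark.
 *
 * The program loads "<argv[0]>_kern.o" and then issues cheap dummy
 * syscalls (getuid, geteuid, connect on a bad fd, ...) in tight loops,
 * one forked child per CPU.  Each of those syscalls is expected to be
 * hooked by a BPF program in the companion kernel object that exercises
 * one particular map type; this file only triggers those programs and
 * reports how many triggering calls complete per second.
 */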
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/types.h>
#include <asm/unistd.h>
#include <unistd.h>
#include <assert.h>
#include <sys/wait.h>
#include <stdlib.h>
#include <signal.h>
#include <linux/bpf.h>
#include <string.h>
#include <time.h>
#include <sys/resource.h>
#include <arpa/inet.h>
#include <errno.h>

#include "libbpf.h"
#include "bpf_load.h"

#define TEST_BIT(t) (1U << (t))
#define MAX_NR_CPUS 1024

static __u64 time_get_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

enum test_type {
	HASH_PREALLOC,
	PERCPU_HASH_PREALLOC,
	HASH_KMALLOC,
	PERCPU_HASH_KMALLOC,
	LRU_HASH_PREALLOC,
	NOCOMMON_LRU_HASH_PREALLOC,
	LPM_KMALLOC,
	HASH_LOOKUP,
	ARRAY_LOOKUP,
	INNER_LRU_HASH_PREALLOC,
	LRU_HASH_LOOKUP,
	NR_TESTS,
};

const char *test_map_names[NR_TESTS] = {
	[HASH_PREALLOC] = "hash_map",
	[PERCPU_HASH_PREALLOC] = "percpu_hash_map",
	[HASH_KMALLOC] = "hash_map_alloc",
	[PERCPU_HASH_KMALLOC] = "percpu_hash_map_alloc",
	[LRU_HASH_PREALLOC] = "lru_hash_map",
	[NOCOMMON_LRU_HASH_PREALLOC] = "nocommon_lru_hash_map",
	[LPM_KMALLOC] = "lpm_trie_map_alloc",
	[HASH_LOOKUP] = "hash_map",
	[ARRAY_LOOKUP] = "array_map",
	[INNER_LRU_HASH_PREALLOC] = "inner_lru_hash_map",
	[LRU_HASH_LOOKUP] = "lru_hash_lookup_map",
};

static int test_flags = ~0;
static uint32_t num_map_entries;
static uint32_t inner_lru_hash_size;
static int inner_lru_hash_idx = -1;
static int array_of_lru_hashs_idx = -1;
static int lru_hash_lookup_idx = -1;
static int lru_hash_lookup_test_entries = 32;
static uint32_t max_cnt = 1000000;

static int check_test_flags(enum test_type t)
{
	return test_flags & TEST_BIT(t);
}

static void test_hash_prealloc(int cpu)
{
	__u64 start_time;
	int i;

	start_time = time_get_ns();
	for (i = 0; i < max_cnt; i++)
		syscall(__NR_getuid);
	printf("%d:hash_map_perf pre-alloc %lld events per sec\n",
	       cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time));
}

static int pre_test_lru_hash_lookup(int tasks)
{
	int fd = map_fd[lru_hash_lookup_idx];
	uint32_t key;
	long val = 1;
	int ret;

	if (num_map_entries > lru_hash_lookup_test_entries)
		lru_hash_lookup_test_entries = num_map_entries;

	/* Populate the lru_hash_lookup_map for the LRU_HASH_LOOKUP perf test.
	 *
	 * It is fine if the user requests a map with num_map_entries < 32:
	 * some of the later LRU hash lookups may then return not-found, but
	 * we are not interested in the performance of such a small LRU map.
	 */
	for (key = 0; key < lru_hash_lookup_test_entries; key++) {
		ret = bpf_map_update_elem(fd, &key, &val, BPF_NOEXIST);
		if (ret)
			return ret;
	}

	return 0;
}

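/* Shared driver for all the LRU benchmarks.  Every connect() issued below
 * intentionally fails with EBADF; it only serves to trigger the BPF program
 * attached in the kernel object.  The destination address is assumed to be
 * how parameters reach that program: s6_addr16[2] selects the LRU test
 * variant and, for LRU_HASH_LOOKUP, s6_addr32[3] carries a lookup key that
 * advances in strides of 32 and wraps within lru_hash_lookup_test_entries.
 */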
static void do_test_lru(enum test_type test, int cpu)
{
	static int inner_lru_map_fds[MAX_NR_CPUS];

	struct sockaddr_in6 in6 = { .sin6_family = AF_INET6 };
	const char *test_name;
	__u64 start_time;
	int i, ret;

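	/* For the inner-LRU test, CPU 0 reuses the inner map defined in the
	 * object file, while every other CPU creates its own LRU hash on its
	 * local NUMA node and installs it into this CPU's slot of the outer
	 * array-of-maps.
	 */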
	if (test == INNER_LRU_HASH_PREALLOC) {
		int outer_fd = map_fd[array_of_lru_hashs_idx];
		unsigned int mycpu, mynode;

		assert(cpu < MAX_NR_CPUS);

		if (cpu) {
			ret = syscall(__NR_getcpu, &mycpu, &mynode, NULL);
			assert(!ret);

			inner_lru_map_fds[cpu] =
				bpf_create_map_node(BPF_MAP_TYPE_LRU_HASH,
						    sizeof(uint32_t),
						    sizeof(long),
						    inner_lru_hash_size, 0,
						    mynode);
			if (inner_lru_map_fds[cpu] == -1) {
				printf("cannot create BPF_MAP_TYPE_LRU_HASH %s(%d)\n",
				       strerror(errno), errno);
				exit(1);
			}
		} else {
			inner_lru_map_fds[cpu] = map_fd[inner_lru_hash_idx];
		}

		ret = bpf_map_update_elem(outer_fd, &cpu,
					  &inner_lru_map_fds[cpu],
					  BPF_ANY);
		if (ret) {
			printf("cannot update ARRAY_OF_LRU_HASHS with key:%u. %s(%d)\n",
			       cpu, strerror(errno), errno);
			exit(1);
		}
	}

	in6.sin6_addr.s6_addr16[0] = 0xdead;
	in6.sin6_addr.s6_addr16[1] = 0xbeef;

	if (test == LRU_HASH_PREALLOC) {
		test_name = "lru_hash_map_perf";
		in6.sin6_addr.s6_addr16[2] = 0;
	} else if (test == NOCOMMON_LRU_HASH_PREALLOC) {
		test_name = "nocommon_lru_hash_map_perf";
		in6.sin6_addr.s6_addr16[2] = 1;
	} else if (test == INNER_LRU_HASH_PREALLOC) {
		test_name = "inner_lru_hash_map_perf";
		in6.sin6_addr.s6_addr16[2] = 2;
	} else if (test == LRU_HASH_LOOKUP) {
		test_name = "lru_hash_lookup_perf";
		in6.sin6_addr.s6_addr16[2] = 3;
		in6.sin6_addr.s6_addr32[3] = 0;
	} else {
		assert(0);
	}

	start_time = time_get_ns();
	for (i = 0; i < max_cnt; i++) {
		ret = connect(-1, (const struct sockaddr *)&in6, sizeof(in6));
		assert(ret == -1 && errno == EBADF);
		if (in6.sin6_addr.s6_addr32[3] <
		    lru_hash_lookup_test_entries - 32)
			in6.sin6_addr.s6_addr32[3] += 32;
		else
			in6.sin6_addr.s6_addr32[3] = 0;
	}
	printf("%d:%s pre-alloc %lld events per sec\n",
	       cpu, test_name,
	       max_cnt * 1000000000ll / (time_get_ns() - start_time));
}

static void test_lru_hash_prealloc(int cpu)
{
	do_test_lru(LRU_HASH_PREALLOC, cpu);
}

static void test_nocommon_lru_hash_prealloc(int cpu)
{
	do_test_lru(NOCOMMON_LRU_HASH_PREALLOC, cpu);
}

static void test_inner_lru_hash_prealloc(int cpu)
{
	do_test_lru(INNER_LRU_HASH_PREALLOC, cpu);
}

static void test_lru_hash_lookup(int cpu)
{
	do_test_lru(LRU_HASH_LOOKUP, cpu);
}

static void test_percpu_hash_prealloc(int cpu)
{
	__u64 start_time;
	int i;

	start_time = time_get_ns();
	for (i = 0; i < max_cnt; i++)
		syscall(__NR_geteuid);
	printf("%d:percpu_hash_map_perf pre-alloc %lld events per sec\n",
	       cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time));
}

static void test_hash_kmalloc(int cpu)
{
	__u64 start_time;
	int i;

	start_time = time_get_ns();
	for (i = 0; i < max_cnt; i++)
		syscall(__NR_getgid);
	printf("%d:hash_map_perf kmalloc %lld events per sec\n",
	       cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time));
}

static void test_percpu_hash_kmalloc(int cpu)
{
	__u64 start_time;
	int i;

	start_time = time_get_ns();
	for (i = 0; i < max_cnt; i++)
		syscall(__NR_getegid);
	printf("%d:percpu_hash_map_perf kmalloc %lld events per sec\n",
	       cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time));
}

static void test_lpm_kmalloc(int cpu)
{
	__u64 start_time;
	int i;

	start_time = time_get_ns();
	for (i = 0; i < max_cnt; i++)
		syscall(__NR_gettid);
	printf("%d:lpm_perf kmalloc %lld events per sec\n",
	       cpu, max_cnt * 1000000000ll / (time_get_ns() - start_time));
}

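/* The two lookup benchmarks below report lookups per second rather than
 * syscalls per second: the kernel-side programs are assumed to perform 64
 * map lookups per triggering syscall, hence the "* 64" factor.
 */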
static void test_hash_lookup(int cpu)
{
	__u64 start_time;
	int i;

	start_time = time_get_ns();
	for (i = 0; i < max_cnt; i++)
		syscall(__NR_getpgid, 0);
	printf("%d:hash_lookup %lld lookups per sec\n",
	       cpu, max_cnt * 1000000000ll * 64 / (time_get_ns() - start_time));
}

static void test_array_lookup(int cpu)
{
	__u64 start_time;
	int i;

	start_time = time_get_ns();
	for (i = 0; i < max_cnt; i++)
		syscall(__NR_getpgrp);
	printf("%d:array_lookup %lld lookups per sec\n",
	       cpu, max_cnt * 1000000000ll * 64 / (time_get_ns() - start_time));
}

typedef int (*pre_test_func)(int tasks);
const pre_test_func pre_test_funcs[] = {
	[LRU_HASH_LOOKUP] = pre_test_lru_hash_lookup,
};

typedef void (*test_func)(int cpu);
const test_func test_funcs[] = {
	[HASH_PREALLOC] = test_hash_prealloc,
	[PERCPU_HASH_PREALLOC] = test_percpu_hash_prealloc,
	[HASH_KMALLOC] = test_hash_kmalloc,
	[PERCPU_HASH_KMALLOC] = test_percpu_hash_kmalloc,
	[LRU_HASH_PREALLOC] = test_lru_hash_prealloc,
	[NOCOMMON_LRU_HASH_PREALLOC] = test_nocommon_lru_hash_prealloc,
	[LPM_KMALLOC] = test_lpm_kmalloc,
	[HASH_LOOKUP] = test_hash_lookup,
	[ARRAY_LOOKUP] = test_array_lookup,
	[INNER_LRU_HASH_PREALLOC] = test_inner_lru_hash_prealloc,
	[LRU_HASH_LOOKUP] = test_lru_hash_lookup,
};

static int pre_test(int tasks)
{
	int i;

	for (i = 0; i < NR_TESTS; i++) {
		if (pre_test_funcs[i] && check_test_flags(i)) {
			int ret = pre_test_funcs[i](tasks);

			if (ret)
				return ret;
		}
	}

	return 0;
}

static void loop(int cpu)
{
	cpu_set_t cpuset;
	int i;

	CPU_ZERO(&cpuset);
	CPU_SET(cpu, &cpuset);
	sched_setaffinity(0, sizeof(cpuset), &cpuset);

	for (i = 0; i < NR_TESTS; i++) {
		if (check_test_flags(i))
			test_funcs[i](cpu);
	}
}

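/* Fork one child per requested task; loop() pins each child to its own CPU
 * and runs every enabled test there.
 */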
static void run_perf_test(int tasks)
{
	pid_t pid[tasks];
	int i;

	assert(!pre_test(tasks));

	for (i = 0; i < tasks; i++) {
		pid[i] = fork();
		if (pid[i] == 0) {
			loop(i);
			exit(0);
		} else if (pid[i] == -1) {
			printf("couldn't spawn #%d process\n", i);
			exit(1);
		}
	}
	for (i = 0; i < tasks; i++) {
		int status;

		assert(waitpid(pid[i], &status, 0) == pid[i]);
		assert(status == 0);
	}
}

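/* Seed the LPM trie with 512 random IPv4 prefixes plus one fixed
 * 192.168.0.1/32 entry so the LPM_KMALLOC test has data to match against.
 * Note the hard-coded map_fd[6]; it is assumed to correspond to the
 * position of lpm_trie_map_alloc in the kernel object's map section.
 */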
static void fill_lpm_trie(void)
{
	struct bpf_lpm_trie_key *key;
	unsigned long value = 0;
	unsigned int i;
	int r;

	key = alloca(sizeof(*key) + 4);
	key->prefixlen = 32;

	for (i = 0; i < 512; ++i) {
		key->prefixlen = rand() % 33;
		key->data[0] = rand() & 0xff;
		key->data[1] = rand() & 0xff;
		key->data[2] = rand() & 0xff;
		key->data[3] = rand() & 0xff;
		r = bpf_map_update_elem(map_fd[6], key, &value, 0);
		assert(!r);
	}

	key->prefixlen = 32;
	key->data[0] = 192;
	key->data[1] = 168;
	key->data[2] = 0;
	key->data[3] = 1;
	value = 128;

	r = bpf_map_update_elem(map_fd[6], key, &value, 0);
	assert(!r);
}

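/* Callback invoked by load_bpf_file_fixup_map() for every map found in the
 * object file, before the maps are created.  It records the indices of the
 * maps that need special handling and applies the optional max_entries
 * override given on the command line.
 */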
static void fixup_map(struct bpf_map_data *map, int idx)
{
	int i;

	if (!strcmp("inner_lru_hash_map", map->name)) {
		inner_lru_hash_idx = idx;
		inner_lru_hash_size = map->def.max_entries;
	}

	if (!strcmp("array_of_lru_hashs", map->name)) {
		if (inner_lru_hash_idx == -1) {
			printf("inner_lru_hash_map must be defined before array_of_lru_hashs\n");
			exit(1);
		}
		map->def.inner_map_idx = inner_lru_hash_idx;
		array_of_lru_hashs_idx = idx;
	}

	if (!strcmp("lru_hash_lookup_map", map->name))
		lru_hash_lookup_idx = idx;

	if (num_map_entries <= 0)
		return;

	inner_lru_hash_size = num_map_entries;

	/* Only change the max_entries for the enabled test(s) */
	for (i = 0; i < NR_TESTS; i++) {
		if (!strcmp(test_map_names[i], map->name) &&
		    (check_test_flags(i))) {
			map->def.max_entries = num_map_entries;
		}
	}
}

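/* Usage (all arguments are optional and positional):
 *   map_perf_test [test_flags] [num_cpus] [num_map_entries] [max_cnt]
 *
 * test_flags is a bitmask of TEST_BIT(test_type) values; the default (~0)
 * enables every test.  Passing 0 for test_flags or num_cpus keeps the
 * default.
 */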
int main(int argc, char **argv)
{
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
	char filename[256];
	int num_cpu = 8;

	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
		perror("setrlimit(RLIMIT_MEMLOCK)");
		return 1;
	}

	if (argc > 1)
		test_flags = atoi(argv[1]) ? : test_flags;

	if (argc > 2)
		num_cpu = atoi(argv[2]) ? : num_cpu;

	if (argc > 3)
		num_map_entries = atoi(argv[3]);

	if (argc > 4)
		max_cnt = atoi(argv[4]);

	if (load_bpf_file_fixup_map(filename, fixup_map)) {
		printf("%s", bpf_log_buf);
		return 1;
	}

	fill_lpm_trie();

	run_perf_test(num_cpu);

	return 0;
}