xref: /openbmc/linux/samples/bpf/xdp_sample_user.c (revision a1117495)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #define _GNU_SOURCE
3 
4 #include <arpa/inet.h>
5 #include <bpf/bpf.h>
6 #include <bpf/libbpf.h>
7 #include <errno.h>
8 #include <fcntl.h>
9 #include <getopt.h>
10 #include <linux/ethtool.h>
11 #include <linux/hashtable.h>
12 #include <linux/if_link.h>
13 #include <linux/jhash.h>
14 #include <linux/limits.h>
15 #include <linux/list.h>
16 #include <linux/sockios.h>
17 #include <locale.h>
18 #include <math.h>
19 #include <net/if.h>
20 #include <poll.h>
21 #include <signal.h>
22 #include <stdbool.h>
23 #include <stdio.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <sys/ioctl.h>
27 #include <sys/mman.h>
28 #include <sys/resource.h>
29 #include <sys/signalfd.h>
30 #include <sys/sysinfo.h>
31 #include <sys/timerfd.h>
32 #include <sys/utsname.h>
33 #include <time.h>
34 #include <unistd.h>
35 
36 #include "bpf_util.h"
37 #include "xdp_sample_user.h"
38 
/* Conditional printf: emit the formatted message only when @cond is true.
 * Wrapped in a GNU statement expression so it can appear where an
 * expression is expected.
 */
#define __sample_print(fmt, cond, ...)                                         \
	({                                                                     \
		if (cond)                                                      \
			printf(fmt, ##__VA_ARGS__);                            \
	})

/* Always print, regardless of log level */
#define print_always(fmt, ...) __sample_print(fmt, 1, ##__VA_ARGS__)
/* Print only when the default (verbose) log level is enabled */
#define print_default(fmt, ...)                                                \
	__sample_print(fmt, sample_log_level & LL_DEFAULT, ##__VA_ARGS__)
/* Print when an error count is non-zero or verbose mode is on; also latch
 * sample_err_exp once an error has been reported.
 */
#define __print_err(err, fmt, ...)                                             \
	({                                                                     \
		__sample_print(fmt, err > 0 || sample_log_level & LL_DEFAULT,  \
			       ##__VA_ARGS__);                                 \
		sample_err_exp = sample_err_exp ? true : err > 0;              \
	})
#define print_err(err, fmt, ...) __print_err(err, fmt, ##__VA_ARGS__)

/* Column format: locale-grouped, 10-wide value followed by a 13-wide label */
#define __COLUMN(x) "%'10" x " %-13s"
#define FMT_COLUMNf __COLUMN(".0f")
#define FMT_COLUMNd __COLUMN("d")
#define FMT_COLUMNl __COLUMN("llu")
/* Value/label argument pairs consumed by the column formats above */
#define RX(rx) rx, "rx/s"
#define PPS(pps) pps, "pkt/s"
#define DROP(drop) drop, "drop/s"
#define ERR(err) err, "error/s"
#define HITS(hits) hits, "hit/s"
#define XMIT(xmit) xmit, "xmit/s"
#define PASS(pass) pass, "pass/s"
#define REDIR(redir) redir, "redir/s"
#define NANOSEC_PER_SEC 1000000000 /* 10^9 */

/* Extra pseudo-action used to account for unrecognized XDP return codes */
#define XDP_UNKNOWN (XDP_REDIRECT + 1)
#define XDP_ACTION_MAX (XDP_UNKNOWN + 1)
/* Number of tracked redirect error classes (see xdp_redirect_err_names) */
#define XDP_REDIRECT_ERR_MAX 7
/* Indices of the BPF maps consumed by this sample (see sample_map/sample_mmap) */
enum map_type {
	MAP_RX,
	MAP_REDIRECT_ERR,
	MAP_CPUMAP_ENQUEUE,
	MAP_CPUMAP_KTHREAD,
	MAP_EXCEPTION,
	MAP_DEVMAP_XMIT,
	MAP_DEVMAP_XMIT_MULTI,
	NUM_MAP,
};

/* Bit flags controlling output verbosity */
enum log_level {
	LL_DEFAULT = 1U << 0,
	LL_SIMPLE = 1U << 1,
	LL_DEBUG = 1U << 2,
};

/* One sampled snapshot: per-CPU counters plus their sum, timestamped with
 * CLOCK_MONOTONIC nanoseconds at collection time.
 */
struct record {
	__u64 timestamp;
	struct datarec total;
	/* array with one entry per possible CPU (libbpf_num_possible_cpus) */
	struct datarec *cpu;
};

/* Hashtable entry holding xmit stats for one (from,to) ifindex pair */
struct map_entry {
	struct hlist_node node;
	/* from_ifindex << 32 | to_ifindex */
	__u64 pair;
	struct record val;
};

/* Full set of statistics tracked between two sampling intervals */
struct stats_record {
	struct record rx_cnt;
	struct record redir_err[XDP_REDIRECT_ERR_MAX];
	struct record kthread;
	struct record exception[XDP_ACTION_MAX];
	struct record devmap_xmit;
	/* per interface-pair xmit stats, keyed by map_entry::pair */
	DECLARE_HASHTABLE(xmit_map, 5);
	/* flexible array: one enqueue record per CPU (sample_n_cpus) */
	struct record enq[];
};
112 
/* Aggregated per-interval rates computed by the stats_get_* helpers */
struct sample_output {
	struct {
		__u64 rx;
		__u64 redir;
		__u64 drop;
		__u64 drop_xmit;
		__u64 err;
		__u64 xmit;
	} totals;
	struct {
		union {
			__u64 pps;
			__u64 num;
		};
		__u64 drop;
		__u64 err;
	} rx_cnt;
	struct {
		__u64 suc;	/* successful redirects/s */
		__u64 err;	/* failed redirects/s */
	} redir_cnt;
	struct {
		__u64 hits;	/* xdp_exception tracepoint hits/s */
	} except_cnt;
	struct {
		__u64 pps;
		__u64 drop;
		__u64 err;
		double bavg;	/* average xmit bulk size */
	} xmit_cnt;
};

/* XDP programs installed by the sample: ifindex, program id, attach flags */
struct xdp_desc {
	int ifindex;
	__u32 prog_id;
	int flags;
} sample_xdp_progs[32];

/* mmap'ed per-CPU counter arrays, one slot per entry of enum map_type */
struct datarec *sample_mmap[NUM_MAP];
struct bpf_map *sample_map[NUM_MAP];
size_t sample_map_count[NUM_MAP];
enum log_level sample_log_level;
struct sample_output sample_out;
unsigned long sample_interval;	/* sampling interval in seconds */
bool sample_err_exp;		/* latched once print_err reported an error */
int sample_xdp_cnt;		/* entries used in sample_xdp_progs */
int sample_n_cpus;
int sample_sig_fd;
int sample_mask;		/* SAMPLE_* feature bits selecting statistics */
162 
/* Names of the redirect error classes; index 0 is success, index 1 collects
 * every errno not matched by the specific entries below.
 */
static const char *xdp_redirect_err_names[XDP_REDIRECT_ERR_MAX] = {
	/* Key=1 keeps unknown errors */
	"Success",
	"Unknown",
	"EINVAL",
	"ENETDOWN",
	"EMSGSIZE",
	"EOPNOTSUPP",
	"ENOSPC",
};

/* Help strings for the table above, keyed from "Unknown" (index 1) */
static const char *xdp_redirect_err_help[XDP_REDIRECT_ERR_MAX - 1] = {
	"Unknown error",
	"Invalid redirection",
	"Device being redirected to is down",
	"Packet length too large for device",
	"Operation not supported",
	"No space in ptr_ring of cpumap kthread",
};

/* Printable names for XDP actions, including the local XDP_UNKNOWN slot */
static const char *xdp_action_names[XDP_ACTION_MAX] = {
	[XDP_ABORTED]  = "XDP_ABORTED",
	[XDP_DROP]     = "XDP_DROP",
	[XDP_PASS]     = "XDP_PASS",
	[XDP_TX]       = "XDP_TX",
	[XDP_REDIRECT] = "XDP_REDIRECT",
	[XDP_UNKNOWN]  = "XDP_UNKNOWN",
};
192 
193 static __u64 gettime(void)
194 {
195 	struct timespec t;
196 	int res;
197 
198 	res = clock_gettime(CLOCK_MONOTONIC, &t);
199 	if (res < 0) {
200 		fprintf(stderr, "Error with gettimeofday! (%i)\n", res);
201 		return UINT64_MAX;
202 	}
203 	return (__u64)t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec;
204 }
205 
206 static const char *action2str(int action)
207 {
208 	if (action < XDP_ACTION_MAX)
209 		return xdp_action_names[action];
210 	return NULL;
211 }
212 
/* Print the static description of the output format. The generic preamble is
 * always shown; the per-statistic sections are included only when the
 * corresponding SAMPLE_* bit is set in @mask.
 */
static void sample_print_help(int mask)
{
	printf("Output format description\n\n"
	       "By default, redirect success statistics are disabled, use -s to enable.\n"
	       "The terse output mode is default, verbose mode can be activated using -v\n"
	       "Use SIGQUIT (Ctrl + \\) to switch the mode dynamically at runtime\n\n"
	       "Terse mode displays at most the following fields:\n"
	       "  rx/s        Number of packets received per second\n"
	       "  redir/s     Number of packets successfully redirected per second\n"
	       "  err,drop/s  Aggregated count of errors per second (including dropped packets)\n"
	       "  xmit/s      Number of packets transmitted on the output device per second\n\n"
	       "Output description for verbose mode:\n"
	       "  FIELD                 DESCRIPTION\n");

	if (mask & SAMPLE_RX_CNT) {
		printf("  receive\t\tDisplays the number of packets received & errors encountered\n"
		       " \t\t\tWhenever an error or packet drop occurs, details of per CPU error\n"
		       " \t\t\tand drop statistics will be expanded inline in terse mode.\n"
		       " \t\t\t\tpkt/s     - Packets received per second\n"
		       " \t\t\t\tdrop/s    - Packets dropped per second\n"
		       " \t\t\t\terror/s   - Errors encountered per second\n\n");
	}
	if (mask & (SAMPLE_REDIRECT_CNT | SAMPLE_REDIRECT_ERR_CNT)) {
		printf("  redirect\t\tDisplays the number of packets successfully redirected\n"
		       "  \t\t\tErrors encountered are expanded under redirect_err field\n"
		       "  \t\t\tNote that passing -s to enable it has a per packet overhead\n"
		       "  \t\t\t\tredir/s   - Packets redirected successfully per second\n\n"
		       "  redirect_err\t\tDisplays the number of packets that failed redirection\n"
		       "  \t\t\tThe errno is expanded under this field with per CPU count\n"
		       "  \t\t\tThe recognized errors are:\n");

		/* Start at 2: skip the "Success" and generic "Unknown" rows */
		for (int i = 2; i < XDP_REDIRECT_ERR_MAX; i++)
			printf("\t\t\t  %s: %s\n", xdp_redirect_err_names[i],
			       xdp_redirect_err_help[i - 1]);

		printf("  \n\t\t\t\terror/s   - Packets that failed redirection per second\n\n");
	}

	if (mask & SAMPLE_CPUMAP_ENQUEUE_CNT) {
		printf("  enqueue to cpu N\tDisplays the number of packets enqueued to bulk queue of CPU N\n"
		       "  \t\t\tExpands to cpu:FROM->N to display enqueue stats for each CPU enqueuing to CPU N\n"
		       "  \t\t\tReceived packets can be associated with the CPU redirect program is enqueuing \n"
		       "  \t\t\tpackets to.\n"
		       "  \t\t\t\tpkt/s    - Packets enqueued per second from other CPU to CPU N\n"
		       "  \t\t\t\tdrop/s   - Packets dropped when trying to enqueue to CPU N\n"
		       "  \t\t\t\tbulk-avg - Average number of packets processed for each event\n\n");
	}

	if (mask & SAMPLE_CPUMAP_KTHREAD_CNT) {
		printf("  kthread\t\tDisplays the number of packets processed in CPUMAP kthread for each CPU\n"
		       "  \t\t\tPackets consumed from ptr_ring in kthread, and its xdp_stats (after calling \n"
		       "  \t\t\tCPUMAP bpf prog) are expanded below this. xdp_stats are expanded as a total and\n"
		       "  \t\t\tthen per-CPU to associate it to each CPU's pinned CPUMAP kthread.\n"
		       "  \t\t\t\tpkt/s    - Packets consumed per second from ptr_ring\n"
		       "  \t\t\t\tdrop/s   - Packets dropped per second in kthread\n"
		       "  \t\t\t\tsched    - Number of times kthread called schedule()\n\n"
		       "  \t\t\txdp_stats (also expands to per-CPU counts)\n"
		       "  \t\t\t\tpass/s  - XDP_PASS count for CPUMAP program execution\n"
		       "  \t\t\t\tdrop/s  - XDP_DROP count for CPUMAP program execution\n"
		       "  \t\t\t\tredir/s - XDP_REDIRECT count for CPUMAP program execution\n\n");
	}

	if (mask & SAMPLE_EXCEPTION_CNT) {
		printf("  xdp_exception\t\tDisplays xdp_exception tracepoint events\n"
		       "  \t\t\tThis can occur due to internal driver errors, unrecognized\n"
		       "  \t\t\tXDP actions and due to explicit user trigger by use of XDP_ABORTED\n"
		       "  \t\t\tEach action is expanded below this field with its count\n"
		       "  \t\t\t\thit/s     - Number of times the tracepoint was hit per second\n\n");
	}

	if (mask & SAMPLE_DEVMAP_XMIT_CNT) {
		printf("  devmap_xmit\t\tDisplays devmap_xmit tracepoint events\n"
		       "  \t\t\tThis tracepoint is invoked for successful transmissions on output\n"
		       "  \t\t\tdevice but these statistics are not available for generic XDP mode,\n"
		       "  \t\t\thence they will be omitted from the output when using SKB mode\n"
		       "  \t\t\t\txmit/s    - Number of packets that were transmitted per second\n"
		       "  \t\t\t\tdrop/s    - Number of packets that failed transmissions per second\n"
		       "  \t\t\t\tdrv_err/s - Number of internal driver errors per second\n"
		       "  \t\t\t\tbulk-avg  - Average number of packets processed for each event\n\n");
	}
}
294 
/* Print usage information: the format help (unless called on an error path)
 * followed by one line per long option in @long_options.
 */
void sample_usage(char *argv[], const struct option *long_options,
		  const char *doc, int mask, bool error)
{
	const struct option *opt;

	if (!error)
		sample_print_help(mask);

	printf("\n%s\nOption for %s:\n", doc, argv[0]);
	for (opt = long_options; opt->name != 0; opt++) {
		printf(" --%-15s", opt->name);
		if (opt->flag != NULL)
			printf(" flag (internal value: %d)", *opt->flag);
		else
			printf("\t short-option: -%c", opt->val);
		printf("\n");
	}
	printf("\n");
}
315 
316 static struct datarec *alloc_record_per_cpu(void)
317 {
318 	unsigned int nr_cpus = libbpf_num_possible_cpus();
319 	struct datarec *array;
320 
321 	array = calloc(nr_cpus, sizeof(*array));
322 	if (!array) {
323 		fprintf(stderr, "Failed to allocate memory (nr_cpus: %u)\n",
324 			nr_cpus);
325 		return NULL;
326 	}
327 	return array;
328 }
329 
330 static int map_entry_init(struct map_entry *e, __u64 pair)
331 {
332 	e->pair = pair;
333 	INIT_HLIST_NODE(&e->node);
334 	e->val.timestamp = gettime();
335 	e->val.cpu = alloc_record_per_cpu();
336 	if (!e->val.cpu)
337 		return -ENOMEM;
338 	return 0;
339 }
340 
341 static void map_collect_percpu(struct datarec *values, struct record *rec)
342 {
343 	/* For percpu maps, userspace gets a value per possible CPU */
344 	unsigned int nr_cpus = libbpf_num_possible_cpus();
345 	__u64 sum_xdp_redirect = 0;
346 	__u64 sum_processed = 0;
347 	__u64 sum_xdp_pass = 0;
348 	__u64 sum_xdp_drop = 0;
349 	__u64 sum_dropped = 0;
350 	__u64 sum_issue = 0;
351 	int i;
352 
353 	/* Get time as close as possible to reading map contents */
354 	rec->timestamp = gettime();
355 
356 	/* Record and sum values from each CPU */
357 	for (i = 0; i < nr_cpus; i++) {
358 		rec->cpu[i].processed = READ_ONCE(values[i].processed);
359 		rec->cpu[i].dropped = READ_ONCE(values[i].dropped);
360 		rec->cpu[i].issue = READ_ONCE(values[i].issue);
361 		rec->cpu[i].xdp_pass = READ_ONCE(values[i].xdp_pass);
362 		rec->cpu[i].xdp_drop = READ_ONCE(values[i].xdp_drop);
363 		rec->cpu[i].xdp_redirect = READ_ONCE(values[i].xdp_redirect);
364 
365 		sum_processed += rec->cpu[i].processed;
366 		sum_dropped += rec->cpu[i].dropped;
367 		sum_issue += rec->cpu[i].issue;
368 		sum_xdp_pass += rec->cpu[i].xdp_pass;
369 		sum_xdp_drop += rec->cpu[i].xdp_drop;
370 		sum_xdp_redirect += rec->cpu[i].xdp_redirect;
371 	}
372 
373 	rec->total.processed = sum_processed;
374 	rec->total.dropped = sum_dropped;
375 	rec->total.issue = sum_issue;
376 	rec->total.xdp_pass = sum_xdp_pass;
377 	rec->total.xdp_drop = sum_xdp_drop;
378 	rec->total.xdp_redirect = sum_xdp_redirect;
379 }
380 
/* Drain the devmap xmit multi hash map using batched lookups and fold each
 * (from,to) ifindex pair's per-CPU counters into rec->xmit_map, creating a
 * hashtable entry the first time a pair is seen.
 *
 * Returns 0 on success, -ENOMEM when an entry or its per-CPU array cannot
 * be allocated.
 * NOTE(review): a bpf_map_lookup_batch() failure other than ENOENT only
 * breaks the loop; the function still returns 0 — presumably best-effort
 * sampling, confirm.
 */
static int map_collect_percpu_devmap(int map_fd, struct stats_record *rec)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	__u32 batch, count = 32;
	struct datarec *values;
	bool init = false;
	__u64 *keys;
	int i, ret;

	keys = calloc(count, sizeof(__u64));
	if (!keys)
		return -ENOMEM;
	/* One datarec per possible CPU for each of up to `count` keys */
	values = calloc(count * nr_cpus, sizeof(struct datarec));
	if (!values) {
		free(keys);
		return -ENOMEM;
	}

	for (;;) {
		bool exit = false;

		/* in_batch is NULL on the first call to start iteration */
		ret = bpf_map_lookup_batch(map_fd, init ? &batch : NULL, &batch,
					   keys, values, &count, NULL);
		if (ret < 0 && errno != ENOENT)
			break;
		/* ENOENT marks the final batch; process it before leaving */
		if (errno == ENOENT)
			exit = true;

		init = true;
		for (i = 0; i < count; i++) {
			struct map_entry *e, *x = NULL;
			__u64 pair = keys[i];
			struct datarec *arr;

			arr = &values[i * nr_cpus];
			/* Look for an existing entry for this ifindex pair */
			hash_for_each_possible(rec->xmit_map, e, node, pair) {
				if (e->pair == pair) {
					x = e;
					break;
				}
			}
			if (!x) {
				x = calloc(1, sizeof(*x));
				if (!x)
					goto cleanup;
				if (map_entry_init(x, pair) < 0) {
					free(x);
					goto cleanup;
				}
				hash_add(rec->xmit_map, &x->node, pair);
			}
			map_collect_percpu(arr, &x->val);
		}

		if (exit)
			break;
		/* Restore the full batch size consumed by the last lookup */
		count = 32;
	}

	free(values);
	free(keys);
	return 0;
cleanup:
	free(values);
	free(keys);
	return -ENOMEM;
}
448 
/* Allocate a stats_record, including the trailing enq[] array (one record
 * per CPU), and a per-CPU datarec array for every statistic enabled in
 * sample_mask. On any failure the goto ladder unwinds all prior allocations
 * in reverse order and NULL is returned.
 */
static struct stats_record *alloc_stats_record(void)
{
	struct stats_record *rec;
	int i;

	rec = calloc(1, sizeof(*rec) + sample_n_cpus * sizeof(struct record));
	if (!rec) {
		fprintf(stderr, "Failed to allocate memory\n");
		return NULL;
	}

	if (sample_mask & SAMPLE_RX_CNT) {
		rec->rx_cnt.cpu = alloc_record_per_cpu();
		if (!rec->rx_cnt.cpu) {
			fprintf(stderr,
				"Failed to allocate rx_cnt per-CPU array\n");
			goto end_rec;
		}
	}
	if (sample_mask & (SAMPLE_REDIRECT_CNT | SAMPLE_REDIRECT_ERR_CNT)) {
		for (i = 0; i < XDP_REDIRECT_ERR_MAX; i++) {
			rec->redir_err[i].cpu = alloc_record_per_cpu();
			if (!rec->redir_err[i].cpu) {
				fprintf(stderr,
					"Failed to allocate redir_err per-CPU array for "
					"\"%s\" case\n",
					xdp_redirect_err_names[i]);
				/* free the entries allocated so far */
				while (i--)
					free(rec->redir_err[i].cpu);
				goto end_rx_cnt;
			}
		}
	}
	if (sample_mask & SAMPLE_CPUMAP_KTHREAD_CNT) {
		rec->kthread.cpu = alloc_record_per_cpu();
		if (!rec->kthread.cpu) {
			fprintf(stderr,
				"Failed to allocate kthread per-CPU array\n");
			goto end_redir;
		}
	}
	if (sample_mask & SAMPLE_EXCEPTION_CNT) {
		for (i = 0; i < XDP_ACTION_MAX; i++) {
			rec->exception[i].cpu = alloc_record_per_cpu();
			if (!rec->exception[i].cpu) {
				fprintf(stderr,
					"Failed to allocate exception per-CPU array for "
					"\"%s\" case\n",
					action2str(i));
				/* free the entries allocated so far */
				while (i--)
					free(rec->exception[i].cpu);
				goto end_kthread;
			}
		}
	}
	if (sample_mask & SAMPLE_DEVMAP_XMIT_CNT) {
		rec->devmap_xmit.cpu = alloc_record_per_cpu();
		if (!rec->devmap_xmit.cpu) {
			fprintf(stderr,
				"Failed to allocate devmap_xmit per-CPU array\n");
			goto end_exception;
		}
	}
	if (sample_mask & SAMPLE_DEVMAP_XMIT_CNT_MULTI)
		hash_init(rec->xmit_map);
	if (sample_mask & SAMPLE_CPUMAP_ENQUEUE_CNT) {
		for (i = 0; i < sample_n_cpus; i++) {
			rec->enq[i].cpu = alloc_record_per_cpu();
			if (!rec->enq[i].cpu) {
				fprintf(stderr,
					"Failed to allocate enqueue per-CPU array for "
					"CPU %d\n",
					i);
				/* free the entries allocated so far */
				while (i--)
					free(rec->enq[i].cpu);
				goto end_devmap_xmit;
			}
		}
	}

	return rec;

	/* Unwind in reverse allocation order; free(NULL) is a no-op, so
	 * arrays skipped because their mask bit was clear are harmless.
	 */
end_devmap_xmit:
	free(rec->devmap_xmit.cpu);
end_exception:
	for (i = 0; i < XDP_ACTION_MAX; i++)
		free(rec->exception[i].cpu);
end_kthread:
	free(rec->kthread.cpu);
end_redir:
	for (i = 0; i < XDP_REDIRECT_ERR_MAX; i++)
		free(rec->redir_err[i].cpu);
end_rx_cnt:
	free(rec->rx_cnt.cpu);
end_rec:
	free(rec);
	return NULL;
}
547 
548 static void free_stats_record(struct stats_record *r)
549 {
550 	struct hlist_node *tmp;
551 	struct map_entry *e;
552 	int i;
553 
554 	for (i = 0; i < sample_n_cpus; i++)
555 		free(r->enq[i].cpu);
556 	hash_for_each_safe(r->xmit_map, i, tmp, e, node) {
557 		hash_del(&e->node);
558 		free(e->val.cpu);
559 		free(e);
560 	}
561 	free(r->devmap_xmit.cpu);
562 	for (i = 0; i < XDP_ACTION_MAX; i++)
563 		free(r->exception[i].cpu);
564 	free(r->kthread.cpu);
565 	for (i = 0; i < XDP_REDIRECT_ERR_MAX; i++)
566 		free(r->redir_err[i].cpu);
567 	free(r->rx_cnt.cpu);
568 	free(r);
569 }
570 
571 static double calc_period(struct record *r, struct record *p)
572 {
573 	double period_ = 0;
574 	__u64 period = 0;
575 
576 	period = r->timestamp - p->timestamp;
577 	if (period > 0)
578 		period_ = ((double)period / NANOSEC_PER_SEC);
579 
580 	return period_;
581 }
582 
583 static double sample_round(double val)
584 {
585 	if (val - floor(val) < 0.5)
586 		return floor(val);
587 	return ceil(val);
588 }
589 
590 static __u64 calc_pps(struct datarec *r, struct datarec *p, double period_)
591 {
592 	__u64 packets = 0;
593 	__u64 pps = 0;
594 
595 	if (period_ > 0) {
596 		packets = r->processed - p->processed;
597 		pps = sample_round(packets / period_);
598 	}
599 	return pps;
600 }
601 
602 static __u64 calc_drop_pps(struct datarec *r, struct datarec *p, double period_)
603 {
604 	__u64 packets = 0;
605 	__u64 pps = 0;
606 
607 	if (period_ > 0) {
608 		packets = r->dropped - p->dropped;
609 		pps = sample_round(packets / period_);
610 	}
611 	return pps;
612 }
613 
614 static __u64 calc_errs_pps(struct datarec *r, struct datarec *p, double period_)
615 {
616 	__u64 packets = 0;
617 	__u64 pps = 0;
618 
619 	if (period_ > 0) {
620 		packets = r->issue - p->issue;
621 		pps = sample_round(packets / period_);
622 	}
623 	return pps;
624 }
625 
626 static __u64 calc_info_pps(struct datarec *r, struct datarec *p, double period_)
627 {
628 	__u64 packets = 0;
629 	__u64 pps = 0;
630 
631 	if (period_ > 0) {
632 		packets = r->info - p->info;
633 		pps = sample_round(packets / period_);
634 	}
635 	return pps;
636 }
637 
638 static void calc_xdp_pps(struct datarec *r, struct datarec *p, double *xdp_pass,
639 			 double *xdp_drop, double *xdp_redirect, double period_)
640 {
641 	*xdp_pass = 0, *xdp_drop = 0, *xdp_redirect = 0;
642 	if (period_ > 0) {
643 		*xdp_redirect = (r->xdp_redirect - p->xdp_redirect) / period_;
644 		*xdp_pass = (r->xdp_pass - p->xdp_pass) / period_;
645 		*xdp_drop = (r->xdp_drop - p->xdp_drop) / period_;
646 	}
647 }
648 
649 static void stats_get_rx_cnt(struct stats_record *stats_rec,
650 			     struct stats_record *stats_prev,
651 			     unsigned int nr_cpus, struct sample_output *out)
652 {
653 	struct record *rec, *prev;
654 	double t, pps, drop, err;
655 	int i;
656 
657 	rec = &stats_rec->rx_cnt;
658 	prev = &stats_prev->rx_cnt;
659 	t = calc_period(rec, prev);
660 
661 	for (i = 0; i < nr_cpus; i++) {
662 		struct datarec *r = &rec->cpu[i];
663 		struct datarec *p = &prev->cpu[i];
664 		char str[64];
665 
666 		pps = calc_pps(r, p, t);
667 		drop = calc_drop_pps(r, p, t);
668 		err = calc_errs_pps(r, p, t);
669 		if (!pps && !drop && !err)
670 			continue;
671 
672 		snprintf(str, sizeof(str), "cpu:%d", i);
673 		print_default("    %-18s " FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf
674 			      "\n",
675 			      str, PPS(pps), DROP(drop), ERR(err));
676 	}
677 
678 	if (out) {
679 		pps = calc_pps(&rec->total, &prev->total, t);
680 		drop = calc_drop_pps(&rec->total, &prev->total, t);
681 		err = calc_errs_pps(&rec->total, &prev->total, t);
682 
683 		out->rx_cnt.pps = pps;
684 		out->rx_cnt.drop = drop;
685 		out->rx_cnt.err = err;
686 		out->totals.rx += pps;
687 		out->totals.drop += drop;
688 		out->totals.err += err;
689 	}
690 }
691 
/* Print cpumap enqueue statistics: for each destination CPU, the summed
 * enqueue rate from all source CPUs, then a per source-CPU breakdown in
 * verbose mode. The issue counter presumably counts bulk enqueue events —
 * it is folded into an average bulk size (pps / err), see inline comments.
 */
static void stats_get_cpumap_enqueue(struct stats_record *stats_rec,
				     struct stats_record *stats_prev,
				     unsigned int nr_cpus)
{
	struct record *rec, *prev;
	double t, pps, drop, err;
	int i, to_cpu;

	/* cpumap enqueue stats */
	for (to_cpu = 0; to_cpu < sample_n_cpus; to_cpu++) {
		rec = &stats_rec->enq[to_cpu];
		prev = &stats_prev->enq[to_cpu];
		t = calc_period(rec, prev);

		pps = calc_pps(&rec->total, &prev->total, t);
		drop = calc_drop_pps(&rec->total, &prev->total, t);
		err = calc_errs_pps(&rec->total, &prev->total, t);

		if (pps > 0 || drop > 0) {
			char str[64];

			snprintf(str, sizeof(str), "enqueue to cpu %d", to_cpu);

			if (err > 0)
				err = pps / err; /* calc average bulk size */

			print_err(drop,
				  "  %-20s " FMT_COLUMNf FMT_COLUMNf __COLUMN(
					  ".2f") "\n",
				  str, PPS(pps), DROP(drop), err, "bulk-avg");
		}

		/* Per source-CPU breakdown, only shown in verbose mode */
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];
			char str[64];

			pps = calc_pps(r, p, t);
			drop = calc_drop_pps(r, p, t);
			err = calc_errs_pps(r, p, t);
			if (!pps && !drop && !err)
				continue;

			snprintf(str, sizeof(str), "cpu:%d->%d", i, to_cpu);
			if (err > 0)
				err = pps / err; /* calc average bulk size */
			print_default(
				"    %-18s " FMT_COLUMNf FMT_COLUMNf __COLUMN(
					".2f") "\n",
				str, PPS(pps), DROP(drop), err, "bulk-avg");
		}
	}
}
745 
746 static void stats_get_cpumap_remote(struct stats_record *stats_rec,
747 				    struct stats_record *stats_prev,
748 				    unsigned int nr_cpus)
749 {
750 	double xdp_pass, xdp_drop, xdp_redirect;
751 	struct record *rec, *prev;
752 	double t;
753 	int i;
754 
755 	rec = &stats_rec->kthread;
756 	prev = &stats_prev->kthread;
757 	t = calc_period(rec, prev);
758 
759 	calc_xdp_pps(&rec->total, &prev->total, &xdp_pass, &xdp_drop,
760 		     &xdp_redirect, t);
761 	if (xdp_pass || xdp_drop || xdp_redirect) {
762 		print_err(xdp_drop,
763 			  "    %-18s " FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf "\n",
764 			  "xdp_stats", PASS(xdp_pass), DROP(xdp_drop),
765 			  REDIR(xdp_redirect));
766 	}
767 
768 	for (i = 0; i < nr_cpus; i++) {
769 		struct datarec *r = &rec->cpu[i];
770 		struct datarec *p = &prev->cpu[i];
771 		char str[64];
772 
773 		calc_xdp_pps(r, p, &xdp_pass, &xdp_drop, &xdp_redirect, t);
774 		if (!xdp_pass && !xdp_drop && !xdp_redirect)
775 			continue;
776 
777 		snprintf(str, sizeof(str), "cpu:%d", i);
778 		print_default("      %-16s " FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf
779 			      "\n",
780 			      str, PASS(xdp_pass), DROP(xdp_drop),
781 			      REDIR(xdp_redirect));
782 	}
783 }
784 
785 static void stats_get_cpumap_kthread(struct stats_record *stats_rec,
786 				     struct stats_record *stats_prev,
787 				     unsigned int nr_cpus)
788 {
789 	struct record *rec, *prev;
790 	double t, pps, drop, err;
791 	int i;
792 
793 	rec = &stats_rec->kthread;
794 	prev = &stats_prev->kthread;
795 	t = calc_period(rec, prev);
796 
797 	pps = calc_pps(&rec->total, &prev->total, t);
798 	drop = calc_drop_pps(&rec->total, &prev->total, t);
799 	err = calc_errs_pps(&rec->total, &prev->total, t);
800 
801 	print_err(drop, "  %-20s " FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf "\n",
802 		  pps ? "kthread total" : "kthread", PPS(pps), DROP(drop), err,
803 		  "sched");
804 
805 	for (i = 0; i < nr_cpus; i++) {
806 		struct datarec *r = &rec->cpu[i];
807 		struct datarec *p = &prev->cpu[i];
808 		char str[64];
809 
810 		pps = calc_pps(r, p, t);
811 		drop = calc_drop_pps(r, p, t);
812 		err = calc_errs_pps(r, p, t);
813 		if (!pps && !drop && !err)
814 			continue;
815 
816 		snprintf(str, sizeof(str), "cpu:%d", i);
817 		print_default("    %-18s " FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf
818 			      "\n",
819 			      str, PPS(pps), DROP(drop), err, "sched");
820 	}
821 }
822 
823 static void stats_get_redirect_cnt(struct stats_record *stats_rec,
824 				   struct stats_record *stats_prev,
825 				   unsigned int nr_cpus,
826 				   struct sample_output *out)
827 {
828 	struct record *rec, *prev;
829 	double t, pps;
830 	int i;
831 
832 	rec = &stats_rec->redir_err[0];
833 	prev = &stats_prev->redir_err[0];
834 	t = calc_period(rec, prev);
835 	for (i = 0; i < nr_cpus; i++) {
836 		struct datarec *r = &rec->cpu[i];
837 		struct datarec *p = &prev->cpu[i];
838 		char str[64];
839 
840 		pps = calc_pps(r, p, t);
841 		if (!pps)
842 			continue;
843 
844 		snprintf(str, sizeof(str), "cpu:%d", i);
845 		print_default("    %-18s " FMT_COLUMNf "\n", str, REDIR(pps));
846 	}
847 
848 	if (out) {
849 		pps = calc_pps(&rec->total, &prev->total, t);
850 		out->redir_cnt.suc = pps;
851 		out->totals.redir += pps;
852 	}
853 }
854 
/* Print XDP_REDIRECT failure rates for each tracked error class (indices
 * 1..XDP_REDIRECT_ERR_MAX-1; index 0 is the success slot handled by
 * stats_get_redirect_cnt). The per-class total line is suppressed when @out
 * is set; the per-CPU lines still follow the normal log level. With @out
 * set, the summed total-drop rate across all classes is stored.
 */
static void stats_get_redirect_err_cnt(struct stats_record *stats_rec,
				       struct stats_record *stats_prev,
				       unsigned int nr_cpus,
				       struct sample_output *out)
{
	struct record *rec, *prev;
	double t, drop, sum = 0;
	int rec_i, i;

	for (rec_i = 1; rec_i < XDP_REDIRECT_ERR_MAX; rec_i++) {
		char str[64];

		rec = &stats_rec->redir_err[rec_i];
		prev = &stats_prev->redir_err[rec_i];
		t = calc_period(rec, prev);

		drop = calc_drop_pps(&rec->total, &prev->total, t);
		if (drop > 0 && !out) {
			/* verbose mode appends " total" to the class name */
			snprintf(str, sizeof(str),
				 sample_log_level & LL_DEFAULT ? "%s total" :
								       "%s",
				 xdp_redirect_err_names[rec_i]);
			print_err(drop, "    %-18s " FMT_COLUMNf "\n", str,
				  ERR(drop));
		}

		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];
			double drop;	/* shadows the per-class total above */

			drop = calc_drop_pps(r, p, t);
			if (!drop)
				continue;

			snprintf(str, sizeof(str), "cpu:%d", i);
			print_default("       %-16s" FMT_COLUMNf "\n", str,
				      ERR(drop));
		}

		/* outer drop: this class's total rate */
		sum += drop;
	}

	if (out) {
		out->redir_cnt.err = sum;
		out->totals.err += sum;
	}
}
903 
/* Print xdp_exception tracepoint rates per XDP action, with a per-CPU
 * breakdown in verbose mode. When @out is set nothing is printed; only the
 * summed rate across all actions is stored.
 */
static void stats_get_exception_cnt(struct stats_record *stats_rec,
				    struct stats_record *stats_prev,
				    unsigned int nr_cpus,
				    struct sample_output *out)
{
	double t, drop, sum = 0;
	struct record *rec, *prev;
	int rec_i, i;

	for (rec_i = 0; rec_i < XDP_ACTION_MAX; rec_i++) {
		rec = &stats_rec->exception[rec_i];
		prev = &stats_prev->exception[rec_i];
		t = calc_period(rec, prev);

		drop = calc_drop_pps(&rec->total, &prev->total, t);
		/* Fold out errors after heading */
		sum += drop;

		if (drop > 0 && !out) {
			print_always("    %-18s " FMT_COLUMNf "\n",
				     action2str(rec_i), ERR(drop));

			for (i = 0; i < nr_cpus; i++) {
				struct datarec *r = &rec->cpu[i];
				struct datarec *p = &prev->cpu[i];
				char str[64];
				/* shadows the per-action total above */
				double drop;

				drop = calc_drop_pps(r, p, t);
				if (!drop)
					continue;

				snprintf(str, sizeof(str), "cpu:%d", i);
				print_default("       %-16s" FMT_COLUMNf "\n",
					      str, ERR(drop));
			}
		}
	}

	if (out) {
		out->except_cnt.hits = sum;
		out->totals.err += sum;
	}
}
948 
949 static void stats_get_devmap_xmit(struct stats_record *stats_rec,
950 				  struct stats_record *stats_prev,
951 				  unsigned int nr_cpus,
952 				  struct sample_output *out)
953 {
954 	double pps, drop, info, err;
955 	struct record *rec, *prev;
956 	double t;
957 	int i;
958 
959 	rec = &stats_rec->devmap_xmit;
960 	prev = &stats_prev->devmap_xmit;
961 	t = calc_period(rec, prev);
962 	for (i = 0; i < nr_cpus; i++) {
963 		struct datarec *r = &rec->cpu[i];
964 		struct datarec *p = &prev->cpu[i];
965 		char str[64];
966 
967 		pps = calc_pps(r, p, t);
968 		drop = calc_drop_pps(r, p, t);
969 		err = calc_errs_pps(r, p, t);
970 
971 		if (!pps && !drop && !err)
972 			continue;
973 
974 		snprintf(str, sizeof(str), "cpu:%d", i);
975 		info = calc_info_pps(r, p, t);
976 		if (info > 0)
977 			info = (pps + drop) / info; /* calc avg bulk */
978 		print_default("     %-18s" FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf
979 				      __COLUMN(".2f") "\n",
980 			      str, XMIT(pps), DROP(drop), err, "drv_err/s",
981 			      info, "bulk-avg");
982 	}
983 	if (out) {
984 		pps = calc_pps(&rec->total, &prev->total, t);
985 		drop = calc_drop_pps(&rec->total, &prev->total, t);
986 		info = calc_info_pps(&rec->total, &prev->total, t);
987 		if (info > 0)
988 			info = (pps + drop) / info; /* calc avg bulk */
989 		err = calc_errs_pps(&rec->total, &prev->total, t);
990 
991 		out->xmit_cnt.pps = pps;
992 		out->xmit_cnt.drop = drop;
993 		out->xmit_cnt.bavg = info;
994 		out->xmit_cnt.err = err;
995 		out->totals.xmit += pps;
996 		out->totals.drop_xmit += drop;
997 		out->totals.err += err;
998 	}
999 }
1000 
/* Walk the xmit_map hashtable and, per (from,to) interface pair, either fold
 * the rates into @out's totals (when @out is set) or print them with a
 * per-CPU breakdown. A pair with no counterpart in @stats_prev is compared
 * against a synthetic zero record "beg" whose timestamp lies one sampling
 * interval in the past.
 * NOTE(review): @xmit_total is not referenced in this function body —
 * confirm whether callers rely on it.
 */
static void stats_get_devmap_xmit_multi(struct stats_record *stats_rec,
					struct stats_record *stats_prev,
					unsigned int nr_cpus,
					struct sample_output *out,
					bool xmit_total)
{
	double pps, drop, info, err;
	struct map_entry *entry;
	struct record *r, *p;
	double t;
	int bkt;

	hash_for_each(stats_rec->xmit_map, bkt, entry, node) {
		struct map_entry *e, *x = NULL;
		char ifname_from[IFNAMSIZ];
		char ifname_to[IFNAMSIZ];
		const char *fstr, *tstr;
		unsigned long prev_time;
		struct record beg = {};
		__u32 from_idx, to_idx;
		char str[128];
		__u64 pair;
		int i;

		/* one sampling interval, in nanoseconds */
		prev_time = sample_interval * NANOSEC_PER_SEC;

		pair = entry->pair;
		/* key layout: from_ifindex << 32 | to_ifindex */
		from_idx = pair >> 32;
		to_idx = pair & 0xFFFFFFFF;

		r = &entry->val;
		beg.timestamp = r->timestamp - prev_time;

		/* Find matching entry from stats_prev map */
		hash_for_each_possible(stats_prev->xmit_map, e, node, pair) {
			if (e->pair == pair) {
				x = e;
				break;
			}
		}
		if (x)
			p = &x->val;
		else
			p = &beg;
		t = calc_period(r, p);
		pps = calc_pps(&r->total, &p->total, t);
		drop = calc_drop_pps(&r->total, &p->total, t);
		info = calc_info_pps(&r->total, &p->total, t);
		if (info > 0)
			info = (pps + drop) / info; /* calc avg bulk */
		err = calc_errs_pps(&r->total, &p->total, t);

		if (out) {
			/* We are responsible for filling out totals */
			out->totals.xmit += pps;
			out->totals.drop_xmit += drop;
			out->totals.err += err;
			continue;
		}

		/* Resolve interface names; fall back to "?" below */
		fstr = tstr = NULL;
		if (if_indextoname(from_idx, ifname_from))
			fstr = ifname_from;
		if (if_indextoname(to_idx, ifname_to))
			tstr = ifname_to;

		snprintf(str, sizeof(str), "xmit %s->%s", fstr ?: "?",
			 tstr ?: "?");
		/* Skip idle streams of redirection */
		if (pps || drop || err) {
			print_err(drop,
				  "  %-20s " FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf
				  __COLUMN(".2f") "\n", str, XMIT(pps), DROP(drop),
				  err, "drv_err/s", info, "bulk-avg");
		}

		for (i = 0; i < nr_cpus; i++) {
			struct datarec *rc = &r->cpu[i];
			struct datarec *pc, p_beg = {};
			char str[64];

			/* no previous record: diff against a zero baseline */
			pc = p == &beg ? &p_beg : &p->cpu[i];

			pps = calc_pps(rc, pc, t);
			drop = calc_drop_pps(rc, pc, t);
			err = calc_errs_pps(rc, pc, t);

			if (!pps && !drop && !err)
				continue;

			snprintf(str, sizeof(str), "cpu:%d", i);
			info = calc_info_pps(rc, pc, t);
			if (info > 0)
				info = (pps + drop) / info; /* calc avg bulk */

			print_default("     %-18s" FMT_COLUMNf FMT_COLUMNf FMT_COLUMNf
				      __COLUMN(".2f") "\n", str, XMIT(pps),
				      DROP(drop), err, "drv_err/s", info, "bulk-avg");
		}
	}
}
1102 
/* Print one full stats report for the elapsed interval.
 *
 * First emits a single summary header line (prefix + totals from @out),
 * then, for each counter class enabled in @mask, a headline built from the
 * pre-aggregated totals in @out followed by the detailed breakdown
 * produced by the corresponding stats_get_*() helper (called with
 * out == NULL so it prints instead of accumulating).
 *
 * @prefix: left-hand label for the header; NULL means "Summary".
 * @mask:   bitmask of SAMPLE_* counter classes to report.
 * @r, @p:  current and previous collected records.
 * @out:    interval totals previously computed by sample_stats_print().
 */
static void stats_print(const char *prefix, int mask, struct stats_record *r,
			struct stats_record *p, struct sample_output *out)
{
	int nr_cpus = libbpf_num_possible_cpus();
	const char *str;

	/* Header line: always printed regardless of log level */
	print_always("%-23s", prefix ?: "Summary");
	if (mask & SAMPLE_RX_CNT)
		print_always(FMT_COLUMNl, RX(out->totals.rx));
	if (mask & SAMPLE_REDIRECT_CNT)
		print_always(FMT_COLUMNl, REDIR(out->totals.redir));
	printf(FMT_COLUMNl,
	       out->totals.err + out->totals.drop + out->totals.drop_xmit,
	       "err,drop/s");
	if (mask & SAMPLE_DEVMAP_XMIT_CNT ||
	    mask & SAMPLE_DEVMAP_XMIT_CNT_MULTI)
		printf(FMT_COLUMNl, XMIT(out->totals.xmit));
	printf("\n");

	if (mask & SAMPLE_RX_CNT) {
		/* " total" suffix only when the breakdown below will also
		 * be shown (default log level) and there is activity.
		 */
		str = (sample_log_level & LL_DEFAULT) && out->rx_cnt.pps ?
				    "receive total" :
				    "receive";
		print_err((out->rx_cnt.err || out->rx_cnt.drop),
			  "  %-20s " FMT_COLUMNl FMT_COLUMNl FMT_COLUMNl "\n",
			  str, PPS(out->rx_cnt.pps), DROP(out->rx_cnt.drop),
			  ERR(out->rx_cnt.err));

		stats_get_rx_cnt(r, p, nr_cpus, NULL);
	}

	if (mask & SAMPLE_CPUMAP_ENQUEUE_CNT)
		stats_get_cpumap_enqueue(r, p, nr_cpus);

	if (mask & SAMPLE_CPUMAP_KTHREAD_CNT) {
		stats_get_cpumap_kthread(r, p, nr_cpus);
		stats_get_cpumap_remote(r, p, nr_cpus);
	}

	if (mask & SAMPLE_REDIRECT_CNT) {
		str = out->redir_cnt.suc ? "redirect total" : "redirect";
		print_default("  %-20s " FMT_COLUMNl "\n", str,
			      REDIR(out->redir_cnt.suc));

		stats_get_redirect_cnt(r, p, nr_cpus, NULL);
	}

	if (mask & SAMPLE_REDIRECT_ERR_CNT) {
		str = (sample_log_level & LL_DEFAULT) && out->redir_cnt.err ?
				    "redirect_err total" :
				    "redirect_err";
		print_err(out->redir_cnt.err, "  %-20s " FMT_COLUMNl "\n", str,
			  ERR(out->redir_cnt.err));

		stats_get_redirect_err_cnt(r, p, nr_cpus, NULL);
	}

	if (mask & SAMPLE_EXCEPTION_CNT) {
		str = out->except_cnt.hits ? "xdp_exception total" :
						   "xdp_exception";

		print_err(out->except_cnt.hits, "  %-20s " FMT_COLUMNl "\n", str,
			  HITS(out->except_cnt.hits));

		stats_get_exception_cnt(r, p, nr_cpus, NULL);
	}

	if (mask & SAMPLE_DEVMAP_XMIT_CNT) {
		str = (sample_log_level & LL_DEFAULT) && out->xmit_cnt.pps ?
				    "devmap_xmit total" :
				    "devmap_xmit";

		print_err(out->xmit_cnt.err || out->xmit_cnt.drop,
			  "  %-20s " FMT_COLUMNl FMT_COLUMNl FMT_COLUMNl
				  __COLUMN(".2f") "\n",
			  str, XMIT(out->xmit_cnt.pps),
			  DROP(out->xmit_cnt.drop), out->xmit_cnt.err,
			  "drv_err/s", out->xmit_cnt.bavg, "bulk-avg");

		stats_get_devmap_xmit(r, p, nr_cpus, NULL);
	}

	if (mask & SAMPLE_DEVMAP_XMIT_CNT_MULTI)
		stats_get_devmap_xmit_multi(r, p, nr_cpus, NULL,
					    mask & SAMPLE_DEVMAP_XMIT_CNT);

	/* Separate reports with a blank line; in simple mode only after a
	 * report that actually contained an error line (sample_err_exp is
	 * latched by print_err and reset here).
	 */
	if (sample_log_level & LL_DEFAULT ||
	    ((sample_log_level & LL_SIMPLE) && sample_err_exp)) {
		sample_err_exp = false;
		printf("\n");
	}
}
1195 
1196 int sample_setup_maps(struct bpf_map **maps)
1197 {
1198 	sample_n_cpus = libbpf_num_possible_cpus();
1199 
1200 	for (int i = 0; i < MAP_DEVMAP_XMIT_MULTI; i++) {
1201 		sample_map[i] = maps[i];
1202 
1203 		switch (i) {
1204 		case MAP_RX:
1205 		case MAP_CPUMAP_KTHREAD:
1206 		case MAP_DEVMAP_XMIT:
1207 			sample_map_count[i] = sample_n_cpus;
1208 			break;
1209 		case MAP_REDIRECT_ERR:
1210 			sample_map_count[i] =
1211 				XDP_REDIRECT_ERR_MAX * sample_n_cpus;
1212 			break;
1213 		case MAP_EXCEPTION:
1214 			sample_map_count[i] = XDP_ACTION_MAX * sample_n_cpus;
1215 		case MAP_CPUMAP_ENQUEUE:
1216 			sample_map_count[i] = sample_n_cpus * sample_n_cpus;
1217 			break;
1218 		default:
1219 			return -EINVAL;
1220 		}
1221 		if (bpf_map__resize(sample_map[i], sample_map_count[i]) < 0)
1222 			return -errno;
1223 	}
1224 	sample_map[MAP_DEVMAP_XMIT_MULTI] = maps[MAP_DEVMAP_XMIT_MULTI];
1225 	return 0;
1226 }
1227 
1228 static int sample_setup_maps_mappings(void)
1229 {
1230 	for (int i = 0; i < MAP_DEVMAP_XMIT_MULTI; i++) {
1231 		size_t size = sample_map_count[i] * sizeof(struct datarec);
1232 
1233 		sample_mmap[i] = mmap(NULL, size, PROT_READ | PROT_WRITE,
1234 				      MAP_SHARED, bpf_map__fd(sample_map[i]), 0);
1235 		if (sample_mmap[i] == MAP_FAILED)
1236 			return -errno;
1237 	}
1238 	return 0;
1239 }
1240 
1241 int __sample_init(int mask)
1242 {
1243 	sigset_t st;
1244 
1245 	sigemptyset(&st);
1246 	sigaddset(&st, SIGQUIT);
1247 	sigaddset(&st, SIGINT);
1248 	sigaddset(&st, SIGTERM);
1249 
1250 	if (sigprocmask(SIG_BLOCK, &st, NULL) < 0)
1251 		return -errno;
1252 
1253 	sample_sig_fd = signalfd(-1, &st, SFD_CLOEXEC | SFD_NONBLOCK);
1254 	if (sample_sig_fd < 0)
1255 		return -errno;
1256 
1257 	sample_mask = mask;
1258 
1259 	return sample_setup_maps_mappings();
1260 }
1261 
1262 static int __sample_remove_xdp(int ifindex, __u32 prog_id, int xdp_flags)
1263 {
1264 	__u32 cur_prog_id = 0;
1265 	int ret;
1266 
1267 	if (prog_id) {
1268 		ret = bpf_get_link_xdp_id(ifindex, &cur_prog_id, xdp_flags);
1269 		if (ret < 0)
1270 			return -errno;
1271 
1272 		if (prog_id != cur_prog_id) {
1273 			print_always(
1274 				"Program on ifindex %d does not match installed "
1275 				"program, skipping unload\n",
1276 				ifindex);
1277 			return -ENOENT;
1278 		}
1279 	}
1280 
1281 	return bpf_set_link_xdp_fd(ifindex, -1, xdp_flags);
1282 }
1283 
/* Attach @xdp_prog to @ifindex and remember it for cleanup at exit.
 *
 * @generic: attach in generic/skb mode instead of native driver mode.
 * @force:   replace an already-attached program instead of failing.
 *
 * On success the (ifindex, prog_id, flags) triple is appended to
 * sample_xdp_progs so sample_exit() can detach it later.  Returns 0 on
 * success, a negative errno-style value on failure.
 */
int sample_install_xdp(struct bpf_program *xdp_prog, int ifindex, bool generic,
		       bool force)
{
	int ret, xdp_flags = 0;
	__u32 prog_id = 0;

	/* Hard cap of 32 tracked programs; presumably the capacity of the
	 * sample_xdp_progs array declared elsewhere -- TODO confirm.
	 */
	if (sample_xdp_cnt == 32) {
		fprintf(stderr,
			"Total limit for installed XDP programs in a sample reached\n");
		return -ENOTSUP;
	}

	xdp_flags |= !force ? XDP_FLAGS_UPDATE_IF_NOEXIST : 0;
	xdp_flags |= generic ? XDP_FLAGS_SKB_MODE : XDP_FLAGS_DRV_MODE;
	ret = bpf_set_link_xdp_fd(ifindex, bpf_program__fd(xdp_prog),
				  xdp_flags);
	if (ret < 0) {
		/* NOTE(review): assumes the libbpf call sets errno on
		 * failure rather than only returning an error code --
		 * verify against the libbpf version in use.
		 */
		ret = -errno;
		fprintf(stderr,
			"Failed to install program \"%s\" on ifindex %d, mode = %s, "
			"force = %s: %s\n",
			bpf_program__name(xdp_prog), ifindex,
			generic ? "skb" : "native", force ? "true" : "false",
			strerror(-ret));
		return ret;
	}

	/* Query the kernel-assigned program id; needed so removal can check
	 * the program was not replaced behind our back.
	 */
	ret = bpf_get_link_xdp_id(ifindex, &prog_id, xdp_flags);
	if (ret < 0) {
		ret = -errno;
		fprintf(stderr,
			"Failed to get XDP program id for ifindex %d, removing program: %s\n",
			ifindex, strerror(errno));
		/* Roll back the attach we just did (prog_id 0 = no check) */
		__sample_remove_xdp(ifindex, 0, xdp_flags);
		return ret;
	}
	sample_xdp_progs[sample_xdp_cnt++] =
		(struct xdp_desc){ ifindex, prog_id, xdp_flags };

	return 0;
}
1325 
1326 static void sample_summary_print(void)
1327 {
1328 	double num = sample_out.rx_cnt.num;
1329 
1330 	if (sample_out.totals.rx) {
1331 		double pkts = sample_out.totals.rx;
1332 
1333 		print_always("  Packets received    : %'-10llu\n",
1334 			     sample_out.totals.rx);
1335 		print_always("  Average packets/s   : %'-10.0f\n",
1336 			     sample_round(pkts / num));
1337 	}
1338 	if (sample_out.totals.redir) {
1339 		double pkts = sample_out.totals.redir;
1340 
1341 		print_always("  Packets redirected  : %'-10llu\n",
1342 			     sample_out.totals.redir);
1343 		print_always("  Average redir/s     : %'-10.0f\n",
1344 			     sample_round(pkts / num));
1345 	}
1346 	if (sample_out.totals.drop)
1347 		print_always("  Rx dropped          : %'-10llu\n",
1348 			     sample_out.totals.drop);
1349 	if (sample_out.totals.drop_xmit)
1350 		print_always("  Tx dropped          : %'-10llu\n",
1351 			     sample_out.totals.drop_xmit);
1352 	if (sample_out.totals.err)
1353 		print_always("  Errors recorded     : %'-10llu\n",
1354 			     sample_out.totals.err);
1355 	if (sample_out.totals.xmit) {
1356 		double pkts = sample_out.totals.xmit;
1357 
1358 		print_always("  Packets transmitted : %'-10llu\n",
1359 			     sample_out.totals.xmit);
1360 		print_always("  Average transmit/s  : %'-10.0f\n",
1361 			     sample_round(pkts / num));
1362 	}
1363 }
1364 
1365 void sample_exit(int status)
1366 {
1367 	size_t size;
1368 
1369 	for (int i = 0; i < NUM_MAP; i++) {
1370 		size = sample_map_count[i] * sizeof(**sample_mmap);
1371 		munmap(sample_mmap[i], size);
1372 	}
1373 	while (sample_xdp_cnt--) {
1374 		int i = sample_xdp_cnt, ifindex, xdp_flags;
1375 		__u32 prog_id;
1376 
1377 		prog_id = sample_xdp_progs[i].prog_id;
1378 		ifindex = sample_xdp_progs[i].ifindex;
1379 		xdp_flags = sample_xdp_progs[i].flags;
1380 
1381 		__sample_remove_xdp(ifindex, prog_id, xdp_flags);
1382 	}
1383 	sample_summary_print();
1384 	close(sample_sig_fd);
1385 	exit(status);
1386 }
1387 
/* Snapshot all enabled counters from the mmap'd maps into @rec.
 *
 * The mmap'd arrays are laid out as rows of sample_n_cpus datarec slots
 * (sized in sample_setup_maps()); the index arithmetic below selects a
 * row with [row * sample_n_cpus].  MAP_REDIRECT_ERR row 0 holds the
 * successful-redirect counters, rows 1..XDP_REDIRECT_ERR_MAX-1 the error
 * counters.  Returns 0 on success, -EINVAL if reading the devmap-multi
 * hash map fails.
 */
static int sample_stats_collect(struct stats_record *rec)
{
	int i;

	if (sample_mask & SAMPLE_RX_CNT)
		map_collect_percpu(sample_mmap[MAP_RX], &rec->rx_cnt);

	/* Successful redirects live in row 0 of the redirect_err map */
	if (sample_mask & SAMPLE_REDIRECT_CNT)
		map_collect_percpu(sample_mmap[MAP_REDIRECT_ERR], &rec->redir_err[0]);

	if (sample_mask & SAMPLE_REDIRECT_ERR_CNT) {
		for (i = 1; i < XDP_REDIRECT_ERR_MAX; i++)
			map_collect_percpu(&sample_mmap[MAP_REDIRECT_ERR][i * sample_n_cpus],
					   &rec->redir_err[i]);
	}

	/* One row per destination CPU for cpumap enqueue stats */
	if (sample_mask & SAMPLE_CPUMAP_ENQUEUE_CNT)
		for (i = 0; i < sample_n_cpus; i++)
			map_collect_percpu(&sample_mmap[MAP_CPUMAP_ENQUEUE][i * sample_n_cpus],
					   &rec->enq[i]);

	if (sample_mask & SAMPLE_CPUMAP_KTHREAD_CNT)
		map_collect_percpu(sample_mmap[MAP_CPUMAP_KTHREAD],
				   &rec->kthread);

	/* One row per XDP action for xdp_exception stats */
	if (sample_mask & SAMPLE_EXCEPTION_CNT)
		for (i = 0; i < XDP_ACTION_MAX; i++)
			map_collect_percpu(&sample_mmap[MAP_EXCEPTION][i * sample_n_cpus],
					   &rec->exception[i]);

	if (sample_mask & SAMPLE_DEVMAP_XMIT_CNT)
		map_collect_percpu(sample_mmap[MAP_DEVMAP_XMIT], &rec->devmap_xmit);

	/* The multi-device xmit map is a hash map, read via syscall */
	if (sample_mask & SAMPLE_DEVMAP_XMIT_CNT_MULTI) {
		if (map_collect_percpu_devmap(bpf_map__fd(sample_map[MAP_DEVMAP_XMIT_MULTI]), rec) < 0)
			return -EINVAL;
	}
	return 0;
}
1427 
1428 static void sample_summary_update(struct sample_output *out)
1429 {
1430 	sample_out.totals.rx += out->totals.rx;
1431 	sample_out.totals.redir += out->totals.redir;
1432 	sample_out.totals.drop += out->totals.drop;
1433 	sample_out.totals.drop_xmit += out->totals.drop_xmit;
1434 	sample_out.totals.err += out->totals.err;
1435 	sample_out.totals.xmit += out->totals.xmit;
1436 	sample_out.rx_cnt.num++;
1437 }
1438 
/* Compute one interval's aggregate rates, fold them into the run summary
 * and print the report.
 *
 * Each stats_get_*() helper is called with nr_cpus == 0 and a non-NULL
 * @out, i.e. in accumulate-only mode; stats_print() then re-invokes them
 * with out == NULL for the detailed printout.  Note devmap_xmit and
 * devmap_xmit_multi are mutually exclusive here (else-if) so totals are
 * not double counted.
 */
static void sample_stats_print(int mask, struct stats_record *cur,
			       struct stats_record *prev, char *prog_name)
{
	struct sample_output out = {};

	if (mask & SAMPLE_RX_CNT)
		stats_get_rx_cnt(cur, prev, 0, &out);
	if (mask & SAMPLE_REDIRECT_CNT)
		stats_get_redirect_cnt(cur, prev, 0, &out);
	if (mask & SAMPLE_REDIRECT_ERR_CNT)
		stats_get_redirect_err_cnt(cur, prev, 0, &out);
	if (mask & SAMPLE_EXCEPTION_CNT)
		stats_get_exception_cnt(cur, prev, 0, &out);
	if (mask & SAMPLE_DEVMAP_XMIT_CNT)
		stats_get_devmap_xmit(cur, prev, 0, &out);
	else if (mask & SAMPLE_DEVMAP_XMIT_CNT_MULTI)
		stats_get_devmap_xmit_multi(cur, prev, 0, &out,
					    mask & SAMPLE_DEVMAP_XMIT_CNT);
	sample_summary_update(&out);

	stats_print(prog_name, mask, cur, prev, &out);
}
1461 
/* Toggle output verbosity (invoked on SIGQUIT by sample_signal_cb()).
 * XOR with LL_DEBUG - 1 flips every log-level bit below LL_DEBUG,
 * switching between the simple and default output modes; assumes the
 * LL_* values are single-bit flags with LL_DEBUG the highest -- TODO
 * confirm against their definition in xdp_sample_user.h.
 */
void sample_switch_mode(void)
{
	sample_log_level ^= LL_DEBUG - 1;
}
1466 
1467 static int sample_signal_cb(void)
1468 {
1469 	struct signalfd_siginfo si;
1470 	int r;
1471 
1472 	r = read(sample_sig_fd, &si, sizeof(si));
1473 	if (r < 0)
1474 		return -errno;
1475 
1476 	switch (si.ssi_signo) {
1477 	case SIGQUIT:
1478 		sample_switch_mode();
1479 		printf("\n");
1480 		break;
1481 	default:
1482 		printf("\n");
1483 		return 1;
1484 	}
1485 
1486 	return 0;
1487 }
1488 
/* Exchange the two stats_record pointers (used to flip current/previous
 * snapshots each interval without copying the records themselves).
 */
static void swap(struct stats_record **a, struct stats_record **b)
{
	struct stats_record *saved = *b;

	*b = *a;
	*a = saved;
}
1498 
/* Timer tick handler: collect a fresh snapshot and print the interval.
 *
 * Reads (and thereby clears) the timerfd expiration count, swaps the
 * current/previous record pointers so the old snapshot becomes the
 * baseline, collects into the (now stale) *rec, and prints.  When
 * exactly two XDP programs are installed, the report heading becomes
 * "ifA->ifB" instead of "Summary".  Returns 0 on success, negative
 * errno-style value on failure.
 */
static int sample_timer_cb(int timerfd, struct stats_record **rec,
			   struct stats_record **prev)
{
	char line[64] = "Summary";
	int ret;
	__u64 t;

	/* Must read the 8-byte expiration count to re-arm poll() */
	ret = read(timerfd, &t, sizeof(t));
	if (ret < 0)
		return -errno;

	/* Swap BEFORE collecting: old current becomes the baseline */
	swap(prev, rec);
	ret = sample_stats_collect(*rec);
	if (ret < 0)
		return ret;

	if (sample_xdp_cnt == 2 && !(sample_mask & SAMPLE_SKIP_HEADING)) {
		char fi[IFNAMSIZ];
		char to[IFNAMSIZ];
		const char *f, *t;

		f = t = NULL;
		/* Fall back to "?" if an ifindex no longer resolves */
		if (if_indextoname(sample_xdp_progs[0].ifindex, fi))
			f = fi;
		if (if_indextoname(sample_xdp_progs[1].ifindex, to))
			t = to;

		snprintf(line, sizeof(line), "%s->%s", f ?: "?", t ?: "?");
	}

	sample_stats_print(sample_mask, *rec, *prev, line);
	return 0;
}
1532 
/* Main event loop of the sample.
 *
 * Arms a periodic timerfd at @interval seconds and polls it together
 * with the signalfd set up in __sample_init().  Each timer tick prints
 * one stats report; SIGQUIT toggles verbosity; SIGINT/SIGTERM (or any
 * callback error) end the loop.  @post_cb, if non-NULL, is invoked with
 * @ctx after every handled event.
 *
 * Returns a negative errno-style value on setup failure; otherwise the
 * value that terminated the loop (1 for a termination signal).
 */
int sample_run(int interval, void (*post_cb)(void *), void *ctx)
{
	struct timespec ts = { interval, 0 };
	struct itimerspec its = { ts, ts };
	struct stats_record *rec, *prev;
	struct pollfd pfd[2] = {};
	int timerfd, ret;

	if (!interval) {
		fprintf(stderr, "Incorrect interval 0\n");
		return -EINVAL;
	}
	sample_interval = interval;
	/* Pretty print numbers */
	setlocale(LC_NUMERIC, "en_US.UTF-8");

	timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC | TFD_NONBLOCK);
	if (timerfd < 0)
		return -errno;
	/* NOTE(review): timerfd_settime() return value is not checked;
	 * assumed to succeed for a freshly created, valid timerfd.
	 */
	timerfd_settime(timerfd, 0, &its, NULL);

	pfd[0].fd = sample_sig_fd;
	pfd[0].events = POLLIN;

	pfd[1].fd = timerfd;
	pfd[1].events = POLLIN;

	ret = -ENOMEM;
	rec = alloc_stats_record();
	if (!rec)
		goto end;
	prev = alloc_stats_record();
	if (!prev)
		goto end_rec;

	/* Prime the baseline snapshot before the first timer tick */
	ret = sample_stats_collect(rec);
	if (ret < 0)
		goto end_rec_prev;

	for (;;) {
		ret = poll(pfd, 2, -1);
		if (ret < 0) {
			/* NOTE(review): on non-EINTR failure the loop exits
			 * with poll()'s raw -1, not -errno; callers still
			 * see a negative value either way.
			 */
			if (errno == EINTR)
				continue;
			else
				break;
		}

		/* Signal events take priority over timer ticks */
		if (pfd[0].revents & POLLIN)
			ret = sample_signal_cb();
		else if (pfd[1].revents & POLLIN)
			ret = sample_timer_cb(timerfd, &rec, &prev);

		if (ret)
			break;

		if (post_cb)
			post_cb(ctx);
	}

end_rec_prev:
	free_stats_record(prev);
end_rec:
	free_stats_record(rec);
end:
	close(timerfd);

	return ret;
}
1602 
1603 const char *get_driver_name(int ifindex)
1604 {
1605 	struct ethtool_drvinfo drv = {};
1606 	char ifname[IF_NAMESIZE];
1607 	static char drvname[32];
1608 	struct ifreq ifr = {};
1609 	int fd, r = 0;
1610 
1611 	fd = socket(AF_INET, SOCK_DGRAM, 0);
1612 	if (fd < 0)
1613 		return "[error]";
1614 
1615 	if (!if_indextoname(ifindex, ifname))
1616 		goto end;
1617 
1618 	drv.cmd = ETHTOOL_GDRVINFO;
1619 	safe_strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
1620 	ifr.ifr_data = (void *)&drv;
1621 
1622 	r = ioctl(fd, SIOCETHTOOL, &ifr);
1623 	if (r)
1624 		goto end;
1625 
1626 	safe_strncpy(drvname, drv.driver, sizeof(drvname));
1627 
1628 	close(fd);
1629 	return drvname;
1630 
1631 end:
1632 	r = errno;
1633 	close(fd);
1634 	return r == EOPNOTSUPP ? "loopback" : "[error]";
1635 }
1636 
1637 int get_mac_addr(int ifindex, void *mac_addr)
1638 {
1639 	char ifname[IF_NAMESIZE];
1640 	struct ifreq ifr = {};
1641 	int fd, r;
1642 
1643 	fd = socket(AF_INET, SOCK_DGRAM, 0);
1644 	if (fd < 0)
1645 		return -errno;
1646 
1647 	if (!if_indextoname(ifindex, ifname)) {
1648 		r = -errno;
1649 		goto end;
1650 	}
1651 
1652 	safe_strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
1653 
1654 	r = ioctl(fd, SIOCGIFHWADDR, &ifr);
1655 	if (r) {
1656 		r = -errno;
1657 		goto end;
1658 	}
1659 
1660 	memcpy(mac_addr, ifr.ifr_hwaddr.sa_data, 6 * sizeof(char));
1661 
1662 end:
1663 	close(fd);
1664 	return r;
1665 }
1666 
/* Runs before main(): opt the whole sample into libbpf strict mode so
 * deprecated-API misuse fails loudly.  Exits the process on failure
 * since nothing has been set up yet that would need cleanup.
 */
__attribute__((constructor)) static void sample_ctor(void)
{
	if (libbpf_set_strict_mode(LIBBPF_STRICT_ALL) < 0) {
		fprintf(stderr, "Failed to set libbpf strict mode: %s\n",
			strerror(errno));
		/* Just exit, nothing to cleanup right now */
		exit(EXIT_FAIL_BPF);
	}
}
1676