xref: /openbmc/linux/include/trace/events/xdp.h (revision 2022ca0a)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #undef TRACE_SYSTEM
3 #define TRACE_SYSTEM xdp
4 
5 #if !defined(_TRACE_XDP_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define _TRACE_XDP_H
7 
8 #include <linux/netdevice.h>
9 #include <linux/filter.h>
10 #include <linux/tracepoint.h>
11 #include <linux/bpf.h>
12 
/*
 * Map of the XDP verdict codes (XDP_ABORTED .. XDP_REDIRECT).  Each FN
 * expansion below turns this single list into both the
 * TRACE_DEFINE_ENUM() declarations (so user space can resolve the enum
 * values in the print format) and the symbol table consumed by
 * __print_symbolic() in the events' TP_printk().
 */
13 #define __XDP_ACT_MAP(FN)	\
14 	FN(ABORTED)		\
15 	FN(DROP)		\
16 	FN(PASS)		\
17 	FN(TX)			\
18 	FN(REDIRECT)
19 
/* Expands to TRACE_DEFINE_ENUM(XDP_<x>); for each action. */
20 #define __XDP_ACT_TP_FN(x)	\
21 	TRACE_DEFINE_ENUM(XDP_##x);
/* Expands to one { value, "name" } pair for __print_symbolic(). */
22 #define __XDP_ACT_SYM_FN(x)	\
23 	{ XDP_##x, #x },
/* Full action symbol table, closed with the required { -1, 0 } sentinel. */
24 #define __XDP_ACT_SYM_TAB	\
25 	__XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, 0 }
26 __XDP_ACT_MAP(__XDP_ACT_TP_FN)
27 
/*
 * xdp_exception - records the BPF program id, the returned XDP verdict
 * (@act) and the ingress ifindex when a driver reports an exception for
 * a packet handled by program @xdp on device @dev.
 */
28 TRACE_EVENT(xdp_exception,
29 
30 	TP_PROTO(const struct net_device *dev,
31 		 const struct bpf_prog *xdp, u32 act),
32 
33 	TP_ARGS(dev, xdp, act),
34 
35 	TP_STRUCT__entry(
36 		__field(int, prog_id)
37 		__field(u32, act)
38 		__field(int, ifindex)
39 	),
40 
41 	TP_fast_assign(
42 		__entry->prog_id	= xdp->aux->id;
43 		__entry->act		= act;
44 		__entry->ifindex	= dev->ifindex;
45 	),
46 
47 	TP_printk("prog_id=%d action=%s ifindex=%d",
48 		  __entry->prog_id,
49 		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
50 		  __entry->ifindex)
51 );
52 
/*
 * xdp_bulk_tx - per-device bulk XDP_TX completion statistics: @sent
 * frames transmitted, @drops frames dropped, @err driver error code.
 * The act field is hard-coded to XDP_TX so tooling can filter all XDP
 * events uniformly on "action".
 */
53 TRACE_EVENT(xdp_bulk_tx,
54 
55 	TP_PROTO(const struct net_device *dev,
56 		 int sent, int drops, int err),
57 
58 	TP_ARGS(dev, sent, drops, err),
59 
60 	TP_STRUCT__entry(
61 		__field(int, ifindex)
62 		__field(u32, act)
63 		__field(int, drops)
64 		__field(int, sent)
65 		__field(int, err)
66 	),
67 
68 	TP_fast_assign(
69 		__entry->ifindex	= dev->ifindex;
70 		__entry->act		= XDP_TX;
71 		__entry->drops		= drops;
72 		__entry->sent		= sent;
73 		__entry->err		= err;
74 	),
75 
76 	TP_printk("ifindex=%d action=%s sent=%d drops=%d err=%d",
77 		  __entry->ifindex,
78 		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
79 		  __entry->sent, __entry->drops, __entry->err)
80 );
81 
/*
 * xdp_redirect_template - shared event class for the four redirect
 * tracepoints (xdp_redirect{,_err,_map,_map_err}).  Records the program
 * id, ingress/egress ifindex, the error code and -- when the redirect
 * went through a BPF map -- the map id and index (map_id is 0 when no
 * map was involved).  This default TP_printk() omits the map fields;
 * the DEFINE_EVENT_PRINT variants further down add them.
 */
82 DECLARE_EVENT_CLASS(xdp_redirect_template,
83 
84 	TP_PROTO(const struct net_device *dev,
85 		 const struct bpf_prog *xdp,
86 		 int to_ifindex, int err,
87 		 const struct bpf_map *map, u32 map_index),
88 
89 	TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
90 
91 	TP_STRUCT__entry(
92 		__field(int, prog_id)
93 		__field(u32, act)
94 		__field(int, ifindex)
95 		__field(int, err)
96 		__field(int, to_ifindex)
97 		__field(u32, map_id)
98 		__field(int, map_index)
99 	),
100 
101 	TP_fast_assign(
102 		__entry->prog_id	= xdp->aux->id;
103 		__entry->act		= XDP_REDIRECT;
104 		__entry->ifindex	= dev->ifindex;
105 		__entry->err		= err;
106 		__entry->to_ifindex	= to_ifindex;
107 		__entry->map_id		= map ? map->id : 0;
108 		__entry->map_index	= map_index;
109 	),
110 
111 	TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d",
112 		  __entry->prog_id,
113 		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
114 		  __entry->ifindex, __entry->to_ifindex,
115 		  __entry->err)
116 );
117 
/* Successful non-map redirect (err == 0 at the call sites below). */
118 DEFINE_EVENT(xdp_redirect_template, xdp_redirect,
119 	TP_PROTO(const struct net_device *dev,
120 		 const struct bpf_prog *xdp,
121 		 int to_ifindex, int err,
122 		 const struct bpf_map *map, u32 map_index),
123 	TP_ARGS(dev, xdp, to_ifindex, err, map, map_index)
124 );
125 
/* Failed non-map redirect; err carries the negative error code. */
126 DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
127 	TP_PROTO(const struct net_device *dev,
128 		 const struct bpf_prog *xdp,
129 		 int to_ifindex, int err,
130 		 const struct bpf_map *map, u32 map_index),
131 	TP_ARGS(dev, xdp, to_ifindex, err, map, map_index)
132 );
133 
/*
 * Convenience wrappers for the non-map redirect case: no map (NULL) and
 * map index 0.
 * NOTE(review): the trailing ';' baked into these macros means a call
 * site that adds its own ';' expands to an extra empty statement, and
 * use inside an unbraced if/else is hazardous.  Removing it would
 * require auditing callers that rely on the embedded semicolon, so it
 * is only flagged here.
 */
134 #define _trace_xdp_redirect(dev, xdp, to)		\
135 	 trace_xdp_redirect(dev, xdp, to, 0, NULL, 0);
136 
137 #define _trace_xdp_redirect_err(dev, xdp, to, err)	\
138 	 trace_xdp_redirect_err(dev, xdp, to, err, NULL, 0);
139 
/*
 * Successful map-based redirect.  Same payload as the class, but the
 * print format additionally exposes map_id and map_index.
 */
140 DEFINE_EVENT_PRINT(xdp_redirect_template, xdp_redirect_map,
141 	TP_PROTO(const struct net_device *dev,
142 		 const struct bpf_prog *xdp,
143 		 int to_ifindex, int err,
144 		 const struct bpf_map *map, u32 map_index),
145 	TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
146 	TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
147 		  " map_id=%d map_index=%d",
148 		  __entry->prog_id,
149 		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
150 		  __entry->ifindex, __entry->to_ifindex,
151 		  __entry->err,
152 		  __entry->map_id, __entry->map_index)
153 );
154 
/*
 * Failed map-based redirect; err carries the error and the print format
 * exposes map_id and map_index like xdp_redirect_map.
 */
155 DEFINE_EVENT_PRINT(xdp_redirect_template, xdp_redirect_map_err,
156 	TP_PROTO(const struct net_device *dev,
157 		 const struct bpf_prog *xdp,
158 		 int to_ifindex, int err,
159 		 const struct bpf_map *map, u32 map_index),
160 	TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
161 	TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
162 		  " map_id=%d map_index=%d",
163 		  __entry->prog_id,
164 		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
165 		  __entry->ifindex, __entry->to_ifindex,
166 		  __entry->err,
167 		  __entry->map_id, __entry->map_index)
168 );
169 
/*
 * Minimal mirror of the devmap value object, just enough to reach the
 * net_device pointer in devmap_ifindex() without pulling in devmap
 * internals.
 * NOTE(review): this assumes 'dev' is the first member of the real
 * devmap entry struct (kernel/bpf/devmap.c) -- must stay in sync;
 * confirm against that definition.
 */
170 #ifndef __DEVMAP_OBJ_TYPE
171 #define __DEVMAP_OBJ_TYPE
172 struct _bpf_dtab_netdev {
173 	struct net_device *dev;
174 };
175 #endif /* __DEVMAP_OBJ_TYPE */
176 
/*
 * devmap_ifindex - resolve the egress ifindex of a redirect target.
 * For DEVMAP/DEVMAP_HASH maps @fwd points at a devmap entry (viewed as
 * struct _bpf_dtab_netdev) and we report its net_device ifindex; for
 * any other map type report 0.
 *
 * Fix: parenthesize the macro arguments so expressions passed as
 * @fwd/@map bind correctly (standard macro hygiene -- the previous
 * 'map->map_type' / bare-cast 'fwd' misassociate for non-trivial
 * argument expressions).
 */
177 #define devmap_ifindex(fwd, map)				\
178 	(((map)->map_type == BPF_MAP_TYPE_DEVMAP ||		\
179 	  (map)->map_type == BPF_MAP_TYPE_DEVMAP_HASH) ?	\
180 	  ((struct _bpf_dtab_netdev *)(fwd))->dev->ifindex : 0)
181 
/* Successful map redirect: err fixed to 0. */
182 #define _trace_xdp_redirect_map(dev, xdp, fwd, map, idx)		\
183 	 trace_xdp_redirect_map(dev, xdp, devmap_ifindex(fwd, map),	\
184 				0, map, idx)
185 
/* Failed map redirect: the error code is passed through. */
186 #define _trace_xdp_redirect_map_err(dev, xdp, fwd, map, idx, err)	\
187 	 trace_xdp_redirect_map_err(dev, xdp, devmap_ifindex(fwd, map),	\
188 				    err, map, idx)
189 
/*
 * xdp_cpumap_kthread - per-iteration stats from a cpumap kthread:
 * @processed frames handled, @drops frames dropped, @sched non-zero
 * when the kthread (re)scheduled.  The cpu field is the CPU the
 * kthread runs on; act is fixed to XDP_REDIRECT since cpumap traffic
 * arrives via redirect.
 */
190 TRACE_EVENT(xdp_cpumap_kthread,
191 
192 	TP_PROTO(int map_id, unsigned int processed,  unsigned int drops,
193 		 int sched),
194 
195 	TP_ARGS(map_id, processed, drops, sched),
196 
197 	TP_STRUCT__entry(
198 		__field(int, map_id)
199 		__field(u32, act)
200 		__field(int, cpu)
201 		__field(unsigned int, drops)
202 		__field(unsigned int, processed)
203 		__field(int, sched)
204 	),
205 
206 	TP_fast_assign(
207 		__entry->map_id		= map_id;
208 		__entry->act		= XDP_REDIRECT;
209 		__entry->cpu		= smp_processor_id();
210 		__entry->drops		= drops;
211 		__entry->processed	= processed;
212 		__entry->sched	= sched;
213 	),
214 
215 	TP_printk("kthread"
216 		  " cpu=%d map_id=%d action=%s"
217 		  " processed=%u drops=%u"
218 		  " sched=%d",
219 		  __entry->cpu, __entry->map_id,
220 		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
221 		  __entry->processed, __entry->drops,
222 		  __entry->sched)
223 );
224 
/*
 * xdp_cpumap_enqueue - stats for enqueueing a batch of frames toward a
 * remote CPU's cpumap queue: @processed frames enqueued, @drops frames
 * dropped (e.g. queue full), @to_cpu the destination CPU.  The cpu
 * field is the enqueueing (current) CPU; act is fixed to XDP_REDIRECT.
 */
225 TRACE_EVENT(xdp_cpumap_enqueue,
226 
227 	TP_PROTO(int map_id, unsigned int processed,  unsigned int drops,
228 		 int to_cpu),
229 
230 	TP_ARGS(map_id, processed, drops, to_cpu),
231 
232 	TP_STRUCT__entry(
233 		__field(int, map_id)
234 		__field(u32, act)
235 		__field(int, cpu)
236 		__field(unsigned int, drops)
237 		__field(unsigned int, processed)
238 		__field(int, to_cpu)
239 	),
240 
241 	TP_fast_assign(
242 		__entry->map_id		= map_id;
243 		__entry->act		= XDP_REDIRECT;
244 		__entry->cpu		= smp_processor_id();
245 		__entry->drops		= drops;
246 		__entry->processed	= processed;
247 		__entry->to_cpu		= to_cpu;
248 	),
249 
250 	TP_printk("enqueue"
251 		  " cpu=%d map_id=%d action=%s"
252 		  " processed=%u drops=%u"
253 		  " to_cpu=%d",
254 		  __entry->cpu, __entry->map_id,
255 		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
256 		  __entry->processed, __entry->drops,
257 		  __entry->to_cpu)
258 );
259 
/*
 * xdp_devmap_xmit - result of a devmap ndo_xdp_xmit() bulk transmit:
 * @sent frames transmitted from @from_dev to @to_dev, @drops frames
 * dropped, @err the driver's error code.  map/map_index identify the
 * devmap slot being flushed; act is fixed to XDP_REDIRECT.
 */
260 TRACE_EVENT(xdp_devmap_xmit,
261 
262 	TP_PROTO(const struct bpf_map *map, u32 map_index,
263 		 int sent, int drops,
264 		 const struct net_device *from_dev,
265 		 const struct net_device *to_dev, int err),
266 
267 	TP_ARGS(map, map_index, sent, drops, from_dev, to_dev, err),
268 
269 	TP_STRUCT__entry(
270 		__field(int, map_id)
271 		__field(u32, act)
272 		__field(u32, map_index)
273 		__field(int, drops)
274 		__field(int, sent)
275 		__field(int, from_ifindex)
276 		__field(int, to_ifindex)
277 		__field(int, err)
278 	),
279 
280 	TP_fast_assign(
281 		__entry->map_id		= map->id;
282 		__entry->act		= XDP_REDIRECT;
283 		__entry->map_index	= map_index;
284 		__entry->drops		= drops;
285 		__entry->sent		= sent;
286 		__entry->from_ifindex	= from_dev->ifindex;
287 		__entry->to_ifindex	= to_dev->ifindex;
288 		__entry->err		= err;
289 	),
290 
291 	TP_printk("ndo_xdp_xmit"
292 		  " map_id=%d map_index=%d action=%s"
293 		  " sent=%d drops=%d"
294 		  " from_ifindex=%d to_ifindex=%d err=%d",
295 		  __entry->map_id, __entry->map_index,
296 		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
297 		  __entry->sent, __entry->drops,
298 		  __entry->from_ifindex, __entry->to_ifindex, __entry->err)
299 );
300 
301 /* Expect users already include <net/xdp.h>, but not xdp_priv.h */
302 #include <net/xdp_priv.h>
303 
/*
 * Map of xdp_mem_type allocator types, expanded the same way as
 * __XDP_ACT_MAP above: once into TRACE_DEFINE_ENUM() declarations and
 * once into the __print_symbolic() table for the mem_* events.
 */
304 #define __MEM_TYPE_MAP(FN)	\
305 	FN(PAGE_SHARED)		\
306 	FN(PAGE_ORDER0)		\
307 	FN(PAGE_POOL)		\
308 	FN(ZERO_COPY)
309 
/* Expands to TRACE_DEFINE_ENUM(MEM_TYPE_<x>); for each type. */
310 #define __MEM_TYPE_TP_FN(x)	\
311 	TRACE_DEFINE_ENUM(MEM_TYPE_##x);
/* Expands to one { value, "name" } pair for __print_symbolic(). */
312 #define __MEM_TYPE_SYM_FN(x)	\
313 	{ MEM_TYPE_##x, #x },
/* Full mem-type symbol table with the { -1, 0 } sentinel. */
314 #define __MEM_TYPE_SYM_TAB	\
315 	__MEM_TYPE_MAP(__MEM_TYPE_SYM_FN) { -1, 0 }
316 __MEM_TYPE_MAP(__MEM_TYPE_TP_FN)
317 
/*
 * mem_disconnect - fired when an XDP memory allocator is being
 * disconnected: records the allocator identity (id, type, pointer),
 * whether it is currently @safe_to_remove, whether removal is being
 * @force'd, and how many disconnect attempts have been made so far.
 */
318 TRACE_EVENT(mem_disconnect,
319 
320 	TP_PROTO(const struct xdp_mem_allocator *xa,
321 		 bool safe_to_remove, bool force),
322 
323 	TP_ARGS(xa, safe_to_remove, force),
324 
325 	TP_STRUCT__entry(
326 		__field(const struct xdp_mem_allocator *,	xa)
327 		__field(u32,		mem_id)
328 		__field(u32,		mem_type)
329 		__field(const void *,	allocator)
330 		__field(bool,		safe_to_remove)
331 		__field(bool,		force)
332 		__field(int,		disconnect_cnt)
333 	),
334 
335 	TP_fast_assign(
336 		__entry->xa		= xa;
337 		__entry->mem_id		= xa->mem.id;
338 		__entry->mem_type	= xa->mem.type;
339 		__entry->allocator	= xa->allocator;
340 		__entry->safe_to_remove	= safe_to_remove;
341 		__entry->force		= force;
342 		__entry->disconnect_cnt	= xa->disconnect_cnt;
343 	),
344 
345 	TP_printk("mem_id=%d mem_type=%s allocator=%p"
346 		  " safe_to_remove=%s force=%s disconnect_cnt=%d",
347 		  __entry->mem_id,
348 		  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
349 		  __entry->allocator,
350 		  __entry->safe_to_remove ? "true" : "false",
351 		  __entry->force ? "true" : "false",
352 		  __entry->disconnect_cnt
353 	)
354 );
355 
/*
 * mem_connect - fired when an XDP memory allocator @xa is registered
 * for RX queue @rxq: records the allocator identity and the ifindex of
 * the device owning the queue.
 */
356 TRACE_EVENT(mem_connect,
357 
358 	TP_PROTO(const struct xdp_mem_allocator *xa,
359 		 const struct xdp_rxq_info *rxq),
360 
361 	TP_ARGS(xa, rxq),
362 
363 	TP_STRUCT__entry(
364 		__field(const struct xdp_mem_allocator *,	xa)
365 		__field(u32,		mem_id)
366 		__field(u32,		mem_type)
367 		__field(const void *,	allocator)
368 		__field(const struct xdp_rxq_info *,		rxq)
369 		__field(int,		ifindex)
370 	),
371 
372 	TP_fast_assign(
373 		__entry->xa		= xa;
374 		__entry->mem_id		= xa->mem.id;
375 		__entry->mem_type	= xa->mem.type;
376 		__entry->allocator	= xa->allocator;
377 		__entry->rxq		= rxq;
378 		__entry->ifindex	= rxq->dev->ifindex;
379 	),
380 
381 	TP_printk("mem_id=%d mem_type=%s allocator=%p"
382 		  " ifindex=%d",
383 		  __entry->mem_id,
384 		  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
385 		  __entry->allocator,
386 		  __entry->ifindex
387 	)
388 );
389 
/*
 * mem_return_failed - fired when returning @page to the memory
 * allocator described by @mem fails: records the page pointer and the
 * allocator id/type.
 */
390 TRACE_EVENT(mem_return_failed,
391 
392 	TP_PROTO(const struct xdp_mem_info *mem,
393 		 const struct page *page),
394 
395 	TP_ARGS(mem, page),
396 
397 	TP_STRUCT__entry(
398 		__field(const struct page *,	page)
399 		__field(u32,		mem_id)
400 		__field(u32,		mem_type)
401 	),
402 
403 	TP_fast_assign(
404 		__entry->page		= page;
405 		__entry->mem_id		= mem->id;
406 		__entry->mem_type	= mem->type;
407 	),
408 
409 	TP_printk("mem_id=%d mem_type=%s page=%p",
410 		  __entry->mem_id,
411 		  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
412 		  __entry->page
413 	)
414 );
415 
416 #endif /* _TRACE_XDP_H */
417 
418 #include <trace/define_trace.h>
419