/* SPDX-License-Identifier: GPL-2.0-only */
/* include/net/xdp.h
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#ifndef __LINUX_NET_XDP_H__
#define __LINUX_NET_XDP_H__

#include <linux/skbuff.h> /* skb_shared_info */

/**
 * DOC: XDP RX-queue information
 *
 * The XDP RX-queue info (xdp_rxq_info) is associated with the driver
 * level RX-ring queues.  It is information that is specific to how
 * the driver has configured a given RX-ring queue.
 *
 * Each xdp_buff frame received in the driver carries a (pointer)
 * reference to this xdp_rxq_info structure.  This provides the XDP
 * data-path read-access to RX-info for both kernel and bpf-side
 * (limited subset).
 *
 * For now, direct access is only safe while running in NAPI/softirq
 * context.  Contents are read-mostly and must not be updated during
 * driver NAPI/softirq poll.
 *
 * The driver usage API is a register and unregister API.
 *
 * The struct is not directly tied to the XDP prog.  A new XDP prog
 * can be attached as long as it doesn't change the underlying
 * RX-ring.  If the RX-ring does change significantly, the NIC driver
 * naturally needs to stop the RX-ring before purging and reallocating
 * memory.  In that process the driver MUST call unregister (which
 * also applies for driver shutdown and unload).  The register API is
 * also mandatory during RX-ring setup.
 */
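
/* Illustrative register/unregister lifecycle, sketched from the API
 * declared further down in this header.  Error handling is trimmed, and
 * names like "rq", "rq->index", "rq->napi_id" and "rq->page_pool" are
 * assumed driver-local state, not part of this API:
 *
 *	// RX-ring setup path
 *	err = xdp_rxq_info_reg(&rq->xdp_rxq, netdev, rq->index, rq->napi_id);
 *	if (err)
 *		goto err_out;
 *	err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, MEM_TYPE_PAGE_POOL,
 *					 rq->page_pool);
 *	if (err) {
 *		xdp_rxq_info_unreg(&rq->xdp_rxq);
 *		goto err_out;
 *	}
 *
 *	// RX-ring teardown path (also on driver shutdown/unload)
 *	xdp_rxq_info_unreg(&rq->xdp_rxq);
 */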

enum xdp_mem_type {
	MEM_TYPE_PAGE_SHARED = 0, /* Split-page refcnt based model */
	MEM_TYPE_PAGE_ORDER0,     /* Orig XDP full page model */
	MEM_TYPE_PAGE_POOL,       /* Pages come from a page_pool allocator */
	MEM_TYPE_XSK_BUFF_POOL,   /* AF_XDP zero-copy buffer pool */
	MEM_TYPE_MAX,
};

/* XDP flags for ndo_xdp_xmit */
#define XDP_XMIT_FLUSH		(1U << 0)	/* doorbell signal consumer */
#define XDP_XMIT_FLAGS_MASK	XDP_XMIT_FLUSH

struct xdp_mem_info {
	u32 type; /* enum xdp_mem_type, but known size type */
	u32 id;
};

struct page_pool;

struct xdp_rxq_info {
	struct net_device *dev;
	u32 queue_index;
	u32 reg_state;
	struct xdp_mem_info mem;
	unsigned int napi_id;
} ____cacheline_aligned; /* perf critical, avoid false-sharing */

struct xdp_txq_info {
	struct net_device *dev;
};

enum xdp_buff_flags {
	XDP_FLAGS_HAS_FRAGS	= BIT(0), /* non-linear xdp buff */
};

struct xdp_buff {
	void *data;
	void *data_end;
	void *data_meta;
	void *data_hard_start;
	struct xdp_rxq_info *rxq;
	struct xdp_txq_info *txq;
	u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom */
	u32 flags; /* supported values defined in xdp_buff_flags */
};

static __always_inline bool xdp_buff_has_frags(struct xdp_buff *xdp)
{
	return !!(xdp->flags & XDP_FLAGS_HAS_FRAGS);
}

static __always_inline void xdp_buff_set_frags_flag(struct xdp_buff *xdp)
{
	xdp->flags |= XDP_FLAGS_HAS_FRAGS;
}

static __always_inline void xdp_buff_clear_frags_flag(struct xdp_buff *xdp)
{
	xdp->flags &= ~XDP_FLAGS_HAS_FRAGS;
}

static __always_inline void
xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq)
{
	xdp->frame_sz = frame_sz;
	xdp->rxq = rxq;
	xdp->flags = 0;
}

static __always_inline void
xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start,
		 int headroom, int data_len, const bool meta_valid)
{
	unsigned char *data = hard_start + headroom;

	xdp->data_hard_start = hard_start;
	xdp->data = data;
	xdp->data_end = data + data_len;
	xdp->data_meta = meta_valid ? data : data + 1;
}
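
/* Illustrative RX-path usage of the two init helpers above (a sketch,
 * not lifted from a specific driver; "page", "headroom", "len", "rq"
 * and "prog" are assumed driver-local values):
 *
 *	struct xdp_buff xdp;
 *	u32 act;
 *
 *	xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq);
 *	xdp_prepare_buff(&xdp, page_address(page), headroom, len, false);
 *	act = bpf_prog_run_xdp(prog, &xdp);
 */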

/* Reserve memory area at the end of the data area.
 *
 * This macro reserves tailroom in the XDP buffer by limiting the
 * XDP/BPF data access to data_hard_end.  Notice that the same area
 * (and size) is used for XDP_PASS, when constructing the SKB via
 * build_skb().
 */
#define xdp_data_hard_end(xdp)				\
	((xdp)->data_hard_start + (xdp)->frame_sz -	\
	 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

static inline struct skb_shared_info *
xdp_get_shared_info_from_buff(struct xdp_buff *xdp)
{
	return (struct skb_shared_info *)xdp_data_hard_end(xdp);
}
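
/* Illustrative sketch of how a driver can grow a multi-buffer frame by
 * appending a fragment to the shared_info area (an assumption-laden
 * example, not taken verbatim from an in-tree driver; "page", "off"
 * and "data_len" are driver-local):
 *
 *	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
 *	skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags++];
 *
 *	skb_frag_off_set(frag, off);
 *	skb_frag_size_set(frag, data_len);
 *	__skb_frag_set_page(frag, page);
 *	xdp_buff_set_frags_flag(xdp);
 */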

struct xdp_frame {
	void *data;
	u16 len;
	u16 headroom;
	u32 metasize:8;
	u32 frame_sz:24;
	/* Lifetime of xdp_rxq_info is limited to NAPI/enqueue time,
	 * while mem info is valid on remote CPU.
	 */
	struct xdp_mem_info mem;
	struct net_device *dev_rx; /* used by cpumap */
	u32 flags; /* supported values defined in xdp_buff_flags */
};

static __always_inline bool xdp_frame_has_frags(struct xdp_frame *frame)
{
	return !!(frame->flags & XDP_FLAGS_HAS_FRAGS);
}

#define XDP_BULK_QUEUE_SIZE	16
struct xdp_frame_bulk {
	int count;
	void *xa;
	void *q[XDP_BULK_QUEUE_SIZE];
};

static __always_inline void xdp_frame_bulk_init(struct xdp_frame_bulk *bq)
{
	/* bq->count will be zeroed when bq->xa gets updated */
	bq->xa = NULL;
}
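
/* Illustrative bulk-free pattern (a sketch; xdp_return_frame_bulk() and
 * xdp_flush_frame_bulk() are declared further down in this header, and
 * "frames"/"n" are assumed caller-local):
 *
 *	struct xdp_frame_bulk bq;
 *	int i;
 *
 *	xdp_frame_bulk_init(&bq);
 *	for (i = 0; i < n; i++)
 *		xdp_return_frame_bulk(frames[i], &bq);
 *	xdp_flush_frame_bulk(&bq);
 */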

static inline struct skb_shared_info *
xdp_get_shared_info_from_frame(struct xdp_frame *frame)
{
	void *data_hard_start = frame->data - frame->headroom - sizeof(*frame);

	return (struct skb_shared_info *)(data_hard_start + frame->frame_sz -
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
}

struct xdp_cpumap_stats {
	unsigned int redirect;
	unsigned int pass;
	unsigned int drop;
};

/* Clear kernel pointers in xdp_frame */
static inline void xdp_scrub_frame(struct xdp_frame *frame)
{
	frame->data = NULL;
	frame->dev_rx = NULL;
}

/* Avoids inlining WARN macro in fast-path */
void xdp_warn(const char *msg, const char *func, const int line);
#define XDP_WARN(msg) xdp_warn(msg, __func__, __LINE__)

struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp);
struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					   struct sk_buff *skb,
					   struct net_device *dev);
struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					 struct net_device *dev);
int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp);
struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf);

static inline
void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp)
{
	xdp->data_hard_start = frame->data - frame->headroom - sizeof(*frame);
	xdp->data = frame->data;
	xdp->data_end = frame->data + frame->len;
	xdp->data_meta = frame->data - frame->metasize;
	xdp->frame_sz = frame->frame_sz;
	xdp->flags = frame->flags;
}
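
/* Illustrative sketch: cpumap-style code rebuilds an xdp_buff from a
 * queued xdp_frame before running an XDP program on it (a hedged
 * example; "rxq" is assumed to have been prepared by the caller, since
 * this helper does not set xdp->rxq):
 *
 *	struct xdp_buff xdp;
 *	u32 act;
 *
 *	xdp_convert_frame_to_buff(xdpf, &xdp);
 *	xdp.rxq = &rxq;
 *	act = bpf_prog_run_xdp(prog, &xdp);
 */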

static inline
int xdp_update_frame_from_buff(struct xdp_buff *xdp,
			       struct xdp_frame *xdp_frame)
{
	int metasize, headroom;

	/* Ensure headroom is available for storing info */
	headroom = xdp->data - xdp->data_hard_start;
	metasize = xdp->data - xdp->data_meta;
	metasize = metasize > 0 ? metasize : 0;
	if (unlikely((headroom - metasize) < sizeof(*xdp_frame)))
		return -ENOSPC;

	/* Catch if driver didn't reserve tailroom for skb_shared_info */
	if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
		XDP_WARN("Driver BUG: missing reserved tailroom");
		return -ENOSPC;
	}

	xdp_frame->data = xdp->data;
	xdp_frame->len  = xdp->data_end - xdp->data;
	xdp_frame->headroom = headroom - sizeof(*xdp_frame);
	xdp_frame->metasize = metasize;
	xdp_frame->frame_sz = xdp->frame_sz;
	xdp_frame->flags = xdp->flags;

	return 0;
}

/* Convert xdp_buff to xdp_frame */
static inline
struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp)
{
	struct xdp_frame *xdp_frame;

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
		return xdp_convert_zc_to_xdp_frame(xdp);

	/* Store info in top of packet */
	xdp_frame = xdp->data_hard_start;
	if (unlikely(xdp_update_frame_from_buff(xdp, xdp_frame) < 0))
		return NULL;

	/* rxq only valid until napi_schedule ends, convert to xdp_mem_info */
	xdp_frame->mem = xdp->rxq->mem;

	return xdp_frame;
}
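
/* Illustrative XDP_REDIRECT/xmit sketch: convert the buff to a frame
 * before the packet outlives NAPI processing (a hedged example; the
 * "enqueue(xdpf)" step is a placeholder for e.g. a ndo_xdp_xmit TX
 * ring or a cpumap/devmap queue):
 *
 *	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(&xdp);
 *
 *	if (unlikely(!xdpf))
 *		goto drop;
 *	enqueue(xdpf);
 */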

void xdp_return_frame(struct xdp_frame *xdpf);
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf);
void xdp_return_buff(struct xdp_buff *xdp);
void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq);
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
			   struct xdp_frame_bulk *bq);

/* When sending an xdp_frame into the network stack, there is no return
 * point callback, which is needed to release e.g. DMA-mapping resources
 * with page_pool.  Thus, have an explicit function to release frame
 * resources.
 */
void __xdp_release_frame(void *data, struct xdp_mem_info *mem);
static inline void xdp_release_frame(struct xdp_frame *xdpf)
{
	struct xdp_mem_info *mem = &xdpf->mem;

	/* Currently only page_pool needs this */
	if (mem->type == MEM_TYPE_PAGE_POOL)
		__xdp_release_frame(xdpf->data, mem);
}

int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		     struct net_device *dev, u32 queue_index, unsigned int napi_id);
void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq);
void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq);
bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq);
int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator);
void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq);
int xdp_reg_mem_model(struct xdp_mem_info *mem,
		      enum xdp_mem_type type, void *allocator);
void xdp_unreg_mem_model(struct xdp_mem_info *mem);

/* Drivers not supporting XDP metadata can use this helper, which
 * rejects any room expansion for metadata as a result.
 */
static __always_inline void
xdp_set_data_meta_invalid(struct xdp_buff *xdp)
{
	xdp->data_meta = xdp->data + 1;
}

static __always_inline bool
xdp_data_meta_unsupported(const struct xdp_buff *xdp)
{
	return unlikely(xdp->data_meta > xdp->data);
}

/* Metadata length must be a multiple of 4 bytes, and at most 32 bytes */
static inline bool xdp_metalen_invalid(unsigned long metalen)
{
	return (metalen & (sizeof(__u32) - 1)) || (metalen > 32);
}

struct xdp_attachment_info {
	struct bpf_prog *prog;
	u32 flags;
};

struct netdev_bpf;
void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf);

#define DEV_MAP_BULK_SIZE XDP_BULK_QUEUE_SIZE

#endif /* __LINUX_NET_XDP_H__ */