/* SPDX-License-Identifier: GPL-2.0-only */
/* include/net/xdp.h
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#ifndef __LINUX_NET_XDP_H__
#define __LINUX_NET_XDP_H__

#include <linux/skbuff.h> /* skb_shared_info */

/**
 * DOC: XDP RX-queue information
 *
 * The XDP RX-queue info (xdp_rxq_info) is associated with the driver
 * level RX-ring queues. It is information that is specific to how
 * the driver has configured a given RX-ring queue.
 *
 * Each xdp_buff frame received in the driver carries a (pointer)
 * reference to this xdp_rxq_info structure. This provides the XDP
 * data-path read-access to RX-info for both kernel and bpf-side
 * (limited subset).
 *
 * For now, direct access is only safe while running in NAPI/softirq
 * context. Contents are read-mostly and must not be updated during
 * driver NAPI/softirq poll.
 *
 * The driver usage API is a register and unregister API.
 *
 * The struct is not directly tied to the XDP prog. A new XDP prog
 * can be attached as long as it doesn't change the underlying
 * RX-ring. If the RX-ring does change significantly, the NIC driver
 * naturally needs to stop the RX-ring before purging and reallocating
 * memory. In that process the driver MUST call unregister (which
 * also applies for driver shutdown and unload). The register API is
 * also mandatory during RX-ring setup.
 */

enum xdp_mem_type {
	MEM_TYPE_PAGE_SHARED = 0, /* Split-page refcnt based model */
	MEM_TYPE_PAGE_ORDER0,     /* Orig XDP full page model */
	MEM_TYPE_PAGE_POOL,
	MEM_TYPE_XSK_BUFF_POOL,
	MEM_TYPE_MAX,
};

/* XDP flags for ndo_xdp_xmit */
#define XDP_XMIT_FLUSH		(1U << 0)	/* doorbell signal consumer */
#define XDP_XMIT_FLAGS_MASK	XDP_XMIT_FLUSH

struct xdp_mem_info {
	u32 type; /* enum xdp_mem_type, but known size type */
	u32 id;
};

struct page_pool;

struct xdp_rxq_info {
	struct net_device *dev;
	u32 queue_index;
	u32 reg_state;
	struct xdp_mem_info mem;
	unsigned int napi_id;
} ____cacheline_aligned; /* perf critical, avoid false-sharing */

struct xdp_txq_info {
	struct net_device *dev;
};

enum xdp_buff_flags {
	XDP_FLAGS_HAS_FRAGS		= BIT(0), /* non-linear xdp buff */
	XDP_FLAGS_FRAGS_PF_MEMALLOC	= BIT(1), /* xdp paged memory is under
						   * pressure
						   */
};

struct xdp_buff {
	void *data;
	void *data_end;
	void *data_meta;
	void *data_hard_start;
	struct xdp_rxq_info *rxq;
	struct xdp_txq_info *txq;
	u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom */
	u32 flags; /* supported values defined in xdp_buff_flags */
};

static __always_inline bool xdp_buff_has_frags(struct xdp_buff *xdp)
{
	return !!(xdp->flags & XDP_FLAGS_HAS_FRAGS);
}

static __always_inline void xdp_buff_set_frags_flag(struct xdp_buff *xdp)
{
	xdp->flags |= XDP_FLAGS_HAS_FRAGS;
}

static __always_inline void xdp_buff_clear_frags_flag(struct xdp_buff *xdp)
{
	xdp->flags &= ~XDP_FLAGS_HAS_FRAGS;
}

static __always_inline bool xdp_buff_is_frag_pfmemalloc(struct xdp_buff *xdp)
{
	return !!(xdp->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC);
}

static __always_inline void xdp_buff_set_frag_pfmemalloc(struct xdp_buff *xdp)
{
	xdp->flags |= XDP_FLAGS_FRAGS_PF_MEMALLOC;
}
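
/* Example: a minimal sketch of the register/unregister lifecycle
 * described in the DOC section above, assuming a page_pool based
 * driver. The mydrv_ring layout and mydrv_* names are hypothetical;
 * the xdp_rxq_info_reg*() helpers used here are declared later in
 * this header.
 *
 *	static int mydrv_setup_rx_ring(struct mydrv_ring *ring)
 *	{
 *		int err;
 *
 *		err = xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
 *				       ring->queue_index, ring->napi_id);
 *		if (err)
 *			return err;
 *
 *		err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
 *						 MEM_TYPE_PAGE_POOL,
 *						 ring->page_pool);
 *		if (err)
 *			xdp_rxq_info_unreg(&ring->xdp_rxq);
 *		return err;
 *	}
 *
 * On ring teardown (and driver shutdown/unload) the driver MUST call
 * xdp_rxq_info_unreg(), which also unregisters the memory model.
 */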
static __always_inline void
xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq)
{
	xdp->frame_sz = frame_sz;
	xdp->rxq = rxq;
	xdp->flags = 0;
}

static __always_inline void
xdp_prepare_buff(struct xdp_buff *xdp, unsigned char *hard_start,
		 int headroom, int data_len, const bool meta_valid)
{
	unsigned char *data = hard_start + headroom;

	xdp->data_hard_start = hard_start;
	xdp->data = data;
	xdp->data_end = data + data_len;
	xdp->data_meta = meta_valid ? data : data + 1;
}

/* Reserve memory area at end of data area.
 *
 * This macro reserves tailroom in the XDP buffer by limiting the
 * XDP/BPF data access to data_hard_end. Notice that the same area
 * (and size) is used for XDP_PASS, when constructing the SKB via
 * build_skb().
 */
#define xdp_data_hard_end(xdp)				\
	((xdp)->data_hard_start + (xdp)->frame_sz -	\
	 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

static inline struct skb_shared_info *
xdp_get_shared_info_from_buff(struct xdp_buff *xdp)
{
	return (struct skb_shared_info *)xdp_data_hard_end(xdp);
}
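
/* Example: a minimal sketch of per-packet fast-path usage, assuming a
 * one-page-per-packet driver. The ring/page/pkt_len variables and the
 * choice of PAGE_SIZE as frame_sz are illustrative only.
 *
 *	struct xdp_buff xdp;
 *	u32 act;
 *
 *	xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_rxq);
 *	xdp_prepare_buff(&xdp, page_address(page), XDP_PACKET_HEADROOM,
 *			 pkt_len, false);
 *	act = bpf_prog_run_xdp(xdp_prog, &xdp);
 *
 * Note the false meta_valid argument: it marks data_meta as invalid
 * via the data + 1 convention, so the BPF program cannot grow a
 * metadata area in front of the packet.
 */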
struct xdp_frame {
	void *data;
	u16 len;
	u16 headroom;
	u32 metasize:8;
	u32 frame_sz:24;
	/* Lifetime of xdp_rxq_info is limited to NAPI/enqueue time,
	 * while mem info is valid on remote CPU.
	 */
	struct xdp_mem_info mem;
	struct net_device *dev_rx; /* used by cpumap */
	u32 flags; /* supported values defined in xdp_buff_flags */
};

static __always_inline bool xdp_frame_has_frags(struct xdp_frame *frame)
{
	return !!(frame->flags & XDP_FLAGS_HAS_FRAGS);
}

static __always_inline bool xdp_frame_is_frag_pfmemalloc(struct xdp_frame *frame)
{
	return !!(frame->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC);
}

#define XDP_BULK_QUEUE_SIZE	16
struct xdp_frame_bulk {
	int count;
	void *xa;
	void *q[XDP_BULK_QUEUE_SIZE];
};

static __always_inline void xdp_frame_bulk_init(struct xdp_frame_bulk *bq)
{
	/* bq->count will be zeroed when bq->xa gets updated */
	bq->xa = NULL;
}

static inline struct skb_shared_info *
xdp_get_shared_info_from_frame(struct xdp_frame *frame)
{
	void *data_hard_start = frame->data - frame->headroom - sizeof(*frame);

	return (struct skb_shared_info *)(data_hard_start + frame->frame_sz -
					  SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
}

struct xdp_cpumap_stats {
	unsigned int redirect;
	unsigned int pass;
	unsigned int drop;
};

/* Clear kernel pointers in xdp_frame */
static inline void xdp_scrub_frame(struct xdp_frame *frame)
{
	frame->data = NULL;
	frame->dev_rx = NULL;
}

static inline void
xdp_update_skb_shared_info(struct sk_buff *skb, u8 nr_frags,
			   unsigned int size, unsigned int truesize,
			   bool pfmemalloc)
{
	skb_shinfo(skb)->nr_frags = nr_frags;

	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
	skb->pfmemalloc |= pfmemalloc;
}

/* Avoids inlining WARN macro in fast-path */
void xdp_warn(const char *msg, const char *func, const int line);
#define XDP_WARN(msg) xdp_warn(msg, __func__, __LINE__)

struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp);
struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					   struct sk_buff *skb,
					   struct net_device *dev);
struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					 struct net_device *dev);
int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp);
struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf);

static inline
void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp)
{
	xdp->data_hard_start = frame->data - frame->headroom - sizeof(*frame);
	xdp->data = frame->data;
	xdp->data_end = frame->data + frame->len;
	xdp->data_meta = frame->data - frame->metasize;
	xdp->frame_sz = frame->frame_sz;
	xdp->flags = frame->flags;
}

static inline
int xdp_update_frame_from_buff(struct xdp_buff *xdp,
			       struct xdp_frame *xdp_frame)
{
	int metasize, headroom;

	/* Ensure headroom is available for storing info */
	headroom = xdp->data - xdp->data_hard_start;
	metasize = xdp->data - xdp->data_meta;
	metasize = metasize > 0 ? metasize : 0;
	if (unlikely((headroom - metasize) < sizeof(*xdp_frame)))
		return -ENOSPC;

	/* Catch if driver didn't reserve tailroom for skb_shared_info */
	if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
		XDP_WARN("Driver BUG: missing reserved tailroom");
		return -ENOSPC;
	}

	xdp_frame->data = xdp->data;
	xdp_frame->len = xdp->data_end - xdp->data;
	xdp_frame->headroom = headroom - sizeof(*xdp_frame);
	xdp_frame->metasize = metasize;
	xdp_frame->frame_sz = xdp->frame_sz;
	xdp_frame->flags = xdp->flags;

	return 0;
}

/* Convert xdp_buff to xdp_frame */
static inline
struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp)
{
	struct xdp_frame *xdp_frame;

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
		return xdp_convert_zc_to_xdp_frame(xdp);

	/* Store info in top of packet */
	xdp_frame = xdp->data_hard_start;
	if (unlikely(xdp_update_frame_from_buff(xdp, xdp_frame) < 0))
		return NULL;

	/* rxq only valid until napi_schedule ends, convert to xdp_mem_info */
	xdp_frame->mem = xdp->rxq->mem;

	return xdp_frame;
}

void xdp_return_frame(struct xdp_frame *xdpf);
void xdp_return_frame_rx_napi(struct xdp_frame *xdpf);
void xdp_return_buff(struct xdp_buff *xdp);
void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq);
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
			   struct xdp_frame_bulk *bq);
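
/* Example: a minimal sketch of bulk-freeing frames on a TX completion
 * path, amortising the per-frame return cost. The tx_ring layout and
 * the done loop bound are hypothetical.
 *
 *	struct xdp_frame_bulk bq;
 *	int i;
 *
 *	xdp_frame_bulk_init(&bq);
 *	for (i = 0; i < done; i++)
 *		xdp_return_frame_bulk(tx_ring->xdpf[i], &bq);
 *	xdp_flush_frame_bulk(&bq);
 *
 * xdp_return_frame_bulk() flushes internally whenever the queue fills
 * up or the memory allocator changes; the final explicit flush
 * releases any remaining queued frames.
 */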
/* When sending an xdp_frame into the network stack, there is no
 * return-point callback that could release e.g. DMA-mapping
 * resources with page_pool. Thus, have an explicit function to
 * release frame resources.
 */
void __xdp_release_frame(void *data, struct xdp_mem_info *mem);
static inline void xdp_release_frame(struct xdp_frame *xdpf)
{
	struct xdp_mem_info *mem = &xdpf->mem;
	struct skb_shared_info *sinfo;
	int i;

	/* Currently only page_pool needs this */
	if (mem->type != MEM_TYPE_PAGE_POOL)
		return;

	if (likely(!xdp_frame_has_frags(xdpf)))
		goto out;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_release_frame(page_address(page), mem);
	}
out:
	__xdp_release_frame(xdpf->data, mem);
}

int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		     struct net_device *dev, u32 queue_index,
		     unsigned int napi_id);
void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq);
void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq);
bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq);
int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator);
void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq);
int xdp_reg_mem_model(struct xdp_mem_info *mem,
		      enum xdp_mem_type type, void *allocator);
void xdp_unreg_mem_model(struct xdp_mem_info *mem);

/* Drivers not supporting XDP metadata can use this helper, which
 * rejects any room expansion for metadata as a result.
 */
static __always_inline void
xdp_set_data_meta_invalid(struct xdp_buff *xdp)
{
	xdp->data_meta = xdp->data + 1;
}

static __always_inline bool
xdp_data_meta_unsupported(const struct xdp_buff *xdp)
{
	return unlikely(xdp->data_meta > xdp->data);
}

static inline bool xdp_metalen_invalid(unsigned long metalen)
{
	return (metalen & (sizeof(__u32) - 1)) || (metalen > 32);
}

struct xdp_attachment_info {
	struct bpf_prog *prog;
	u32 flags;
};

struct netdev_bpf;
void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf);

#define DEV_MAP_BULK_SIZE XDP_BULK_QUEUE_SIZE

#endif /* __LINUX_NET_XDP_H__ */