// SPDX-License-Identifier: GPL-2.0-only
/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/bug.h>
#include <net/page_pool/helpers.h>

#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>
#include <net/xdp_sock_drv.h>

#define REG_STATE_NEW		0x0
#define REG_STATE_REGISTERED	0x1
#define REG_STATE_UNREGISTERED	0x2
#define REG_STATE_UNUSED	0x3

static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
#define MEM_ID_MAX 0xFFFE
#define MEM_ID_MIN 1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;

static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
	const u32 *k = data;
	const u32 key = *k;

	BUILD_BUG_ON(sizeof_field(struct xdp_mem_allocator, mem.id)
		     != sizeof(u32));

	/* Use cyclic increasing ID as direct hash key */
	return key;
}

static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct xdp_mem_allocator *xa = ptr;
	u32 mem_id = *(u32 *)arg->key;

	return xa->mem.id != mem_id;
}

static const struct rhashtable_params mem_id_rht_params = {
	.nelem_hint = 64,
	.head_offset = offsetof(struct xdp_mem_allocator, node),
	.key_offset = offsetof(struct xdp_mem_allocator, mem.id),
	.key_len = sizeof_field(struct xdp_mem_allocator, mem.id),
	.max_size = MEM_ID_MAX,
	.min_size = 8,
	.automatic_shrinking = true,
	.hashfn = xdp_mem_id_hashfn,
	.obj_cmpfn = xdp_mem_id_cmp,
};

static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{
	struct xdp_mem_allocator *xa;

	xa = container_of(rcu, struct xdp_mem_allocator, rcu);

	/* Allow this ID to be reused */
	ida_simple_remove(&mem_id_pool, xa->mem.id);

	kfree(xa);
}

static void mem_xa_remove(struct xdp_mem_allocator *xa)
{
	trace_mem_disconnect(xa);

	if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
}

static void mem_allocator_disconnect(void *allocator)
{
	struct xdp_mem_allocator *xa;
	struct rhashtable_iter iter;

	mutex_lock(&mem_id_lock);

	rhashtable_walk_enter(mem_id_ht, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
			if (xa->allocator == allocator)
				mem_xa_remove(xa);
		}

		rhashtable_walk_stop(&iter);

	} while (xa == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);

	mutex_unlock(&mem_id_lock);
}

void xdp_unreg_mem_model(struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	int type = mem->type;
	int id = mem->id;

	/* Reset mem info to defaults */
	mem->id = 0;
	mem->type = 0;

	if (id == 0)
		return;

	if (type == MEM_TYPE_PAGE_POOL) {
		rcu_read_lock();
		xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
		page_pool_destroy(xa->page_pool);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(xdp_unreg_mem_model);

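/* Example (illustrative sketch, not part of this file): a driver that
 * manages its own XDP_TX queue memory can pair xdp_reg_mem_model()
 * (defined further below) with xdp_unreg_mem_model() directly, without
 * an xdp_rxq_info. The pp_params variable is hypothetical.
 *
 *	struct xdp_mem_info mem = {};
 *	struct page_pool *pool;
 *	int err;
 *
 *	pool = page_pool_create(&pp_params);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *
 *	err = xdp_reg_mem_model(&mem, MEM_TYPE_PAGE_POOL, pool);
 *	if (err) {
 *		page_pool_destroy(pool);
 *		return err;
 *	}
 *	...
 *	// Teardown: for MEM_TYPE_PAGE_POOL this also calls
 *	// page_pool_destroy(), see xdp_unreg_mem_model() above.
 *	xdp_unreg_mem_model(&mem);
 */
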
void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return;
	}

	xdp_unreg_mem_model(&xdp_rxq->mem);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
	/* Simplify driver cleanup code paths, allow unreg "unused" */
	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
		return;

	xdp_rxq_info_unreg_mem_model(xdp_rxq);

	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
	xdp_rxq->dev = NULL;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);

static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{
	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
}

/* Returns 0 on success, negative on failure */
int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		       struct net_device *dev, u32 queue_index,
		       unsigned int napi_id, u32 frag_size)
{
	if (!dev) {
		WARN(1, "Missing net_device from driver");
		return -ENODEV;
	}

	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
		WARN(1, "Driver promised not to register this");
		return -EINVAL;
	}

	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
		WARN(1, "Missing unregister, handled but fix driver");
		xdp_rxq_info_unreg(xdp_rxq);
	}

	/* State either UNREGISTERED or NEW */
	xdp_rxq_info_init(xdp_rxq);
	xdp_rxq->dev = dev;
	xdp_rxq->queue_index = queue_index;
	xdp_rxq->napi_id = napi_id;
	xdp_rxq->frag_size = frag_size;

	xdp_rxq->reg_state = REG_STATE_REGISTERED;
	return 0;
}
EXPORT_SYMBOL_GPL(__xdp_rxq_info_reg);

void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
	xdp_rxq->reg_state = REG_STATE_UNUSED;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);

bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);

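/* Example (illustrative sketch, not part of this file): the typical
 * per-RX-queue setup and teardown a driver performs around the helpers
 * above. The rxq/netdev/napi variables are hypothetical.
 *
 *	// RX queue setup, e.g. in ndo_open
 *	err = __xdp_rxq_info_reg(&rxq->xdp_rxq, netdev, rxq->index,
 *				 napi->napi_id, 0);
 *	if (err)
 *		return err;
 *
 *	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
 *					 MEM_TYPE_PAGE_POOL, rxq->page_pool);
 *	if (err) {
 *		xdp_rxq_info_unreg(&rxq->xdp_rxq);
 *		return err;
 *	}
 *
 *	// RX queue teardown, e.g. in ndo_stop
 *	xdp_rxq_info_unreg(&rxq->xdp_rxq);
 */
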
static int __mem_id_init_hash_table(void)
{
	struct rhashtable *rht;
	int ret;

	if (unlikely(mem_id_init))
		return 0;

	rht = kzalloc(sizeof(*rht), GFP_KERNEL);
	if (!rht)
		return -ENOMEM;

	ret = rhashtable_init(rht, &mem_id_rht_params);
	if (ret < 0) {
		kfree(rht);
		return ret;
	}
	mem_id_ht = rht;
	smp_mb(); /* mutex lock should provide enough pairing */
	mem_id_init = true;

	return 0;
}

/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
	int retries = 1;
	int id;

again:
	id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
	if (id < 0) {
		if (id == -ENOSPC) {
			/* Cyclic allocator, reset next id */
			if (retries--) {
				mem_id_next = MEM_ID_MIN;
				goto again;
			}
		}
		return id; /* errno */
	}
	mem_id_next = id + 1;

	return id;
}

static bool __is_supported_mem_type(enum xdp_mem_type type)
{
	if (type == MEM_TYPE_PAGE_POOL)
		return is_page_pool_compiled_in();

	if (type >= MEM_TYPE_MAX)
		return false;

	return true;
}

static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
						     enum xdp_mem_type type,
						     void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;
	gfp_t gfp = GFP_KERNEL;
	int id, errno, ret;
	void *ptr;

	if (!__is_supported_mem_type(type))
		return ERR_PTR(-EOPNOTSUPP);

	mem->type = type;

	if (!allocator) {
		if (type == MEM_TYPE_PAGE_POOL)
			return ERR_PTR(-EINVAL); /* Setup time check page_pool req */
		return NULL;
	}

	/* Delay init of rhashtable to save memory if feature isn't used */
	if (!mem_id_init) {
		mutex_lock(&mem_id_lock);
		ret = __mem_id_init_hash_table();
		mutex_unlock(&mem_id_lock);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
	if (!xdp_alloc)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&mem_id_lock);
	id = __mem_id_cyclic_get(gfp);
	if (id < 0) {
		errno = id;
		goto err;
	}
	mem->id = id;
	xdp_alloc->mem = *mem;
	xdp_alloc->allocator = allocator;

	/* Insert allocator into ID lookup table */
	ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
	if (IS_ERR(ptr)) {
		ida_simple_remove(&mem_id_pool, mem->id);
		mem->id = 0;
		errno = PTR_ERR(ptr);
		goto err;
	}

	if (type == MEM_TYPE_PAGE_POOL)
		page_pool_use_xdp_mem(allocator, mem_allocator_disconnect, mem);

	mutex_unlock(&mem_id_lock);

	return xdp_alloc;
err:
	mutex_unlock(&mem_id_lock);
	kfree(xdp_alloc);
	return ERR_PTR(errno);
}

int xdp_reg_mem_model(struct xdp_mem_info *mem,
		      enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;

	xdp_alloc = __xdp_reg_mem_model(mem, type, allocator);
	if (IS_ERR(xdp_alloc))
		return PTR_ERR(xdp_alloc);
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_reg_mem_model);

int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return -EFAULT;
	}

	xdp_alloc = __xdp_reg_mem_model(&xdp_rxq->mem, type, allocator);
	if (IS_ERR(xdp_alloc))
		return PTR_ERR(xdp_alloc);

	if (trace_mem_connect_enabled() && xdp_alloc)
		trace_mem_connect(xdp_alloc, xdp_rxq);
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);

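/* Example (illustrative sketch, not part of this file): for AF_XDP
 * zero-copy, drivers register MEM_TYPE_XSK_BUFF_POOL with a NULL
 * allocator (the xsk_buff_pool recycles buffers itself) and then point
 * the pool at the rxq. The rxq/xsk_pool variables are hypothetical.
 *
 *	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
 *					 MEM_TYPE_XSK_BUFF_POOL, NULL);
 *	if (err)
 *		return err;
 *	xsk_pool_set_rxq_info(xsk_pool, &rxq->xdp_rxq);
 */
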
/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full), it is possible to return the xdp_frame
 * while still leveraging this protection. The @napi_direct boolean
 * is used for those call sites. Thus, allowing for faster recycling
 * of xdp_frames/pages in those cases.
 */
void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
		  struct xdp_buff *xdp)
{
	struct page *page;

	switch (mem->type) {
	case MEM_TYPE_PAGE_POOL:
		page = virt_to_head_page(data);
		if (napi_direct && xdp_return_frame_no_direct())
			napi_direct = false;
		/* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE)
		 * as mem->type knows this is a page_pool page
		 */
		page_pool_put_full_page(page->pp, page, napi_direct);
		break;
	case MEM_TYPE_PAGE_SHARED:
		page_frag_free(data);
		break;
	case MEM_TYPE_PAGE_ORDER0:
		page = virt_to_page(data); /* Assumes order0 page */
		put_page(page);
		break;
	case MEM_TYPE_XSK_BUFF_POOL:
		/* NB! Only valid from an xdp_buff! */
		xsk_buff_free(xdp);
		break;
	default:
		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
		WARN(1, "Incorrect XDP memory type (%d) usage", mem->type);
		break;
	}
}

void xdp_return_frame(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;
	int i;

	if (likely(!xdp_frame_has_frags(xdpf)))
		goto out;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_return(page_address(page), &xdpf->mem, false, NULL);
	}
out:
	__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);

void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
	struct skb_shared_info *sinfo;
	int i;

	if (likely(!xdp_frame_has_frags(xdpf)))
		goto out;

	sinfo = xdp_get_shared_info_from_frame(xdpf);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_return(page_address(page), &xdpf->mem, true, NULL);
	}
out:
	__xdp_return(xdpf->data, &xdpf->mem, true, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);

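/* Example (illustrative sketch, not part of this file): choosing a
 * return helper by calling context. The surrounding driver code is
 * hypothetical.
 *
 *	// Context-agnostic default, e.g. freeing frames that were
 *	// queued for transmission but could not be sent:
 *	xdp_return_frame(xdpf);
 *
 *	// When the caller is still running under the RX NAPI protection
 *	// described above (see @napi_direct), the faster variant applies:
 *	xdp_return_frame_rx_napi(xdpf);
 */
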
/* XDP bulk APIs introduce a defer/flush mechanism to return
 * pages belonging to the same xdp_mem_allocator object
 * (identified via the mem.id field) in bulk to optimize
 * I-cache and D-cache.
 * The bulk queue size is set to 16 to be aligned to how
 * XDP_REDIRECT bulking works. The bulk is flushed when
 * it is full or when mem.id changes.
 * xdp_frame_bulk is usually stored/allocated on the function
 * call-stack to avoid locking penalties.
 */
void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq)
{
	struct xdp_mem_allocator *xa = bq->xa;

	if (unlikely(!xa || !bq->count))
		return;

	page_pool_put_page_bulk(xa->page_pool, bq->q, bq->count);
	/* bq->xa is not cleared to save lookup, if mem.id same in next bulk */
	bq->count = 0;
}
EXPORT_SYMBOL_GPL(xdp_flush_frame_bulk);

/* Must be called with rcu_read_lock held */
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
			   struct xdp_frame_bulk *bq)
{
	struct xdp_mem_info *mem = &xdpf->mem;
	struct xdp_mem_allocator *xa;

	if (mem->type != MEM_TYPE_PAGE_POOL) {
		xdp_return_frame(xdpf);
		return;
	}

	xa = bq->xa;
	if (unlikely(!xa)) {
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		bq->count = 0;
		bq->xa = xa;
	}

	if (bq->count == XDP_BULK_QUEUE_SIZE)
		xdp_flush_frame_bulk(bq);

	if (unlikely(mem->id != xa->mem.id)) {
		xdp_flush_frame_bulk(bq);
		bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	}

	if (unlikely(xdp_frame_has_frags(xdpf))) {
		struct skb_shared_info *sinfo;
		int i;

		sinfo = xdp_get_shared_info_from_frame(xdpf);
		for (i = 0; i < sinfo->nr_frags; i++) {
			skb_frag_t *frag = &sinfo->frags[i];

			bq->q[bq->count++] = skb_frag_address(frag);
			if (bq->count == XDP_BULK_QUEUE_SIZE)
				xdp_flush_frame_bulk(bq);
		}
	}
	bq->q[bq->count++] = xdpf->data;
}
EXPORT_SYMBOL_GPL(xdp_return_frame_bulk);

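/* Example (illustrative sketch, not part of this file): the defer/flush
 * pattern a TX-completion path would use with the bulk API. The loop
 * over completed descriptors (for_each_completed_tx_desc) is
 * hypothetical.
 *
 *	struct xdp_frame_bulk bq;
 *
 *	xdp_frame_bulk_init(&bq);
 *	rcu_read_lock();	// required by xdp_return_frame_bulk()
 *
 *	for_each_completed_tx_desc(ring, desc)
 *		xdp_return_frame_bulk(desc->xdpf, &bq);
 *
 *	xdp_flush_frame_bulk(&bq);	// return any still-queued pages
 *	rcu_read_unlock();
 */
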
void xdp_return_buff(struct xdp_buff *xdp)
{
	struct skb_shared_info *sinfo;
	int i;

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	sinfo = xdp_get_shared_info_from_buff(xdp);
	for (i = 0; i < sinfo->nr_frags; i++) {
		struct page *page = skb_frag_page(&sinfo->frags[i]);

		__xdp_return(page_address(page), &xdp->rxq->mem, true, xdp);
	}
out:
	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
}
EXPORT_SYMBOL_GPL(xdp_return_buff);

void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf)
{
	if (info->prog)
		bpf_prog_put(info->prog);
	info->prog = bpf->prog;
	info->flags = bpf->flags;
}
EXPORT_SYMBOL_GPL(xdp_attachment_setup);

struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
{
	unsigned int metasize, totsize;
	void *addr, *data_to_copy;
	struct xdp_frame *xdpf;
	struct page *page;

	/* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
	metasize = xdp_data_meta_unsupported(xdp) ? 0 :
		   xdp->data - xdp->data_meta;
	totsize = xdp->data_end - xdp->data + metasize;

	if (sizeof(*xdpf) + totsize > PAGE_SIZE)
		return NULL;

	page = dev_alloc_page();
	if (!page)
		return NULL;

	addr = page_to_virt(page);
	xdpf = addr;
	memset(xdpf, 0, sizeof(*xdpf));

	addr += sizeof(*xdpf);
	data_to_copy = metasize ? xdp->data_meta : xdp->data;
	memcpy(addr, data_to_copy, totsize);

	xdpf->data = addr + metasize;
	xdpf->len = totsize - metasize;
	xdpf->headroom = 0;
	xdpf->metasize = metasize;
	xdpf->frame_sz = PAGE_SIZE;
	xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;

	xsk_buff_free(xdp);
	return xdpf;
}
EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);

/* Used by XDP_WARN macro, to avoid inlining WARN() in fast-path */
void xdp_warn(const char *msg, const char *func, const int line)
{
	WARN(1, "XDP_WARN: %s(line:%d): %s\n", func, line, msg);
}
EXPORT_SYMBOL_GPL(xdp_warn);

int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
{
	n_skb = kmem_cache_alloc_bulk(skbuff_cache, gfp, n_skb, skbs);
	if (unlikely(!n_skb))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(xdp_alloc_skb_bulk);

struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					   struct sk_buff *skb,
					   struct net_device *dev)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
	unsigned int headroom, frame_size;
	void *hard_start;
	u8 nr_frags;

	/* xdp frags frame */
	if (unlikely(xdp_frame_has_frags(xdpf)))
		nr_frags = sinfo->nr_frags;

	/* Part of headroom was reserved to xdpf */
	headroom = sizeof(*xdpf) + xdpf->headroom;

	/* Memory size backing xdp_frame data already has reserved
	 * room for build_skb to place skb_shared_info in tailroom.
	 */
	frame_size = xdpf->frame_sz;

	hard_start = xdpf->data - headroom;
	skb = build_skb_around(skb, hard_start, frame_size);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, headroom);
	__skb_put(skb, xdpf->len);
	if (xdpf->metasize)
		skb_metadata_set(skb, xdpf->metasize);

	if (unlikely(xdp_frame_has_frags(xdpf)))
		xdp_update_skb_shared_info(skb, nr_frags,
					   sinfo->xdp_frags_size,
					   nr_frags * xdpf->frame_sz,
					   xdp_frame_is_frag_pfmemalloc(xdpf));

	/* Essential SKB info: protocol and skb->dev */
	skb->protocol = eth_type_trans(skb, dev);

	/* Optional SKB info, currently missing:
	 * - HW checksum info (skb->ip_summed)
	 * - HW RX hash (skb_set_hash)
	 * - RX ring dev queue index (skb_record_rx_queue)
	 */

	if (xdpf->mem.type == MEM_TYPE_PAGE_POOL)
		skb_mark_for_recycle(skb);

	/* Allow SKB to reuse area used by xdp_frame */
	xdp_scrub_frame(xdpf);

	return skb;
}
EXPORT_SYMBOL_GPL(__xdp_build_skb_from_frame);

struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
					 struct net_device *dev)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));

	return __xdp_build_skb_from_frame(xdpf, skb, dev);
}
EXPORT_SYMBOL_GPL(xdp_build_skb_from_frame);

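/* Example (illustrative sketch, not part of this file): converting a
 * redirected xdp_frame into an sk_buff and passing it up the stack.
 * The napi pointer and drop label are hypothetical.
 *
 *	struct sk_buff *skb;
 *
 *	skb = xdp_build_skb_from_frame(xdpf, dev);
 *	if (!skb) {
 *		xdp_return_frame(xdpf);
 *		goto drop;
 *	}
 *	napi_gro_receive(napi, skb);
 */
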
struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf)
{
	unsigned int headroom, totalsize;
	struct xdp_frame *nxdpf;
	struct page *page;
	void *addr;

	headroom = xdpf->headroom + sizeof(*xdpf);
	totalsize = headroom + xdpf->len;

	if (unlikely(totalsize > PAGE_SIZE))
		return NULL;
	page = dev_alloc_page();
	if (!page)
		return NULL;
	addr = page_to_virt(page);

	memcpy(addr, xdpf, totalsize);

	nxdpf = addr;
	nxdpf->data = addr + headroom;
	nxdpf->frame_sz = PAGE_SIZE;
	nxdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
	nxdpf->mem.id = 0;

	return nxdpf;
}

__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
		  "Global functions as their definitions will be in vmlinux BTF");

/**
 * bpf_xdp_metadata_rx_timestamp - Read XDP frame RX timestamp.
 * @ctx: XDP context pointer.
 * @timestamp: Return value pointer.
 *
 * Return:
 * * Returns 0 on success or ``-errno`` on error.
 * * ``-EOPNOTSUPP`` : means device driver does not implement kfunc
 * * ``-ENODATA`` : means no RX-timestamp available for this frame
 */
__bpf_kfunc int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
{
	return -EOPNOTSUPP;
}

/**
 * bpf_xdp_metadata_rx_hash - Read XDP frame RX hash.
 * @ctx: XDP context pointer.
 * @hash: Return value pointer.
 * @rss_type: Return value pointer for RSS type.
 *
 * The RSS hash type (@rss_type) specifies what portion of packet headers NIC
 * hardware used when calculating RSS hash value. The RSS type can be decoded
 * via &enum xdp_rss_hash_type either matching on individual L3/L4 bits
 * ``XDP_RSS_L*`` or by combined traditional *RSS Hashing Types*
 * ``XDP_RSS_TYPE_L*``.
 *
 * Return:
 * * Returns 0 on success or ``-errno`` on error.
 * * ``-EOPNOTSUPP`` : means device driver doesn't implement kfunc
 * * ``-ENODATA`` : means no RX-hash available for this frame
 */
__bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash,
					 enum xdp_rss_hash_type *rss_type)
{
	return -EOPNOTSUPP;
}

__diag_pop();

BTF_SET8_START(xdp_metadata_kfunc_ids)
#define XDP_METADATA_KFUNC(_, name) BTF_ID_FLAGS(func, name, KF_TRUSTED_ARGS)
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC
BTF_SET8_END(xdp_metadata_kfunc_ids)

static const struct btf_kfunc_id_set xdp_metadata_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &xdp_metadata_kfunc_ids,
};

BTF_ID_LIST(xdp_metadata_kfunc_ids_unsorted)
#define XDP_METADATA_KFUNC(name, str) BTF_ID(func, str)
XDP_METADATA_KFUNC_xxx
#undef XDP_METADATA_KFUNC

u32 bpf_xdp_metadata_kfunc_id(int id)
{
	/* xdp_metadata_kfunc_ids is sorted and can't be used */
	return xdp_metadata_kfunc_ids_unsorted[id];
}

bool bpf_dev_bound_kfunc_id(u32 btf_id)
{
	return btf_id_set8_contains(&xdp_metadata_kfunc_ids, btf_id);
}

static int __init xdp_metadata_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &xdp_metadata_kfunc_set);
}
late_initcall(xdp_metadata_init);

void xdp_set_features_flag(struct net_device *dev, xdp_features_t val)
{
	val &= NETDEV_XDP_ACT_MASK;
	if (dev->xdp_features == val)
		return;

	dev->xdp_features = val;

	if (dev->reg_state == NETREG_REGISTERED)
		call_netdevice_notifiers(NETDEV_XDP_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL_GPL(xdp_set_features_flag);

void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg)
{
	xdp_features_t val = (dev->xdp_features | NETDEV_XDP_ACT_NDO_XMIT);

	if (support_sg)
		val |= NETDEV_XDP_ACT_NDO_XMIT_SG;
	xdp_set_features_flag(dev, val);
}
EXPORT_SYMBOL_GPL(xdp_features_set_redirect_target);

void xdp_features_clear_redirect_target(struct net_device *dev)
{
	xdp_features_t val = dev->xdp_features;

	val &= ~(NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_NDO_XMIT_SG);
	xdp_set_features_flag(dev, val);
}
EXPORT_SYMBOL_GPL(xdp_features_clear_redirect_target);
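
/* Example (illustrative sketch, not part of this file): how a driver
 * advertises its XDP capabilities with the helpers above. The feature
 * set shown is hypothetical.
 *
 *	// at probe / setup time
 *	xdp_set_features_flag(netdev, NETDEV_XDP_ACT_BASIC |
 *				      NETDEV_XDP_ACT_REDIRECT);
 *
 *	// once ndo_xdp_xmit is operational (no multi-buffer support here)
 *	xdp_features_set_redirect_target(netdev, false);
 *
 *	// when XDP_REDIRECT towards this device is no longer supported
 *	xdp_features_clear_redirect_target(netdev);
 */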