// SPDX-License-Identifier: GPL-2.0

#include <net/xsk_buff_pool.h>
#include <net/xdp_sock.h>
#include <net/xdp_sock_drv.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
	unsigned long flags;

	if (!xs->tx)
		return;

	spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
	list_add_rcu(&xs->tx_list, &pool->xsk_tx_list);
	spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
}

void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
	unsigned long flags;

	if (!xs->tx)
		return;

	spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
	list_del_rcu(&xs->tx_list);
	spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
}

void xp_destroy(struct xsk_buff_pool *pool)
{
	if (!pool)
		return;

	kvfree(pool->heads);
	kvfree(pool);
}

struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
						struct xdp_umem *umem)
{
	bool unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
	struct xsk_buff_pool *pool;
	struct xdp_buff_xsk *xskb;
	u32 i, entries;

	entries = unaligned ? umem->chunks : 0;
	pool = kvzalloc(struct_size(pool, free_heads, entries), GFP_KERNEL);
	if (!pool)
		goto out;

	pool->heads = kvcalloc(umem->chunks, sizeof(*pool->heads), GFP_KERNEL);
	if (!pool->heads)
		goto out;

	pool->chunk_mask = ~((u64)umem->chunk_size - 1);
	pool->addrs_cnt = umem->size;
	pool->heads_cnt = umem->chunks;
	pool->free_heads_cnt = umem->chunks;
	pool->headroom = umem->headroom;
	pool->chunk_size = umem->chunk_size;
	pool->chunk_shift = ffs(umem->chunk_size) - 1;
	pool->unaligned = unaligned;
	pool->frame_len = umem->chunk_size - umem->headroom -
			  XDP_PACKET_HEADROOM;
	pool->umem = umem;
	pool->addrs = umem->addrs;
	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->xsk_tx_list);
	spin_lock_init(&pool->xsk_tx_list_lock);
	spin_lock_init(&pool->cq_lock);
	refcount_set(&pool->users, 1);

	pool->fq = xs->fq_tmp;
	pool->cq = xs->cq_tmp;

	for (i = 0; i < pool->free_heads_cnt; i++) {
		xskb = &pool->heads[i];
		xskb->pool = pool;
		xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
		if (pool->unaligned)
			pool->free_heads[i] = xskb;
		else
			xp_init_xskb_addr(xskb, pool, i * pool->chunk_size);
	}

	return pool;

out:
	xp_destroy(pool);
	return NULL;
}

void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq)
{
	u32 i;

	for (i = 0; i < pool->heads_cnt; i++)
		pool->heads[i].xdp.rxq = rxq;
}
EXPORT_SYMBOL(xp_set_rxq_info);

static void xp_disable_drv_zc(struct xsk_buff_pool *pool)
{
	struct netdev_bpf bpf;
	int err;

	ASSERT_RTNL();

	if (pool->umem->zc) {
		bpf.command = XDP_SETUP_XSK_POOL;
		bpf.xsk.pool = NULL;
		bpf.xsk.queue_id = pool->queue_id;

		err = pool->netdev->netdev_ops->ndo_bpf(pool->netdev, &bpf);

		if (err)
			WARN(1, "Failed to disable zero-copy!\n");
	}
}
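/* Bind the buffer pool to a specific queue of a netdev. Zero-copy is
 * attempted unless XDP_COPY is forced; if the driver cannot do zero-copy
 * and XDP_ZEROCOPY was not forced, the pool falls back to copy mode.
 */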
int xp_assign_dev(struct xsk_buff_pool *pool,
		  struct net_device *netdev, u16 queue_id, u16 flags)
{
	bool force_zc, force_copy;
	struct netdev_bpf bpf;
	int err = 0;

	ASSERT_RTNL();

	force_zc = flags & XDP_ZEROCOPY;
	force_copy = flags & XDP_COPY;

	if (force_zc && force_copy)
		return -EINVAL;

	if (xsk_get_pool_from_qid(netdev, queue_id))
		return -EBUSY;

	pool->netdev = netdev;
	pool->queue_id = queue_id;
	err = xsk_reg_pool_at_qid(netdev, pool, queue_id);
	if (err)
		return err;

	if (flags & XDP_USE_NEED_WAKEUP)
		pool->uses_need_wakeup = true;
	/* Tx needs to be explicitly woken up the first time. Also
	 * for supporting drivers that do not implement this
	 * feature. They will always have to call sendto() or poll().
	 */
	pool->cached_need_wakeup = XDP_WAKEUP_TX;

	dev_hold(netdev);

	if (force_copy)
		/* For copy-mode, we are done. */
		return 0;

	if (!netdev->netdev_ops->ndo_bpf ||
	    !netdev->netdev_ops->ndo_xsk_wakeup) {
		err = -EOPNOTSUPP;
		goto err_unreg_pool;
	}

	bpf.command = XDP_SETUP_XSK_POOL;
	bpf.xsk.pool = pool;
	bpf.xsk.queue_id = queue_id;

	err = netdev->netdev_ops->ndo_bpf(netdev, &bpf);
	if (err)
		goto err_unreg_pool;

	if (!pool->dma_pages) {
		WARN(1, "Driver did not DMA map zero-copy buffers");
		err = -EINVAL;
		goto err_unreg_xsk;
	}
	pool->umem->zc = true;
	return 0;

err_unreg_xsk:
	xp_disable_drv_zc(pool);
err_unreg_pool:
	if (!force_zc)
		err = 0; /* fallback to copy mode */
	if (err) {
		xsk_clear_pool_at_qid(netdev, queue_id);
		dev_put(netdev);
	}
	return err;
}

int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
			 struct net_device *dev, u16 queue_id)
{
	u16 flags;

	/* One fill and completion ring required for each queue id. */
	if (!pool->fq || !pool->cq)
		return -EINVAL;

	flags = umem->zc ? XDP_ZEROCOPY : XDP_COPY;
	if (pool->uses_need_wakeup)
		flags |= XDP_USE_NEED_WAKEUP;

	return xp_assign_dev(pool, dev, queue_id, flags);
}

void xp_clear_dev(struct xsk_buff_pool *pool)
{
	if (!pool->netdev)
		return;

	xp_disable_drv_zc(pool);
	xsk_clear_pool_at_qid(pool->netdev, pool->queue_id);
	dev_put(pool->netdev);
	pool->netdev = NULL;
}

static void xp_release_deferred(struct work_struct *work)
{
	struct xsk_buff_pool *pool = container_of(work, struct xsk_buff_pool,
						  work);

	rtnl_lock();
	xp_clear_dev(pool);
	rtnl_unlock();

	if (pool->fq) {
		xskq_destroy(pool->fq);
		pool->fq = NULL;
	}

	if (pool->cq) {
		xskq_destroy(pool->cq);
		pool->cq = NULL;
	}

	xdp_put_umem(pool->umem, false);
	xp_destroy(pool);
}

void xp_get_pool(struct xsk_buff_pool *pool)
{
	refcount_inc(&pool->users);
}

bool xp_put_pool(struct xsk_buff_pool *pool)
{
	if (!pool)
		return false;

	if (refcount_dec_and_test(&pool->users)) {
		INIT_WORK(&pool->work, xp_release_deferred);
		schedule_work(&pool->work);
		return true;
	}

	return false;
}

static struct xsk_dma_map *xp_find_dma_map(struct xsk_buff_pool *pool)
{
	struct xsk_dma_map *dma_map;

	list_for_each_entry(dma_map, &pool->umem->xsk_dma_list, list) {
		if (dma_map->netdev == pool->netdev)
			return dma_map;
	}

	return NULL;
}
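/* DMA mappings are kept per umem and netdev in struct xsk_dma_map so that
 * pools sharing the same umem on the same device reuse one mapping instead
 * of mapping the pages again. The mapping is refcounted and torn down when
 * the last user unmaps.
 */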
static struct xsk_dma_map *xp_create_dma_map(struct device *dev, struct net_device *netdev,
					     u32 nr_pages, struct xdp_umem *umem)
{
	struct xsk_dma_map *dma_map;

	dma_map = kzalloc(sizeof(*dma_map), GFP_KERNEL);
	if (!dma_map)
		return NULL;

	dma_map->dma_pages = kvcalloc(nr_pages, sizeof(*dma_map->dma_pages), GFP_KERNEL);
	if (!dma_map->dma_pages) {
		kfree(dma_map);
		return NULL;
	}

	dma_map->netdev = netdev;
	dma_map->dev = dev;
	dma_map->dma_need_sync = false;
	dma_map->dma_pages_cnt = nr_pages;
	refcount_set(&dma_map->users, 1);
	list_add(&dma_map->list, &umem->xsk_dma_list);
	return dma_map;
}

static void xp_destroy_dma_map(struct xsk_dma_map *dma_map)
{
	list_del(&dma_map->list);
	kvfree(dma_map->dma_pages);
	kfree(dma_map);
}

static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs)
{
	dma_addr_t *dma;
	u32 i;

	for (i = 0; i < dma_map->dma_pages_cnt; i++) {
		dma = &dma_map->dma_pages[i];
		if (*dma) {
			dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
					     DMA_BIDIRECTIONAL, attrs);
			*dma = 0;
		}
	}

	xp_destroy_dma_map(dma_map);
}

void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
{
	struct xsk_dma_map *dma_map;

	if (pool->dma_pages_cnt == 0)
		return;

	dma_map = xp_find_dma_map(pool);
	if (!dma_map) {
		WARN(1, "Could not find dma_map for device");
		return;
	}

	if (!refcount_dec_and_test(&dma_map->users))
		return;

	__xp_dma_unmap(dma_map, attrs);
	kvfree(pool->dma_pages);
	pool->dma_pages_cnt = 0;
	pool->dev = NULL;
}
EXPORT_SYMBOL(xp_dma_unmap);

static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map)
{
	u32 i;

	for (i = 0; i < dma_map->dma_pages_cnt - 1; i++) {
		if (dma_map->dma_pages[i] + PAGE_SIZE == dma_map->dma_pages[i + 1])
			dma_map->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
		else
			dma_map->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK;
	}
}

static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map)
{
	pool->dma_pages = kvcalloc(dma_map->dma_pages_cnt, sizeof(*pool->dma_pages), GFP_KERNEL);
	if (!pool->dma_pages)
		return -ENOMEM;

	pool->dev = dma_map->dev;
	pool->dma_pages_cnt = dma_map->dma_pages_cnt;
	pool->dma_need_sync = dma_map->dma_need_sync;
	memcpy(pool->dma_pages, dma_map->dma_pages,
	       pool->dma_pages_cnt * sizeof(*pool->dma_pages));

	return 0;
}

int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages)
{
	struct xsk_dma_map *dma_map;
	dma_addr_t dma;
	int err;
	u32 i;

	dma_map = xp_find_dma_map(pool);
	if (dma_map) {
		err = xp_init_dma_info(pool, dma_map);
		if (err)
			return err;

		refcount_inc(&dma_map->users);
		return 0;
	}

	dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem);
	if (!dma_map)
		return -ENOMEM;

	for (i = 0; i < dma_map->dma_pages_cnt; i++) {
		dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL, attrs);
		if (dma_mapping_error(dev, dma)) {
			__xp_dma_unmap(dma_map, attrs);
			return -ENOMEM;
		}
		if (dma_need_sync(dev, dma))
			dma_map->dma_need_sync = true;
		dma_map->dma_pages[i] = dma;
	}

	if (pool->unaligned)
		xp_check_dma_contiguity(dma_map);
	else
		for (i = 0; i < pool->heads_cnt; i++) {
			struct xdp_buff_xsk *xskb = &pool->heads[i];

			xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, xskb->orig_addr);
		}

	err = xp_init_dma_info(pool, dma_map);
	if (err) {
		__xp_dma_unmap(dma_map, attrs);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(xp_dma_map);
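/* Addresses read from the fill ring are untrusted user input. The helpers
 * below validate them against the umem boundaries and, in unaligned mode,
 * reject chunks that would cross non-contiguous DMA pages, before a buffer
 * is handed out.
 */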
static bool xp_addr_crosses_non_contig_pg(struct xsk_buff_pool *pool,
					  u64 addr)
{
	return xp_desc_crosses_non_contig_pg(pool, addr, pool->chunk_size);
}

static bool xp_check_unaligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_unaligned_extract_addr(*addr);
	if (*addr >= pool->addrs_cnt ||
	    *addr + pool->chunk_size > pool->addrs_cnt ||
	    xp_addr_crosses_non_contig_pg(pool, *addr))
		return false;
	return true;
}

static bool xp_check_aligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_aligned_extract_addr(pool, *addr);
	return *addr < pool->addrs_cnt;
}

static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;
	u64 addr;
	bool ok;

	if (pool->free_heads_cnt == 0)
		return NULL;

	for (;;) {
		if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) {
			pool->fq->queue_empty_descs++;
			return NULL;
		}

		ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
		     xp_check_aligned(pool, &addr);
		if (!ok) {
			pool->fq->invalid_descs++;
			xskq_cons_release(pool->fq);
			continue;
		}
		break;
	}

	if (pool->unaligned) {
		xskb = pool->free_heads[--pool->free_heads_cnt];
		xp_init_xskb_addr(xskb, pool, addr);
		if (pool->dma_pages_cnt)
			xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
	} else {
		xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
	}

	xskq_cons_release(pool->fq);
	return xskb;
}

struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;

	if (!pool->free_list_cnt) {
		xskb = __xp_alloc(pool);
		if (!xskb)
			return NULL;
	} else {
		pool->free_list_cnt--;
		xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk,
					free_list_node);
		list_del_init(&xskb->free_list_node);
	}

	xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
	xskb->xdp.data_meta = xskb->xdp.data;

	if (pool->dma_need_sync) {
		dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
						 pool->frame_len,
						 DMA_BIDIRECTIONAL);
	}
	return &xskb->xdp;
}
EXPORT_SYMBOL(xp_alloc);
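/* Batched allocation: xp_alloc_batch() first recycles buffers from the
 * pool's free list, then pulls the remainder straight from the fill ring.
 * Neither helper below performs the per-buffer DMA sync, so the batched
 * path is only used when dma_need_sync is false; otherwise we fall back
 * to the single-buffer xp_alloc() slow path.
 */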
static u32 xp_alloc_new_from_fq(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	u32 i, cached_cons, nb_entries;

	if (max > pool->free_heads_cnt)
		max = pool->free_heads_cnt;
	max = xskq_cons_nb_entries(pool->fq, max);

	cached_cons = pool->fq->cached_cons;
	nb_entries = max;
	i = max;
	while (i--) {
		struct xdp_buff_xsk *xskb;
		u64 addr;
		bool ok;

		__xskq_cons_read_addr_unchecked(pool->fq, cached_cons++, &addr);

		ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
		     xp_check_aligned(pool, &addr);
		if (unlikely(!ok)) {
			pool->fq->invalid_descs++;
			nb_entries--;
			continue;
		}

		if (pool->unaligned) {
			xskb = pool->free_heads[--pool->free_heads_cnt];
			xp_init_xskb_addr(xskb, pool, addr);
			if (pool->dma_pages_cnt)
				xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
		} else {
			xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
		}

		*xdp = &xskb->xdp;
		xdp++;
	}

	xskq_cons_release_n(pool->fq, max);
	return nb_entries;
}

static u32 xp_alloc_reused(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 nb_entries)
{
	struct xdp_buff_xsk *xskb;
	u32 i;

	nb_entries = min_t(u32, nb_entries, pool->free_list_cnt);

	i = nb_entries;
	while (i--) {
		xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk, free_list_node);
		list_del_init(&xskb->free_list_node);

		*xdp = &xskb->xdp;
		xdp++;
	}
	pool->free_list_cnt -= nb_entries;

	return nb_entries;
}

u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	u32 nb_entries1 = 0, nb_entries2;

	if (unlikely(pool->dma_need_sync)) {
		/* Slow path */
		*xdp = xp_alloc(pool);
		return !!*xdp;
	}

	if (unlikely(pool->free_list_cnt)) {
		nb_entries1 = xp_alloc_reused(pool, xdp, max);
		if (nb_entries1 == max)
			return nb_entries1;

		max -= nb_entries1;
		xdp += nb_entries1;
	}

	nb_entries2 = xp_alloc_new_from_fq(pool, xdp, max);
	if (!nb_entries2)
		pool->fq->queue_empty_descs++;

	return nb_entries1 + nb_entries2;
}
EXPORT_SYMBOL(xp_alloc_batch);

bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	if (pool->free_list_cnt >= count)
		return true;
	return xskq_cons_has_entries(pool->fq, count - pool->free_list_cnt);
}
EXPORT_SYMBOL(xp_can_alloc);

void xp_free(struct xdp_buff_xsk *xskb)
{
	if (!list_empty(&xskb->free_list_node))
		return;

	xskb->pool->free_list_cnt++;
	list_add(&xskb->free_list_node, &xskb->pool->free_list);
}
EXPORT_SYMBOL(xp_free);

void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
	return pool->addrs + addr;
}
EXPORT_SYMBOL(xp_raw_get_data);

dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr)
{
	addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
	return (pool->dma_pages[addr >> PAGE_SHIFT] &
		~XSK_NEXT_PG_CONTIG_MASK) +
		(addr & ~PAGE_MASK);
}
EXPORT_SYMBOL(xp_raw_get_dma);

void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb)
{
	dma_sync_single_range_for_cpu(xskb->pool->dev, xskb->dma, 0,
				      xskb->pool->frame_len, DMA_BIDIRECTIONAL);
}
EXPORT_SYMBOL(xp_dma_sync_for_cpu_slow);

void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
				 size_t size)
{
	dma_sync_single_range_for_device(pool->dev, dma, 0,
					 size, DMA_BIDIRECTIONAL);
}
EXPORT_SYMBOL(xp_dma_sync_for_device_slow);