// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "ixgbe.h"
#include "ixgbe_txrx_common.h"

struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *ring)
{
	bool xdp_on = READ_ONCE(adapter->xdp_prog);
	int qid = ring->ring_idx;

	/* check the qid bound before indexing into the umem array */
	if (!xdp_on || !adapter->xsk_umems || qid >= adapter->num_xsk_umems ||
	    !adapter->xsk_umems[qid])
		return NULL;

	return adapter->xsk_umems[qid];
}

static int ixgbe_alloc_xsk_umems(struct ixgbe_adapter *adapter)
{
	if (adapter->xsk_umems)
		return 0;

	adapter->num_xsk_umems_used = 0;
	adapter->num_xsk_umems = adapter->num_rx_queues;
	adapter->xsk_umems = kcalloc(adapter->num_xsk_umems,
				     sizeof(*adapter->xsk_umems),
				     GFP_KERNEL);
	if (!adapter->xsk_umems) {
		adapter->num_xsk_umems = 0;
		return -ENOMEM;
	}

	return 0;
}

static int ixgbe_add_xsk_umem(struct ixgbe_adapter *adapter,
			      struct xdp_umem *umem,
			      u16 qid)
{
	int err;

	err = ixgbe_alloc_xsk_umems(adapter);
	if (err)
		return err;

	adapter->xsk_umems[qid] = umem;
	adapter->num_xsk_umems_used++;

	return 0;
}

static void ixgbe_remove_xsk_umem(struct ixgbe_adapter *adapter, u16 qid)
{
	adapter->xsk_umems[qid] = NULL;
	adapter->num_xsk_umems_used--;

	/* free the umem array once the last umem has been removed */
	if (adapter->num_xsk_umems_used == 0) {
		kfree(adapter->xsk_umems);
		adapter->xsk_umems = NULL;
		adapter->num_xsk_umems = 0;
	}
}

static int ixgbe_xsk_umem_dma_map(struct ixgbe_adapter *adapter,
				  struct xdp_umem *umem)
{
	struct device *dev = &adapter->pdev->dev;
	unsigned int i, j;
	dma_addr_t dma;

	for (i = 0; i < umem->npgs; i++) {
		dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
		if (dma_mapping_error(dev, dma))
			goto out_unmap;

		umem->pages[i].dma = dma;
	}

	return 0;

out_unmap:
	/* unwind only the pages that were successfully mapped */
	for (j = 0; j < i; j++) {
		dma_unmap_page_attrs(dev, umem->pages[j].dma, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
		umem->pages[j].dma = 0;
	}

	return -1;
}

static void ixgbe_xsk_umem_dma_unmap(struct ixgbe_adapter *adapter,
				     struct xdp_umem *umem)
{
	struct device *dev = &adapter->pdev->dev;
	unsigned int i;

	for (i = 0; i < umem->npgs; i++) {
		dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);

		umem->pages[i].dma = 0;
	}
}

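/**
 * ixgbe_xsk_umem_enable - associate an AF_XDP UMEM with a Rx ring
 * @adapter: board private structure
 * @umem: UMEM handed down from the XDP socket
 * @qid: Rx ring index to attach the UMEM to
 *
 * Returns 0 on success, negative errno on failure.
 **/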
static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
				 struct xdp_umem *umem,
				 u16 qid)
{
	struct xdp_umem_fq_reuse *reuseq;
	bool if_running;
	int err;

	if (qid >= adapter->num_rx_queues)
		return -EINVAL;

	if (adapter->xsk_umems) {
		if (qid >= adapter->num_xsk_umems)
			return -EINVAL;
		if (adapter->xsk_umems[qid])
			return -EBUSY;
	}

	reuseq = xsk_reuseq_prepare(adapter->rx_ring[0]->count);
	if (!reuseq)
		return -ENOMEM;

	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));

	err = ixgbe_xsk_umem_dma_map(adapter, umem);
	if (err)
		return err;

	if_running = netif_running(adapter->netdev) &&
		     READ_ONCE(adapter->xdp_prog);

	if (if_running)
		ixgbe_txrx_ring_disable(adapter, qid);

	err = ixgbe_add_xsk_umem(adapter, umem, qid);
	if (err)
		return err;

	if (if_running) {
		ixgbe_txrx_ring_enable(adapter, qid);

		/* Kick start the NAPI context so that receiving will start */
		err = ixgbe_xsk_async_xmit(adapter->netdev, qid);
		if (err)
			return err;
	}

	return 0;
}

static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
{
	bool if_running;

	if (!adapter->xsk_umems || qid >= adapter->num_xsk_umems ||
	    !adapter->xsk_umems[qid])
		return -EINVAL;

	if_running = netif_running(adapter->netdev) &&
		     READ_ONCE(adapter->xdp_prog);

	if (if_running)
		ixgbe_txrx_ring_disable(adapter, qid);

	ixgbe_xsk_umem_dma_unmap(adapter, adapter->xsk_umems[qid]);
	ixgbe_remove_xsk_umem(adapter, qid);

	if (if_running)
		ixgbe_txrx_ring_enable(adapter, qid);

	return 0;
}

int ixgbe_xsk_umem_query(struct ixgbe_adapter *adapter, struct xdp_umem **umem,
			 u16 qid)
{
	if (qid >= adapter->num_rx_queues)
		return -EINVAL;

	if (adapter->xsk_umems) {
		if (qid >= adapter->num_xsk_umems)
			return -EINVAL;
		*umem = adapter->xsk_umems[qid];
		return 0;
	}

	*umem = NULL;
	return 0;
}

int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
			 u16 qid)
{
	return umem ? ixgbe_xsk_umem_enable(adapter, umem, qid) :
		ixgbe_xsk_umem_disable(adapter, qid);
}

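/**
 * ixgbe_run_xdp_zc - run the XDP program on a zero-copy Rx buffer
 * @adapter: board private structure
 * @rx_ring: Rx ring the buffer was received on
 * @xdp: xdp_buff describing the received frame
 *
 * Returns one of IXGBE_XDP_PASS, IXGBE_XDP_TX, IXGBE_XDP_REDIR or
 * IXGBE_XDP_CONSUMED depending on the program's verdict.
 **/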
static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
			    struct ixgbe_ring *rx_ring,
			    struct xdp_buff *xdp)
{
	int err, result = IXGBE_XDP_PASS;
	struct bpf_prog *xdp_prog;
	struct xdp_frame *xdpf;
	u32 act;

	rcu_read_lock();
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);
	xdp->handle += xdp->data - xdp->data_hard_start;
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdpf = convert_to_xdp_frame(xdp);
		if (unlikely(!xdpf)) {
			result = IXGBE_XDP_CONSUMED;
			break;
		}
		result = ixgbe_xmit_xdp_ring(adapter, xdpf);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fallthrough */
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		/* fallthrough -- handle aborts by dropping packet */
	case XDP_DROP:
		result = IXGBE_XDP_CONSUMED;
		break;
	}
	rcu_read_unlock();
	return result;
}

static struct
ixgbe_rx_buffer *ixgbe_get_rx_buffer_zc(struct ixgbe_ring *rx_ring,
					unsigned int size)
{
	struct ixgbe_rx_buffer *bi;

	bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      bi->dma, 0,
				      size,
				      DMA_BIDIRECTIONAL);

	return bi;
}

static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring,
				     struct ixgbe_rx_buffer *obi)
{
	unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
	u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
	u16 nta = rx_ring->next_to_alloc;
	struct ixgbe_rx_buffer *nbi;

	nbi = &rx_ring->rx_buffer_info[rx_ring->next_to_alloc];
	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	nbi->dma = obi->dma & mask;
	nbi->dma += hr;

	nbi->addr = (void *)((unsigned long)obi->addr & mask);
	nbi->addr += hr;

	nbi->handle = obi->handle & mask;
	nbi->handle += rx_ring->xsk_umem->headroom;

	obi->addr = NULL;
	obi->skb = NULL;
}

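/**
 * ixgbe_zca_free - zero-copy allocator callback used by the XDP core
 * @alloc: zero-copy allocator embedded in the Rx ring
 * @handle: UMEM handle of the buffer being returned to the driver
 *
 * Recycles the returned buffer into the Rx ring at the next_to_alloc
 * position.
 **/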
void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
{
	struct ixgbe_rx_buffer *bi;
	struct ixgbe_ring *rx_ring;
	u64 hr, mask;
	u16 nta;

	rx_ring = container_of(alloc, struct ixgbe_ring, zca);
	hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
	mask = rx_ring->xsk_umem->chunk_mask;

	nta = rx_ring->next_to_alloc;
	bi = &rx_ring->rx_buffer_info[nta];

	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	handle &= mask;

	bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle);
	bi->dma += hr;

	bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
	bi->addr += hr;

	bi->handle = (u64)handle + rx_ring->xsk_umem->headroom;
}

static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring,
				  struct ixgbe_rx_buffer *bi)
{
	struct xdp_umem *umem = rx_ring->xsk_umem;
	void *addr = bi->addr;
	u64 handle, hr;

	if (addr)
		return true;

	if (!xsk_umem_peek_addr(umem, &handle)) {
		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	hr = umem->headroom + XDP_PACKET_HEADROOM;

	bi->dma = xdp_umem_get_dma(umem, handle);
	bi->dma += hr;

	bi->addr = xdp_umem_get_data(umem, handle);
	bi->addr += hr;

	bi->handle = handle + umem->headroom;

	xsk_umem_discard_addr(umem);
	return true;
}

static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring,
				       struct ixgbe_rx_buffer *bi)
{
	struct xdp_umem *umem = rx_ring->xsk_umem;
	u64 handle, hr;

	if (!xsk_umem_peek_addr_rq(umem, &handle)) {
		rx_ring->rx_stats.alloc_rx_page_failed++;
		return false;
	}

	handle &= rx_ring->xsk_umem->chunk_mask;

	hr = umem->headroom + XDP_PACKET_HEADROOM;

	bi->dma = xdp_umem_get_dma(umem, handle);
	bi->dma += hr;

	bi->addr = xdp_umem_get_data(umem, handle);
	bi->addr += hr;

	bi->handle = handle + umem->headroom;

	xsk_umem_discard_addr_rq(umem);
	return true;
}

static __always_inline bool
__ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count,
			    bool alloc(struct ixgbe_ring *rx_ring,
				       struct ixgbe_rx_buffer *bi))
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;
	bool ok = true;

	/* nothing to do */
	if (!cleaned_count)
		return true;

	rx_desc = IXGBE_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		if (!alloc(rx_ring, bi)) {
			ok = false;
			break;
		}

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_BIDIRECTIONAL);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}

	return ok;
}

void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
{
	__ixgbe_alloc_rx_buffers_zc(rx_ring, count,
				    ixgbe_alloc_buffer_slow_zc);
}

static bool ixgbe_alloc_rx_buffers_fast_zc(struct ixgbe_ring *rx_ring,
					   u16 count)
{
	return __ixgbe_alloc_rx_buffers_zc(rx_ring, count,
					   ixgbe_alloc_buffer_zc);
}

static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
					      struct ixgbe_rx_buffer *bi,
					      struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;
	struct sk_buff *skb;

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       xdp->data_end - xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
	return skb;
}

static void ixgbe_inc_ntc(struct ixgbe_ring *rx_ring)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
	prefetch(IXGBE_RX_DESC(rx_ring, ntc));
}

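/**
 * ixgbe_clean_rx_irq_zc - consume Rx descriptors in zero-copy mode
 * @q_vector: vector the Rx ring belongs to
 * @rx_ring: AF_XDP Rx ring
 * @budget: NAPI budget
 *
 * Returns the number of packets processed, or the full budget when a
 * buffer allocation failure means the ring should be polled again.
 **/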
int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
			  struct ixgbe_ring *rx_ring,
			  const int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;
	struct sk_buff *skb;
	struct xdp_buff xdp;

	xdp.rxq = &rx_ring->xdp_rxq;

	while (likely(total_rx_packets < budget)) {
		union ixgbe_adv_rx_desc *rx_desc;
		struct ixgbe_rx_buffer *bi;
		unsigned int size;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			failure = failure ||
				  !ixgbe_alloc_rx_buffers_fast_zc(rx_ring,
								  cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		bi = ixgbe_get_rx_buffer_zc(rx_ring, size);

		if (unlikely(!ixgbe_test_staterr(rx_desc,
						 IXGBE_RXD_STAT_EOP))) {
			struct ixgbe_rx_buffer *next_bi;

			ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
			ixgbe_inc_ntc(rx_ring);
			next_bi =
				&rx_ring->rx_buffer_info[rx_ring->next_to_clean];
			next_bi->skb = ERR_PTR(-EINVAL);
			continue;
		}

		if (unlikely(bi->skb)) {
			ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
			ixgbe_inc_ntc(rx_ring);
			continue;
		}

		xdp.data = bi->addr;
		xdp.data_meta = xdp.data;
		xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
		xdp.data_end = xdp.data + size;
		xdp.handle = bi->handle;

		xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, &xdp);

		if (xdp_res) {
			if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
				xdp_xmit |= xdp_res;
				bi->addr = NULL;
				bi->skb = NULL;
			} else {
				ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
			}
			total_rx_packets++;
			total_rx_bytes += size;

			cleaned_count++;
			ixgbe_inc_ntc(rx_ring);
			continue;
		}

		/* XDP_PASS path */
		skb = ixgbe_construct_skb_zc(rx_ring, bi, &xdp);
		if (!skb) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			break;
		}

		cleaned_count++;
		ixgbe_inc_ntc(rx_ring);

		if (eth_skb_pad(skb))
			continue;

		total_rx_bytes += skb->len;
		total_rx_packets++;

		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
		ixgbe_rx_skb(q_vector, skb);
	}

	if (xdp_xmit & IXGBE_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_xmit & IXGBE_XDP_TX) {
		struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.
		 */
		wmb();
		writel(ring->next_to_use, ring->tail);
	}

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return failure ? budget : (int)total_rx_packets;
}

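/**
 * ixgbe_xsk_clean_rx_ring - hand leftover buffer handles back to the UMEM
 * @rx_ring: AF_XDP Rx ring being torn down
 **/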
void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
	u16 i = rx_ring->next_to_clean;
	struct ixgbe_rx_buffer *bi = &rx_ring->rx_buffer_info[i];

	while (i != rx_ring->next_to_alloc) {
		xsk_umem_fq_reuse(rx_ring->xsk_umem, bi->handle);
		i++;
		bi++;
		if (i == rx_ring->count) {
			i = 0;
			bi = rx_ring->rx_buffer_info;
		}
	}
}

static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_bi;
	bool work_done = true;
	u32 len, cmd_type;
	dma_addr_t dma;

	while (budget-- > 0) {
		if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
		    !netif_carrier_ok(xdp_ring->netdev)) {
			work_done = false;
			break;
		}

		if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &dma, &len))
			break;

		dma_sync_single_for_device(xdp_ring->dev, dma, len,
					   DMA_BIDIRECTIONAL);

		tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
		tx_bi->bytecount = len;
		tx_bi->xdpf = NULL;

		tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		/* put descriptor type bits */
		cmd_type = IXGBE_ADVTXD_DTYP_DATA |
			   IXGBE_ADVTXD_DCMD_DEXT |
			   IXGBE_ADVTXD_DCMD_IFCS;
		cmd_type |= len | IXGBE_TXD_CMD;
		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
		tx_desc->read.olinfo_status =
			cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);

		xdp_ring->next_to_use++;
		if (xdp_ring->next_to_use == xdp_ring->count)
			xdp_ring->next_to_use = 0;
	}

	if (tx_desc) {
		ixgbe_xdp_ring_update_tail(xdp_ring);
		xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
	}

	return !!budget && work_done;
}

static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
				      struct ixgbe_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}

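/**
 * ixgbe_clean_xdp_tx_irq - complete sent descriptors on an AF_XDP Tx ring
 * @q_vector: vector the Tx ring belongs to
 * @tx_ring: XDP Tx ring
 * @napi_budget: NAPI budget
 *
 * Returns true if cleaning finished within the work limit and the
 * follow-up zero-copy transmit made progress.
 **/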
bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
			    struct ixgbe_ring *tx_ring, int napi_budget)
{
	unsigned int total_packets = 0, total_bytes = 0;
	u32 i = tx_ring->next_to_clean, xsk_frames = 0;
	unsigned int budget = q_vector->tx.work_limit;
	struct xdp_umem *umem = tx_ring->xsk_umem;
	union ixgbe_adv_tx_desc *tx_desc;
	struct ixgbe_tx_buffer *tx_bi;
	bool xmit_done;

	tx_bi = &tx_ring->tx_buffer_info[i];
	tx_desc = IXGBE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		total_bytes += tx_bi->bytecount;
		total_packets += tx_bi->gso_segs;

		if (tx_bi->xdpf)
			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		tx_bi++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_bi = tx_ring->tx_buffer_info;
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (xsk_frames)
		xsk_umem_complete_tx(umem, xsk_frames);

	xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
	return budget > 0 && xmit_done;
}

int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ring *ring;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return -ENETDOWN;

	if (!READ_ONCE(adapter->xdp_prog))
		return -ENXIO;

	if (qid >= adapter->num_xdp_queues)
		return -ENXIO;

	if (!adapter->xsk_umems || !adapter->xsk_umems[qid])
		return -ENXIO;

	ring = adapter->xdp_ring[qid];
	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
		u64 eics = BIT_ULL(ring->q_vector->v_idx);

		ixgbe_irq_rearm_queues(adapter, eics);
	}

	return 0;
}

void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xdp_umem *umem = tx_ring->xsk_umem;
	struct ixgbe_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_buffer_info[ntc];

		if (tx_bi->xdpf)
			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc == tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_umem_complete_tx(umem, xsk_frames);
}