// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <linux/stringify.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "i40e.h"
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"

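/**
 * i40e_alloc_rx_bi_zc - Allocate the xdp_buff pointer array for zero-copy Rx
 * @rx_ring: Rx ring to allocate the array for
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 **/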
int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring)
{
	unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count;

	rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL);
	return rx_ring->rx_bi_zc ? 0 : -ENOMEM;
}

void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
{
	memset(rx_ring->rx_bi_zc, 0,
	       sizeof(*rx_ring->rx_bi_zc) * rx_ring->count);
}

static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
{
	return &rx_ring->rx_bi_zc[idx];
}

/**
 * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
 * certain ring/qid
 * @vsi: Current VSI
 * @pool: buffer pool
 * @qid: Rx ring to associate buffer pool with
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
				struct xsk_buff_pool *pool,
				u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	bool if_running;
	int err;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	if (qid >= vsi->num_queue_pairs)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
	if (err)
		return err;

	set_bit(qid, vsi->af_xdp_zc_qps);

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;

		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;

		/* Kick start the NAPI context so that receiving will start */
		err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_pool_disable - Disassociate an AF_XDP buffer pool from a
 * certain ring/qid
 * @vsi: Current VSI
 * @qid: Rx ring to disassociate buffer pool from
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	struct xsk_buff_pool *pool;
	bool if_running;
	int err;

	pool = xsk_get_pool_from_qid(netdev, qid);
	if (!pool)
		return -EINVAL;

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;
	}

	clear_bit(qid, vsi->af_xdp_zc_qps);
	xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);

	if (if_running) {
		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_pool_setup - Enable/disassociate an AF_XDP buffer pool to/from
 * a ring/qid
 * @vsi: Current VSI
 * @pool: Buffer pool to enable/associate to a ring, or NULL to disable
 * @qid: Rx ring to (dis)associate buffer pool (from)to
 *
 * This function enables or disables a buffer pool to a certain ring.
 *
 * Returns 0 on success, <0 on failure
 **/
int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
			u16 qid)
{
	return pool ? i40e_xsk_pool_enable(vsi, pool, qid) :
		i40e_xsk_pool_disable(vsi, qid);
}

/**
 * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 *
 * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
 **/
static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
{
	int err, result = I40E_XDP_PASS;
	struct i40e_ring *xdp_ring;
	struct bpf_prog *xdp_prog;
	u32 act;

	/* NB! xdp_prog will always be !NULL, due to the fact that
	 * this path is enabled by setting an XDP program.
	 */
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);

	if (likely(act == XDP_REDIRECT)) {
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (err)
			goto out_failure;
		return I40E_XDP_REDIR;
	}

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
		if (result == I40E_XDP_CONSUMED)
			goto out_failure;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough; /* handle aborts by dropping packet */
	case XDP_DROP:
		result = I40E_XDP_CONSUMED;
		break;
	}
	return result;
}

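/**
 * i40e_alloc_rx_buffers_zc - Allocate a number of Rx buffers from the
 * AF_XDP buffer pool
 * @rx_ring: Rx ring
 * @count: number of buffers to allocate
 *
 * Buffers are taken from the xsk buffer pool in a batch, and only the
 * descriptors up to the end of the ring are filled in one call, so fewer
 * than @count descriptors may be initialized.
 *
 * Returns true if all @count buffers were allocated, false otherwise.
 **/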
bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct xdp_buff **xdp;
	u32 nb_buffs, i;
	dma_addr_t dma;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	xdp = i40e_rx_bi(rx_ring, ntu);

	nb_buffs = min_t(u16, count, rx_ring->count - ntu);
	nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
	if (!nb_buffs)
		return false;

	i = nb_buffs;
	while (i--) {
		dma = xsk_buff_xdp_get_dma(*xdp);
		rx_desc->read.pkt_addr = cpu_to_le64(dma);
		rx_desc->read.hdr_addr = 0;

		rx_desc++;
		xdp++;
	}

	ntu += nb_buffs;
	if (ntu == rx_ring->count) {
		rx_desc = I40E_RX_DESC(rx_ring, 0);
		xdp = i40e_rx_bi(rx_ring, 0);
		ntu = 0;
	}

	/* clear the status bits for the next_to_use descriptor */
	rx_desc->wb.qword1.status_error_len = 0;
	i40e_release_rx_desc(rx_ring, ntu);

	return count == nb_buffs;
}

/**
 * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer
 * @rx_ring: Rx ring
 * @xdp: xdp_buff
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb, or NULL on failure.
 **/
static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
					     struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;
	struct sk_buff *skb;

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       xdp->data_end - xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		goto out;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

out:
	xsk_buff_free(xdp);
	return skb;
}

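/**
 * i40e_handle_xdp_result_zc - Handle the verdict from the XDP program
 * @rx_ring: Rx ring
 * @xdp_buff: zero-copy buffer the verdict applies to
 * @rx_desc: Rx descriptor the buffer was received on
 * @rx_packets: number of packets to account towards the NAPI budget (output)
 * @rx_bytes: number of bytes to account in the Rx statistics (output)
 * @size: packet length taken from the Rx descriptor
 * @xdp_res: one of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
 **/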
static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
				      struct xdp_buff *xdp_buff,
				      union i40e_rx_desc *rx_desc,
				      unsigned int *rx_packets,
				      unsigned int *rx_bytes,
				      unsigned int size,
				      unsigned int xdp_res)
{
	struct sk_buff *skb;

	*rx_packets = 1;
	*rx_bytes = size;

	if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX)
		return;

	if (xdp_res == I40E_XDP_CONSUMED) {
		xsk_buff_free(xdp_buff);
		return;
	}

	if (xdp_res == I40E_XDP_PASS) {
		/* NB! We are not checking for errors using
		 * i40e_test_staterr with
		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is because
		 * SBP is *not* set in PRT_SBPVSI (default not set).
		 */
		skb = i40e_construct_skb_zc(rx_ring, xdp_buff);
		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			*rx_packets = 0;
			*rx_bytes = 0;
			return;
		}

		if (eth_skb_pad(skb)) {
			*rx_packets = 0;
			*rx_bytes = 0;
			return;
		}

		*rx_bytes = skb->len;
		i40e_process_skb_fields(rx_ring, rx_desc, skb);
		napi_gro_receive(&rx_ring->q_vector->napi, skb);
		return;
	}

	/* Should never get here, as all valid cases have been handled already.
	 */
	WARN_ON_ONCE(1);
}

/**
 * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
 * @rx_ring: Rx ring
 * @budget: NAPI budget
 *
 * Returns amount of work completed
 **/
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	u16 next_to_clean = rx_ring->next_to_clean;
	u16 count_mask = rx_ring->count - 1;
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;

	while (likely(total_rx_packets < (unsigned int)budget)) {
		union i40e_rx_desc *rx_desc;
		unsigned int rx_packets;
		unsigned int rx_bytes;
		struct xdp_buff *bi;
		unsigned int size;
		u64 qword;

		rx_desc = I40E_RX_DESC(rx_ring, next_to_clean);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring,
						      rx_desc->raw.qword[0],
						      qword);
			bi = *i40e_rx_bi(rx_ring, next_to_clean);
			xsk_buff_free(bi);
			next_to_clean = (next_to_clean + 1) & count_mask;
			continue;
		}

		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		if (!size)
			break;

		bi = *i40e_rx_bi(rx_ring, next_to_clean);
		xsk_buff_set_size(bi, size);
		xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);

		xdp_res = i40e_run_xdp_zc(rx_ring, bi);
		i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets,
					  &rx_bytes, size, xdp_res);
		total_rx_packets += rx_packets;
		total_rx_bytes += rx_bytes;
		xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
		next_to_clean = (next_to_clean + 1) & count_mask;
	}

	rx_ring->next_to_clean = next_to_clean;
	cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;

	if (cleaned_count >= I40E_RX_BUFFER_WRITE)
		failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);

	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);

	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		if (failure || next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}
	return failure ? budget : (int)total_rx_packets;
}

static void i40e_xmit_pkt(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
			  unsigned int *total_bytes)
{
	struct i40e_tx_desc *tx_desc;
	dma_addr_t dma;

	dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
	xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);

	tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC | I40E_TX_DESC_CMD_EOP,
						  0, desc->len, 0);

	*total_bytes += desc->len;
}

static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
				unsigned int *total_bytes)
{
	u16 ntu = xdp_ring->next_to_use;
	struct i40e_tx_desc *tx_desc;
	dma_addr_t dma;
	u32 i;

	loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc[i].len);

		tx_desc = I40E_TX_DESC(xdp_ring, ntu++);
		tx_desc->buffer_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC |
							  I40E_TX_DESC_CMD_EOP,
							  0, desc[i].len, 0);

		*total_bytes += desc[i].len;
	}

	xdp_ring->next_to_use = ntu;
}

static void i40e_fill_tx_hw_ring(struct i40e_ring *xdp_ring, struct xdp_desc *descs, u32 nb_pkts,
				 unsigned int *total_bytes)
{
	u32 batched, leftover, i;

	/* PKTS_PER_BATCH is a power of two, so the mask arithmetic below
	 * splits nb_pkts into full batches and a remainder.
	 */
	batched = nb_pkts & ~(PKTS_PER_BATCH - 1);
	leftover = nb_pkts & (PKTS_PER_BATCH - 1);
	for (i = 0; i < batched; i += PKTS_PER_BATCH)
		i40e_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
	for (i = batched; i < batched + leftover; i++)
		i40e_xmit_pkt(xdp_ring, &descs[i], total_bytes);
}

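/**
 * i40e_set_rs_bit - Set the RS bit on the last produced Tx descriptor
 * @xdp_ring: XDP Tx ring
 *
 * Setting RS (Report Status) on the most recently filled descriptor asks the
 * hardware to report completion of that descriptor, and thereby of all
 * descriptors produced before it, which the Tx cleaning routine then picks up.
 **/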
static void i40e_set_rs_bit(struct i40e_ring *xdp_ring)
{
	u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
	struct i40e_tx_desc *tx_desc;

	tx_desc = I40E_TX_DESC(xdp_ring, ntu);
	tx_desc->cmd_type_offset_bsz |= cpu_to_le64(I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT);
}

/**
 * i40e_xmit_zc - Performs zero-copy Tx AF_XDP
 * @xdp_ring: XDP Tx ring
 * @budget: NAPI budget
 *
 * Returns true if the work is finished.
 **/
static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
{
	struct xdp_desc *descs = xdp_ring->xsk_descs;
	u32 nb_pkts, nb_processed = 0;
	unsigned int total_bytes = 0;

	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, descs, budget);
	if (!nb_pkts)
		return true;

	/* When the batch would cross the end of the ring, fill up to the end
	 * first and wrap next_to_use back to the start.
	 */
	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
		i40e_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
		xdp_ring->next_to_use = 0;
	}

	i40e_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
			     &total_bytes);

	/* Request an interrupt for the last frame and bump tail ptr. */
	i40e_set_rs_bit(xdp_ring);
	i40e_xdp_ring_update_tail(xdp_ring);

	i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes);

	return nb_pkts < budget;
}

/**
 * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
 * @tx_ring: XDP Tx ring
 * @tx_bi: Tx buffer info to clean
 **/
static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
				     struct i40e_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	tx_ring->xdp_tx_active--;
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}

/**
 * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
 * @vsi: Current VSI
 * @tx_ring: XDP Tx ring
 *
 * Returns true if cleanup/transmission is done.
 **/
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
{
	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
	u32 i, completed_frames, xsk_frames = 0;
	u32 head_idx = i40e_get_head(tx_ring);
	struct i40e_tx_buffer *tx_bi;
	unsigned int ntc;

	if (head_idx < tx_ring->next_to_clean)
		head_idx += tx_ring->count;
	completed_frames = head_idx - tx_ring->next_to_clean;

	if (completed_frames == 0)
		goto out_xmit;

	if (likely(!tx_ring->xdp_tx_active)) {
		xsk_frames = completed_frames;
		goto skip;
	}

	ntc = tx_ring->next_to_clean;

	for (i = 0; i < completed_frames; i++) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf) {
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
			tx_bi->xdpf = NULL;
		} else {
			xsk_frames++;
		}

		if (++ntc >= tx_ring->count)
			ntc = 0;
	}

skip:
	tx_ring->next_to_clean += completed_frames;
	if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
		tx_ring->next_to_clean -= tx_ring->count;

	if (xsk_frames)
		xsk_tx_completed(bp, xsk_frames);

	i40e_arm_wb(tx_ring, vsi, completed_frames);

out_xmit:
	if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
		xsk_set_tx_need_wakeup(tx_ring->xsk_pool);

	return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
}

/**
 * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup
 * @dev: the netdevice
 * @queue_id: queue id to wake up
 * @flags: ignored in our case since we have Rx and Tx in the same NAPI.
 *
 * Returns <0 for errors, 0 otherwise.
 **/
int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *ring;

	if (test_bit(__I40E_CONFIG_BUSY, pf->state))
		return -EAGAIN;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!i40e_enabled_xdp_vsi(vsi))
		return -ENXIO;

	if (queue_id >= vsi->num_queue_pairs)
		return -ENXIO;

	if (!vsi->xdp_rings[queue_id]->xsk_pool)
		return -ENXIO;

	ring = vsi->xdp_rings[queue_id];

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
		i40e_force_wb(vsi, ring->q_vector);

	return 0;
}

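/**
 * i40e_xsk_clean_rx_ring - Clean the Rx ring on shutdown
 * @rx_ring: Rx ring
 *
 * Returns any buffers still sitting on the ring to the AF_XDP buffer pool.
 **/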
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
{
	u16 count_mask = rx_ring->count - 1;
	u16 ntc = rx_ring->next_to_clean;
	u16 ntu = rx_ring->next_to_use;

	for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
		struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, ntc);

		xsk_buff_free(rx_bi);
	}
}

/**
 * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
 * @tx_ring: XDP Tx ring
 **/
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
	struct i40e_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf)
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc >= tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(bp, xsk_frames);
}

/**
 * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have an AF_XDP
 * buffer pool attached
 * @vsi: vsi
 *
 * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
 **/
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{
	struct net_device *netdev = vsi->netdev;
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (xsk_get_pool_from_qid(netdev, i))
			return true;
	}

	return false;
}