// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <linux/stringify.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "i40e.h"
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"

/**
 * i40e_alloc_rx_bi_zc - Allocate the xdp_buff pointer array for zero-copy Rx
 * @rx_ring: Rx ring
 *
 * Returns 0 on success, -ENOMEM on failure
 **/
int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring)
{
	unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count;

	rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL);
	return rx_ring->rx_bi_zc ? 0 : -ENOMEM;
}

/**
 * i40e_clear_rx_bi_zc - Clear the xdp_buff pointer array used for zero-copy Rx
 * @rx_ring: Rx ring
 **/
void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
{
	memset(rx_ring->rx_bi_zc, 0,
	       sizeof(*rx_ring->rx_bi_zc) * rx_ring->count);
}

static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
{
	return &rx_ring->rx_bi_zc[idx];
}

/**
 * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
 * certain ring/qid
 * @vsi: Current VSI
 * @pool: buffer pool
 * @qid: Rx ring to associate buffer pool with
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
				struct xsk_buff_pool *pool,
				u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	bool if_running;
	int err;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	if (qid >= vsi->num_queue_pairs)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
	if (err)
		return err;

	set_bit(qid, vsi->af_xdp_zc_qps);

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;

		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;

		/* Kick start the NAPI context so that receiving will start */
		err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_pool_disable - Disassociate an AF_XDP buffer pool from a
 * certain ring/qid
 * @vsi: Current VSI
 * @qid: Rx ring to disassociate the buffer pool from
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	struct xsk_buff_pool *pool;
	bool if_running;
	int err;

	pool = xsk_get_pool_from_qid(netdev, qid);
	if (!pool)
		return -EINVAL;

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;
	}

	clear_bit(qid, vsi->af_xdp_zc_qps);
	xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);

	if (if_running) {
		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_pool_setup - Enable/disassociate an AF_XDP buffer pool to/from
 * a ring/qid
 * @vsi: Current VSI
 * @pool: Buffer pool to enable/associate to a ring, or NULL to disable
 * @qid: Rx ring to (dis)associate the buffer pool to/from
 *
 * This function enables or disables a buffer pool for a certain ring.
 *
 * Returns 0 on success, <0 on failure
 **/
int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
			u16 qid)
{
	return pool ? i40e_xsk_pool_enable(vsi, pool, qid) :
		i40e_xsk_pool_disable(vsi, qid);
}

/**
 * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 *
 * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
 **/
static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
{
	int err, result = I40E_XDP_PASS;
	struct i40e_ring *xdp_ring;
	struct bpf_prog *xdp_prog;
	u32 act;

	rcu_read_lock();
	/* NB! xdp_prog will always be !NULL, due to the fact that
	 * this path is enabled by setting an XDP program.
	 */
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough; /* handle aborts by dropping packet */
	case XDP_DROP:
		result = I40E_XDP_CONSUMED;
		break;
	}
	rcu_read_unlock();
	return result;
}

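/**
 * i40e_alloc_rx_buffers_zc - Allocate a number of Rx buffers from the
 * AF_XDP buffer pool
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * Returns true on success, false if the allocation stopped early because
 * the buffer pool ran out of buffers
 **/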
bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct xdp_buff **bi, *xdp;
	dma_addr_t dma;
	bool ok = true;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = i40e_rx_bi(rx_ring, ntu);
	do {
		xdp = xsk_buff_alloc(rx_ring->xsk_pool);
		if (!xdp) {
			ok = false;
			goto no_buffers;
		}
		*bi = xdp;
		dma = xsk_buff_xdp_get_dma(xdp);
		rx_desc->read.pkt_addr = cpu_to_le64(dma);
		rx_desc->read.hdr_addr = 0;

		rx_desc++;
		bi++;
		ntu++;

		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = I40E_RX_DESC(rx_ring, 0);
			bi = i40e_rx_bi(rx_ring, 0);
			ntu = 0;
		}

		count--;
	} while (count);

no_buffers:
	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	return ok;
}

/**
 * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer
 * @rx_ring: Rx ring
 * @xdp: xdp_buff
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb, or NULL on failure.
 **/
static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
					     struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;
	struct sk_buff *skb;

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       xdp->data_end - xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	xsk_buff_free(xdp);
	return skb;
}

/**
 * i40e_inc_ntc - Advance the next_to_clean index
 * @rx_ring: Rx ring
 **/
static void i40e_inc_ntc(struct i40e_ring *rx_ring)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
}

/**
 * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
 * @rx_ring: Rx ring
 * @budget: NAPI budget
 *
 * Returns amount of work completed
 **/
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;
	struct sk_buff *skb;

	while (likely(total_rx_packets < (unsigned int)budget)) {
		union i40e_rx_desc *rx_desc;
		struct xdp_buff **bi;
		unsigned int size;
		u64 qword;

		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring,
						      rx_desc->raw.qword[0],
						      qword);
			bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
			xsk_buff_free(*bi);
			*bi = NULL;
			cleaned_count++;
			i40e_inc_ntc(rx_ring);
			continue;
		}

		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		if (!size)
			break;

		bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
		(*bi)->data_end = (*bi)->data + size;
		xsk_buff_dma_sync_for_cpu(*bi, rx_ring->xsk_pool);

		xdp_res = i40e_run_xdp_zc(rx_ring, *bi);
		if (xdp_res) {
			if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR))
				xdp_xmit |= xdp_res;
			else
				xsk_buff_free(*bi);

			*bi = NULL;
			total_rx_bytes += size;
			total_rx_packets++;

			cleaned_count++;
			i40e_inc_ntc(rx_ring);
			continue;
		}

		/* XDP_PASS path */

		/* NB! We are not checking for errors using
		 * i40e_test_staterr with
		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is because
		 * SBP is *not* set in PRT_SBPVSI (default not set).
		 */
		skb = i40e_construct_skb_zc(rx_ring, *bi);
		*bi = NULL;
		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			break;
		}

		cleaned_count++;
		i40e_inc_ntc(rx_ring);

		if (eth_skb_pad(skb))
			continue;

		total_rx_bytes += skb->len;
		total_rx_packets++;

		i40e_process_skb_fields(rx_ring, rx_desc, skb);
		napi_gro_receive(&rx_ring->q_vector->napi, skb);
	}

	if (cleaned_count >= I40E_RX_BUFFER_WRITE)
		failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);

	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);

	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}
	return failure ? budget : (int)total_rx_packets;
}

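/**
 * i40e_xmit_pkt - Place a single AF_XDP descriptor on the HW Tx ring
 * @xdp_ring: XDP Tx ring
 * @desc: AF_XDP descriptor carrying the frame address and length
 * @total_bytes: Running byte counter, incremented by the frame length
 **/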
static void i40e_xmit_pkt(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
			  unsigned int *total_bytes)
{
	struct i40e_tx_desc *tx_desc;
	dma_addr_t dma;

	dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
	xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);

	tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC | I40E_TX_DESC_CMD_EOP,
						  0, desc->len, 0);

	*total_bytes += desc->len;
}

static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
				unsigned int *total_bytes)
{
	u16 ntu = xdp_ring->next_to_use;
	struct i40e_tx_desc *tx_desc;
	dma_addr_t dma;
	u32 i;

	loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc[i].len);

		tx_desc = I40E_TX_DESC(xdp_ring, ntu++);
		tx_desc->buffer_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC |
							  I40E_TX_DESC_CMD_EOP,
							  0, desc[i].len, 0);

		*total_bytes += desc[i].len;
	}

	xdp_ring->next_to_use = ntu;
}

static void i40e_fill_tx_hw_ring(struct i40e_ring *xdp_ring, struct xdp_desc *descs, u32 nb_pkts,
				 unsigned int *total_bytes)
{
	u32 batched, leftover, i;

	batched = nb_pkts & ~(PKTS_PER_BATCH - 1);
	leftover = nb_pkts & (PKTS_PER_BATCH - 1);
	for (i = 0; i < batched; i += PKTS_PER_BATCH)
		i40e_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
	for (i = batched; i < batched + leftover; i++)
		i40e_xmit_pkt(xdp_ring, &descs[i], total_bytes);
}

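/**
 * i40e_set_rs_bit - Set the RS bit on the last descriptor placed on the ring
 * @xdp_ring: XDP Tx ring
 *
 * Requests a descriptor writeback for the most recently queued frame so
 * that completed AF_XDP Tx descriptors can be cleaned.
 **/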
static void i40e_set_rs_bit(struct i40e_ring *xdp_ring)
{
	u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
	struct i40e_tx_desc *tx_desc;

	tx_desc = I40E_TX_DESC(xdp_ring, ntu);
	tx_desc->cmd_type_offset_bsz |= (I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT);
}

/**
 * i40e_xmit_zc - Performs zero-copy Tx AF_XDP
 * @xdp_ring: XDP Tx ring
 * @budget: NAPI budget
 *
 * Returns true if the work is finished.
 **/
static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
{
	struct xdp_desc *descs = xdp_ring->xsk_descs;
	u32 nb_pkts, nb_processed = 0;
	unsigned int total_bytes = 0;

	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, descs, budget);
	if (!nb_pkts)
		return false;

	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
		i40e_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
		xdp_ring->next_to_use = 0;
	}

	i40e_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
			     &total_bytes);

	/* Request an interrupt for the last frame and bump tail ptr. */
	i40e_set_rs_bit(xdp_ring);
	i40e_xdp_ring_update_tail(xdp_ring);

	i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes);

	return true;
}

/**
 * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
 * @tx_ring: XDP Tx ring
 * @tx_bi: Tx buffer info to clean
 **/
static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
				     struct i40e_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	tx_ring->xdp_tx_active--;
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}

/**
 * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
 * @vsi: Current VSI
 * @tx_ring: XDP Tx ring
 *
 * Returns true if cleanup/transmission is done.
 **/
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
{
	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
	u32 i, completed_frames, xsk_frames = 0;
	u32 head_idx = i40e_get_head(tx_ring);
	struct i40e_tx_buffer *tx_bi;
	unsigned int ntc;

	if (head_idx < tx_ring->next_to_clean)
		head_idx += tx_ring->count;
	completed_frames = head_idx - tx_ring->next_to_clean;

	if (completed_frames == 0)
		goto out_xmit;

	if (likely(!tx_ring->xdp_tx_active)) {
		xsk_frames = completed_frames;
		goto skip;
	}

	ntc = tx_ring->next_to_clean;

	for (i = 0; i < completed_frames; i++) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf) {
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
			tx_bi->xdpf = NULL;
		} else {
			xsk_frames++;
		}

		if (++ntc >= tx_ring->count)
			ntc = 0;
	}

skip:
	tx_ring->next_to_clean += completed_frames;
	if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
		tx_ring->next_to_clean -= tx_ring->count;

	if (xsk_frames)
		xsk_tx_completed(bp, xsk_frames);

	i40e_arm_wb(tx_ring, vsi, completed_frames);

out_xmit:
	if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
		xsk_set_tx_need_wakeup(tx_ring->xsk_pool);

	return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
}

/**
 * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup
 * @dev: the netdevice
 * @queue_id: queue id to wake up
 * @flags: ignored in our case since we have Rx and Tx in the same NAPI.
 *
 * Returns <0 for errors, 0 otherwise.
 **/
int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *ring;

	if (test_bit(__I40E_CONFIG_BUSY, pf->state))
		return -EAGAIN;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!i40e_enabled_xdp_vsi(vsi))
		return -ENXIO;

	if (queue_id >= vsi->num_queue_pairs)
		return -ENXIO;

	if (!vsi->xdp_rings[queue_id]->xsk_pool)
		return -ENXIO;

	ring = vsi->xdp_rings[queue_id];

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
		i40e_force_wb(vsi, ring->q_vector);

	return 0;
}

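/**
 * i40e_xsk_clean_rx_ring - Clean the Rx ring on shutdown, freeing AF_XDP buffers
 * @rx_ring: Rx ring
 **/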
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
{
	u16 i;

	for (i = 0; i < rx_ring->count; i++) {
		struct xdp_buff **rx_bi = i40e_rx_bi(rx_ring, i);

		if (!*rx_bi)
			continue;

		xsk_buff_free(*rx_bi);
		*rx_bi = NULL;
	}
}

/**
 * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
 * @tx_ring: XDP Tx ring
 **/
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
	struct i40e_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf)
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc >= tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(bp, xsk_frames);
}

/**
 * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have an AF_XDP
 * buffer pool attached
 * @vsi: vsi
 *
 * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
 **/
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{
	struct net_device *netdev = vsi->netdev;
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (xsk_get_pool_from_qid(netdev, i))
			return true;
	}

	return false;
}