/*
 * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include "en/xdp.h"
#include "en/params.h"

int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
{
        int hr = mlx5e_get_linear_rq_headroom(params, xsk);

        /* Let S := SKB_DATA_ALIGN(sizeof(struct skb_shared_info)).
         * The condition checked in mlx5e_rx_is_linear_skb is:
         *   SKB_DATA_ALIGN(sw_mtu + hard_mtu + hr) + S <= PAGE_SIZE        (1)
         * (Note that hw_mtu == sw_mtu + hard_mtu.)
         * What is returned from this function is:
         *   max_mtu = PAGE_SIZE - S - hr - hard_mtu                        (2)
         * After assigning sw_mtu := max_mtu, the left side of (1) turns to
         * SKB_DATA_ALIGN(PAGE_SIZE - S) + S, which is equal to PAGE_SIZE,
         * because both PAGE_SIZE and S are already aligned. Any number
         * greater than max_mtu would make the left side of (1) greater than
         * PAGE_SIZE, so max_mtu is the maximum MTU allowed.
         */
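        /* Worked example of (2), with illustrative values only (the exact
         * numbers depend on the architecture and kernel configuration):
         * assuming PAGE_SIZE == 4096, S == 320 (skb_shared_info rounded up
         * to a 64-byte cache line), hr == 256 (XDP_PACKET_HEADROOM) and a
         * hard_mtu of 22 bytes (Ethernet header + VLAN + FCS):
         *   max_mtu = 4096 - 320 - 256 - 22 = 3498
         */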

        return MLX5E_HW2SW_MTU(params, SKB_MAX_HEAD(hr));
}

static inline bool
mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
                    struct mlx5e_dma_info *di, struct xdp_buff *xdp)
{
        struct mlx5e_xmit_data xdptxd;
        struct mlx5e_xdp_info xdpi;
        struct xdp_frame *xdpf;
        dma_addr_t dma_addr;

        xdpf = xdp_convert_buff_to_frame(xdp);
        if (unlikely(!xdpf))
                return false;

        xdptxd.data = xdpf->data;
        xdptxd.len = xdpf->len;

        if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
                /* The xdp_buff was in the UMEM and was copied into a newly
                 * allocated page. The UMEM page was returned via the ZCA, and
                 * this new page has to be mapped at this point and has to be
                 * unmapped and returned via xdp_return_frame on completion.
                 */

                /* Prevent double recycling of the UMEM page. Even if this
                 * function returns false, the xdp_buff shouldn't be recycled,
                 * as it was already done in xdp_convert_zc_to_xdp_frame.
                 */
                __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */

                xdpi.mode = MLX5E_XDP_XMIT_MODE_FRAME;

                dma_addr = dma_map_single(sq->pdev, xdptxd.data, xdptxd.len,
                                          DMA_TO_DEVICE);
                if (dma_mapping_error(sq->pdev, dma_addr)) {
                        xdp_return_frame(xdpf);
                        return false;
                }

                xdptxd.dma_addr = dma_addr;
                xdpi.frame.xdpf = xdpf;
                xdpi.frame.dma_addr = dma_addr;
        } else {
                /* The driver assumes that xdp_convert_buff_to_frame returns
                 * an xdp_frame that points to the same memory region as
                 * the original xdp_buff. This allows mapping the memory only
                 * once and using DMA_BIDIRECTIONAL mode.
                 */

                xdpi.mode = MLX5E_XDP_XMIT_MODE_PAGE;

                dma_addr = di->addr + (xdpf->data - (void *)xdpf);
                dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len,
                                           DMA_TO_DEVICE);

                xdptxd.dma_addr = dma_addr;
                xdpi.page.rq = rq;
                xdpi.page.di = *di;
        }

        return INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
                               mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, 0);
}

/* Returns true if the packet was consumed by XDP. */
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
                      u32 *len, struct xdp_buff *xdp)
{
        struct bpf_prog *prog = rcu_dereference(rq->xdp_prog);
        u32 act;
        int err;

        if (!prog)
                return false;

        act = bpf_prog_run_xdp(prog, xdp);
        switch (act) {
        case XDP_PASS:
                *len = xdp->data_end - xdp->data;
                return false;
        case XDP_TX:
                if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, di, xdp)))
                        goto xdp_abort;
                __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
                return true;
        case XDP_REDIRECT:
                /* When XDP is enabled, the page refcount is 1 here. */
                err = xdp_do_redirect(rq->netdev, xdp, prog);
                if (unlikely(err))
                        goto xdp_abort;
                __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
                __set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
                if (xdp->rxq->mem.type != MEM_TYPE_XSK_BUFF_POOL)
                        mlx5e_page_dma_unmap(rq, di);
                rq->stats->xdp_redirect++;
                return true;
        default:
                bpf_warn_invalid_xdp_action(act);
                fallthrough;
        case XDP_ABORTED:
xdp_abort:
                trace_xdp_exception(rq->netdev, prog, act);
                fallthrough;
        case XDP_DROP:
                rq->stats->xdp_drop++;
                return true;
        }
}

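/* Return the producer index at which a WQE of @size WQEBBs can be posted.
 * If fewer than @size contiguous WQEBBs remain before the work queue wraps,
 * the remaining slots are filled with NOPs so that a single WQE never spans
 * the wrap point.
 */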
static u16 mlx5e_xdpsq_get_next_pi(struct mlx5e_xdpsq *sq, u16 size)
{
        struct mlx5_wq_cyc *wq = &sq->wq;
        u16 pi, contig_wqebbs;

        pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
        if (unlikely(contig_wqebbs < size)) {
                struct mlx5e_xdp_wqe_info *wi, *edge_wi;

                wi = &sq->db.wqe_info[pi];
                edge_wi = wi + contig_wqebbs;

                /* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
                for (; wi < edge_wi; wi++) {
                        *wi = (struct mlx5e_xdp_wqe_info) {
                                .num_wqebbs = 1,
                                .num_pkts = 0,
                        };
                        mlx5e_post_nop(wq, sq->sqn, &sq->pc);
                }
                sq->stats->nops += contig_wqebbs;

                pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        }

        return pi;
}

static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
{
        struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
        struct mlx5e_xdpsq_stats *stats = sq->stats;
        struct mlx5e_tx_wqe *wqe;
        u16 pi;

        pi = mlx5e_xdpsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
        wqe = MLX5E_TX_FETCH_WQE(sq, pi);
        net_prefetchw(wqe->data);

        *session = (struct mlx5e_tx_mpwqe) {
                .wqe = wqe,
                .bytes_count = 0,
                .ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
                .pkt_count = 0,
                .inline_on = mlx5e_xdp_get_inline_state(sq, session->inline_on),
        };

        stats->mpwqe++;
}

void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq)
{
        struct mlx5_wq_cyc *wq = &sq->wq;
        struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
        struct mlx5_wqe_ctrl_seg *cseg = &session->wqe->ctrl;
        u16 ds_count = session->ds_count;
        u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[pi];

        cseg->opmod_idx_opcode =
                cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_ENHANCED_MPSW);
        cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count);

        wi->num_wqebbs = DIV_ROUND_UP(ds_count, MLX5_SEND_WQEBB_NUM_DS);
        wi->num_pkts = session->pkt_count;

        sq->pc += wi->num_wqebbs;

        sq->doorbell_cseg = cseg;

        session->wqe = NULL; /* Close session */
}

enum {
        MLX5E_XDP_CHECK_OK = 1,
        MLX5E_XDP_CHECK_START_MPWQE = 2,
};

INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
{
        if (unlikely(!sq->mpwqe.wqe)) {
                const u16 stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);

                if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
                                                     stop_room))) {
                        /* SQ is full, ring doorbell */
                        mlx5e_xmit_xdp_doorbell(sq);
                        sq->stats->full++;
                        return -EBUSY;
                }

                return MLX5E_XDP_CHECK_START_MPWQE;
        }

        return MLX5E_XDP_CHECK_OK;
}

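/* An MPWQE (multi-packet work queue entry) batches several packets into a
 * single ENHANCED_MPSW WQE. mlx5e_xmit_xdp_frame_check_mpwqe decides whether
 * a new session has to be opened, mlx5e_xdp_mpwqe_session_start reserves a
 * maximum-size WQE, each transmitted frame adds one data segment, and
 * mlx5e_xdp_mpwqe_complete fills the control segment and closes the session,
 * either when the WQE is full or when the caller flushes at the end of a
 * poll cycle.
 */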
INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
                           struct mlx5e_xdp_info *xdpi, int check_result)
{
        struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
        struct mlx5e_xdpsq_stats *stats = sq->stats;

        if (unlikely(xdptxd->len > sq->hw_mtu)) {
                stats->err++;
                return false;
        }

        if (!check_result)
                check_result = mlx5e_xmit_xdp_frame_check_mpwqe(sq);
        if (unlikely(check_result < 0))
                return false;

        if (check_result == MLX5E_XDP_CHECK_START_MPWQE) {
                /* Start the session when nothing can fail, so it's guaranteed
                 * that if there is an active session, it has at least one dseg,
                 * and it's safe to complete it at any time.
                 */
                mlx5e_xdp_mpwqe_session_start(sq);
        }

        mlx5e_xdp_mpwqe_add_dseg(sq, xdptxd, stats);

        if (unlikely(mlx5e_xdp_mpqwe_is_full(session)))
                mlx5e_xdp_mpwqe_complete(sq);

        mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
        stats->xmit++;
        return true;
}

INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
{
        if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
                /* SQ is full, ring doorbell */
                mlx5e_xmit_xdp_doorbell(sq);
                sq->stats->full++;
                return -EBUSY;
        }

        return MLX5E_XDP_CHECK_OK;
}

INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
                     struct mlx5e_xdp_info *xdpi, int check_result)
{
        struct mlx5_wq_cyc *wq = &sq->wq;
        u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);

        struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
        struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
        struct mlx5_wqe_data_seg *dseg = wqe->data;

        dma_addr_t dma_addr = xdptxd->dma_addr;
        u32 dma_len = xdptxd->len;

        struct mlx5e_xdpsq_stats *stats = sq->stats;

        net_prefetchw(wqe);

        if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || sq->hw_mtu < dma_len)) {
                stats->err++;
                return false;
        }

        if (!check_result)
                check_result = mlx5e_xmit_xdp_frame_check(sq);
        if (unlikely(check_result < 0))
                return false;

        cseg->fm_ce_se = 0;

        /* copy the inline part if required */
        if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
                memcpy(eseg->inline_hdr.start, xdptxd->data, MLX5E_XDP_MIN_INLINE);
                eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
                dma_len -= MLX5E_XDP_MIN_INLINE;
                dma_addr += MLX5E_XDP_MIN_INLINE;
                dseg++;
        }

        /* write the dma part */
        dseg->addr = cpu_to_be64(dma_addr);
        dseg->byte_count = cpu_to_be32(dma_len);

        cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);

        sq->pc++;

        sq->doorbell_cseg = cseg;

        mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
        stats->xmit++;
        return true;
}

static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
                                  struct mlx5e_xdp_wqe_info *wi,
                                  u32 *xsk_frames,
                                  bool recycle,
                                  struct xdp_frame_bulk *bq)
{
        struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
        u16 i;

        for (i = 0; i < wi->num_pkts; i++) {
                struct mlx5e_xdp_info xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);

                switch (xdpi.mode) {
                case MLX5E_XDP_XMIT_MODE_FRAME:
                        /* XDP_TX from the XSK RQ and XDP_REDIRECT */
                        dma_unmap_single(sq->pdev, xdpi.frame.dma_addr,
                                         xdpi.frame.xdpf->len, DMA_TO_DEVICE);
                        xdp_return_frame_bulk(xdpi.frame.xdpf, bq);
                        break;
                case MLX5E_XDP_XMIT_MODE_PAGE:
                        /* XDP_TX from the regular RQ */
                        mlx5e_page_release_dynamic(xdpi.page.rq, &xdpi.page.di, recycle);
                        break;
                case MLX5E_XDP_XMIT_MODE_XSK:
                        /* AF_XDP send */
                        (*xsk_frames)++;
                        break;
                default:
                        WARN_ON_ONCE(true);
                }
        }
}

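/* Poll up to MLX5E_TX_CQ_POLL_BUDGET CQEs from the XDP SQ's CQ and release
 * the resources of every completed WQE via mlx5e_free_xdpsq_desc. Returns
 * true when the whole budget was spent, i.e. more completions may be
 * pending.
 */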
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
{
        struct xdp_frame_bulk bq;
        struct mlx5e_xdpsq *sq;
        struct mlx5_cqe64 *cqe;
        u32 xsk_frames = 0;
        u16 sqcc;
        int i;

        xdp_frame_bulk_init(&bq);

        sq = container_of(cq, struct mlx5e_xdpsq, cq);

        if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
                return false;

        cqe = mlx5_cqwq_get_cqe(&cq->wq);
        if (!cqe)
                return false;

        /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
         * otherwise a cq overrun may occur
         */
        sqcc = sq->cc;

        i = 0;
        do {
                struct mlx5e_xdp_wqe_info *wi;
                u16 wqe_counter, ci;
                bool last_wqe;

                mlx5_cqwq_pop(&cq->wq);

                wqe_counter = be16_to_cpu(cqe->wqe_counter);

                do {
                        last_wqe = (sqcc == wqe_counter);
                        ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
                        wi = &sq->db.wqe_info[ci];

                        sqcc += wi->num_wqebbs;

                        mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, true, &bq);
                } while (!last_wqe);

                if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
                        netdev_WARN_ONCE(sq->channel->netdev,
                                         "Bad OP in XDPSQ CQE: 0x%x\n",
                                         get_cqe_opcode(cqe));
                        mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
                                             (struct mlx5_err_cqe *)cqe);
                        mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
                }
        } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

        xdp_flush_frame_bulk(&bq);

        if (xsk_frames)
                xsk_tx_completed(sq->xsk_pool, xsk_frames);

        sq->stats->cqes += i;

        mlx5_cqwq_update_db_record(&cq->wq);

        /* ensure cq space is freed before enabling more cqes */
        wmb();

        sq->cc = sqcc;
        return (i == MLX5E_TX_CQ_POLL_BUDGET);
}

void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
{
        struct xdp_frame_bulk bq;
        u32 xsk_frames = 0;

        xdp_frame_bulk_init(&bq);

        rcu_read_lock(); /* needed for xdp_return_frame_bulk */

        while (sq->cc != sq->pc) {
                struct mlx5e_xdp_wqe_info *wi;
                u16 ci;

                ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
                wi = &sq->db.wqe_info[ci];

                sq->cc += wi->num_wqebbs;

                mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, false, &bq);
        }

        xdp_flush_frame_bulk(&bq);
        rcu_read_unlock();

        if (xsk_frames)
                xsk_tx_completed(sq->xsk_pool, xsk_frames);
}

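/* ndo_xdp_xmit callback, the target of XDP_REDIRECT. Maps and posts up to
 * @n frames on the XDP SQ of the current CPU's channel and returns the
 * number of frames successfully queued; ownership of the remaining frames
 * stays with the caller.
 */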
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
                   u32 flags)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_xdpsq *sq;
        int nxmit = 0;
        int sq_num;
        int i;

        /* this flag is sufficient, no need to test internal sq state */
        if (unlikely(!mlx5e_xdp_tx_is_enabled(priv)))
                return -ENETDOWN;

        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
                return -EINVAL;

        sq_num = smp_processor_id();

        if (unlikely(sq_num >= priv->channels.num))
                return -ENXIO;

        sq = &priv->channels.c[sq_num]->xdpsq;

        for (i = 0; i < n; i++) {
                struct xdp_frame *xdpf = frames[i];
                struct mlx5e_xmit_data xdptxd;
                struct mlx5e_xdp_info xdpi;
                bool ret;

                xdptxd.data = xdpf->data;
                xdptxd.len = xdpf->len;
                xdptxd.dma_addr = dma_map_single(sq->pdev, xdptxd.data,
                                                 xdptxd.len, DMA_TO_DEVICE);

                if (unlikely(dma_mapping_error(sq->pdev, xdptxd.dma_addr)))
                        break;

                xdpi.mode = MLX5E_XDP_XMIT_MODE_FRAME;
                xdpi.frame.xdpf = xdpf;
                xdpi.frame.dma_addr = xdptxd.dma_addr;

                ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
                                      mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, 0);
                if (unlikely(!ret)) {
                        dma_unmap_single(sq->pdev, xdptxd.dma_addr,
                                         xdptxd.len, DMA_TO_DEVICE);
                        break;
                }
                nxmit++;
        }

        if (flags & XDP_XMIT_FLUSH) {
                if (sq->mpwqe.wqe)
                        mlx5e_xdp_mpwqe_complete(sq);
                mlx5e_xmit_xdp_doorbell(sq);
        }

        return nxmit;
}

void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq)
{
        struct mlx5e_xdpsq *xdpsq = rq->xdpsq;

        if (xdpsq->mpwqe.wqe)
                mlx5e_xdp_mpwqe_complete(xdpsq);

        mlx5e_xmit_xdp_doorbell(xdpsq);

        if (test_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags)) {
                xdp_do_flush_map();
                __clear_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
        }
}

void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw)
{
        sq->xmit_xdp_frame_check = is_mpw ?
                mlx5e_xmit_xdp_frame_check_mpwqe : mlx5e_xmit_xdp_frame_check;
        sq->xmit_xdp_frame = is_mpw ?
                mlx5e_xmit_xdp_frame_mpwqe : mlx5e_xmit_xdp_frame;
}