/*
 * Copyright (c) 2018, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include "en/xdp.h"
#include "en/params.h"
#include <linux/bitfield.h>

int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
{
        int hr = mlx5e_get_linear_rq_headroom(params, xsk);

        /* Let S := SKB_DATA_ALIGN(sizeof(struct skb_shared_info)).
         * The condition checked in mlx5e_rx_is_linear_skb is:
         *   SKB_DATA_ALIGN(sw_mtu + hard_mtu + hr) + S <= PAGE_SIZE     (1)
         *   (Note that hw_mtu == sw_mtu + hard_mtu.)
         * What is returned from this function is:
         *   max_mtu = PAGE_SIZE - S - hr - hard_mtu                     (2)
         * After assigning sw_mtu := max_mtu, the left side of (1) turns to
         * SKB_DATA_ALIGN(PAGE_SIZE - S) + S, which is equal to PAGE_SIZE,
         * because both PAGE_SIZE and S are already aligned. Any number greater
         * than max_mtu would make the left side of (1) greater than PAGE_SIZE,
         * so max_mtu is the maximum MTU allowed.
         */
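        /* Worked example (values are typical, not guaranteed): assuming
         * PAGE_SIZE == 4096, hr == 256 (XDP headroom) and S == 320 (a common
         * x86_64 value of SKB_DATA_ALIGN(sizeof(struct skb_shared_info))),
         * SKB_MAX_HEAD(hr) == 4096 - 320 - 256 == 3520, so max_mtu is
         * 3520 minus the hard MTU (Ethernet header overhead).
         */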
        return MLX5E_HW2SW_MTU(params, SKB_MAX_HEAD(hr));
}

static inline bool
mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
                    struct xdp_buff *xdp)
{
        struct page *page = virt_to_page(xdp->data);
        struct mlx5e_xmit_data_frags xdptxdf = {};
        struct mlx5e_xmit_data *xdptxd;
        struct xdp_frame *xdpf;
        dma_addr_t dma_addr;
        int i;

        xdpf = xdp_convert_buff_to_frame(xdp);
        if (unlikely(!xdpf))
                return false;

        xdptxd = &xdptxdf.xd;
        xdptxd->data = xdpf->data;
        xdptxd->len = xdpf->len;
        xdptxd->has_frags = xdp_frame_has_frags(xdpf);

        if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
                /* The xdp_buff was in the UMEM and was copied into a newly
                 * allocated page. The UMEM page was returned via the ZCA, and
                 * this new page has to be mapped at this point and has to be
                 * unmapped and returned via xdp_return_frame on completion.
                 */

                /* Prevent double recycling of the UMEM page. Even if this
                 * function returns false, the xdp_buff shouldn't be recycled,
                 * as it was already done in xdp_convert_zc_to_xdp_frame.
                 */
                __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */

                if (unlikely(xdptxd->has_frags))
                        return false;

                dma_addr = dma_map_single(sq->pdev, xdptxd->data, xdptxd->len,
                                          DMA_TO_DEVICE);
                if (dma_mapping_error(sq->pdev, dma_addr)) {
                        xdp_return_frame(xdpf);
                        return false;
                }

                xdptxd->dma_addr = dma_addr;

                if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
                                              mlx5e_xmit_xdp_frame, sq, xdptxd, 0)))
                        return false;

                /* xmit_mode == MLX5E_XDP_XMIT_MODE_FRAME */
                mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
                                     (union mlx5e_xdp_info) { .mode = MLX5E_XDP_XMIT_MODE_FRAME });
                mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
                                     (union mlx5e_xdp_info) { .frame.xdpf = xdpf });
                mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
                                     (union mlx5e_xdp_info) { .frame.dma_addr = dma_addr });
                return true;
        }

        /* The driver assumes that xdp_convert_buff_to_frame returns an
         * xdp_frame that points to the same memory region as the original
         * xdp_buff. This allows mapping the memory only once and using the
         * DMA_BIDIRECTIONAL mode.
         */

        dma_addr = page_pool_get_dma_addr(page) + (xdpf->data - (void *)xdpf);
        dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd->len, DMA_BIDIRECTIONAL);

        if (xdptxd->has_frags) {
                xdptxdf.sinfo = xdp_get_shared_info_from_frame(xdpf);
                xdptxdf.dma_arr = NULL;

                for (i = 0; i < xdptxdf.sinfo->nr_frags; i++) {
                        skb_frag_t *frag = &xdptxdf.sinfo->frags[i];
                        dma_addr_t addr;
                        u32 len;

                        addr = page_pool_get_dma_addr(skb_frag_page(frag)) +
                                skb_frag_off(frag);
                        len = skb_frag_size(frag);
                        dma_sync_single_for_device(sq->pdev, addr, len,
                                                   DMA_BIDIRECTIONAL);
                }
        }

        xdptxd->dma_addr = dma_addr;

        if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
                                      mlx5e_xmit_xdp_frame, sq, xdptxd, 0)))
                return false;

        /* xmit_mode == MLX5E_XDP_XMIT_MODE_PAGE */
        mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
                             (union mlx5e_xdp_info) { .mode = MLX5E_XDP_XMIT_MODE_PAGE });

        if (xdptxd->has_frags) {
                mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
                                     (union mlx5e_xdp_info)
                                     { .page.num = 1 + xdptxdf.sinfo->nr_frags });
                mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
                                     (union mlx5e_xdp_info) { .page.page = page });
                for (i = 0; i < xdptxdf.sinfo->nr_frags; i++) {
                        skb_frag_t *frag = &xdptxdf.sinfo->frags[i];

                        mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
                                             (union mlx5e_xdp_info)
                                             { .page.page = skb_frag_page(frag) });
                }
        } else {
                mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
                                     (union mlx5e_xdp_info) { .page.num = 1 });
                mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
                                     (union mlx5e_xdp_info) { .page.page = page });
        }

        return true;
}

static int mlx5e_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
{
        const struct mlx5e_xdp_buff *_ctx = (void *)ctx;

        if (unlikely(!mlx5e_rx_hw_stamp(_ctx->rq->tstamp)))
                return -ENODATA;

        *timestamp = mlx5e_cqe_ts_to_ns(_ctx->rq->ptp_cyc2time,
                                        _ctx->rq->clock, get_cqe_ts(_ctx->cqe));
        return 0;
}

/* Mapping HW RSS Type bits CQE_RSS_HTYPE_IP + CQE_RSS_HTYPE_L4 into 4 bits */
#define RSS_TYPE_MAX_TABLE 16 /* 4 bits, max 16 entries */
#define RSS_L4  GENMASK(1, 0)
#define RSS_L3  GENMASK(3, 2) /* Same as CQE_RSS_HTYPE_IP */

/* Valid combinations of CQE_RSS_HTYPE_IP + CQE_RSS_HTYPE_L4, sorted numerically */
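/* The 4-bit lookup key is the CQE rss_hash_type nibble: the IP bits already
 * occupy RSS_L3 (CQE_RSS_HTYPE_IP uses the same mask, see the BUILD_BUG_ON in
 * mlx5e_xdp_rx_hash()), while the L4 bits are shifted down into RSS_L4 with
 * FIELD_GET(). E.g. an IPv4/TCP completion produces the key
 * RSS_TYPE_L4_IPV4_TCP below.
 */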
enum mlx5_rss_hash_type {
        RSS_TYPE_NO_HASH        = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IP_NONE) |
                                   FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_NONE)),
        RSS_TYPE_L3_IPV4        = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
                                   FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_NONE)),
        RSS_TYPE_L4_IPV4_TCP    = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
                                   FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_TCP)),
        RSS_TYPE_L4_IPV4_UDP    = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
                                   FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_UDP)),
        RSS_TYPE_L4_IPV4_IPSEC  = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV4) |
                                   FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_IPSEC)),
        RSS_TYPE_L3_IPV6        = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
                                   FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_NONE)),
        RSS_TYPE_L4_IPV6_TCP    = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
                                   FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_TCP)),
        RSS_TYPE_L4_IPV6_UDP    = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
                                   FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_UDP)),
        RSS_TYPE_L4_IPV6_IPSEC  = (FIELD_PREP_CONST(RSS_L3, CQE_RSS_IPV6) |
                                   FIELD_PREP_CONST(RSS_L4, CQE_RSS_L4_IPSEC)),
};

/* Invalid combinations simply map to zero, so no bounds check is needed */
static const enum xdp_rss_hash_type mlx5_xdp_rss_type[RSS_TYPE_MAX_TABLE] = {
        [RSS_TYPE_NO_HASH]       = XDP_RSS_TYPE_NONE,
        [1]                      = XDP_RSS_TYPE_NONE, /* Implicit zero */
        [2]                      = XDP_RSS_TYPE_NONE, /* Implicit zero */
        [3]                      = XDP_RSS_TYPE_NONE, /* Implicit zero */
        [RSS_TYPE_L3_IPV4]       = XDP_RSS_TYPE_L3_IPV4,
        [RSS_TYPE_L4_IPV4_TCP]   = XDP_RSS_TYPE_L4_IPV4_TCP,
        [RSS_TYPE_L4_IPV4_UDP]   = XDP_RSS_TYPE_L4_IPV4_UDP,
        [RSS_TYPE_L4_IPV4_IPSEC] = XDP_RSS_TYPE_L4_IPV4_IPSEC,
        [RSS_TYPE_L3_IPV6]       = XDP_RSS_TYPE_L3_IPV6,
        [RSS_TYPE_L4_IPV6_TCP]   = XDP_RSS_TYPE_L4_IPV6_TCP,
        [RSS_TYPE_L4_IPV6_UDP]   = XDP_RSS_TYPE_L4_IPV6_UDP,
        [RSS_TYPE_L4_IPV6_IPSEC] = XDP_RSS_TYPE_L4_IPV6_IPSEC,
        [12]                     = XDP_RSS_TYPE_NONE, /* Implicit zero */
        [13]                     = XDP_RSS_TYPE_NONE, /* Implicit zero */
        [14]                     = XDP_RSS_TYPE_NONE, /* Implicit zero */
        [15]                     = XDP_RSS_TYPE_NONE, /* Implicit zero */
};

static int mlx5e_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
                             enum xdp_rss_hash_type *rss_type)
{
        const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
        const struct mlx5_cqe64 *cqe = _ctx->cqe;
        u32 hash_type, l4_type, ip_type, lookup;

        if (unlikely(!(_ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)))
                return -ENODATA;

        *hash = be32_to_cpu(cqe->rss_hash_result);

        hash_type = cqe->rss_hash_type;
        BUILD_BUG_ON(CQE_RSS_HTYPE_IP != RSS_L3); /* same mask */
        ip_type = hash_type & CQE_RSS_HTYPE_IP;
        l4_type = FIELD_GET(CQE_RSS_HTYPE_L4, hash_type);
        lookup = ip_type | l4_type;
        *rss_type = mlx5_xdp_rss_type[lookup];

        return 0;
}
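/* These callbacks back the XDP metadata kfuncs
 * (bpf_xdp_metadata_rx_timestamp() and bpf_xdp_metadata_rx_hash()), letting
 * an XDP program read the HW timestamp and RSS hash of the current packet
 * straight from the CQE.
 */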
const struct xdp_metadata_ops mlx5e_xdp_metadata_ops = {
        .xmo_rx_timestamp       = mlx5e_xdp_rx_timestamp,
        .xmo_rx_hash            = mlx5e_xdp_rx_hash,
};

/* returns true if packet was consumed by xdp */
bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
                      struct bpf_prog *prog, struct mlx5e_xdp_buff *mxbuf)
{
        struct xdp_buff *xdp = &mxbuf->xdp;
        u32 act;
        int err;

        act = bpf_prog_run_xdp(prog, xdp);
        switch (act) {
        case XDP_PASS:
                return false;
        case XDP_TX:
                if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, xdp)))
                        goto xdp_abort;
                __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
                return true;
        case XDP_REDIRECT:
                /* With XDP enabled, the page refcount is 1 at this point */
                err = xdp_do_redirect(rq->netdev, xdp, prog);
                if (unlikely(err))
                        goto xdp_abort;
                __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
                __set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
                rq->stats->xdp_redirect++;
                return true;
        default:
                bpf_warn_invalid_xdp_action(rq->netdev, prog, act);
                fallthrough;
        case XDP_ABORTED:
xdp_abort:
                trace_xdp_exception(rq->netdev, prog, act);
                fallthrough;
        case XDP_DROP:
                rq->stats->xdp_drop++;
                return true;
        }
}

static u16 mlx5e_xdpsq_get_next_pi(struct mlx5e_xdpsq *sq, u16 size)
{
        struct mlx5_wq_cyc *wq = &sq->wq;
        u16 pi, contig_wqebbs;

        pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
        if (unlikely(contig_wqebbs < size)) {
                struct mlx5e_xdp_wqe_info *wi, *edge_wi;

                wi = &sq->db.wqe_info[pi];
                edge_wi = wi + contig_wqebbs;

                /* Fill SQ frag edge with NOPs to avoid WQE wrapping two pages. */
                for (; wi < edge_wi; wi++) {
                        *wi = (struct mlx5e_xdp_wqe_info) {
                                .num_wqebbs = 1,
                                .num_pkts = 0,
                        };
                        mlx5e_post_nop(wq, sq->sqn, &sq->pc);
                }
                sq->stats->nops += contig_wqebbs;

                pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        }

        return pi;
}
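/* An MPWQE (multi-packet work queue entry) session batches several XDP_TX
 * packets into a single MLX5_OPCODE_ENHANCED_MPSW work request:
 * mlx5e_xdp_mpwqe_session_start() reserves the WQE, packets are appended as
 * data segments, and mlx5e_xdp_mpwqe_complete() fills in the control segment
 * once the final segment count is known, amortizing the per-packet control
 * overhead.
 */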
static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
{
        struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
        struct mlx5e_xdpsq_stats *stats = sq->stats;
        struct mlx5e_tx_wqe *wqe;
        u16 pi;

        pi = mlx5e_xdpsq_get_next_pi(sq, sq->max_sq_mpw_wqebbs);
        wqe = MLX5E_TX_FETCH_WQE(sq, pi);
        net_prefetchw(wqe->data);

        *session = (struct mlx5e_tx_mpwqe) {
                .wqe = wqe,
                .bytes_count = 0,
                .ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
                .pkt_count = 0,
                .inline_on = mlx5e_xdp_get_inline_state(sq, session->inline_on),
        };

        stats->mpwqe++;
}

void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq)
{
        struct mlx5_wq_cyc *wq = &sq->wq;
        struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
        struct mlx5_wqe_ctrl_seg *cseg = &session->wqe->ctrl;
        u16 ds_count = session->ds_count;
        u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[pi];

        cseg->opmod_idx_opcode =
                cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_ENHANCED_MPSW);
        cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count);

        wi->num_wqebbs = DIV_ROUND_UP(ds_count, MLX5_SEND_WQEBB_NUM_DS);
        wi->num_pkts = session->pkt_count;

        sq->pc += wi->num_wqebbs;

        sq->doorbell_cseg = cseg;

        session->wqe = NULL; /* Close session */
}

enum {
        MLX5E_XDP_CHECK_OK = 1,
        MLX5E_XDP_CHECK_START_MPWQE = 2,
};

INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
{
        if (unlikely(!sq->mpwqe.wqe)) {
                if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
                                                     sq->stop_room))) {
                        /* SQ is full, ring doorbell */
                        mlx5e_xmit_xdp_doorbell(sq);
                        sq->stats->full++;
                        return -EBUSY;
                }

                return MLX5E_XDP_CHECK_START_MPWQE;
        }

        return MLX5E_XDP_CHECK_OK;
}

INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
                     int check_result);

INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
                           int check_result)
{
        struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
        struct mlx5e_xdpsq_stats *stats = sq->stats;
        struct mlx5e_xmit_data *p = xdptxd;
        struct mlx5e_xmit_data tmp;

        if (xdptxd->has_frags) {
                struct mlx5e_xmit_data_frags *xdptxdf =
                        container_of(xdptxd, struct mlx5e_xmit_data_frags, xd);

                if (!!xdptxd->len + xdptxdf->sinfo->nr_frags > 1) {
                        /* MPWQE is enabled, but a multi-buffer packet is queued for
                         * transmission. MPWQE can't send fragmented packets, so close
                         * the current session and fall back to a regular WQE.
                         */
                        if (unlikely(sq->mpwqe.wqe))
                                mlx5e_xdp_mpwqe_complete(sq);
                        return mlx5e_xmit_xdp_frame(sq, xdptxd, 0);
                }
                if (!xdptxd->len) {
                        skb_frag_t *frag = &xdptxdf->sinfo->frags[0];

                        tmp.data = skb_frag_address(frag);
                        tmp.len = skb_frag_size(frag);
                        tmp.dma_addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[0] :
                                page_pool_get_dma_addr(skb_frag_page(frag)) +
                                skb_frag_off(frag);
                        p = &tmp;
                }
        }

        if (unlikely(p->len > sq->hw_mtu)) {
                stats->err++;
                return false;
        }

        if (!check_result)
                check_result = mlx5e_xmit_xdp_frame_check_mpwqe(sq);
        if (unlikely(check_result < 0))
                return false;

        if (check_result == MLX5E_XDP_CHECK_START_MPWQE) {
                /* Start the session when nothing can fail, so it's guaranteed
                 * that if there is an active session, it has at least one dseg,
                 * and it's safe to complete it at any time.
                 */
                mlx5e_xdp_mpwqe_session_start(sq);
        }

        mlx5e_xdp_mpwqe_add_dseg(sq, p, stats);

        if (unlikely(mlx5e_xdp_mpwqe_is_full(session, sq->max_sq_mpw_wqebbs)))
                mlx5e_xdp_mpwqe_complete(sq);

        stats->xmit++;
        return true;
}
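/* stop_room is the number of WQEBBs that must remain free between the
 * consumer and producer counters before another descriptor is posted; the
 * multi-fragment path below reserves enough room for the largest WQE it can
 * build, including possible NOP padding at the ring edge.
 */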
static int mlx5e_xmit_xdp_frame_check_stop_room(struct mlx5e_xdpsq *sq, int stop_room)
{
        if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, stop_room))) {
                /* SQ is full, ring doorbell */
                mlx5e_xmit_xdp_doorbell(sq);
                sq->stats->full++;
                return -EBUSY;
        }

        return MLX5E_XDP_CHECK_OK;
}

INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
{
        return mlx5e_xmit_xdp_frame_check_stop_room(sq, 1);
}

INDIRECT_CALLABLE_SCOPE bool
mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
                     int check_result)
{
        struct mlx5e_xmit_data_frags *xdptxdf =
                container_of(xdptxd, struct mlx5e_xmit_data_frags, xd);
        struct mlx5_wq_cyc *wq = &sq->wq;
        struct mlx5_wqe_ctrl_seg *cseg;
        struct mlx5_wqe_data_seg *dseg;
        struct mlx5_wqe_eth_seg *eseg;
        struct mlx5e_tx_wqe *wqe;

        dma_addr_t dma_addr = xdptxd->dma_addr;
        u32 dma_len = xdptxd->len;
        u16 ds_cnt, inline_hdr_sz;
        u8 num_wqebbs = 1;
        int num_frags = 0;
        bool inline_ok;
        bool linear;
        u16 pi;

        struct mlx5e_xdpsq_stats *stats = sq->stats;

        inline_ok = sq->min_inline_mode == MLX5_INLINE_MODE_NONE ||
                dma_len >= MLX5E_XDP_MIN_INLINE;

        if (unlikely(!inline_ok || sq->hw_mtu < dma_len)) {
                stats->err++;
                return false;
        }

        inline_hdr_sz = 0;
        if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE)
                inline_hdr_sz = MLX5E_XDP_MIN_INLINE;

        linear = !!(dma_len - inline_hdr_sz);
        ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + linear + !!inline_hdr_sz;

        /* check_result must be 0 if sinfo is passed. */
        if (!check_result) {
                int stop_room = 1;

                if (xdptxd->has_frags) {
                        ds_cnt += xdptxdf->sinfo->nr_frags;
                        num_frags = xdptxdf->sinfo->nr_frags;
                        num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
                        /* Assuming MLX5_CAP_GEN(mdev, max_wqe_sz_sq) is big
                         * enough to hold all fragments.
                         */
                        stop_room = MLX5E_STOP_ROOM(num_wqebbs);
                }

                check_result = mlx5e_xmit_xdp_frame_check_stop_room(sq, stop_room);
        }
        if (unlikely(check_result < 0))
                return false;

        pi = mlx5e_xdpsq_get_next_pi(sq, num_wqebbs);
        wqe = mlx5_wq_cyc_get_wqe(wq, pi);
        net_prefetchw(wqe);

        cseg = &wqe->ctrl;
        eseg = &wqe->eth;
        dseg = wqe->data;

        /* copy the inline part if required */
        if (inline_hdr_sz) {
                memcpy(eseg->inline_hdr.start, xdptxd->data, sizeof(eseg->inline_hdr.start));
                memcpy(dseg, xdptxd->data + sizeof(eseg->inline_hdr.start),
                       inline_hdr_sz - sizeof(eseg->inline_hdr.start));
                dma_len -= inline_hdr_sz;
                dma_addr += inline_hdr_sz;
                dseg++;
        }

        /* write the dma part */
        if (linear) {
                dseg->addr = cpu_to_be64(dma_addr);
                dseg->byte_count = cpu_to_be32(dma_len);
                dseg->lkey = sq->mkey_be;
                dseg++;
        }

        cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);

        if (test_bit(MLX5E_SQ_STATE_XDP_MULTIBUF, &sq->state)) {
                int i;

                memset(&cseg->trailer, 0, sizeof(cseg->trailer));
                memset(eseg, 0, sizeof(*eseg) - sizeof(eseg->trailer));

                eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);

                for (i = 0; i < num_frags; i++) {
                        skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
                        dma_addr_t addr;

                        addr = xdptxdf->dma_arr ? xdptxdf->dma_arr[i] :
                                page_pool_get_dma_addr(skb_frag_page(frag)) +
                                skb_frag_off(frag);

                        dseg->addr = cpu_to_be64(addr);
                        dseg->byte_count = cpu_to_be32(skb_frag_size(frag));
                        dseg->lkey = sq->mkey_be;
                        dseg++;
                }

                cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);

                sq->db.wqe_info[pi] = (struct mlx5e_xdp_wqe_info) {
                        .num_wqebbs = num_wqebbs,
                        .num_pkts = 1,
                };

                sq->pc += num_wqebbs;
        } else {
                cseg->fm_ce_se = 0;

                sq->pc++;
        }

        sq->doorbell_cseg = cseg;

        stats->xmit++;
        return true;
}
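/* Completion side of the xdpi FIFO: entries are popped in exactly the order
 * the xmit paths pushed them (the mode entry first, then the per-mode payload
 * entries), so the switch below must stay in sync with mlx5e_xmit_xdp_buff()
 * and mlx5e_xdp_xmit().
 */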
static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
                                  struct mlx5e_xdp_wqe_info *wi,
                                  u32 *xsk_frames,
                                  struct xdp_frame_bulk *bq)
{
        struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
        u16 i;

        for (i = 0; i < wi->num_pkts; i++) {
                union mlx5e_xdp_info xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);

                switch (xdpi.mode) {
                case MLX5E_XDP_XMIT_MODE_FRAME: {
                        /* XDP_TX from the XSK RQ and XDP_REDIRECT */
                        struct xdp_frame *xdpf;
                        dma_addr_t dma_addr;

                        xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
                        xdpf = xdpi.frame.xdpf;
                        xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
                        dma_addr = xdpi.frame.dma_addr;

                        dma_unmap_single(sq->pdev, dma_addr,
                                         xdpf->len, DMA_TO_DEVICE);
                        if (xdp_frame_has_frags(xdpf)) {
                                struct skb_shared_info *sinfo;
                                int j;

                                sinfo = xdp_get_shared_info_from_frame(xdpf);
                                for (j = 0; j < sinfo->nr_frags; j++) {
                                        skb_frag_t *frag = &sinfo->frags[j];

                                        xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
                                        dma_addr = xdpi.frame.dma_addr;

                                        dma_unmap_single(sq->pdev, dma_addr,
                                                         skb_frag_size(frag), DMA_TO_DEVICE);
                                }
                        }
                        xdp_return_frame_bulk(xdpf, bq);
                        break;
                }
                case MLX5E_XDP_XMIT_MODE_PAGE: {
                        /* XDP_TX from the regular RQ */
                        u8 num, n = 0;

                        xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
                        num = xdpi.page.num;

                        do {
                                struct page *page;

                                xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
                                page = xdpi.page.page;

                                /* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE)
                                 * as we know this is a page_pool page.
                                 */
                                page_pool_put_defragged_page(page->pp,
                                                             page, -1, true);
                        } while (++n < num);

                        break;
                }
                case MLX5E_XDP_XMIT_MODE_XSK:
                        /* AF_XDP send */
                        (*xsk_frames)++;
                        break;
                default:
                        WARN_ON_ONCE(true);
                }
        }
}

bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
{
        struct xdp_frame_bulk bq;
        struct mlx5e_xdpsq *sq;
        struct mlx5_cqe64 *cqe;
        u32 xsk_frames = 0;
        u16 sqcc;
        int i;

        xdp_frame_bulk_init(&bq);

        sq = container_of(cq, struct mlx5e_xdpsq, cq);

        if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
                return false;

        cqe = mlx5_cqwq_get_cqe(&cq->wq);
        if (!cqe)
                return false;

        /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
         * otherwise a cq overrun may occur
         */
        sqcc = sq->cc;

        i = 0;
        do {
                struct mlx5e_xdp_wqe_info *wi;
                u16 wqe_counter, ci;
                bool last_wqe;

                mlx5_cqwq_pop(&cq->wq);

                wqe_counter = be16_to_cpu(cqe->wqe_counter);

                do {
                        last_wqe = (sqcc == wqe_counter);
                        ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
                        wi = &sq->db.wqe_info[ci];

                        sqcc += wi->num_wqebbs;

                        mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq);
                } while (!last_wqe);

                if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
                        netdev_WARN_ONCE(sq->channel->netdev,
                                         "Bad OP in XDPSQ CQE: 0x%x\n",
                                         get_cqe_opcode(cqe));
                        mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
                                             (struct mlx5_err_cqe *)cqe);
                        mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
                }
        } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

        xdp_flush_frame_bulk(&bq);

        if (xsk_frames)
                xsk_tx_completed(sq->xsk_pool, xsk_frames);

        sq->stats->cqes += i;

        mlx5_cqwq_update_db_record(&cq->wq);

        /* ensure cq space is freed before enabling more cqes */
        wmb();

        sq->cc = sqcc;
        return (i == MLX5E_TX_CQ_POLL_BUDGET);
}

void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
{
        struct xdp_frame_bulk bq;
        u32 xsk_frames = 0;

        xdp_frame_bulk_init(&bq);

        rcu_read_lock(); /* needed for xdp_return_frame_bulk */

        while (sq->cc != sq->pc) {
                struct mlx5e_xdp_wqe_info *wi;
                u16 ci;

                ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
                wi = &sq->db.wqe_info[ci];

                sq->cc += wi->num_wqebbs;

                mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq);
        }

        xdp_flush_frame_bulk(&bq);
        rcu_read_unlock();

        if (xsk_frames)
                xsk_tx_completed(sq->xsk_pool, xsk_frames);
}
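/* .ndo_xdp_xmit handler: unlike XDP_TX, frames arriving here were redirected
 * from another device, so the linear part and every fragment must first be
 * DMA-mapped to this device. On failure the mappings done so far are
 * unwound, and the function returns the number of frames actually queued;
 * the caller is responsible for freeing the rest.
 */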
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
                   u32 flags)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_xdpsq *sq;
        int nxmit = 0;
        int sq_num;
        int i;

        /* this flag is sufficient, no need to test internal sq state */
        if (unlikely(!mlx5e_xdp_tx_is_enabled(priv)))
                return -ENETDOWN;

        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
                return -EINVAL;

        sq_num = smp_processor_id();

        if (unlikely(sq_num >= priv->channels.num))
                return -ENXIO;

        sq = &priv->channels.c[sq_num]->xdpsq;

        for (i = 0; i < n; i++) {
                struct mlx5e_xmit_data_frags xdptxdf = {};
                struct xdp_frame *xdpf = frames[i];
                dma_addr_t dma_arr[MAX_SKB_FRAGS];
                struct mlx5e_xmit_data *xdptxd;
                bool ret;

                xdptxd = &xdptxdf.xd;
                xdptxd->data = xdpf->data;
                xdptxd->len = xdpf->len;
                xdptxd->has_frags = xdp_frame_has_frags(xdpf);
                xdptxd->dma_addr = dma_map_single(sq->pdev, xdptxd->data,
                                                  xdptxd->len, DMA_TO_DEVICE);

                if (unlikely(dma_mapping_error(sq->pdev, xdptxd->dma_addr)))
                        break;

                if (xdptxd->has_frags) {
                        int j;

                        xdptxdf.sinfo = xdp_get_shared_info_from_frame(xdpf);
                        xdptxdf.dma_arr = dma_arr;
                        for (j = 0; j < xdptxdf.sinfo->nr_frags; j++) {
                                skb_frag_t *frag = &xdptxdf.sinfo->frags[j];

                                dma_arr[j] = dma_map_single(sq->pdev, skb_frag_address(frag),
                                                            skb_frag_size(frag), DMA_TO_DEVICE);

                                if (!dma_mapping_error(sq->pdev, dma_arr[j]))
                                        continue;
                                /* mapping error */
                                while (--j >= 0)
                                        dma_unmap_single(sq->pdev, dma_arr[j],
                                                         skb_frag_size(&xdptxdf.sinfo->frags[j]),
                                                         DMA_TO_DEVICE);
                                goto out;
                        }
                }

                ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
                                      mlx5e_xmit_xdp_frame, sq, xdptxd, 0);
                if (unlikely(!ret)) {
                        int j;

                        dma_unmap_single(sq->pdev, xdptxd->dma_addr,
                                         xdptxd->len, DMA_TO_DEVICE);
                        if (!xdptxd->has_frags)
                                break;
                        for (j = 0; j < xdptxdf.sinfo->nr_frags; j++)
                                dma_unmap_single(sq->pdev, dma_arr[j],
                                                 skb_frag_size(&xdptxdf.sinfo->frags[j]),
                                                 DMA_TO_DEVICE);
                        break;
                }

                /* xmit_mode == MLX5E_XDP_XMIT_MODE_FRAME */
                mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
                                     (union mlx5e_xdp_info) { .mode = MLX5E_XDP_XMIT_MODE_FRAME });
                mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
                                     (union mlx5e_xdp_info) { .frame.xdpf = xdpf });
                mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
                                     (union mlx5e_xdp_info) { .frame.dma_addr = xdptxd->dma_addr });
                if (xdptxd->has_frags) {
                        int j;

                        for (j = 0; j < xdptxdf.sinfo->nr_frags; j++)
                                mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo,
                                                     (union mlx5e_xdp_info)
                                                     { .frame.dma_addr = dma_arr[j] });
                }
                nxmit++;
        }

out:
        if (flags & XDP_XMIT_FLUSH) {
                if (sq->mpwqe.wqe)
                        mlx5e_xdp_mpwqe_complete(sq);
                mlx5e_xmit_xdp_doorbell(sq);
        }

        return nxmit;
}

void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq)
{
        struct mlx5e_xdpsq *xdpsq = rq->xdpsq;

        if (xdpsq->mpwqe.wqe)
                mlx5e_xdp_mpwqe_complete(xdpsq);

        mlx5e_xmit_xdp_doorbell(xdpsq);

        if (test_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags)) {
                xdp_do_flush_map();
                __clear_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
        }
}

void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw)
{
        sq->xmit_xdp_frame_check = is_mpw ?
                mlx5e_xmit_xdp_frame_check_mpwqe : mlx5e_xmit_xdp_frame_check;
        sq->xmit_xdp_frame = is_mpw ?
                mlx5e_xmit_xdp_frame_mpwqe : mlx5e_xmit_xdp_frame;
}