/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include "en.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec_rxtx.h"

#define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
			    MLX5E_SQ_NOPS_ROOM)

static inline void mlx5e_tx_dma_unmap(struct device *pdev,
				      struct mlx5e_sq_dma *dma)
{
	switch (dma->type) {
	case MLX5E_DMA_MAP_SINGLE:
		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	case MLX5E_DMA_MAP_PAGE:
		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	default:
		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
	}
}

static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq,
				  dma_addr_t addr,
				  u32 size,
				  enum mlx5e_dma_map_type map_type)
{
	u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask;

	sq->db.dma_fifo[i].addr = addr;
	sq->db.dma_fifo[i].size = size;
	sq->db.dma_fifo[i].type = map_type;
	sq->dma_fifo_pc++;
}

static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
	return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}

static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
	int i;

	for (i = 0; i < num_dma; i++) {
		struct mlx5e_sq_dma *last_pushed_dma =
			mlx5e_dma_get(sq, --sq->dma_fifo_pc);

		mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
	}
}

u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       void *accel_priv, select_queue_fallback_t fallback)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int channel_ix = fallback(dev, skb);
	u16 num_channels;
	int up = 0;

	if (!netdev_get_num_tc(dev))
		return channel_ix;

	if (skb_vlan_tag_present(skb))
		up = skb->vlan_tci >> VLAN_PRIO_SHIFT;

	/* channel_ix can be larger than num_channels since
	 * dev->num_real_tx_queues = num_channels * num_tc
	 */
	num_channels = priv->channels.params.num_channels;
	if (channel_ix >= num_channels)
		channel_ix = reciprocal_scale(channel_ix, num_channels);

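	/* Combine the selected channel with the skb priority (traffic class)
	 * to pick the final txq.
	 */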
	return priv->channel_tc2txq[channel_ix][up];
}

static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

	return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}

static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
{
	struct flow_keys keys;

	if (skb_transport_header_was_set(skb))
		return skb_transport_offset(skb);
	else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
		return keys.control.thoff;
	else
		return mlx5e_skb_l2_header_offset(skb);
}

static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
					struct sk_buff *skb)
{
	u16 hlen;

	switch (mode) {
	case MLX5_INLINE_MODE_NONE:
		return 0;
	case MLX5_INLINE_MODE_TCP_UDP:
		hlen = eth_get_headlen(skb->data, skb_headlen(skb));
		if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
			hlen += VLAN_HLEN;
		break;
	case MLX5_INLINE_MODE_IP:
		/* When transport header is set to zero, it means no transport
		 * header. When transport header is set to 0xff's, it means
		 * transport header wasn't set.
		 */
		if (skb_transport_offset(skb)) {
			hlen = mlx5e_skb_l3_header_offset(skb);
			break;
		}
		/* fall through */
	case MLX5_INLINE_MODE_L2:
	default:
		hlen = mlx5e_skb_l2_header_offset(skb);
	}
	return min_t(u16, hlen, skb->len);
}

static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
					    unsigned int *skb_len,
					    unsigned int len)
{
	*skb_len -= len;
	*skb_data += len;
}

static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs,
				     unsigned char **skb_data,
				     unsigned int *skb_len)
{
	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
	int cpy1_sz = 2 * ETH_ALEN;
	int cpy2_sz = ihs - cpy1_sz;

	memcpy(vhdr, *skb_data, cpy1_sz);
	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz);
	vhdr->h_vlan_proto = skb->vlan_proto;
	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
	memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz);
	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
}

static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
{
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
		if (skb->encapsulation) {
			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
					  MLX5_ETH_WQE_L4_INNER_CSUM;
			sq->stats.csum_partial_inner++;
		} else {
			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
			sq->stats.csum_partial++;
		}
	} else
		sq->stats.csum_none++;
}

static inline u16
mlx5e_txwqe_build_eseg_gso(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			   struct mlx5_wqe_eth_seg *eseg, unsigned int *num_bytes)
{
	u16 ihs;

	eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size);

	if (skb->encapsulation) {
		ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
		sq->stats.tso_inner_packets++;
		sq->stats.tso_inner_bytes += skb->len - ihs;
	} else {
		ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
		sq->stats.tso_packets++;
		sq->stats.tso_bytes += skb->len - ihs;
	}

	*num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
	return ihs;
}

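/* DMA-map the skb linear part and its page fragments, filling one data
 * segment per mapping. Returns the number of mappings, or -ENOMEM on a
 * mapping failure.
 */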
static inline int
mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			unsigned char *skb_data, u16 headlen,
			struct mlx5_wqe_data_seg *dseg)
{
	dma_addr_t dma_addr = 0;
	u8 num_dma = 0;
	int i;

	if (headlen) {
		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			return -ENOMEM;

		dseg->addr = cpu_to_be64(dma_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(headlen);

		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
		num_dma++;
		dseg++;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int fsz = skb_frag_size(frag);

		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			return -ENOMEM;

		dseg->addr = cpu_to_be64(dma_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(fsz);

		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
		num_dma++;
		dseg++;
	}

	return num_dma;
}

static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		     u8 opcode, u16 ds_cnt, u32 num_bytes, u8 num_dma,
		     struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi;

	wi->num_bytes = num_bytes;
	wi->num_dma = num_dma;
	wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	wi->skb = skb;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);

	netdev_tx_sent_queue(sq->txq, num_bytes);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	sq->pc += wi->num_wqebbs;
	if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM))) {
		netif_tx_stop_queue(sq->txq);
		sq->stats.stopped++;
	}

	if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);

	/* fill sq edge with nops to avoid wqe wrap around */
	while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
		sq->db.wqe_info[pi].skb = NULL;
		mlx5e_post_nop(wq, sq->sqn, &sq->pc);
		sq->stats.nop++;
	}
}

static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
				 struct mlx5e_tx_wqe *wqe, u16 pi)
{
	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_eth_seg *eseg = &wqe->eth;

	unsigned char *skb_data = skb->data;
	unsigned int skb_len = skb->len;
	u8 opcode = MLX5_OPCODE_SEND;
	unsigned int num_bytes;
	int num_dma;
	u16 headlen;
	u16 ds_cnt;
	u16 ihs;

	mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

	if (skb_is_gso(skb)) {
		opcode = MLX5_OPCODE_LSO;
		ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes);
		sq->stats.packets += skb_shinfo(skb)->gso_segs;
	} else {
		ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
		sq->stats.packets++;
	}
	sq->stats.bytes += num_bytes;
	sq->stats.xmit_more += skb->xmit_more;

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
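	/* Inline the packet headers into the eth segment. For VLAN-tagged skbs
	 * the tag is rebuilt into the inlined copy; otherwise the headers are
	 * copied as-is and pulled from the data that will be DMA-mapped.
	 */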
	if (ihs) {
		if (skb_vlan_tag_present(skb)) {
			mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len);
			ihs += VLAN_HLEN;
		} else {
			memcpy(eseg->inline_hdr.start, skb_data, ihs);
			mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
		}
		eseg->inline_hdr.sz = cpu_to_be16(ihs);
		ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
	} else if (skb_vlan_tag_present(skb)) {
		eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
		eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
	}

	headlen = skb_len - skb->data_len;
	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
					  (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
	if (unlikely(num_dma < 0))
		goto dma_unmap_wqe_err;

	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
			     num_bytes, num_dma, wi, cseg);

	return NETDEV_TX_OK;

dma_unmap_wqe_err:
	sq->stats.dropped++;
	mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);

	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_txqsq *sq = priv->txq2sq[skb_get_queue_mapping(skb)];
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);

	memset(wqe, 0, sizeof(*wqe));

#ifdef CONFIG_MLX5_EN_IPSEC
	if (sq->state & BIT(MLX5E_SQ_STATE_IPSEC)) {
		skb = mlx5e_ipsec_handle_tx_skb(dev, wqe, skb);
		if (unlikely(!skb))
			return NETDEV_TX_OK;
	}
#endif

	return mlx5e_sq_xmit(sq, skb, wqe, pi);
}

bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
	struct mlx5e_txqsq *sq;
	struct mlx5_cqe64 *cqe;
	u32 dma_fifo_cc;
	u32 nbytes;
	u16 npkts;
	u16 sqcc;
	int i;

	sq = container_of(cq, struct mlx5e_txqsq, cq);

	if (unlikely(!MLX5E_TEST_BIT(sq->state, MLX5E_SQ_STATE_ENABLED)))
		return false;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return false;

	npkts = 0;
	nbytes = 0;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	/* avoid dirtying sq cache line every cqe */
	dma_fifo_cc = sq->dma_fifo_cc;

	i = 0;
	do {
		u16 wqe_counter;
		bool last_wqe;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			struct mlx5e_tx_wqe_info *wi;
			struct sk_buff *skb;
			u16 ci;
			int j;

			last_wqe = (sqcc == wqe_counter);

			ci = sqcc & sq->wq.sz_m1;
			wi = &sq->db.wqe_info[ci];
			skb = wi->skb;

			if (unlikely(!skb)) { /* nop */
				sqcc++;
				continue;
			}

			if (unlikely(skb_shinfo(skb)->tx_flags &
				     SKBTX_HW_TSTAMP)) {
				struct skb_shared_hwtstamps hwts = {};

				mlx5e_fill_hwstamp(sq->tstamp,
						   get_cqe_ts(cqe), &hwts);
				skb_tstamp_tx(skb, &hwts);
			}

			for (j = 0; j < wi->num_dma; j++) {
				struct mlx5e_sq_dma *dma =
					mlx5e_dma_get(sq, dma_fifo_cc++);

				mlx5e_tx_dma_unmap(sq->pdev, dma);
			}

			npkts++;
			nbytes += wi->num_bytes;
			sqcc += wi->num_wqebbs;
			napi_consume_skb(skb, napi_budget);
		} while (!last_wqe);

	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

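	/* Re-enable the txq once enough WQEBBs have been freed for a
	 * maximum-size WQE plus the NOP fill room (MLX5E_SQ_STOP_ROOM).
	 */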
	if (netif_tx_queue_stopped(sq->txq) &&
	    mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM)) {
		netif_tx_wake_queue(sq->txq);
		sq->stats.wake++;
	}

	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}

void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
	struct mlx5e_tx_wqe_info *wi;
	struct sk_buff *skb;
	u16 ci;
	int i;

	while (sq->cc != sq->pc) {
		ci = sq->cc & sq->wq.sz_m1;
		wi = &sq->db.wqe_info[ci];
		skb = wi->skb;

		if (!skb) { /* nop */
			sq->cc++;
			continue;
		}

		for (i = 0; i < wi->num_dma; i++) {
			struct mlx5e_sq_dma *dma =
				mlx5e_dma_get(sq, sq->dma_fifo_cc++);

			mlx5e_tx_dma_unmap(sq->pdev, dma);
		}

		dev_kfree_skb_any(skb);
		sq->cc += wi->num_wqebbs;
	}
}

#ifdef CONFIG_MLX5_CORE_IPOIB

struct mlx5_wqe_eth_pad {
	u8 rsvd0[16];
};

struct mlx5i_tx_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_datagram_seg datagram;
	struct mlx5_wqe_eth_pad pad;
	struct mlx5_wqe_eth_seg eth;
};

static inline void
mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
			   struct mlx5_wqe_datagram_seg *dseg)
{
	memcpy(&dseg->av, av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(dqpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}

netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			  struct mlx5_av *av, u32 dqpn, u32 dqkey)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5i_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_datagram_seg *datagram = &wqe->datagram;
	struct mlx5_wqe_eth_seg *eseg = &wqe->eth;

	unsigned char *skb_data = skb->data;
	unsigned int skb_len = skb->len;
	u8 opcode = MLX5_OPCODE_SEND;
	unsigned int num_bytes;
	int num_dma;
	u16 headlen;
	u16 ds_cnt;
	u16 ihs;

	memset(wqe, 0, sizeof(*wqe));

	mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);

	mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

	if (skb_is_gso(skb)) {
		opcode = MLX5_OPCODE_LSO;
		ihs = mlx5e_txwqe_build_eseg_gso(sq, skb, eseg, &num_bytes);
		sq->stats.packets += skb_shinfo(skb)->gso_segs;
	} else {
		ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
		sq->stats.packets++;
	}

	sq->stats.bytes += num_bytes;
	sq->stats.xmit_more += skb->xmit_more;

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (ihs) {
		memcpy(eseg->inline_hdr.start, skb_data, ihs);
		mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
		eseg->inline_hdr.sz = cpu_to_be16(ihs);
		ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
	}

	headlen = skb_len - skb->data_len;
	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
					  (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
	if (unlikely(num_dma < 0))
		goto dma_unmap_wqe_err;

	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
			     num_bytes, num_dma, wi, cseg);

	return NETDEV_TX_OK;

dma_unmap_wqe_err:
	sq->stats.dropped++;
	mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);

	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

#endif