/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/geneve.h>
#include <net/dsfield.h>
#include "en.h"
#include "en/txrx.h"
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
#include "en_accel/ktls.h"
#include "lib/clock.h"

static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
	int i;

	for (i = 0; i < num_dma; i++) {
		struct mlx5e_sq_dma *last_pushed_dma =
			mlx5e_dma_get(sq, --sq->dma_fifo_pc);

		mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
	}
}

#ifdef CONFIG_MLX5_CORE_EN_DCB
static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
{
	int dscp_cp = 0;

	if (skb->protocol == htons(ETH_P_IP))
		dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
	else if (skb->protocol == htons(ETH_P_IPV6))
		dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;

	return priv->dcbx_dp.dscp2prio[dscp_cp];
}
#endif

u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       struct net_device *sb_dev)
{
	int txq_ix = netdev_pick_tx(dev, skb, NULL);
	struct mlx5e_priv *priv = netdev_priv(dev);
	int up = 0;
	int ch_ix;

	if (!netdev_get_num_tc(dev))
		return txq_ix;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
		up = mlx5e_get_dscp_up(priv, skb);
	else
#endif
		if (skb_vlan_tag_present(skb))
			up = skb_vlan_tag_get_prio(skb);

	/* Normalize any picked txq_ix to [0, num_channels),
	 * so we can return a txq_ix that matches the channel and
	 * packet UP.
	 */
	ch_ix = priv->txq2sq[txq_ix]->ch_ix;

	return priv->channel_tc2realtxq[ch_ix][up];
}
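
/* Helpers used by mlx5e_calc_min_inline() to decide how many packet header
 * bytes must be copied (inlined) into the WQE for the SQ's min-inline mode.
 */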
static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

	return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}

static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
{
	if (skb_transport_header_was_set(skb))
		return skb_transport_offset(skb);
	else
		return mlx5e_skb_l2_header_offset(skb);
}

static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
					struct sk_buff *skb)
{
	u16 hlen;

	switch (mode) {
	case MLX5_INLINE_MODE_NONE:
		return 0;
	case MLX5_INLINE_MODE_TCP_UDP:
		hlen = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
		if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
			hlen += VLAN_HLEN;
		break;
	case MLX5_INLINE_MODE_IP:
		hlen = mlx5e_skb_l3_header_offset(skb);
		break;
	case MLX5_INLINE_MODE_L2:
	default:
		hlen = mlx5e_skb_l2_header_offset(skb);
	}
	return min_t(u16, hlen, skb_headlen(skb));
}

static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
{
	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
	int cpy1_sz = 2 * ETH_ALEN;
	int cpy2_sz = ihs - cpy1_sz;

	memcpy(vhdr, skb->data, cpy1_sz);
	vhdr->h_vlan_proto = skb->vlan_proto;
	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
	memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
}

static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
{
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
		if (skb->encapsulation) {
			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
					  MLX5_ETH_WQE_L4_INNER_CSUM;
			sq->stats->csum_partial_inner++;
		} else {
			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
			sq->stats->csum_partial++;
		}
	} else
		sq->stats->csum_none++;
}

static inline u16
mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
{
	struct mlx5e_sq_stats *stats = sq->stats;
	u16 ihs;

	if (skb->encapsulation) {
		ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
		stats->tso_inner_packets++;
		stats->tso_inner_bytes += skb->len - ihs;
	} else {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
			ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
		else
			ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
		stats->tso_packets++;
		stats->tso_bytes += skb->len - ihs;
	}

	return ihs;
}
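
/* Map the skb linear part and page fragments for DMA and fill the WQE data
 * segments.  Returns the number of mappings pushed to the DMA fifo, or
 * -ENOMEM on a mapping error (already-mapped entries are unwound).
 */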
static inline int
mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			unsigned char *skb_data, u16 headlen,
			struct mlx5_wqe_data_seg *dseg)
{
	dma_addr_t dma_addr = 0;
	u8 num_dma = 0;
	int i;

	if (headlen) {
		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr = cpu_to_be64(dma_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(headlen);

		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
		num_dma++;
		dseg++;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int fsz = skb_frag_size(frag);

		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr = cpu_to_be64(dma_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(fsz);

		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
		num_dma++;
		dseg++;
	}

	return num_dma;

dma_unmap_wqe_err:
	mlx5e_dma_unmap_wqe_err(sq, num_dma);
	return -ENOMEM;
}

static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		     u8 opcode, u16 ds_cnt, u8 num_wqebbs, u32 num_bytes, u8 num_dma,
		     struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg,
		     bool xmit_more)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	bool send_doorbell;

	wi->num_bytes = num_bytes;
	wi->num_dma = num_dma;
	wi->num_wqebbs = num_wqebbs;
	wi->skb = skb;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	sq->pc += wi->num_wqebbs;
	if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, sq->stop_room))) {
		netif_tx_stop_queue(sq->txq);
		sq->stats->stopped++;
	}

	send_doorbell = __netdev_tx_sent_queue(sq->txq, num_bytes,
					       xmit_more);
	if (send_doorbell)
		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
}

netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			  struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_eth_seg *eseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe_info *wi;

	struct mlx5e_sq_stats *stats = sq->stats;
	u16 headlen, ihs, contig_wqebbs_room;
	u16 ds_cnt, ds_cnt_inl = 0;
	u8 num_wqebbs, opcode;
	u32 num_bytes;
	int num_dma;
	__be16 mss;

	/* Calc ihs and ds cnt, no writes to wqe yet */
	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (skb_is_gso(skb)) {
		opcode = MLX5_OPCODE_LSO;
		mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		ihs = mlx5e_tx_get_gso_ihs(sq, skb);
		num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
		stats->packets += skb_shinfo(skb)->gso_segs;
	} else {
		u8 mode = mlx5e_tx_wqe_inline_mode(sq, &wqe->ctrl, skb);

		opcode = MLX5_OPCODE_SEND;
		mss = 0;
		ihs = mlx5e_calc_min_inline(mode, skb);
		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
		stats->packets++;
	}

	stats->bytes += num_bytes;
	stats->xmit_more += xmit_more;

	headlen = skb->len - ihs - skb->data_len;
	ds_cnt += !!headlen;
	ds_cnt += skb_shinfo(skb)->nr_frags;
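
	/* Inlined headers consume extra data segments: the first
	 * INL_HDR_START_SZ bytes fit in the eth segment itself, the rest is
	 * rounded up to whole MLX5_SEND_WQE_DS-byte segments.
	 */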
	if (ihs) {
		ihs += !!skb_vlan_tag_present(skb) * VLAN_HLEN;

		ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
		ds_cnt += ds_cnt_inl;
	}

	num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs_room < num_wqebbs)) {
#ifdef CONFIG_MLX5_EN_IPSEC
		struct mlx5_wqe_eth_seg cur_eth = wqe->eth;
#endif
#ifdef CONFIG_MLX5_EN_TLS
		struct mlx5_wqe_ctrl_seg cur_ctrl = wqe->ctrl;
#endif
		mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
		wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
#ifdef CONFIG_MLX5_EN_IPSEC
		wqe->eth = cur_eth;
#endif
#ifdef CONFIG_MLX5_EN_TLS
		wqe->ctrl = cur_ctrl;
#endif
	}

	/* fill wqe */
	wi = &sq->db.wqe_info[pi];
	cseg = &wqe->ctrl;
	eseg = &wqe->eth;
	dseg = wqe->data;

#if IS_ENABLED(CONFIG_GENEVE)
	if (skb->encapsulation)
		mlx5e_tx_tunnel_accel(skb, eseg);
#endif
	mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

	eseg->mss = mss;

	if (ihs) {
		eseg->inline_hdr.sz = cpu_to_be16(ihs);
		if (skb_vlan_tag_present(skb)) {
			ihs -= VLAN_HLEN;
			mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs);
			stats->added_vlan_packets++;
		} else {
			memcpy(eseg->inline_hdr.start, skb->data, ihs);
		}
		dseg += ds_cnt_inl;
	} else if (skb_vlan_tag_present(skb)) {
		eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
		if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
			eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
		eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
		stats->added_vlan_packets++;
	}

	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
	if (unlikely(num_dma < 0))
		goto err_drop;

	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
			     num_dma, wi, cseg, xmit_more);

	return NETDEV_TX_OK;

err_drop:
	stats->dropped++;
	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_tx_wqe *wqe;
	struct mlx5e_txqsq *sq;
	u16 pi;

	sq = priv->txq2sq[skb_get_queue_mapping(skb)];
	wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);

	/* might send skbs and update wqe and pi */
	skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi);
	if (unlikely(!skb))
		return NETDEV_TX_OK;

	return mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
}

static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
				 struct mlx5_err_cqe *err_cqe)
{
	struct mlx5_cqwq *wq = &sq->cq.wq;
	u32 ci;

	ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);

	netdev_err(sq->channel->netdev,
		   "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
		   sq->cq.mcq.cqn, ci, sq->sqn,
		   get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
		   err_cqe->syndrome, err_cqe->vendor_err_synd);
	mlx5_dump_err_cqe(sq->cq.mdev, err_cqe);
}

bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
	struct mlx5e_sq_stats *stats;
	struct mlx5e_txqsq *sq;
	struct mlx5_cqe64 *cqe;
	u32 dma_fifo_cc;
	u32 nbytes;
	u16 npkts;
	u16 sqcc;
	int i;

	sq = container_of(cq, struct mlx5e_txqsq, cq);

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return false;

	stats = sq->stats;

	npkts = 0;
	nbytes = 0;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	/* avoid dirtying sq cache line every cqe */
	dma_fifo_cc = sq->dma_fifo_cc;

	i = 0;
	do {
		struct mlx5e_tx_wqe_info *wi;
		u16 wqe_counter;
		bool last_wqe;
		u16 ci;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);
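
		/* A single CQE can complete several WQEs; walk and free them
		 * up to and including the one reported in cqe->wqe_counter.
		 */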
		do {
			struct sk_buff *skb;
			int j;

			last_wqe = (sqcc == wqe_counter);

			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
			wi = &sq->db.wqe_info[ci];
			skb = wi->skb;

			if (unlikely(!skb)) {
				mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
				sqcc += wi->num_wqebbs;
				continue;
			}

			if (unlikely(skb_shinfo(skb)->tx_flags &
				     SKBTX_HW_TSTAMP)) {
				struct skb_shared_hwtstamps hwts = {};

				hwts.hwtstamp =
					mlx5_timecounter_cyc2time(sq->clock,
								  get_cqe_ts(cqe));
				skb_tstamp_tx(skb, &hwts);
			}

			for (j = 0; j < wi->num_dma; j++) {
				struct mlx5e_sq_dma *dma =
					mlx5e_dma_get(sq, dma_fifo_cc++);

				mlx5e_tx_dma_unmap(sq->pdev, dma);
			}

			npkts++;
			nbytes += wi->num_bytes;
			sqcc += wi->num_wqebbs;
			napi_consume_skb(skb, napi_budget);
		} while (!last_wqe);

		if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
					      &sq->state)) {
				mlx5e_dump_error_cqe(sq,
						     (struct mlx5_err_cqe *)cqe);
				mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
				queue_work(cq->channel->priv->wq,
					   &sq->recover_work);
			}
			stats->cqe_err++;
		}

	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

	stats->cqes += i;

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

	if (netif_tx_queue_stopped(sq->txq) &&
	    mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
	    !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
		netif_tx_wake_queue(sq->txq);
		stats->wake++;
	}

	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}

void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
	struct mlx5e_tx_wqe_info *wi;
	struct sk_buff *skb;
	u32 dma_fifo_cc;
	u16 sqcc;
	u16 ci;
	int i;

	sqcc = sq->cc;
	dma_fifo_cc = sq->dma_fifo_cc;

	while (sqcc != sq->pc) {
		ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
		wi = &sq->db.wqe_info[ci];
		skb = wi->skb;

		if (!skb) {
			mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
			sqcc += wi->num_wqebbs;
			continue;
		}

		for (i = 0; i < wi->num_dma; i++) {
			struct mlx5e_sq_dma *dma =
				mlx5e_dma_get(sq, dma_fifo_cc++);

			mlx5e_tx_dma_unmap(sq->pdev, dma);
		}

		dev_kfree_skb_any(skb);
		sqcc += wi->num_wqebbs;
	}

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;
}

#ifdef CONFIG_MLX5_CORE_IPOIB
static inline void
mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
			   struct mlx5_wqe_datagram_seg *dseg)
{
	memcpy(&dseg->av, av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(dqpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}
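
/* IPoIB TX path: builds the same kind of WQE as mlx5e_sq_xmit(), with an
 * additional datagram (address vector) segment and without the VLAN and
 * tunnel offload handling.
 */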
netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			  struct mlx5_av *av, u32 dqpn, u32 dqkey,
			  bool xmit_more)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5i_tx_wqe *wqe;

	struct mlx5_wqe_datagram_seg *datagram;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_eth_seg *eseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe_info *wi;

	struct mlx5e_sq_stats *stats = sq->stats;
	u16 headlen, ihs, pi, contig_wqebbs_room;
	u16 ds_cnt, ds_cnt_inl = 0;
	u8 num_wqebbs, opcode;
	u32 num_bytes;
	int num_dma;
	__be16 mss;

	/* Calc ihs and ds cnt, no writes to wqe yet */
	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (skb_is_gso(skb)) {
		opcode = MLX5_OPCODE_LSO;
		mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		ihs = mlx5e_tx_get_gso_ihs(sq, skb);
		num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
		stats->packets += skb_shinfo(skb)->gso_segs;
	} else {
		u8 mode = mlx5e_tx_wqe_inline_mode(sq, NULL, skb);

		opcode = MLX5_OPCODE_SEND;
		mss = 0;
		ihs = mlx5e_calc_min_inline(mode, skb);
		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
		stats->packets++;
	}

	stats->bytes += num_bytes;
	stats->xmit_more += xmit_more;

	headlen = skb->len - ihs - skb->data_len;
	ds_cnt += !!headlen;
	ds_cnt += skb_shinfo(skb)->nr_frags;

	if (ihs) {
		ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
		ds_cnt += ds_cnt_inl;
	}

	num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs_room < num_wqebbs)) {
		mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	}

	mlx5i_sq_fetch_wqe(sq, &wqe, pi);

	/* fill wqe */
	wi = &sq->db.wqe_info[pi];
	cseg = &wqe->ctrl;
	datagram = &wqe->datagram;
	eseg = &wqe->eth;
	dseg = wqe->data;

	mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);

	mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

	eseg->mss = mss;

	if (ihs) {
		memcpy(eseg->inline_hdr.start, skb->data, ihs);
		eseg->inline_hdr.sz = cpu_to_be16(ihs);
		dseg += ds_cnt_inl;
	}

	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
	if (unlikely(num_dma < 0))
		goto err_drop;

	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
			     num_dma, wi, cseg, xmit_more);

	return NETDEV_TX_OK;

err_drop:
	stats->dropped++;
	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}
#endif