/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/geneve.h>
#include <net/dsfield.h>
#include "en.h"
#include "en/txrx.h"
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec_rxtx.h"
#include "en/ptp.h"

static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
	int i;

	for (i = 0; i < num_dma; i++) {
		struct mlx5e_sq_dma *last_pushed_dma =
			mlx5e_dma_get(sq, --sq->dma_fifo_pc);

		mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
	}
}

#ifdef CONFIG_MLX5_CORE_EN_DCB
static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
{
	int dscp_cp = 0;

	if (skb->protocol == htons(ETH_P_IP))
		dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
	else if (skb->protocol == htons(ETH_P_IPV6))
		dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;

	return priv->dcbx_dp.dscp2prio[dscp_cp];
}
#endif

static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int up = 0;

	if (!netdev_get_num_tc(dev))
		goto return_txq;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
		up = mlx5e_get_dscp_up(priv, skb);
	else
#endif
		if (skb_vlan_tag_present(skb))
			up = skb_vlan_tag_get_prio(skb);

return_txq:
	return priv->port_ptp_tc2realtxq[up];
}

static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb,
				  u16 htb_maj_id)
{
	u16 classid;

	if ((TC_H_MAJ(skb->priority) >> 16) == htb_maj_id)
		classid = TC_H_MIN(skb->priority);
	else
		classid = READ_ONCE(priv->htb.defcls);

	if (!classid)
		return 0;

	return mlx5e_get_txq_by_classid(priv, classid);
}

u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       struct net_device *sb_dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int num_tc_x_num_ch;
	int txq_ix;
	int up = 0;
	int ch_ix;

	/* Sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */
	num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch);
	if (unlikely(dev->real_num_tx_queues > num_tc_x_num_ch)) {
		struct mlx5e_ptp *ptp_channel;

		/* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
		u16 htb_maj_id = smp_load_acquire(&priv->htb.maj_id);

		if (unlikely(htb_maj_id)) {
			txq_ix = mlx5e_select_htb_queue(priv, skb, htb_maj_id);
			if (txq_ix > 0)
				return txq_ix;
		}

		ptp_channel = READ_ONCE(priv->channels.ptp);
		if (unlikely(ptp_channel &&
			     test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
			     mlx5e_use_ptpsq(skb)))
			return mlx5e_select_ptpsq(dev, skb);

		txq_ix = netdev_pick_tx(dev, skb, NULL);
		/* Fix netdev_pick_tx() not to choose ptp_channel and HTB txqs.
		 * If they are selected, switch to regular queues.
		 * Driver to select these queues only at mlx5e_select_ptpsq()
		 * and mlx5e_select_htb_queue().
		 */
		if (unlikely(txq_ix >= num_tc_x_num_ch))
			txq_ix %= num_tc_x_num_ch;
	} else {
		txq_ix = netdev_pick_tx(dev, skb, NULL);
	}

	if (!netdev_get_num_tc(dev))
		return txq_ix;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
		up = mlx5e_get_dscp_up(priv, skb);
	else
#endif
		if (skb_vlan_tag_present(skb))
			up = skb_vlan_tag_get_prio(skb);

	/* Normalize any picked txq_ix to [0, num_channels),
	 * So we can return a txq_ix that matches the channel and
	 * packet UP.
	 */
	ch_ix = priv->txq2sq[txq_ix]->ch_ix;

	return priv->channel_tc2realtxq[ch_ix][up];
}

static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

	return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}

static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
{
	if (skb_transport_header_was_set(skb))
		return skb_transport_offset(skb);
	else
		return mlx5e_skb_l2_header_offset(skb);
}

static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
					struct sk_buff *skb)
{
	u16 hlen;

	switch (mode) {
	case MLX5_INLINE_MODE_NONE:
		return 0;
	case MLX5_INLINE_MODE_TCP_UDP:
		hlen = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
		if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
			hlen += VLAN_HLEN;
		break;
	case MLX5_INLINE_MODE_IP:
		hlen = mlx5e_skb_l3_header_offset(skb);
		break;
	case MLX5_INLINE_MODE_L2:
	default:
		hlen = mlx5e_skb_l2_header_offset(skb);
	}
	return min_t(u16, hlen, skb_headlen(skb));
}

static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
{
	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
	int cpy1_sz = 2 * ETH_ALEN;
	int cpy2_sz = ihs - cpy1_sz;

	memcpy(vhdr, skb->data, cpy1_sz);
	vhdr->h_vlan_proto = skb->vlan_proto;
	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
	memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
}

static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			    struct mlx5e_accel_tx_state *accel,
			    struct mlx5_wqe_eth_seg *eseg)
{
	if (unlikely(mlx5e_ipsec_txwqe_build_eseg_csum(sq, skb, eseg)))
		return;

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
		if (skb->encapsulation) {
			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
					  MLX5_ETH_WQE_L4_INNER_CSUM;
			sq->stats->csum_partial_inner++;
		} else {
			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
			sq->stats->csum_partial++;
		}
#ifdef CONFIG_MLX5_EN_TLS
	} else if (unlikely(accel && accel->tls.tls_tisn)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
		sq->stats->csum_partial++;
#endif
	} else
		sq->stats->csum_none++;
}

static inline u16
mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
{
	struct mlx5e_sq_stats *stats = sq->stats;
	u16 ihs;

	if (skb->encapsulation) {
		ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
		stats->tso_inner_packets++;
		stats->tso_inner_bytes += skb->len - ihs;
	} else {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
			ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
		else
			ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
		stats->tso_packets++;
		stats->tso_bytes += skb->len - ihs;
	}

	return ihs;
}

static inline int
mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			unsigned char *skb_data, u16 headlen,
			struct mlx5_wqe_data_seg *dseg)
{
	dma_addr_t dma_addr = 0;
	u8 num_dma = 0;
	int i;

	if (headlen) {
		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr = cpu_to_be64(dma_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(headlen);

		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
		num_dma++;
		dseg++;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int fsz = skb_frag_size(frag);

		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr = cpu_to_be64(dma_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(fsz);

		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
		num_dma++;
		dseg++;
	}

	return num_dma;

dma_unmap_wqe_err:
	mlx5e_dma_unmap_wqe_err(sq, num_dma);
	return -ENOMEM;
}

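/* Per-packet send attributes derived from the skb (opcode, MSS, inline
 * header size, linear headlen) and the resulting WQE layout (data segment
 * counts and number of WQE basic blocks).
 */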
struct mlx5e_tx_attr {
	u32 num_bytes;
	u16 headlen;
	u16 ihs;
	__be16 mss;
	u16 insz;
	u8 opcode;
};

struct mlx5e_tx_wqe_attr {
	u16 ds_cnt;
	u16 ds_cnt_inl;
	u16 ds_cnt_ids;
	u8 num_wqebbs;
};

static u8
mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			 struct mlx5e_accel_tx_state *accel)
{
	u8 mode;

#ifdef CONFIG_MLX5_EN_TLS
	if (accel && accel->tls.tls_tisn)
		return MLX5_INLINE_MODE_TCP_UDP;
#endif

	mode = sq->min_inline_mode;

	if (skb_vlan_tag_present(skb) &&
	    test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
		mode = max_t(u8, MLX5_INLINE_MODE_L2, mode);

	return mode;
}

static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb,
				  struct mlx5e_accel_tx_state *accel,
				  struct mlx5e_tx_attr *attr)
{
	struct mlx5e_sq_stats *stats = sq->stats;

	if (skb_is_gso(skb)) {
		u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb);

		*attr = (struct mlx5e_tx_attr) {
			.opcode = MLX5_OPCODE_LSO,
			.mss = cpu_to_be16(skb_shinfo(skb)->gso_size),
			.ihs = ihs,
			.num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs,
			.headlen = skb_headlen(skb) - ihs,
		};

		stats->packets += skb_shinfo(skb)->gso_segs;
	} else {
		u8 mode = mlx5e_tx_wqe_inline_mode(sq, skb, accel);
		u16 ihs = mlx5e_calc_min_inline(mode, skb);

		*attr = (struct mlx5e_tx_attr) {
			.opcode = MLX5_OPCODE_SEND,
			.mss = cpu_to_be16(0),
			.ihs = ihs,
			.num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN),
			.headlen = skb_headlen(skb) - ihs,
		};

		stats->packets++;
	}

	attr->insz = mlx5e_accel_tx_ids_len(sq, accel);
	stats->bytes += attr->num_bytes;
}

static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_attr *attr,
				   struct mlx5e_tx_wqe_attr *wqe_attr)
{
	u16 ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT;
	u16 ds_cnt_inl = 0;
	u16 ds_cnt_ids = 0;

	if (attr->insz)
		ds_cnt_ids = DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + attr->insz,
					  MLX5_SEND_WQE_DS);

	ds_cnt += !!attr->headlen + skb_shinfo(skb)->nr_frags + ds_cnt_ids;
	if (attr->ihs) {
		u16 inl = attr->ihs - INL_HDR_START_SZ;

		if (skb_vlan_tag_present(skb))
			inl += VLAN_HLEN;

		ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
		ds_cnt += ds_cnt_inl;
	}

	*wqe_attr = (struct mlx5e_tx_wqe_attr) {
		.ds_cnt = ds_cnt,
		.ds_cnt_inl = ds_cnt_inl,
		.ds_cnt_ids = ds_cnt_ids,
		.num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS),
	};
}

static void mlx5e_tx_skb_update_hwts_flags(struct sk_buff *skb)
{
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
}

static void mlx5e_tx_check_stop(struct mlx5e_txqsq *sq)
{
	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room))) {
		netif_tx_stop_queue(sq->txq);
		sq->stats->stopped++;
	}
}

static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		     const struct mlx5e_tx_attr *attr,
		     const struct mlx5e_tx_wqe_attr *wqe_attr, u8 num_dma,
		     struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg,
		     bool xmit_more)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	bool send_doorbell;

	*wi = (struct mlx5e_tx_wqe_info) {
		.skb = skb,
		.num_bytes = attr->num_bytes,
		.num_dma = num_dma,
		.num_wqebbs = wqe_attr->num_wqebbs,
		.num_fifo_pkts = 0,
	};

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | attr->opcode);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | wqe_attr->ds_cnt);

	mlx5e_tx_skb_update_hwts_flags(skb);

	sq->pc += wi->num_wqebbs;

	mlx5e_tx_check_stop(sq);

	if (unlikely(sq->ptpsq)) {
		mlx5e_skb_cb_hwtstamp_init(skb);
		mlx5e_skb_fifo_push(&sq->ptpsq->skb_fifo, skb);
		skb_get(skb);
	}

	send_doorbell = __netdev_tx_sent_queue(sq->txq, attr->num_bytes, xmit_more);
	if (send_doorbell)
		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
}

static void
mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		  const struct mlx5e_tx_attr *attr, const struct mlx5e_tx_wqe_attr *wqe_attr,
		  struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_eth_seg *eseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe_info *wi;

	struct mlx5e_sq_stats *stats = sq->stats;
	int num_dma;

	stats->xmit_more += xmit_more;

	/* fill wqe */
	wi = &sq->db.wqe_info[pi];
	cseg = &wqe->ctrl;
	eseg = &wqe->eth;
	dseg = wqe->data;

	eseg->mss = attr->mss;

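	/* Either copy the inline headers (and the VLAN tag, if present) into
	 * the WQE eth segment, or ask the HW to insert the VLAN tag.
	 */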
	if (attr->ihs) {
		if (skb_vlan_tag_present(skb)) {
			eseg->inline_hdr.sz |= cpu_to_be16(attr->ihs + VLAN_HLEN);
			mlx5e_insert_vlan(eseg->inline_hdr.start, skb, attr->ihs);
			stats->added_vlan_packets++;
		} else {
			eseg->inline_hdr.sz |= cpu_to_be16(attr->ihs);
			memcpy(eseg->inline_hdr.start, skb->data, attr->ihs);
		}
		dseg += wqe_attr->ds_cnt_inl;
	} else if (skb_vlan_tag_present(skb)) {
		eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
		if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
			eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
		eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
		stats->added_vlan_packets++;
	}

	dseg += wqe_attr->ds_cnt_ids;
	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs,
					  attr->headlen, dseg);
	if (unlikely(num_dma < 0))
		goto err_drop;

	mlx5e_txwqe_complete(sq, skb, attr, wqe_attr, num_dma, wi, cseg, xmit_more);

	return;

err_drop:
	stats->dropped++;
	dev_kfree_skb_any(skb);
}

static bool mlx5e_tx_skb_supports_mpwqe(struct sk_buff *skb, struct mlx5e_tx_attr *attr)
{
	return !skb_is_nonlinear(skb) && !skb_vlan_tag_present(skb) && !attr->ihs &&
	       !attr->insz;
}

static bool mlx5e_tx_mpwqe_same_eseg(struct mlx5e_txqsq *sq, struct mlx5_wqe_eth_seg *eseg)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;

	/* Assumes the session is already running and has at least one packet. */
	return !memcmp(&session->wqe->eth, eseg, MLX5E_ACCEL_ESEG_LEN);
}

static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
					 struct mlx5_wqe_eth_seg *eseg)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	pi = mlx5e_txqsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
	net_prefetchw(wqe->data);

	*session = (struct mlx5e_tx_mpwqe) {
		.wqe = wqe,
		.bytes_count = 0,
		.ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
		.pkt_count = 0,
		.inline_on = 0,
	};

	memcpy(&session->wqe->eth, eseg, MLX5E_ACCEL_ESEG_LEN);

	sq->stats->mpwqe_blks++;
}

static bool mlx5e_tx_mpwqe_session_is_active(struct mlx5e_txqsq *sq)
{
	return sq->mpwqe.wqe;
}

static void mlx5e_tx_mpwqe_add_dseg(struct mlx5e_txqsq *sq, struct mlx5e_xmit_data *txd)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5_wqe_data_seg *dseg;

	dseg = (struct mlx5_wqe_data_seg *)session->wqe + session->ds_count;

	session->pkt_count++;
	session->bytes_count += txd->len;

	dseg->addr = cpu_to_be64(txd->dma_addr);
	dseg->byte_count = cpu_to_be32(txd->len);
	dseg->lkey = sq->mkey_be;
	session->ds_count++;

	sq->stats->mpwqe_pkts++;
}

static struct mlx5_wqe_ctrl_seg *mlx5e_tx_mpwqe_session_complete(struct mlx5e_txqsq *sq)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	u8 ds_count = session->ds_count;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5e_tx_wqe_info *wi;
	u16 pi;

	cseg = &session->wqe->ctrl;
	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_ENHANCED_MPSW);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count);

	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	wi = &sq->db.wqe_info[pi];
	*wi = (struct mlx5e_tx_wqe_info) {
		.skb = NULL,
		.num_bytes = session->bytes_count,
		.num_wqebbs = DIV_ROUND_UP(ds_count, MLX5_SEND_WQEBB_NUM_DS),
		.num_dma = session->pkt_count,
		.num_fifo_pkts = session->pkt_count,
	};

	sq->pc += wi->num_wqebbs;

	session->wqe = NULL;

	mlx5e_tx_check_stop(sq);

	return cseg;
}

static void
mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		    struct mlx5_wqe_eth_seg *eseg, bool xmit_more)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5e_xmit_data txd;

	if (!mlx5e_tx_mpwqe_session_is_active(sq)) {
		mlx5e_tx_mpwqe_session_start(sq, eseg);
	} else if (!mlx5e_tx_mpwqe_same_eseg(sq, eseg)) {
		mlx5e_tx_mpwqe_session_complete(sq);
		mlx5e_tx_mpwqe_session_start(sq, eseg);
	}

	sq->stats->xmit_more += xmit_more;

	txd.data = skb->data;
	txd.len = skb->len;

	txd.dma_addr = dma_map_single(sq->pdev, txd.data, txd.len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sq->pdev, txd.dma_addr)))
		goto err_unmap;
	mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE);

	mlx5e_skb_fifo_push(&sq->db.skb_fifo, skb);

	mlx5e_tx_mpwqe_add_dseg(sq, &txd);

	mlx5e_tx_skb_update_hwts_flags(skb);

	if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe))) {
		/* Might stop the queue and affect the retval of __netdev_tx_sent_queue. */
		cseg = mlx5e_tx_mpwqe_session_complete(sq);

		if (__netdev_tx_sent_queue(sq->txq, txd.len, xmit_more))
			mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
	} else if (__netdev_tx_sent_queue(sq->txq, txd.len, xmit_more)) {
		/* Might stop the queue, but we were asked to ring the doorbell anyway. */
		cseg = mlx5e_tx_mpwqe_session_complete(sq);

		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
	}

	return;

err_unmap:
	mlx5e_dma_unmap_wqe_err(sq, 1);
	sq->stats->dropped++;
	dev_kfree_skb_any(skb);
}

void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
{
	/* Unlikely in non-MPWQE workloads; not important in MPWQE workloads. */
	if (unlikely(mlx5e_tx_mpwqe_session_is_active(sq)))
		mlx5e_tx_mpwqe_session_complete(sq);
}

static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
				   struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
				   struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
	mlx5e_accel_tx_eseg(priv, skb, eseg, ihs);
	mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
}

netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_accel_tx_state accel = {};
	struct mlx5e_tx_wqe_attr wqe_attr;
	struct mlx5e_tx_attr attr;
	struct mlx5e_tx_wqe *wqe;
	struct mlx5e_txqsq *sq;
	u16 pi;

	sq = priv->txq2sq[skb_get_queue_mapping(skb)];
	if (unlikely(!sq)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* May send SKBs and WQEs. */
	if (unlikely(!mlx5e_accel_tx_begin(dev, sq, skb, &accel)))
		return NETDEV_TX_OK;

	mlx5e_sq_xmit_prepare(sq, skb, &accel, &attr);

	if (test_bit(MLX5E_SQ_STATE_MPWQE, &sq->state)) {
		if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
			struct mlx5_wqe_eth_seg eseg = {};

			mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg, attr.ihs);
			mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
			return NETDEV_TX_OK;
		}

		mlx5e_tx_mpwqe_ensure_complete(sq);
	}

	mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
	pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
	wqe = MLX5E_TX_FETCH_WQE(sq, pi);

	/* May update the WQE, but may not post other WQEs. */
	mlx5e_accel_tx_finish(sq, wqe, &accel,
			      (struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
	mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs);
	mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());

	return NETDEV_TX_OK;
}

void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more)
{
	struct mlx5e_tx_wqe_attr wqe_attr;
	struct mlx5e_tx_attr attr;
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
	mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
	pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
	mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, &wqe->eth);
	mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, xmit_more);
}

static void mlx5e_tx_wi_dma_unmap(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi,
				  u32 *dma_fifo_cc)
{
	int i;

	for (i = 0; i < wi->num_dma; i++) {
		struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);

		mlx5e_tx_dma_unmap(sq->pdev, dma);
	}
}

static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			      struct mlx5_cqe64 *cqe, int napi_budget)
{
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		struct skb_shared_hwtstamps hwts = {};
		u64 ts = get_cqe_ts(cqe);

		hwts.hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, ts);
		if (sq->ptpsq)
			mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_CQE_HWTSTAMP,
						      hwts.hwtstamp, sq->ptpsq->cq_stats);
		else
			skb_tstamp_tx(skb, &hwts);
	}

	napi_consume_skb(skb, napi_budget);
}

static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi,
					  struct mlx5_cqe64 *cqe, int napi_budget)
{
	int i;

	for (i = 0; i < wi->num_fifo_pkts; i++) {
		struct sk_buff *skb = mlx5e_skb_fifo_pop(&sq->db.skb_fifo);

		mlx5e_consume_skb(sq, skb, cqe, napi_budget);
	}
}

bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
	struct mlx5e_sq_stats *stats;
	struct mlx5e_txqsq *sq;
	struct mlx5_cqe64 *cqe;
	u32 dma_fifo_cc;
	u32 nbytes;
	u16 npkts;
	u16 sqcc;
	int i;

	sq = container_of(cq, struct mlx5e_txqsq, cq);

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return false;

	stats = sq->stats;

	npkts = 0;
	nbytes = 0;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	/* avoid dirtying sq cache line every cqe */
	dma_fifo_cc = sq->dma_fifo_cc;

	i = 0;
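	/* Process up to MLX5E_TX_CQ_POLL_BUDGET CQEs; each CQE may complete
	 * several WQEs, so walk the SQ up to the reported wqe_counter.
	 */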
	do {
		struct mlx5e_tx_wqe_info *wi;
		u16 wqe_counter;
		bool last_wqe;
		u16 ci;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			last_wqe = (sqcc == wqe_counter);

			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
			wi = &sq->db.wqe_info[ci];

			sqcc += wi->num_wqebbs;

			if (likely(wi->skb)) {
				mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
				mlx5e_consume_skb(sq, wi->skb, cqe, napi_budget);

				npkts++;
				nbytes += wi->num_bytes;
				continue;
			}

			if (unlikely(mlx5e_ktls_tx_try_handle_resync_dump_comp(sq, wi,
									       &dma_fifo_cc)))
				continue;

			if (wi->num_fifo_pkts) {
				mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
				mlx5e_tx_wi_consume_fifo_skbs(sq, wi, cqe, napi_budget);

				npkts += wi->num_fifo_pkts;
				nbytes += wi->num_bytes;
			}
		} while (!last_wqe);

		if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
					      &sq->state)) {
				mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
						     (struct mlx5_err_cqe *)cqe);
				mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
				queue_work(cq->priv->wq, &sq->recover_work);
			}
			stats->cqe_err++;
		}

	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

	stats->cqes += i;

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

	if (netif_tx_queue_stopped(sq->txq) &&
	    mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
	    !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
		netif_tx_wake_queue(sq->txq);
		stats->wake++;
	}

	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}

static void mlx5e_tx_wi_kfree_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi)
{
	int i;

	for (i = 0; i < wi->num_fifo_pkts; i++)
		dev_kfree_skb_any(mlx5e_skb_fifo_pop(&sq->db.skb_fifo));
}

void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
	struct mlx5e_tx_wqe_info *wi;
	u32 dma_fifo_cc, nbytes = 0;
	u16 ci, sqcc, npkts = 0;

	sqcc = sq->cc;
	dma_fifo_cc = sq->dma_fifo_cc;

	while (sqcc != sq->pc) {
		ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
		wi = &sq->db.wqe_info[ci];

		sqcc += wi->num_wqebbs;

		if (likely(wi->skb)) {
			mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
			dev_kfree_skb_any(wi->skb);

			npkts++;
			nbytes += wi->num_bytes;
			continue;
		}

		if (unlikely(mlx5e_ktls_tx_try_handle_resync_dump_comp(sq, wi, &dma_fifo_cc)))
			continue;

		if (wi->num_fifo_pkts) {
			mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
			mlx5e_tx_wi_kfree_fifo_skbs(sq, wi);

			npkts += wi->num_fifo_pkts;
			nbytes += wi->num_bytes;
		}
	}

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);
}

#ifdef CONFIG_MLX5_CORE_IPOIB
static inline void
mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
			   struct mlx5_wqe_datagram_seg *dseg)
{
	memcpy(&dseg->av, av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(dqpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}

static void mlx5i_sq_calc_wqe_attr(struct sk_buff *skb,
				   const struct mlx5e_tx_attr *attr,
				   struct mlx5e_tx_wqe_attr *wqe_attr)
{
	u16 ds_cnt = sizeof(struct mlx5i_tx_wqe) / MLX5_SEND_WQE_DS;
	u16 ds_cnt_inl = 0;

	ds_cnt += !!attr->headlen + skb_shinfo(skb)->nr_frags;

	if (attr->ihs) {
		u16 inl = attr->ihs - INL_HDR_START_SZ;

		ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
		ds_cnt += ds_cnt_inl;
	}

	*wqe_attr = (struct mlx5e_tx_wqe_attr) {
		.ds_cnt = ds_cnt,
		.ds_cnt_inl = ds_cnt_inl,
		.num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS),
	};
}

void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		   struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more)
{
	struct mlx5e_tx_wqe_attr wqe_attr;
	struct mlx5e_tx_attr attr;
	struct mlx5i_tx_wqe *wqe;

	struct mlx5_wqe_datagram_seg *datagram;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_eth_seg *eseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe_info *wi;

	struct mlx5e_sq_stats *stats = sq->stats;
	int num_dma;
	u16 pi;

	mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
	mlx5i_sq_calc_wqe_attr(skb, &attr, &wqe_attr);

	pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
	wqe = MLX5I_SQ_FETCH_WQE(sq, pi);

	stats->xmit_more += xmit_more;

	/* fill wqe */
	wi = &sq->db.wqe_info[pi];
	cseg = &wqe->ctrl;
	datagram = &wqe->datagram;
	eseg = &wqe->eth;
	dseg = wqe->data;

	mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);

	mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, eseg);

	eseg->mss = attr.mss;

	if (attr.ihs) {
		memcpy(eseg->inline_hdr.start, skb->data, attr.ihs);
		eseg->inline_hdr.sz = cpu_to_be16(attr.ihs);
		dseg += wqe_attr.ds_cnt_inl;
	}

	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs,
					  attr.headlen, dseg);
	if (unlikely(num_dma < 0))
		goto err_drop;

	mlx5e_txwqe_complete(sq, skb, &attr, &wqe_attr, num_dma, wi, cseg, xmit_more);

	return;

err_drop:
	stats->dropped++;
	dev_kfree_skb_any(skb);
}
#endif