/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/dsfield.h>
#include "en.h"
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
#include "lib/clock.h"

#define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS

#ifndef CONFIG_MLX5_EN_TLS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
			    MLX5E_SQ_NOPS_ROOM)
#else
/* TLS offload requires MLX5E_SQ_STOP_ROOM to have
 * enough room for a resync SKB, a normal SKB and a NOP
 */
#define MLX5E_SQ_STOP_ROOM (2 * MLX5_SEND_WQE_MAX_WQEBBS +\
			    MLX5E_SQ_NOPS_ROOM)
#endif

static inline void mlx5e_tx_dma_unmap(struct device *pdev,
				      struct mlx5e_sq_dma *dma)
{
	switch (dma->type) {
	case MLX5E_DMA_MAP_SINGLE:
		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	case MLX5E_DMA_MAP_PAGE:
		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	default:
		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
	}
}

static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq,
				  dma_addr_t addr,
				  u32 size,
				  enum mlx5e_dma_map_type map_type)
{
	u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask;

	sq->db.dma_fifo[i].addr = addr;
	sq->db.dma_fifo[i].size = size;
	sq->db.dma_fifo[i].type = map_type;
	sq->dma_fifo_pc++;
}

static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
{
	return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
}

static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
	int i;

	for (i = 0; i < num_dma; i++) {
		struct mlx5e_sq_dma *last_pushed_dma =
			mlx5e_dma_get(sq, --sq->dma_fifo_pc);

		mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
	}
}

#ifdef CONFIG_MLX5_CORE_EN_DCB
static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
{
	int dscp_cp = 0;

	if (skb->protocol == htons(ETH_P_IP))
		dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
	else if (skb->protocol == htons(ETH_P_IPV6))
		dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;

	return priv->dcbx_dp.dscp2prio[dscp_cp];
}
#endif
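
/* Queue selection below: the stack's fallback hash picks a candidate txq
 * index, which is reduced to a channel; the packet's user priority (taken
 * from the DSCP field when the port trust state is DSCP, otherwise from the
 * VLAN PCP bits) then selects the per-TC queue within that channel.
 */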

u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       void *accel_priv, select_queue_fallback_t fallback)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int channel_ix = fallback(dev, skb);
	u16 num_channels;
	int up = 0;

	if (!netdev_get_num_tc(dev))
		return channel_ix;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
		up = mlx5e_get_dscp_up(priv, skb);
	else
#endif
		if (skb_vlan_tag_present(skb))
			up = skb->vlan_tci >> VLAN_PRIO_SHIFT;

	/* channel_ix can be larger than num_channels since
	 * dev->num_real_tx_queues = num_channels * num_tc
	 */
	num_channels = priv->channels.params.num_channels;
	if (channel_ix >= num_channels)
		channel_ix = reciprocal_scale(channel_ix, num_channels);

	return priv->channel_tc2txq[channel_ix][up];
}

static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

	return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}

static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
{
	struct flow_keys keys;

	if (skb_transport_header_was_set(skb))
		return skb_transport_offset(skb);
	else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
		return keys.control.thoff;
	else
		return mlx5e_skb_l2_header_offset(skb);
}

static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
					struct sk_buff *skb)
{
	u16 hlen;

	switch (mode) {
	case MLX5_INLINE_MODE_NONE:
		return 0;
	case MLX5_INLINE_MODE_TCP_UDP:
		hlen = eth_get_headlen(skb->data, skb_headlen(skb));
		if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
			hlen += VLAN_HLEN;
		break;
	case MLX5_INLINE_MODE_IP:
		/* When transport header is set to zero, it means no transport
		 * header. When transport header is set to 0xff's, it means
		 * transport header wasn't set.
		 */
		if (skb_transport_offset(skb)) {
			hlen = mlx5e_skb_l3_header_offset(skb);
			break;
		}
		/* fall through */
	case MLX5_INLINE_MODE_L2:
	default:
		hlen = mlx5e_skb_l2_header_offset(skb);
	}
	return min_t(u16, hlen, skb_headlen(skb));
}
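
/* The helper below rebuilds an 802.1Q header in the inline part of the WQE:
 * the two MAC addresses (2 * ETH_ALEN bytes) are copied first, the 4-byte
 * VLAN tag is inserted from skb metadata, and the remaining ihs - 12 header
 * bytes follow it.
 */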
static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
{
	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
	int cpy1_sz = 2 * ETH_ALEN;
	int cpy2_sz = ihs - cpy1_sz;

	memcpy(vhdr, skb->data, cpy1_sz);
	vhdr->h_vlan_proto = skb->vlan_proto;
	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
	memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
}

static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
{
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
		if (skb->encapsulation) {
			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
					  MLX5_ETH_WQE_L4_INNER_CSUM;
			sq->stats->csum_partial_inner++;
		} else {
			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
			sq->stats->csum_partial++;
		}
	} else
		sq->stats->csum_none++;
}

static inline u16
mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
{
	struct mlx5e_sq_stats *stats = sq->stats;
	u16 ihs;

	if (skb->encapsulation) {
		ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
		stats->tso_inner_packets++;
		stats->tso_inner_bytes += skb->len - ihs;
	} else {
		ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
		stats->tso_packets++;
		stats->tso_bytes += skb->len - ihs;
	}

	return ihs;
}

static inline int
mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			unsigned char *skb_data, u16 headlen,
			struct mlx5_wqe_data_seg *dseg)
{
	dma_addr_t dma_addr = 0;
	u8 num_dma = 0;
	int i;

	if (headlen) {
		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr = cpu_to_be64(dma_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(headlen);

		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
		num_dma++;
		dseg++;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int fsz = skb_frag_size(frag);

		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr = cpu_to_be64(dma_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(fsz);

		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
		num_dma++;
		dseg++;
	}

	return num_dma;

dma_unmap_wqe_err:
	mlx5e_dma_unmap_wqe_err(sq, num_dma);
	return -ENOMEM;
}

static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq,
					   struct mlx5_wq_cyc *wq,
					   u16 pi, u16 frag_pi)
{
	struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
	u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi;

	edge_wi = wi + nnops;

	/* fill sq frag edge with nops to avoid wqe wrapping two pages */
	for (; wi < edge_wi; wi++) {
		wi->skb = NULL;
		wi->num_wqebbs = 1;
		mlx5e_post_nop(wq, sq->sqn, &sq->pc);
	}
	sq->stats->nop += nnops;
}
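
/* Finalizing a posted WQE, as done below: record bookkeeping in wqe_info,
 * encode the producer counter and opcode into the ctrl segment, advance
 * sq->pc by the WQEBBs consumed, stop the queue if fewer than
 * MLX5E_SQ_STOP_ROOM WQEBBs remain, and ring the doorbell unless the stack
 * has more skbs queued (skb->xmit_more) and the queue is still running.
 */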
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		     u8 opcode, u16 ds_cnt, u8 num_wqebbs, u32 num_bytes, u8 num_dma,
		     struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg)
{
	struct mlx5_wq_cyc *wq = &sq->wq;

	wi->num_bytes = num_bytes;
	wi->num_dma = num_dma;
	wi->num_wqebbs = num_wqebbs;
	wi->skb = skb;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);

	netdev_tx_sent_queue(sq->txq, num_bytes);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	sq->pc += wi->num_wqebbs;
	if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, MLX5E_SQ_STOP_ROOM))) {
		netif_tx_stop_queue(sq->txq);
		sq->stats->stopped++;
	}

	if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
}

#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))

netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			  struct mlx5e_tx_wqe *wqe, u16 pi)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_eth_seg *eseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe_info *wi;

	struct mlx5e_sq_stats *stats = sq->stats;
	u16 ds_cnt, ds_cnt_inl = 0;
	u16 headlen, ihs, frag_pi;
	u8 num_wqebbs, opcode;
	u32 num_bytes;
	int num_dma;
	__be16 mss;

	/* Calc ihs and ds cnt, no writes to wqe yet */
	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (skb_is_gso(skb)) {
		opcode = MLX5_OPCODE_LSO;
		mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		ihs = mlx5e_tx_get_gso_ihs(sq, skb);
		num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
		stats->packets += skb_shinfo(skb)->gso_segs;
	} else {
		opcode = MLX5_OPCODE_SEND;
		mss = 0;
		ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
		stats->packets++;
	}

	stats->bytes += num_bytes;
	stats->xmit_more += skb->xmit_more;

	headlen = skb->len - ihs - skb->data_len;
	ds_cnt += !!headlen;
	ds_cnt += skb_shinfo(skb)->nr_frags;

	if (ihs) {
		ihs += !!skb_vlan_tag_present(skb) * VLAN_HLEN;

		ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
		ds_cnt += ds_cnt_inl;
	}

	num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
	if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
		mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
		mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
	}

	/* fill wqe */
	wi = &sq->db.wqe_info[pi];
	cseg = &wqe->ctrl;
	eseg = &wqe->eth;
	dseg = wqe->data;

	mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

	eseg->mss = mss;

	if (ihs) {
		eseg->inline_hdr.sz = cpu_to_be16(ihs);
		if (skb_vlan_tag_present(skb)) {
			ihs -= VLAN_HLEN;
			mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs);
			stats->added_vlan_packets++;
		} else {
			memcpy(eseg->inline_hdr.start, skb->data, ihs);
		}
		dseg += ds_cnt_inl;
	} else if (skb_vlan_tag_present(skb)) {
		eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
		if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
			eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
		eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
		stats->added_vlan_packets++;
	}

	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
	if (unlikely(num_dma < 0))
		goto err_drop;

	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
			     num_dma, wi, cseg);

	return NETDEV_TX_OK;

err_drop:
	stats->dropped++;
	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}
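
/* Illustrative WQE sizing for the path above (worked example, not driver
 * logic): each 64-byte WQEBB holds MLX5_SEND_WQEBB_NUM_DS = 4 data segments
 * of MLX5_SEND_WQE_DS = 16 bytes. For a non-GSO skb with ihs = 18 inline
 * header bytes, a linear headlen and two frags:
 *   ds_cnt = 2 (ctrl + eth, sizeof(*wqe) / 16)
 *          + 1 (inline: DIV_ROUND_UP(18 - 2, 16))
 *          + 1 (headlen) + 2 (frags)            = 6
 *   num_wqebbs = DIV_ROUND_UP(6, 4)             = 2
 */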
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_tx_wqe *wqe;
	struct mlx5e_txqsq *sq;
	u16 pi;

	sq = priv->txq2sq[skb_get_queue_mapping(skb)];
	mlx5e_sq_fetch_wqe(sq, &wqe, &pi);

#ifdef CONFIG_MLX5_ACCEL
	/* might send skbs and update wqe and pi */
	skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi);
	if (unlikely(!skb))
		return NETDEV_TX_OK;
#endif
	return mlx5e_sq_xmit(sq, skb, wqe, pi);
}

static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
				 struct mlx5_err_cqe *err_cqe)
{
	u32 ci = mlx5_cqwq_get_ci(&sq->cq.wq);

	netdev_err(sq->channel->netdev,
		   "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
		   sq->cq.mcq.cqn, ci, sq->sqn, err_cqe->syndrome,
		   err_cqe->vendor_err_synd);
	mlx5_dump_err_cqe(sq->cq.mdev, err_cqe);
}

bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
	struct mlx5e_txqsq *sq;
	struct mlx5_cqe64 *cqe;
	u32 dma_fifo_cc;
	u32 nbytes;
	u16 npkts;
	u16 sqcc;
	int i;

	sq = container_of(cq, struct mlx5e_txqsq, cq);

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return false;

	npkts = 0;
	nbytes = 0;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	/* avoid dirtying sq cache line every cqe */
	dma_fifo_cc = sq->dma_fifo_cc;

	i = 0;
	do {
		u16 wqe_counter;
		bool last_wqe;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		if (unlikely(cqe->op_own >> 4 == MLX5_CQE_REQ_ERR)) {
			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
					      &sq->state)) {
				mlx5e_dump_error_cqe(sq,
						     (struct mlx5_err_cqe *)cqe);
				queue_work(cq->channel->priv->wq,
					   &sq->recover.recover_work);
			}
			sq->stats->cqe_err++;
		}

		do {
			struct mlx5e_tx_wqe_info *wi;
			struct sk_buff *skb;
			u16 ci;
			int j;

			last_wqe = (sqcc == wqe_counter);

			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
			wi = &sq->db.wqe_info[ci];
			skb = wi->skb;

			if (unlikely(!skb)) { /* nop */
				sqcc++;
				continue;
			}

			if (unlikely(skb_shinfo(skb)->tx_flags &
				     SKBTX_HW_TSTAMP)) {
				struct skb_shared_hwtstamps hwts = {};

				hwts.hwtstamp =
					mlx5_timecounter_cyc2time(sq->clock,
								  get_cqe_ts(cqe));
				skb_tstamp_tx(skb, &hwts);
			}

			for (j = 0; j < wi->num_dma; j++) {
				struct mlx5e_sq_dma *dma =
					mlx5e_dma_get(sq, dma_fifo_cc++);

				mlx5e_tx_dma_unmap(sq->pdev, dma);
			}

			npkts++;
			nbytes += wi->num_bytes;
			sqcc += wi->num_wqebbs;
			napi_consume_skb(skb, napi_budget);
		} while (!last_wqe);

	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

	if (netif_tx_queue_stopped(sq->txq) &&
	    mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
				   MLX5E_SQ_STOP_ROOM) &&
	    !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
		netif_tx_wake_queue(sq->txq);
		sq->stats->wake++;
	}

	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
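
/* The helper below drains descriptors still outstanding between cc and pc
 * (presumably on SQ teardown or after an error, when no further completions
 * are expected), unmapping their DMA fifo entries and freeing the skbs.
 */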
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
	struct mlx5e_tx_wqe_info *wi;
	struct sk_buff *skb;
	u16 ci;
	int i;

	while (sq->cc != sq->pc) {
		ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
		wi = &sq->db.wqe_info[ci];
		skb = wi->skb;

		if (!skb) { /* nop */
			sq->cc++;
			continue;
		}

		for (i = 0; i < wi->num_dma; i++) {
			struct mlx5e_sq_dma *dma =
				mlx5e_dma_get(sq, sq->dma_fifo_cc++);

			mlx5e_tx_dma_unmap(sq->pdev, dma);
		}

		dev_kfree_skb_any(skb);
		sq->cc += wi->num_wqebbs;
	}
}

#ifdef CONFIG_MLX5_CORE_IPOIB
static inline void
mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
			   struct mlx5_wqe_datagram_seg *dseg)
{
	memcpy(&dseg->av, av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(dqpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}
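
/* IPoIB variant of mlx5e_sq_xmit. The WQE layout differs in that a UD
 * datagram segment (address vector, destination QPN and Q_Key) sits between
 * the ctrl and eth segments, and there is no VLAN handling; the rest of the
 * flow mirrors the Ethernet path above.
 */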
netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			  struct mlx5_av *av, u32 dqpn, u32 dqkey)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5i_tx_wqe *wqe;

	struct mlx5_wqe_datagram_seg *datagram;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_eth_seg *eseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe_info *wi;

	struct mlx5e_sq_stats *stats = sq->stats;
	u16 headlen, ihs, pi, frag_pi;
	u16 ds_cnt, ds_cnt_inl = 0;
	u8 num_wqebbs, opcode;
	u32 num_bytes;
	int num_dma;
	__be16 mss;

	/* Calc ihs and ds cnt, no writes to wqe yet */
	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	if (skb_is_gso(skb)) {
		opcode = MLX5_OPCODE_LSO;
		mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		ihs = mlx5e_tx_get_gso_ihs(sq, skb);
		num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
		stats->packets += skb_shinfo(skb)->gso_segs;
	} else {
		opcode = MLX5_OPCODE_SEND;
		mss = 0;
		ihs = mlx5e_calc_min_inline(sq->min_inline_mode, skb);
		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
		stats->packets++;
	}

	stats->bytes += num_bytes;
	stats->xmit_more += skb->xmit_more;

	headlen = skb->len - ihs - skb->data_len;
	ds_cnt += !!headlen;
	ds_cnt += skb_shinfo(skb)->nr_frags;

	if (ihs) {
		ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
		ds_cnt += ds_cnt_inl;
	}

	num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
	if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
		pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
		mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
	}

	mlx5i_sq_fetch_wqe(sq, &wqe, &pi);

	/* fill wqe */
	wi = &sq->db.wqe_info[pi];
	cseg = &wqe->ctrl;
	datagram = &wqe->datagram;
	eseg = &wqe->eth;
	dseg = wqe->data;

	mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);

	mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);

	eseg->mss = mss;

	if (ihs) {
		memcpy(eseg->inline_hdr.start, skb->data, ihs);
		eseg->inline_hdr.sz = cpu_to_be16(ihs);
		dseg += ds_cnt_inl;
	}

	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
	if (unlikely(num_dma < 0))
		goto err_drop;

	mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
			     num_dma, wi, cseg);

	return NETDEV_TX_OK;

err_drop:
	stats->dropped++;
	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}
#endif