/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <asm/page.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/tcp.h>
#include <linux/moduleparam.h>

#include "mlx4_en.h"

enum {
	MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
	MAX_BF = 256,
};

static int inline_thold __read_mostly = MAX_INLINE;

module_param_named(inline_thold, inline_thold, int, 0444);
MODULE_PARM_DESC(inline_thold, "threshold for using inline data");

int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_tx_ring *ring, int qpn, u32 size,
			   u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int tmp;
	int err;

	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;

	inline_thold = min(inline_thold, MAX_INLINE);

	spin_lock_init(&ring->comp_lock);

	tmp = size * sizeof(struct mlx4_en_tx_info);
	ring->tx_info = vmalloc(tmp);
	if (!ring->tx_info)
		return -ENOMEM;

	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
	       ring->tx_info, tmp);

	ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
	if (!ring->bounce_buf) {
		err = -ENOMEM;
		goto err_tx;
	}
	ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);

	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
				 2 * PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed allocating hwq resources\n");
		goto err_bounce;
	}

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map TX buffer\n");
		goto err_hwq_res;
	}

	ring->buf = ring->wqres.buf.direct.buf;

	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
	       "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
	       ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);

	ring->qpn = qpn;
	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
	if (err) {
		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
		goto err_map;
	}
	ring->qp.event = mlx4_en_sqp_event;

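	/*
	 * Try to reserve a BlueFlame register for this ring.  If none is
	 * available, fall back to plain doorbell writes on the driver's
	 * shared UAR and leave ring->bf_enabled false.
	 */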
	err = mlx4_bf_alloc(mdev->dev, &ring->bf);
	if (err) {
		en_dbg(DRV, priv, "working without blueflame (%d)", err);
		ring->bf.uar = &mdev->priv_uar;
		ring->bf.uar->map = mdev->uar_map;
		ring->bf_enabled = false;
	} else
		ring->bf_enabled = true;

	return 0;

err_map:
	mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_bounce:
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;
err_tx:
	vfree(ring->tx_info);
	ring->tx_info = NULL;
	return err;
}

void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);

	if (ring->bf_enabled)
		mlx4_bf_free(mdev->dev, &ring->bf);
	mlx4_qp_remove(mdev->dev, &ring->qp);
	mlx4_qp_free(mdev->dev, &ring->qp);
	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;
	vfree(ring->tx_info);
	ring->tx_info = NULL;
}

int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring,
			     int cq)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	ring->cqn = cq;
	ring->prod = 0;
	ring->cons = 0xffffffff;
	ring->last_nr_txbb = 1;
	ring->poll_cnt = 0;
	ring->blocked = 0;
	memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
	memset(ring->buf, 0, ring->buf_size);

	ring->qp_state = MLX4_QP_STATE_RST;
	ring->doorbell_qpn = ring->qp.qpn << 8;

	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
				ring->cqn, &ring->context);
	if (ring->bf_enabled)
		ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
			       &ring->qp, &ring->qp_state);

	return err;
}

void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;

	mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
}

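/*
 * Release one transmit descriptor: unmap its DMA buffers (unless the data
 * was sent inline), stamp every TXBB it spans with the given ownership bit,
 * free the skb, and return how many TXBBs were consumed.  Descriptors that
 * wrap past the end of the ring buffer are handled in the second branch.
 */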
static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring,
				int index, u8 owner)
{
	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
	struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
	struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
	struct sk_buff *skb = tx_info->skb;
	struct skb_frag_struct *frag;
	void *end = ring->buf + ring->buf_size;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;
	__be32 *ptr = (__be32 *)tx_desc;
	__be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));

	/* Optimize the common case when there are no wraparounds */
	if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
		if (!tx_info->inl) {
			if (tx_info->linear) {
				dma_unmap_single(priv->ddev,
					(dma_addr_t) be64_to_cpu(data->addr),
					be32_to_cpu(data->byte_count),
					PCI_DMA_TODEVICE);
				++data;
			}

			for (i = 0; i < frags; i++) {
				frag = &skb_shinfo(skb)->frags[i];
				dma_unmap_page(priv->ddev,
					(dma_addr_t) be64_to_cpu(data[i].addr),
					skb_frag_size(frag), PCI_DMA_TODEVICE);
			}
		}
		/* Stamp the freed descriptor */
		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
		}

	} else {
		if (!tx_info->inl) {
			if ((void *) data >= end) {
				data = ring->buf + ((void *)data - end);
			}

			if (tx_info->linear) {
				dma_unmap_single(priv->ddev,
					(dma_addr_t) be64_to_cpu(data->addr),
					be32_to_cpu(data->byte_count),
					PCI_DMA_TODEVICE);
				++data;
			}

			for (i = 0; i < frags; i++) {
				/* Check for wraparound before unmapping */
				if ((void *) data >= end)
					data = ring->buf;
				frag = &skb_shinfo(skb)->frags[i];
				dma_unmap_page(priv->ddev,
					(dma_addr_t) be64_to_cpu(data->addr),
					skb_frag_size(frag), PCI_DMA_TODEVICE);
				++data;
			}
		}
		/* Stamp the freed descriptor */
		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
			if ((void *) ptr >= end) {
				ptr = ring->buf;
				stamp ^= cpu_to_be32(0x80000000);
			}
		}

	}
	dev_kfree_skb_any(skb);
	return tx_info->nr_txbb;
}


int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int cnt = 0;

	/* Skip last polled descriptor */
	ring->cons += ring->last_nr_txbb;
	en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
	       ring->cons, ring->prod);

	if ((u32) (ring->prod - ring->cons) > ring->size) {
		if (netif_msg_tx_err(priv))
			en_warn(priv, "Tx consumer passed producer!\n");
		return 0;
	}

	while (ring->cons != ring->prod) {
		ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
						ring->cons & ring->size_mask,
						!!(ring->cons & ring->size));
		ring->cons += ring->last_nr_txbb;
		cnt++;
	}

	if (cnt)
		en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);

	return cnt;
}

static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
	struct mlx4_cqe *cqe;
	u16 index;
	u16 new_index, ring_index;
	u32 txbbs_skipped = 0;
	u32 cons_index = mcq->cons_index;
	int size = cq->size;
	u32 size_mask = ring->size_mask;
	struct mlx4_cqe *buf = cq->buf;

	if (!priv->port_up)
		return;

	index = cons_index & size_mask;
	cqe = &buf[index];
	ring_index = ring->cons & size_mask;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
			cons_index & size)) {
		/*
		 * make sure we read the CQE after we read the
		 * ownership bit
		 */
		rmb();

		/* Skip over last polled CQE */
		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;

		do {
			txbbs_skipped += ring->last_nr_txbb;
			ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
			/* free next descriptor */
			ring->last_nr_txbb = mlx4_en_free_tx_desc(
					priv, ring, ring_index,
					!!((ring->cons + txbbs_skipped) &
							ring->size));
		} while (ring_index != new_index);

		++cons_index;
		index = cons_index & size_mask;
		cqe = &buf[index];
	}


	/*
	 * To prevent CQ overflow we first update CQ consumer and only then
	 * the ring consumer.
	 */
	mcq->cons_index = cons_index;
	mlx4_cq_set_ci(mcq);
	wmb();
	ring->cons += txbbs_skipped;

	/* Wakeup Tx queue if this ring stopped it */
	if (unlikely(ring->blocked)) {
		if ((u32) (ring->prod - ring->cons) <=
		     ring->size - HEADROOM - MAX_DESC_TXBBS) {
			ring->blocked = 0;
			netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
			priv->port_stats.wake_queue++;
		}
	}
}

void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];

	if (!spin_trylock(&ring->comp_lock))
		return;
	mlx4_en_process_tx_cq(cq->dev, cq);
	mod_timer(&cq->timer, jiffies + 1);
	spin_unlock(&ring->comp_lock);
}


void mlx4_en_poll_tx_cq(unsigned long data)
{
	struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
	u32 inflight;

	INC_PERF_COUNTER(priv->pstats.tx_poll);

	if (!spin_trylock_irq(&ring->comp_lock)) {
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
		return;
	}
	mlx4_en_process_tx_cq(cq->dev, cq);
	inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);

	/* If there are still packets in flight and the timer has not already
	 * been scheduled by the Tx routine then schedule it here to guarantee
	 * completion processing of these packets */
	if (inflight && priv->port_up)
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	spin_unlock_irq(&ring->comp_lock);
}

static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
						      struct mlx4_en_tx_ring *ring,
						      u32 index,
						      unsigned int desc_size)
{
	u32 copy = (ring->size - index) * TXBB_SIZE;
	int i;

	for (i = desc_size - copy - 4; i >= 0; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + i)) =
			*((u32 *) (ring->bounce_buf + copy + i));
	}

	for (i = copy - 4; i >= 4 ; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
			*((u32 *) (ring->bounce_buf + i));
	}

	/* Return real descriptor location */
	return ring->buf + index * TXBB_SIZE;
}

static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
{
	struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
	unsigned long flags;

	/* If we don't have a pending timer, set one up to catch our recent
	   post in case the interface becomes idle */
	if (!timer_pending(&cq->timer))
		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);

	/* Poll the CQ every MLX4_EN_TX_POLL_MODER packets */
	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
		if (spin_trylock_irqsave(&ring->comp_lock, flags)) {
			mlx4_en_process_tx_cq(priv->dev, cq);
			spin_unlock_irqrestore(&ring->comp_lock, flags);
		}
}

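/*
 * Decide whether the skb is small enough (skb->len <= inline_thold and not
 * GSO) to be copied inline into the WQE instead of being DMA-mapped.  Only
 * skbs with at most one page fragment qualify; when a fragment is present,
 * its kernel address is returned through *pfrag for the inline copy.
 */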
static int is_inline(struct sk_buff *skb, void **pfrag)
{
	void *ptr;

	if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) {
		if (skb_shinfo(skb)->nr_frags == 1) {
			ptr = skb_frag_address_safe(&skb_shinfo(skb)->frags[0]);
			if (unlikely(!ptr))
				return 0;

			if (pfrag)
				*pfrag = ptr;

			return 1;
		} else if (unlikely(skb_shinfo(skb)->nr_frags))
			return 0;
		else
			return 1;
	}

	return 0;
}

static int inline_size(struct sk_buff *skb)
{
	if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg)
	    <= MLX4_INLINE_ALIGN)
		return ALIGN(skb->len + CTRL_SIZE +
			     sizeof(struct mlx4_wqe_inline_seg), 16);
	else
		return ALIGN(skb->len + CTRL_SIZE + 2 *
			     sizeof(struct mlx4_wqe_inline_seg), 16);
}

static int get_real_size(struct sk_buff *skb, struct net_device *dev,
			 int *lso_header_size)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int real_size;

	if (skb_is_gso(skb)) {
		*lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
		real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
			ALIGN(*lso_header_size + 4, DS_SIZE);
		if (unlikely(*lso_header_size != skb_headlen(skb))) {
			/* We add a segment for the skb linear buffer only if
			 * it contains data */
			if (*lso_header_size < skb_headlen(skb))
				real_size += DS_SIZE;
			else {
				if (netif_msg_tx_err(priv))
					en_warn(priv, "Non-linear headers\n");
				return 0;
			}
		}
	} else {
		*lso_header_size = 0;
		if (!is_inline(skb, NULL))
			real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
		else
			real_size = inline_size(skb);
	}

	return real_size;
}

static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb,
			     int real_size, u16 *vlan_tag, int tx_ind, void *fragptr)
{
	struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
	int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;

	if (skb->len <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
		skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
		if (skb_shinfo(skb)->nr_frags)
			memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
			       skb_frag_size(&skb_shinfo(skb)->frags[0]));

	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		if (skb_headlen(skb) <= spc) {
			skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
			if (skb_headlen(skb) < spc) {
				memcpy(((void *)(inl + 1)) + skb_headlen(skb),
					fragptr, spc - skb_headlen(skb));
				fragptr += spc - skb_headlen(skb);
			}
			inl = (void *) (inl + 1) + spc;
			memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
		} else {
			skb_copy_from_linear_data(skb, inl + 1, spc);
			inl = (void *) (inl + 1) + spc;
			skb_copy_from_linear_data_offset(skb, spc, inl + 1,
					skb_headlen(skb) - spc);
			if (skb_shinfo(skb)->nr_frags)
				memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
					fragptr, skb_frag_size(&skb_shinfo(skb)->frags[0]));
		}

		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
	}
	tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
		(!!vlan_tx_tag_present(skb));
	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}

u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	u16 vlan_tag = 0;

	/* If we support per priority flow control and the packet contains
	 * a vlan tag, send the packet to the TX ring assigned to that priority
	 */
	if (priv->prof->rx_ppp && vlan_tx_tag_present(skb)) {
		vlan_tag = vlan_tx_tag_get(skb);
		return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
	}

	return skb_tx_hash(dev, skb);
}

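/*
 * BlueFlame: write the whole descriptor into the BlueFlame register region
 * in 64-bit chunks, so the HCA can take the descriptor from the doorbell
 * write itself instead of fetching it from the send queue with a DMA read.
 */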
static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt)
{
	__iowrite64_copy(dst, src, bytecnt / 8);
}

netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_desc *tx_desc;
	struct mlx4_wqe_data_seg *data;
	struct skb_frag_struct *frag;
	struct mlx4_en_tx_info *tx_info;
	struct ethhdr *ethh;
	int tx_ind = 0;
	int nr_txbb;
	int desc_size;
	int real_size;
	dma_addr_t dma;
	u32 index, bf_index;
	__be32 op_own;
	u16 vlan_tag = 0;
	int i;
	int lso_header_size;
	void *fragptr;
	bool bounce = false;

	if (!priv->port_up)
		goto tx_drop;

	real_size = get_real_size(skb, dev, &lso_header_size);
	if (unlikely(!real_size))
		goto tx_drop;

	/* Align descriptor to TXBB size */
	desc_size = ALIGN(real_size, TXBB_SIZE);
	nr_txbb = desc_size / TXBB_SIZE;
	if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
		if (netif_msg_tx_err(priv))
			en_warn(priv, "Oversized header or SG list\n");
		goto tx_drop;
	}

	tx_ind = skb->queue_mapping;
	ring = &priv->tx_ring[tx_ind];
	if (vlan_tx_tag_present(skb))
		vlan_tag = vlan_tx_tag_get(skb);

	/* Check available TXBBs and 2K spare for prefetch */
	if (unlikely(((int)(ring->prod - ring->cons)) >
		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
		/* every full Tx ring stops queue */
		netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
		ring->blocked = 1;
		priv->port_stats.queue_stopped++;

		/* Use interrupts to find out when queue opened */
		cq = &priv->tx_cq[tx_ind];
		mlx4_en_arm_cq(priv, cq);
		return NETDEV_TX_BUSY;
	}

	/* Track current inflight packets for performance analysis */
	AVG_PERF_COUNTER(priv->pstats.inflight_avg,
			 (u32) (ring->prod - ring->cons - 1));

	/* Packet is good - grab an index and transmit it */
	index = ring->prod & ring->size_mask;
	bf_index = ring->prod;

	/* See if we have enough space for whole descriptor TXBB for setting
	 * SW ownership on next descriptor; if not, use a bounce buffer. */
	if (likely(index + nr_txbb <= ring->size))
		tx_desc = ring->buf + index * TXBB_SIZE;
	else {
		tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
		bounce = true;
	}

	/* Save skb in tx_info ring */
	tx_info = &ring->tx_info[index];
	tx_info->skb = skb;
	tx_info->nr_txbb = nr_txbb;

	/* Prepare ctrl segment apart from opcode+ownership, which depends on
	 * whether LSO is used */
	tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
		!!vlan_tx_tag_present(skb);
	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
	tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
							 MLX4_WQE_CTRL_TCP_UDP_CSUM);
		ring->tx_csum++;
	}

	/* Copy dst mac address to wqe */
	ethh = (struct ethhdr *)skb->data;
	tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest);
	tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2));
	/* Handle LSO (TSO) packets */
	if (lso_header_size) {
		/* Mark opcode as LSO */
		op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) |
			((ring->prod & ring->size) ?
				cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);

		/* Fill in the LSO prefix */
		tx_desc->lso.mss_hdr_size = cpu_to_be32(
			skb_shinfo(skb)->gso_size << 16 | lso_header_size);

		/* Copy headers;
		 * note that we already verified that it is linear */
		memcpy(tx_desc->lso.header, skb->data, lso_header_size);
		data = ((void *) &tx_desc->lso +
			ALIGN(lso_header_size + 4, DS_SIZE));

		priv->port_stats.tso_packets++;
		i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
			!!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
		ring->bytes += skb->len + (i - 1) * lso_header_size;
		ring->packets += i;
	} else {
		/* Normal (Non LSO) packet */
		op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
			((ring->prod & ring->size) ?
				cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
		data = &tx_desc->data;
		ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN);
		ring->packets++;

	}
	AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);


	/* valid only for non-inline segments */
	tx_info->data_offset = (void *) data - (void *) tx_desc;

	tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
	data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;

	if (!is_inline(skb, &fragptr)) {
		/* Map fragments */
		for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
			frag = &skb_shinfo(skb)->frags[i];
			dma = skb_frag_dma_map(priv->ddev, frag,
					       0, skb_frag_size(frag),
					       DMA_TO_DEVICE);
			data->addr = cpu_to_be64(dma);
			data->lkey = cpu_to_be32(mdev->mr.key);
			wmb();
			data->byte_count = cpu_to_be32(skb_frag_size(frag));
			--data;
		}

		/* Map linear part */
		if (tx_info->linear) {
			dma = dma_map_single(priv->ddev, skb->data + lso_header_size,
					     skb_headlen(skb) - lso_header_size,
					     PCI_DMA_TODEVICE);
			data->addr = cpu_to_be64(dma);
			data->lkey = cpu_to_be32(mdev->mr.key);
			wmb();
			data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
		}
		tx_info->inl = 0;
	} else {
		build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
		tx_info->inl = 1;
	}

	ring->prod += nr_txbb;

	/* If we used a bounce buffer then copy descriptor back into place */
	if (bounce)
		tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);

	/* Run destructor before passing skb to HW */
	if (likely(!skb_shared(skb)))
		skb_orphan(skb);

	if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
		*(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
		op_own |= htonl((bf_index & 0xffff) << 8);
		/* Ensure new descriptor hits memory
		 * before setting ownership of this descriptor to HW */
		wmb();
		tx_desc->ctrl.owner_opcode = op_own;

		wmb();

		mlx4_bf_copy(ring->bf.reg + ring->bf.offset,
			     (unsigned long *) &tx_desc->ctrl, desc_size);

		wmb();

		ring->bf.offset ^= ring->bf.buf_size;
	} else {
		/* Ensure new descriptor hits memory
		 * before setting ownership of this descriptor to HW */
		wmb();
		tx_desc->ctrl.owner_opcode = op_own;
		wmb();
		iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
	}

	/* Poll CQ here */
	mlx4_en_xmit_poll(priv, tx_ind);

	return NETDEV_TX_OK;

tx_drop:
	dev_kfree_skb_any(skb);
	priv->stats.tx_dropped++;
	return NETDEV_TX_OK;
}