/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <asm/page.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/tcp.h>
#include <linux/moduleparam.h>

#include "mlx4_en.h"

enum {
	MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
	MAX_BF = 256,
};

static int inline_thold __read_mostly = MAX_INLINE;

module_param_named(inline_thold, inline_thold, int, 0444);
MODULE_PARM_DESC(inline_thold, "threshold for using inline data");

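/*
 * Allocate the per-ring software state: the tx_info array, a bounce
 * buffer for descriptors that wrap past the end of the ring, the HW
 * work queue resources (and their mapping), the TX QP and, when
 * available, a BlueFlame register for low-latency doorbells.
 */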
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_tx_ring *ring, int qpn, u32 size,
			   u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int tmp;
	int err;

	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;

	inline_thold = min(inline_thold, MAX_INLINE);

	tmp = size * sizeof(struct mlx4_en_tx_info);
	ring->tx_info = vmalloc(tmp);
	if (!ring->tx_info)
		return -ENOMEM;

	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
	       ring->tx_info, tmp);

	ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
	if (!ring->bounce_buf) {
		err = -ENOMEM;
		goto err_tx;
	}
	ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);

	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
				 2 * PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed allocating hwq resources\n");
		goto err_bounce;
	}

	err = mlx4_en_map_buffer(&ring->wqres.buf);
	if (err) {
		en_err(priv, "Failed to map TX buffer\n");
		goto err_hwq_res;
	}

	ring->buf = ring->wqres.buf.direct.buf;

	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
	       "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
	       ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);

	ring->qpn = qpn;
	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
	if (err) {
		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
		goto err_map;
	}
	ring->qp.event = mlx4_en_sqp_event;

	err = mlx4_bf_alloc(mdev->dev, &ring->bf);
	if (err) {
		en_dbg(DRV, priv, "working without blueflame (%d)\n", err);
		ring->bf.uar = &mdev->priv_uar;
		ring->bf.uar->map = mdev->uar_map;
		ring->bf_enabled = false;
	} else {
		ring->bf_enabled = true;
	}

	return 0;

err_map:
	mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_bounce:
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;
err_tx:
	vfree(ring->tx_info);
	ring->tx_info = NULL;
	return err;
}

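/* Release everything allocated by mlx4_en_create_tx_ring() */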
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);

	if (ring->bf_enabled)
		mlx4_bf_free(mdev->dev, &ring->bf);
	mlx4_qp_remove(mdev->dev, &ring->qp);
	mlx4_qp_free(mdev->dev, &ring->qp);
	mlx4_en_unmap_buffer(&ring->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	kfree(ring->bounce_buf);
	ring->bounce_buf = NULL;
	vfree(ring->tx_info);
	ring->tx_info = NULL;
}

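/*
 * Reset the ring counters, bind the ring to its completion queue and
 * bring the TX QP from reset to the ready-to-send state.
 */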
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring,
			     int cq, int user_prio)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	ring->cqn = cq;
	ring->prod = 0;
	ring->cons = 0xffffffff;
	ring->last_nr_txbb = 1;
	ring->poll_cnt = 0;
	memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
	memset(ring->buf, 0, ring->buf_size);

	ring->qp_state = MLX4_QP_STATE_RST;
	ring->doorbell_qpn = ring->qp.qpn << 8;

	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
				ring->cqn, user_prio, &ring->context);
	if (ring->bf_enabled)
		ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);

	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
			       &ring->qp, &ring->qp_state);

	return err;
}

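/* Bring the TX QP back to the reset state */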
void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_dev *mdev = priv->mdev;

	mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
		       MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
}

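/*
 * Release a single TX descriptor: unmap its DMA buffers (unless the data
 * was sent inline), stamp the freed TXBBs with the ownership bit and free
 * the skb.  Both the data segments and the stamping may wrap around the
 * end of the ring.
 */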
static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring,
				int index, u8 owner)
{
	struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
	struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
	struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
	struct sk_buff *skb = tx_info->skb;
	struct skb_frag_struct *frag;
	void *end = ring->buf + ring->buf_size;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;
	__be32 *ptr = (__be32 *)tx_desc;
	__be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));

	/* Optimize the common case when there are no wraparounds */
	if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
		if (!tx_info->inl) {
			if (tx_info->linear) {
				dma_unmap_single(priv->ddev,
					(dma_addr_t) be64_to_cpu(data->addr),
					 be32_to_cpu(data->byte_count),
					 DMA_TO_DEVICE);
				++data;
			}

			for (i = 0; i < frags; i++) {
				frag = &skb_shinfo(skb)->frags[i];
				dma_unmap_page(priv->ddev,
					(dma_addr_t) be64_to_cpu(data[i].addr),
					skb_frag_size(frag), DMA_TO_DEVICE);
			}
		}
		/* Stamp the freed descriptor */
		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
		}

	} else {
		if (!tx_info->inl) {
			if ((void *) data >= end)
				data = ring->buf + ((void *)data - end);

			if (tx_info->linear) {
				dma_unmap_single(priv->ddev,
					(dma_addr_t) be64_to_cpu(data->addr),
					 be32_to_cpu(data->byte_count),
					 DMA_TO_DEVICE);
				++data;
			}

			for (i = 0; i < frags; i++) {
				/* Check for wraparound before unmapping */
				if ((void *) data >= end)
					data = ring->buf;
				frag = &skb_shinfo(skb)->frags[i];
				dma_unmap_page(priv->ddev,
					(dma_addr_t) be64_to_cpu(data->addr),
					 skb_frag_size(frag), DMA_TO_DEVICE);
				++data;
			}
		}
		/* Stamp the freed descriptor */
		for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
			*ptr = stamp;
			ptr += STAMP_DWORDS;
			if ((void *) ptr >= end) {
				ptr = ring->buf;
				stamp ^= cpu_to_be32(0x80000000);
			}
		}

	}
	dev_kfree_skb_any(skb);
	return tx_info->nr_txbb;
}

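/*
 * Walk the ring from consumer to producer and free any descriptors that
 * were posted but never completed.  Returns the number of descriptors
 * freed.
 */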
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int cnt = 0;

	/* Skip last polled descriptor */
	ring->cons += ring->last_nr_txbb;
	en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
	       ring->cons, ring->prod);

	if ((u32) (ring->prod - ring->cons) > ring->size) {
		if (netif_msg_tx_err(priv))
			en_warn(priv, "Tx consumer passed producer!\n");
		return 0;
	}

	while (ring->cons != ring->prod) {
		ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
						ring->cons & ring->size_mask,
						!!(ring->cons & ring->size));
		ring->cons += ring->last_nr_txbb;
		cnt++;
	}

	if (cnt)
		en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);

	return cnt;
}

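/*
 * Process TX completions: free the descriptors covered by each completed
 * CQE, update the CQ consumer index before the ring consumer to avoid CQ
 * overflow, and wake the netdev queue if it was stopped and work was
 * completed.
 */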
static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_cq *mcq = &cq->mcq;
	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
	struct mlx4_cqe *cqe;
	u16 index;
	u16 new_index, ring_index;
	u32 txbbs_skipped = 0;
	u32 cons_index = mcq->cons_index;
	int size = cq->size;
	u32 size_mask = ring->size_mask;
	struct mlx4_cqe *buf = cq->buf;
	u32 packets = 0;
	u32 bytes = 0;

	if (!priv->port_up)
		return;

	index = cons_index & size_mask;
	cqe = &buf[index];
	ring_index = ring->cons & size_mask;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
			cons_index & size)) {
		/*
		 * make sure we read the CQE after we read the
		 * ownership bit
		 */
		rmb();

		/* Skip over last polled CQE */
		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;

		do {
			txbbs_skipped += ring->last_nr_txbb;
			ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
			/* free next descriptor */
			ring->last_nr_txbb = mlx4_en_free_tx_desc(
					priv, ring, ring_index,
					!!((ring->cons + txbbs_skipped) &
							ring->size));
			packets++;
			bytes += ring->tx_info[ring_index].nr_bytes;
		} while (ring_index != new_index);

		++cons_index;
		index = cons_index & size_mask;
		cqe = &buf[index];
	}

	/*
	 * To prevent CQ overflow we first update CQ consumer and only then
	 * the ring consumer.
	 */
	mcq->cons_index = cons_index;
	mlx4_cq_set_ci(mcq);
	wmb();
	ring->cons += txbbs_skipped;
	netdev_tx_completed_queue(ring->tx_queue, packets, bytes);

	/*
	 * Wake up the Tx queue if it was stopped and at least one packet
	 * was completed.
	 */
	if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
		netif_tx_wake_queue(ring->tx_queue);
		priv->port_stats.wake_queue++;
	}
}

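/* TX completion event handler: drain the CQ and re-arm it */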
void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);

	mlx4_en_process_tx_cq(cq->dev, cq);
	mlx4_en_arm_cq(priv, cq);
}

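/*
 * A descriptor that would wrap past the end of the ring is first built in
 * the bounce buffer; copy it back into place (wrapped part first), leaving
 * the first dword untouched since it carries the ownership/opcode field
 * that the caller sets last.
 */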
static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
						      struct mlx4_en_tx_ring *ring,
						      u32 index,
						      unsigned int desc_size)
{
	u32 copy = (ring->size - index) * TXBB_SIZE;
	int i;

	for (i = desc_size - copy - 4; i >= 0; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + i)) =
			*((u32 *) (ring->bounce_buf + copy + i));
	}

	for (i = copy - 4; i >= 4 ; i -= 4) {
		if ((i & (TXBB_SIZE - 1)) == 0)
			wmb();

		*((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
			*((u32 *) (ring->bounce_buf + i));
	}

	/* Return real descriptor location */
	return ring->buf + index * TXBB_SIZE;
}

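/*
 * Decide whether the skb is small enough (and simple enough) to be copied
 * inline into the descriptor; if so and the skb has a single fragment,
 * return the fragment's address through @pfrag.
 */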
static int is_inline(struct sk_buff *skb, void **pfrag)
{
	void *ptr;

	if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) {
		if (skb_shinfo(skb)->nr_frags == 1) {
			ptr = skb_frag_address_safe(&skb_shinfo(skb)->frags[0]);
			if (unlikely(!ptr))
				return 0;

			if (pfrag)
				*pfrag = ptr;

			return 1;
		} else if (unlikely(skb_shinfo(skb)->nr_frags))
			return 0;
		else
			return 1;
	}

	return 0;
}

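/*
 * Descriptor size needed to send this skb inline, 16-byte aligned; a
 * second inline segment header is accounted for when the data spans the
 * MLX4_INLINE_ALIGN boundary.
 */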
static int inline_size(struct sk_buff *skb)
{
	if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg)
	    <= MLX4_INLINE_ALIGN)
		return ALIGN(skb->len + CTRL_SIZE +
			     sizeof(struct mlx4_wqe_inline_seg), 16);
	else
		return ALIGN(skb->len + CTRL_SIZE + 2 *
			     sizeof(struct mlx4_wqe_inline_seg), 16);
}

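/*
 * Compute the descriptor size needed for this skb: control segment plus
 * LSO header and data segments for GSO packets, data segments or inline
 * data otherwise.  Returns 0 if the packet cannot be sent because its
 * headers are not linear.
 */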
static int get_real_size(struct sk_buff *skb, struct net_device *dev,
			 int *lso_header_size)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int real_size;

	if (skb_is_gso(skb)) {
		*lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
		real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
			ALIGN(*lso_header_size + 4, DS_SIZE);
		if (unlikely(*lso_header_size != skb_headlen(skb))) {
			/* We add a segment for the skb linear buffer only if
			 * it contains data */
			if (*lso_header_size < skb_headlen(skb))
				real_size += DS_SIZE;
			else {
				if (netif_msg_tx_err(priv))
					en_warn(priv, "Non-linear headers\n");
				return 0;
			}
		}
	} else {
		*lso_header_size = 0;
		if (!is_inline(skb, NULL))
			real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
		else
			real_size = inline_size(skb);
	}

	return real_size;
}

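/*
 * Build the inline part of the descriptor: copy the packet data right
 * after the inline segment header(s), splitting it in two chunks when it
 * does not fit below the MLX4_INLINE_ALIGN boundary, and fill in the VLAN
 * and fence/size control fields.
 */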
static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb,
			     int real_size, u16 *vlan_tag, int tx_ind, void *fragptr)
{
	struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
	int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;

	if (skb->len <= spc) {
		inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
		skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
		if (skb_shinfo(skb)->nr_frags)
			memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
			       skb_frag_size(&skb_shinfo(skb)->frags[0]));

	} else {
		inl->byte_count = cpu_to_be32(1 << 31 | spc);
		if (skb_headlen(skb) <= spc) {
			skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
			if (skb_headlen(skb) < spc) {
				memcpy(((void *)(inl + 1)) + skb_headlen(skb),
					fragptr, spc - skb_headlen(skb));
				fragptr += spc - skb_headlen(skb);
			}
			inl = (void *) (inl + 1) + spc;
			memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
		} else {
			skb_copy_from_linear_data(skb, inl + 1, spc);
			inl = (void *) (inl + 1) + spc;
			skb_copy_from_linear_data_offset(skb, spc, inl + 1,
					skb_headlen(skb) - spc);
			if (skb_shinfo(skb)->nr_frags)
				memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
					fragptr, skb_frag_size(&skb_shinfo(skb)->frags[0]));
		}

		wmb();
		inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
	}
	tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
		(!!vlan_tx_tag_present(skb));
	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}

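/*
 * Pick a TX ring: hash the flow over the rings of one priority group and
 * offset by the VLAN priority, unless the device has traffic classes
 * configured, in which case the stack's hash is used directly.
 */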
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	u16 rings_p_up = priv->mdev->profile.num_tx_rings_p_up;
	u8 up = 0;

	if (dev->num_tc)
		return skb_tx_hash(dev, skb);

	if (vlan_tx_tag_present(skb))
		up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT;

	return __skb_tx_hash(dev, skb, rings_p_up) + up * rings_p_up;
}

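/* Write the descriptor into the BlueFlame register area, 64 bits at a time */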
static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt)
{
	__iowrite64_copy(dst, src, bytecnt / 8);
}

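/*
 * Main transmit routine: build the control, LSO/inline and data segments
 * for the skb, post the descriptor and ring the doorbell, using the
 * BlueFlame register when possible and the UAR doorbell page otherwise.
 */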
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_tx_ring *ring;
	struct mlx4_en_tx_desc *tx_desc;
	struct mlx4_wqe_data_seg *data;
	struct skb_frag_struct *frag;
	struct mlx4_en_tx_info *tx_info;
	struct ethhdr *ethh;
	int tx_ind = 0;
	int nr_txbb;
	int desc_size;
	int real_size;
	dma_addr_t dma;
	u32 index, bf_index;
	__be32 op_own;
	u16 vlan_tag = 0;
	int i;
	int lso_header_size;
	void *fragptr;
	bool bounce = false;

	if (!priv->port_up)
		goto tx_drop;

	real_size = get_real_size(skb, dev, &lso_header_size);
	if (unlikely(!real_size))
		goto tx_drop;

	/* Align descriptor to TXBB size */
	desc_size = ALIGN(real_size, TXBB_SIZE);
	nr_txbb = desc_size / TXBB_SIZE;
	if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
		if (netif_msg_tx_err(priv))
			en_warn(priv, "Oversized header or SG list\n");
		goto tx_drop;
	}

	tx_ind = skb->queue_mapping;
	ring = &priv->tx_ring[tx_ind];
	if (vlan_tx_tag_present(skb))
		vlan_tag = vlan_tx_tag_get(skb);

	/* Check available TXBBs and 2K spare for prefetch */
	if (unlikely(((int)(ring->prod - ring->cons)) >
		     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
		/* Every full Tx ring stops the queue */
		netif_tx_stop_queue(ring->tx_queue);
		priv->port_stats.queue_stopped++;

		return NETDEV_TX_BUSY;
	}

	/* Track current inflight packets for performance analysis */
	AVG_PERF_COUNTER(priv->pstats.inflight_avg,
			 (u32) (ring->prod - ring->cons - 1));

	/* Packet is good - grab an index and transmit it */
	index = ring->prod & ring->size_mask;
	bf_index = ring->prod;

	/* See if we have enough space for a whole descriptor TXBB and for
	 * setting SW ownership on the next descriptor; if not, use a bounce
	 * buffer. */
	if (likely(index + nr_txbb <= ring->size)) {
		tx_desc = ring->buf + index * TXBB_SIZE;
	} else {
		tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
		bounce = true;
	}

	/* Save skb in tx_info ring */
	tx_info = &ring->tx_info[index];
	tx_info->skb = skb;
	tx_info->nr_txbb = nr_txbb;

	/* Prepare ctrl segment apart from opcode and ownership, which
	 * depend on whether LSO is used */
	tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
	tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
		!!vlan_tx_tag_present(skb);
	tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
	tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
							 MLX4_WQE_CTRL_TCP_UDP_CSUM);
		ring->tx_csum++;
	}

	/* Copy dst mac address to wqe */
	ethh = (struct ethhdr *)skb->data;
	tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest);
	tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2));
	/* Handle LSO (TSO) packets */
	if (lso_header_size) {
		/* Mark opcode as LSO */
		op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) |
			((ring->prod & ring->size) ?
				cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);

		/* Fill in the LSO prefix */
		tx_desc->lso.mss_hdr_size = cpu_to_be32(
			skb_shinfo(skb)->gso_size << 16 | lso_header_size);

		/* Copy headers;
		 * note that we already verified that it is linear */
		memcpy(tx_desc->lso.header, skb->data, lso_header_size);
		data = ((void *) &tx_desc->lso +
			ALIGN(lso_header_size + 4, DS_SIZE));

		priv->port_stats.tso_packets++;
		i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
			!!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
		tx_info->nr_bytes = skb->len + (i - 1) * lso_header_size;
		ring->packets += i;
	} else {
		/* Normal (Non LSO) packet */
		op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
			((ring->prod & ring->size) ?
			 cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
		data = &tx_desc->data;
		tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
		ring->packets++;
	}
	ring->bytes += tx_info->nr_bytes;
	netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
	AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);

	/* valid only for non-inline segments */
	tx_info->data_offset = (void *) data - (void *) tx_desc;

	tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
	data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;

	if (!is_inline(skb, &fragptr)) {
		/* Map fragments */
		for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
			frag = &skb_shinfo(skb)->frags[i];
			dma = skb_frag_dma_map(priv->ddev, frag,
					       0, skb_frag_size(frag),
					       DMA_TO_DEVICE);
			data->addr = cpu_to_be64(dma);
			data->lkey = cpu_to_be32(mdev->mr.key);
			wmb();
			data->byte_count = cpu_to_be32(skb_frag_size(frag));
			--data;
		}

		/* Map linear part */
		if (tx_info->linear) {
			dma = dma_map_single(priv->ddev, skb->data + lso_header_size,
					     skb_headlen(skb) - lso_header_size, DMA_TO_DEVICE);
			data->addr = cpu_to_be64(dma);
			data->lkey = cpu_to_be32(mdev->mr.key);
			wmb();
			data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
		}
		tx_info->inl = 0;
	} else {
		build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
		tx_info->inl = 1;
	}

	ring->prod += nr_txbb;

	/* If we used a bounce buffer then copy descriptor back into place */
	if (bounce)
		tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);

	if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tx_tag_present(skb)) {
		*(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
		op_own |= cpu_to_be32((bf_index & 0xffff) << 8);
		/* Ensure the new descriptor hits memory before setting
		 * ownership of this descriptor to HW */
		wmb();
		tx_desc->ctrl.owner_opcode = op_own;

		wmb();

		mlx4_bf_copy(ring->bf.reg + ring->bf.offset,
			     (unsigned long *) &tx_desc->ctrl, desc_size);

		wmb();

		ring->bf.offset ^= ring->bf.buf_size;
	} else {
		/* Ensure the new descriptor hits memory before setting
		 * ownership of this descriptor to HW */
		wmb();
		tx_desc->ctrl.owner_opcode = op_own;
		wmb();
		iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
	}

	return NETDEV_TX_OK;

tx_drop:
	dev_kfree_skb_any(skb);
	priv->stats.tx_dropped++;
	return NETDEV_TX_OK;
}