// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <asm/byteorder.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hinic_common.h"
#include "hinic_hw_if.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_dev.h"
#include "hinic_tx.h"

#define TX_IRQ_NO_PENDING               0
#define TX_IRQ_NO_COALESC               0
#define TX_IRQ_NO_LLI_TIMER             0
#define TX_IRQ_NO_CREDIT                0
#define TX_IRQ_NO_RESEND_TIMER          0

#define CI_UPDATE_NO_PENDING            0
#define CI_UPDATE_NO_COALESC            0

#define HW_CONS_IDX(sq)                 be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))

#define MIN_SKB_LEN			32

#define MAX_PAYLOAD_OFFSET	        221
#define TRANSPORT_OFFSET(l4_hdr, skb)	((u32)((l4_hdr) - (skb)->data))

union hinic_l3 {
	struct iphdr *v4;
	struct ipv6hdr *v6;
	unsigned char *hdr;
};

union hinic_l4 {
	struct tcphdr *tcp;
	struct udphdr *udp;
	unsigned char *hdr;
};

enum hinic_offload_type {
	TX_OFFLOAD_TSO     = BIT(0),
	TX_OFFLOAD_CSUM    = BIT(1),
	TX_OFFLOAD_VLAN    = BIT(2),
	TX_OFFLOAD_INVALID = BIT(3),
};

/**
 * hinic_txq_clean_stats - Clean the statistics of the specific queue
 * @txq: Logical Tx Queue
 **/
static void hinic_txq_clean_stats(struct hinic_txq *txq)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;

	u64_stats_update_begin(&txq_stats->syncp);
	txq_stats->pkts    = 0;
	txq_stats->bytes   = 0;
	txq_stats->tx_busy = 0;
	txq_stats->tx_wake = 0;
	txq_stats->tx_dropped = 0;
	txq_stats->big_frags_pkts = 0;
	u64_stats_update_end(&txq_stats->syncp);
}

/**
 * hinic_txq_get_stats - get statistics of Tx Queue
 * @txq: Logical Tx Queue
 * @stats: return updated stats here
 **/
void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&txq_stats->syncp);
		stats->pkts    = txq_stats->pkts;
		stats->bytes   = txq_stats->bytes;
		stats->tx_busy = txq_stats->tx_busy;
		stats->tx_wake = txq_stats->tx_wake;
		stats->tx_dropped = txq_stats->tx_dropped;
		stats->big_frags_pkts = txq_stats->big_frags_pkts;
	} while (u64_stats_fetch_retry(&txq_stats->syncp, start));
}

/**
 * txq_stats_init - Initialize the statistics of the specific queue
 * @txq: Logical Tx Queue
 **/
static void txq_stats_init(struct hinic_txq *txq)
{
	struct hinic_txq_stats *txq_stats = &txq->txq_stats;

	u64_stats_init(&txq_stats->syncp);
	hinic_txq_clean_stats(txq);
}

/**
 * tx_map_skb - dma map the skb data and fragments and fill the sges
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: returned sges
 *
 * Return 0 - Success, negative - Failure
 **/
static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
		      struct hinic_sge *sges)
{
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	skb_frag_t *frag;
	dma_addr_t dma_addr;
	int i, j;

	dma_addr = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma_addr)) {
		dev_err(&pdev->dev, "Failed to map Tx skb data\n");
		return -EFAULT;
	}

	hinic_set_sge(&sges[0], dma_addr, skb_headlen(skb));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];

		dma_addr = skb_frag_dma_map(&pdev->dev, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, dma_addr)) {
			dev_err(&pdev->dev, "Failed to map Tx skb frag\n");
			goto err_tx_map;
		}

		hinic_set_sge(&sges[i + 1], dma_addr, skb_frag_size(frag));
	}

	return 0;

err_tx_map:
	for (j = 0; j < i; j++)
		dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[j + 1]),
			       sges[j + 1].len, DMA_TO_DEVICE);

	dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
			 DMA_TO_DEVICE);
	return -EFAULT;
}

/**
 * tx_unmap_skb - unmap the dma address of the skb
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: the sges that are connected to the skb
 **/
static void tx_unmap_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
			 struct hinic_sge *sges)
{
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[i + 1]),
			       sges[i + 1].len, DMA_TO_DEVICE);

	dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len,
			 DMA_TO_DEVICE);
}

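/**
 * get_inner_l3_l4_type - get the l3 offload type and l4 protocol of the packet
 * @skb: the skb
 * @ip: l3 header of the packet
 * @l4: l4 header of the packet
 * @offload_type: TSO or CSUM offload
 * @l3_type: returned l3 offload type
 * @l4_proto: returned l4 protocol number
 **/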
static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic_l3 *ip,
				 union hinic_l4 *l4,
				 enum hinic_offload_type offload_type,
				 enum hinic_l3_offload_type *l3_type,
				 u8 *l4_proto)
{
	u8 *exthdr;

	if (ip->v4->version == 4) {
		*l3_type = (offload_type == TX_OFFLOAD_CSUM) ?
			   IPV4_PKT_NO_CHKSUM_OFFLOAD :
			   IPV4_PKT_WITH_CHKSUM_OFFLOAD;
		*l4_proto = ip->v4->protocol;
	} else if (ip->v4->version == 6) {
		*l3_type = IPV6_PKT;
		exthdr = ip->hdr + sizeof(*ip->v6);
		*l4_proto = ip->v6->nexthdr;
		if (exthdr != l4->hdr) {
			int start = exthdr - skb->data;
			__be16 frag_off;

			ipv6_skip_exthdr(skb, start, l4_proto, &frag_off);
		}
	} else {
		*l3_type = L3TYPE_UNKNOWN;
		*l4_proto = 0;
	}
}

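/**
 * get_inner_l4_info - get the l4 offload type, header length and payload offset
 * @skb: the skb
 * @l4: l4 header of the packet
 * @offload_type: TSO or CSUM offload
 * @l4_proto: l4 protocol number
 * @l4_offload: returned l4 offload type
 * @l4_len: returned l4 header length
 * @offset: returned offset of the l4 payload from the start of the packet
 **/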
static void get_inner_l4_info(struct sk_buff *skb, union hinic_l4 *l4,
			      enum hinic_offload_type offload_type, u8 l4_proto,
			      enum hinic_l4_offload_type *l4_offload,
			      u32 *l4_len, u32 *offset)
{
	*l4_offload = OFFLOAD_DISABLE;
	*offset = 0;
	*l4_len = 0;

	switch (l4_proto) {
	case IPPROTO_TCP:
		*l4_offload = TCP_OFFLOAD_ENABLE;
		/* doff is in units of 4B */
		*l4_len = l4->tcp->doff * 4;
		*offset = *l4_len + TRANSPORT_OFFSET(l4->hdr, skb);
		break;

	case IPPROTO_UDP:
		*l4_offload = UDP_OFFLOAD_ENABLE;
		*l4_len = sizeof(struct udphdr);
		*offset = TRANSPORT_OFFSET(l4->hdr, skb);
		break;

	case IPPROTO_SCTP:
		/* only csum offload supports sctp */
		if (offload_type != TX_OFFLOAD_CSUM)
			break;

		*l4_offload = SCTP_OFFLOAD_ENABLE;
		*l4_len = sizeof(struct sctphdr);
		*offset = TRANSPORT_OFFSET(l4->hdr, skb);
		break;

	default:
		break;
	}
}

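/* pseudo-header checksum (without length) used to seed the hw csum/TSO engine */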
static __sum16 csum_magic(union hinic_l3 *ip, unsigned short proto)
{
	return (ip->v4->version == 4) ?
		csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) :
		csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0);
}

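/**
 * offload_tso - fill the task and queue info sections of the wqe for TSO
 * @task: task section of the wqe
 * @queue_info: queue info section of the wqe
 * @skb: the skb
 *
 * Return 1 - TSO enabled, 0 - not a GSO skb, negative - failure
 **/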
static int offload_tso(struct hinic_sq_task *task, u32 *queue_info,
		       struct sk_buff *skb)
{
	u32 offset, l4_len, ip_identify, network_hdr_len;
	enum hinic_l3_offload_type l3_offload;
	enum hinic_l4_offload_type l4_offload;
	union hinic_l3 ip;
	union hinic_l4 l4;
	u8 l4_proto;

	if (!skb_is_gso(skb))
		return 0;

	if (skb_cow_head(skb, 0) < 0)
		return -EPROTONOSUPPORT;

	if (skb->encapsulation) {
		u32 gso_type = skb_shinfo(skb)->gso_type;
		u32 tunnel_type = 0;
		u32 l4_tunnel_len;

		ip.hdr = skb_network_header(skb);
		l4.hdr = skb_transport_header(skb);
		network_hdr_len = skb_inner_network_header_len(skb);

		if (ip.v4->version == 4) {
			ip.v4->tot_len = 0;
			l3_offload = IPV4_PKT_WITH_CHKSUM_OFFLOAD;
		} else if (ip.v4->version == 6) {
			l3_offload = IPV6_PKT;
		} else {
			l3_offload = 0;
		}

		hinic_task_set_outter_l3(task, l3_offload,
					 skb_network_header_len(skb));

		if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
			l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP);
			tunnel_type = TUNNEL_UDP_CSUM;
		} else if (gso_type & SKB_GSO_UDP_TUNNEL) {
			tunnel_type = TUNNEL_UDP_NO_CSUM;
		}

		l4_tunnel_len = skb_inner_network_offset(skb) -
				skb_transport_offset(skb);
		hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);

		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);
	} else {
		ip.hdr = skb_network_header(skb);
		l4.hdr = skb_transport_header(skb);
		network_hdr_len = skb_network_header_len(skb);
	}

	/* initialize inner IP header fields */
	if (ip.v4->version == 4)
		ip.v4->tot_len = 0;
	else
		ip.v6->payload_len = 0;

	get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_TSO, &l3_offload,
			     &l4_proto);

	hinic_task_set_inner_l3(task, l3_offload, network_hdr_len);

	ip_identify = 0;
	if (l4_proto == IPPROTO_TCP)
		l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP);

	get_inner_l4_info(skb, &l4, TX_OFFLOAD_TSO, l4_proto, &l4_offload,
			  &l4_len, &offset);

	hinic_set_tso_inner_l4(task, queue_info, l4_offload, l4_len, offset,
			       ip_identify, skb_shinfo(skb)->gso_size);

	return 1;
}

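/**
 * offload_csum - fill the task and queue info sections of the wqe for csum offload
 * @task: task section of the wqe
 * @queue_info: queue info section of the wqe
 * @skb: the skb
 *
 * Return 1 - csum offload enabled, 0 - no offload done
 **/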
static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
			struct sk_buff *skb)
{
	enum hinic_l4_offload_type l4_offload;
	u32 offset, l4_len, network_hdr_len;
	enum hinic_l3_offload_type l3_type;
	u32 tunnel_type = NOT_TUNNEL;
	union hinic_l3 ip;
	union hinic_l4 l4;
	u8 l4_proto;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (skb->encapsulation) {
		u32 l4_tunnel_len;

		tunnel_type = TUNNEL_UDP_NO_CSUM;
		ip.hdr = skb_network_header(skb);

		if (ip.v4->version == 4) {
			l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
			l4_proto = ip.v4->protocol;
		} else if (ip.v4->version == 6) {
			unsigned char *exthdr;
			__be16 frag_off;

			l3_type = IPV6_PKT;
			tunnel_type = TUNNEL_UDP_CSUM;
			exthdr = ip.hdr + sizeof(*ip.v6);
			l4_proto = ip.v6->nexthdr;
			l4.hdr = skb_transport_header(skb);
			if (l4.hdr != exthdr)
				ipv6_skip_exthdr(skb, exthdr - skb->data,
						 &l4_proto, &frag_off);
		} else {
			l3_type = L3TYPE_UNKNOWN;
			l4_proto = IPPROTO_RAW;
		}

		hinic_task_set_outter_l3(task, l3_type,
					 skb_network_header_len(skb));

		switch (l4_proto) {
		case IPPROTO_UDP:
			l4_tunnel_len = skb_inner_network_offset(skb) -
					skb_transport_offset(skb);
			ip.hdr = skb_inner_network_header(skb);
			l4.hdr = skb_inner_transport_header(skb);
			network_hdr_len = skb_inner_network_header_len(skb);
			break;
		case IPPROTO_IPIP:
		case IPPROTO_IPV6:
			tunnel_type = NOT_TUNNEL;
			l4_tunnel_len = 0;

			ip.hdr = skb_inner_network_header(skb);
			l4.hdr = skb_transport_header(skb);
			network_hdr_len = skb_network_header_len(skb);
			break;
		default:
			/* Unsupported tunnel packet, disable csum offload */
			skb_checksum_help(skb);
			return 0;
		}

		hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);
	} else {
		ip.hdr = skb_network_header(skb);
		l4.hdr = skb_transport_header(skb);
		network_hdr_len = skb_network_header_len(skb);
	}

	get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_CSUM, &l3_type,
			     &l4_proto);

	hinic_task_set_inner_l3(task, l3_type, network_hdr_len);

	get_inner_l4_info(skb, &l4, TX_OFFLOAD_CSUM, l4_proto, &l4_offload,
			  &l4_len, &offset);

	hinic_set_cs_inner_l4(task, queue_info, l4_offload, l4_len, offset);

	return 1;
}

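/* fill the vlan tag and priority into the task and queue info sections */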
static void offload_vlan(struct hinic_sq_task *task, u32 *queue_info,
			 u16 vlan_tag, u16 vlan_pri)
{
	task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(vlan_tag, VLAN_TAG) |
				HINIC_SQ_TASK_INFO0_SET(1U, VLAN_OFFLOAD);

	*queue_info |= HINIC_SQ_CTRL_SET(vlan_pri, QUEUE_INFO_PRI);
}

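/**
 * hinic_tx_offload - prepare the tx offloads (TSO, csum, vlan) of the wqe
 * @skb: the skb
 * @task: task section of the wqe
 * @queue_info: queue info section of the wqe
 *
 * Return 0 - Success, negative - Failure
 **/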
static int hinic_tx_offload(struct sk_buff *skb, struct hinic_sq_task *task,
			    u32 *queue_info)
{
	enum hinic_offload_type offload = 0;
	u16 vlan_tag;
	int enabled;

	enabled = offload_tso(task, queue_info, skb);
	if (enabled > 0) {
		offload |= TX_OFFLOAD_TSO;
	} else if (enabled == 0) {
		enabled = offload_csum(task, queue_info, skb);
		if (enabled)
			offload |= TX_OFFLOAD_CSUM;
	} else {
		return -EPROTONOSUPPORT;
	}

	if (unlikely(skb_vlan_tag_present(skb))) {
		vlan_tag = skb_vlan_tag_get(skb);
		offload_vlan(task, queue_info, vlan_tag,
			     vlan_tag >> VLAN_PRIO_SHIFT);
		offload |= TX_OFFLOAD_VLAN;
	}

	if (offload)
		hinic_task_set_l2hdr(task, skb_network_offset(skb));

	/* payload offset should not be more than 221 */
	if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_PLDOFF) >
	    MAX_PAYLOAD_OFFSET) {
		return -EPROTONOSUPPORT;
	}

	/* mss should not be less than 80 */
	if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_MSS) < HINIC_MSS_MIN) {
		*queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
		*queue_info |= HINIC_SQ_CTRL_SET(HINIC_MSS_MIN, QUEUE_INFO_MSS);
	}

	return 0;
}

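/**
 * hinic_lb_xmit_frame - xmit handler used while the device is in the loopback
 * self-test; skips the length checks and offloads of the regular path
 * @skb: the skb
 * @netdev: the netdev
 *
 * Return NETDEV_TX_OK or NETDEV_TX_BUSY
 **/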
netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u16 prod_idx, q_id = skb->queue_mapping;
	struct netdev_queue *netdev_txq;
	int nr_sges, err = NETDEV_TX_OK;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	struct hinic_txq *txq;
	struct hinic_qp *qp;

	txq = &nic_dev->txqs[q_id];
	qp = container_of(txq->sq, struct hinic_qp, sq);
	nr_sges = skb_shinfo(skb)->nr_frags + 1;

	err = tx_map_skb(nic_dev, skb, txq->sges);
	if (err)
		goto skb_error;

	wqe_size = HINIC_SQ_WQE_SIZE(nr_sges);

	sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
	if (!sq_wqe) {
		netif_stop_subqueue(netdev, qp->q_id);

		sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
		if (sq_wqe) {
			netif_wake_subqueue(nic_dev->netdev, qp->q_id);
			goto process_sq_wqe;
		}

		tx_unmap_skb(nic_dev, skb, txq->sges);

		u64_stats_update_begin(&txq->txq_stats.syncp);
		txq->txq_stats.tx_busy++;
		u64_stats_update_end(&txq->txq_stats.syncp);
		err = NETDEV_TX_BUSY;
		wqe_size = 0;
		goto flush_skbs;
	}

process_sq_wqe:
	hinic_sq_prepare_wqe(txq->sq, sq_wqe, txq->sges, nr_sges);
	hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);

flush_skbs:
	netdev_txq = netdev_get_tx_queue(netdev, q_id);
	if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq)))
		hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);

	return err;

skb_error:
	dev_kfree_skb_any(skb);
	u64_stats_update_begin(&txq->txq_stats.syncp);
	txq->txq_stats.tx_dropped++;
	u64_stats_update_end(&txq->txq_stats.syncp);

	return NETDEV_TX_OK;
}

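/**
 * hinic_xmit_frame - ndo_start_xmit handler; map the skb and post a wqe to hw
 * @skb: the skb
 * @netdev: the netdev
 *
 * Return NETDEV_TX_OK or NETDEV_TX_BUSY
 **/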
netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	u16 prod_idx, q_id = skb->queue_mapping;
	struct netdev_queue *netdev_txq;
	int nr_sges, err = NETDEV_TX_OK;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	struct hinic_txq *txq;
	struct hinic_qp *qp;

	txq = &nic_dev->txqs[q_id];
	qp = container_of(txq->sq, struct hinic_qp, sq);

	if (skb->len < MIN_SKB_LEN) {
		if (skb_pad(skb, MIN_SKB_LEN - skb->len)) {
			netdev_err(netdev, "Failed to pad skb\n");
			goto update_error_stats;
		}

		skb->len = MIN_SKB_LEN;
	}

	nr_sges = skb_shinfo(skb)->nr_frags + 1;
	if (nr_sges > 17) {
		u64_stats_update_begin(&txq->txq_stats.syncp);
		txq->txq_stats.big_frags_pkts++;
		u64_stats_update_end(&txq->txq_stats.syncp);
	}

	if (nr_sges > txq->max_sges) {
		netdev_err(netdev, "Too many Tx sges\n");
		goto skb_error;
	}

	err = tx_map_skb(nic_dev, skb, txq->sges);
	if (err)
		goto skb_error;

	wqe_size = HINIC_SQ_WQE_SIZE(nr_sges);

	sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
	if (!sq_wqe) {
		netif_stop_subqueue(netdev, qp->q_id);

		/* Check for the case where free_tx_poll is called on another
		 * cpu and we stopped the subqueue after its check.
		 */
		sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
		if (sq_wqe) {
			netif_wake_subqueue(nic_dev->netdev, qp->q_id);
			goto process_sq_wqe;
		}

		tx_unmap_skb(nic_dev, skb, txq->sges);

		u64_stats_update_begin(&txq->txq_stats.syncp);
		txq->txq_stats.tx_busy++;
		u64_stats_update_end(&txq->txq_stats.syncp);
		err = NETDEV_TX_BUSY;
		wqe_size = 0;
		goto flush_skbs;
	}

process_sq_wqe:
	hinic_sq_prepare_wqe(txq->sq, sq_wqe, txq->sges, nr_sges);

	err = hinic_tx_offload(skb, &sq_wqe->task, &sq_wqe->ctrl.queue_info);
	if (err)
		goto offload_error;

	hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);

flush_skbs:
	netdev_txq = netdev_get_tx_queue(netdev, q_id);
	if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq)))
		hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);

	return err;

offload_error:
	hinic_sq_return_wqe(txq->sq, wqe_size);
	tx_unmap_skb(nic_dev, skb, txq->sges);

skb_error:
	dev_kfree_skb_any(skb);

update_error_stats:
	u64_stats_update_begin(&txq->txq_stats.syncp);
	txq->txq_stats.tx_dropped++;
	u64_stats_update_end(&txq->txq_stats.syncp);

	return NETDEV_TX_OK;
}

/**
 * tx_free_skb - unmap and free skb
 * @nic_dev: nic device
 * @skb: the skb
 * @sges: the sges that are connected to the skb
 **/
static void tx_free_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
			struct hinic_sge *sges)
{
	tx_unmap_skb(nic_dev, skb, sges);

	dev_kfree_skb_any(skb);
}

/**
 * free_all_tx_skbs - free all skbs in tx queue
 * @txq: tx queue
 **/
static void free_all_tx_skbs(struct hinic_txq *txq)
{
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct hinic_sq *sq = txq->sq;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	struct sk_buff *skb;
	int nr_sges;
	u16 ci;

	while ((sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &ci))) {
		sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &ci);
		if (!sq_wqe)
			break;

		nr_sges = skb_shinfo(skb)->nr_frags + 1;

		hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);

		hinic_sq_put_wqe(sq, wqe_size);

		tx_free_skb(nic_dev, skb, txq->free_sges);
	}
}

/**
 * free_tx_poll - free finished tx skbs in the tx queue that is connected to napi
 * @napi: napi
 * @budget: maximum number of tx packets to free
 *
 * Return number of freed packets, or budget if more completions may be pending
 **/
static int free_tx_poll(struct napi_struct *napi, int budget)
{
	struct hinic_txq *txq = container_of(napi, struct hinic_txq, napi);
	struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp, sq);
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct netdev_queue *netdev_txq;
	struct hinic_sq *sq = txq->sq;
	struct hinic_wq *wq = sq->wq;
	struct hinic_sq_wqe *sq_wqe;
	unsigned int wqe_size;
	int nr_sges, pkts = 0;
	struct sk_buff *skb;
	u64 tx_bytes = 0;
	u16 hw_ci, sw_ci;

	do {
		hw_ci = HW_CONS_IDX(sq) & wq->mask;

		dma_rmb();

		/* Reading a WQEBB to get real WQE size and consumer index. */
		sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci);
		if (!sq_wqe ||
		    (((hw_ci - sw_ci) & wq->mask) * wq->wqebb_size < wqe_size))
			break;

		/* If this WQE has multiple WQEBBs, read again to get the
		 * full-size WQE.
		 */
		if (wqe_size > wq->wqebb_size) {
			sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &sw_ci);
			if (unlikely(!sq_wqe))
				break;
		}

		tx_bytes += skb->len;
		pkts++;

		nr_sges = skb_shinfo(skb)->nr_frags + 1;

		hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges);

		hinic_sq_put_wqe(sq, wqe_size);

		tx_free_skb(nic_dev, skb, txq->free_sges);
	} while (pkts < budget);

	if (__netif_subqueue_stopped(nic_dev->netdev, qp->q_id) &&
	    hinic_get_sq_free_wqebbs(sq) >= HINIC_MIN_TX_NUM_WQEBBS(sq)) {
		netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id);

		__netif_tx_lock(netdev_txq, smp_processor_id());
		if (!netif_testing(nic_dev->netdev))
			netif_wake_subqueue(nic_dev->netdev, qp->q_id);

		__netif_tx_unlock(netdev_txq);

		u64_stats_update_begin(&txq->txq_stats.syncp);
		txq->txq_stats.tx_wake++;
		u64_stats_update_end(&txq->txq_stats.syncp);
	}

	u64_stats_update_begin(&txq->txq_stats.syncp);
	txq->txq_stats.bytes += tx_bytes;
	txq->txq_stats.pkts += pkts;
	u64_stats_update_end(&txq->txq_stats.syncp);

	if (pkts < budget) {
		napi_complete(napi);
		if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
			hinic_hwdev_set_msix_state(nic_dev->hwdev,
						   sq->msix_entry,
						   HINIC_MSIX_ENABLE);

		return pkts;
	}

	return budget;
}

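/* tx completion interrupt - mask the msix entry and schedule the tx napi */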
static irqreturn_t tx_irq(int irq, void *data)
{
	struct hinic_txq *txq = data;
	struct hinic_dev *nic_dev;

	nic_dev = netdev_priv(txq->netdev);

	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
		/* Disable the interrupt until napi is complete */
		hinic_hwdev_set_msix_state(nic_dev->hwdev,
					   txq->sq->msix_entry,
					   HINIC_MSIX_DISABLE);

	hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry);

	napi_schedule(&txq->napi);
	return IRQ_HANDLED;
}

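/**
 * tx_request_irq - register the tx napi and request the tx completion irq
 * @txq: tx queue
 *
 * Return 0 - Success, negative - Failure
 **/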
static int tx_request_irq(struct hinic_txq *txq)
{
	struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
	struct hinic_msix_config interrupt_info = {0};
	struct hinic_intr_coal_info *intr_coal = NULL;
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct hinic_sq *sq = txq->sq;
	struct hinic_qp *qp;
	int err;

	qp = container_of(sq, struct hinic_qp, sq);

	netif_napi_add_weight(txq->netdev, &txq->napi, free_tx_poll,
			      nic_dev->tx_weight);

	hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry,
			     TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC,
			     TX_IRQ_NO_LLI_TIMER, TX_IRQ_NO_CREDIT,
			     TX_IRQ_NO_RESEND_TIMER);

	intr_coal = &nic_dev->tx_intr_coalesce[qp->q_id];
	interrupt_info.msix_index = sq->msix_entry;
	interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
	interrupt_info.pending_cnt = intr_coal->pending_limt;
	interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;

	err = hinic_set_interrupt_cfg(hwdev, &interrupt_info);
	if (err) {
		netif_err(nic_dev, drv, txq->netdev,
			  "Failed to set TX interrupt coalescing attribute\n");
		netif_napi_del(&txq->napi);
		return err;
	}

	err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq);
	if (err) {
		dev_err(&pdev->dev, "Failed to request Tx irq\n");
		netif_napi_del(&txq->napi);
		return err;
	}

	return 0;
}

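/* free the tx completion irq and delete the tx napi */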
static void tx_free_irq(struct hinic_txq *txq)
{
	struct hinic_sq *sq = txq->sq;

	free_irq(sq->irq, txq);
	netif_napi_del(&txq->napi);
}

/**
 * hinic_init_txq - Initialize the Tx Queue
 * @txq: Logical Tx Queue
 * @sq: Hardware Tx Queue to connect the Logical queue with
 * @netdev: network device to connect the Logical queue with
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
		   struct net_device *netdev)
{
	struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	int err, irqname_len;

	txq->netdev = netdev;
	txq->sq = sq;

	txq_stats_init(txq);

	txq->max_sges = HINIC_MAX_SQ_BUFDESCS;

	txq->sges = devm_kcalloc(&netdev->dev, txq->max_sges,
				 sizeof(*txq->sges), GFP_KERNEL);
	if (!txq->sges)
		return -ENOMEM;

	txq->free_sges = devm_kcalloc(&netdev->dev, txq->max_sges,
				      sizeof(*txq->free_sges), GFP_KERNEL);
	if (!txq->free_sges) {
		err = -ENOMEM;
		goto err_alloc_free_sges;
	}

	irqname_len = snprintf(NULL, 0, "%s_txq%d", netdev->name, qp->q_id) + 1;
	txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL);
	if (!txq->irq_name) {
		err = -ENOMEM;
		goto err_alloc_irqname;
	}

	sprintf(txq->irq_name, "%s_txq%d", netdev->name, qp->q_id);

	err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING,
					 CI_UPDATE_NO_COALESC);
	if (err)
		goto err_hw_ci;

	err = tx_request_irq(txq);
	if (err) {
		netdev_err(netdev, "Failed to request Tx irq\n");
		goto err_req_tx_irq;
	}

	return 0;

err_req_tx_irq:
err_hw_ci:
	devm_kfree(&netdev->dev, txq->irq_name);

err_alloc_irqname:
	devm_kfree(&netdev->dev, txq->free_sges);

err_alloc_free_sges:
	devm_kfree(&netdev->dev, txq->sges);
	return err;
}

/**
 * hinic_clean_txq - Clean the Tx Queue
 * @txq: Logical Tx Queue
 **/
void hinic_clean_txq(struct hinic_txq *txq)
{
	struct net_device *netdev = txq->netdev;

	tx_free_irq(txq);

	free_all_tx_skbs(txq);

	devm_kfree(&netdev->dev, txq->irq_name);
	devm_kfree(&netdev->dev, txq->free_sges);
	devm_kfree(&netdev->dev, txq->sges);
}