// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mm.h>

#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "mana.h"

/* Microsoft Azure Network Adapter (MANA) functions */

static int mana_open(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	int err;

	err = mana_alloc_queues(ndev);
	if (err)
		return err;

	apc->port_is_up = true;

	/* Ensure port state updated before txq state */
	smp_wmb();

	netif_carrier_on(ndev);
	netif_tx_wake_all_queues(ndev);

	return 0;
}

static int mana_close(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	if (!apc->port_is_up)
		return 0;

	return mana_detach(ndev, true);
}

static bool mana_can_tx(struct gdma_queue *wq)
{
	return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
}

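/* Return the L4 protocol (IPPROTO_TCP or IPPROTO_UDP) for which the hardware
 * can offload the checksum of this skb, or 0 if the checksum must be
 * computed in software.
 */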
static unsigned int mana_checksum_info(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return IPPROTO_TCP;

		if (ip->protocol == IPPROTO_UDP)
			return IPPROTO_UDP;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6 = ipv6_hdr(skb);

		if (ip6->nexthdr == IPPROTO_TCP)
			return IPPROTO_TCP;

		if (ip6->nexthdr == IPPROTO_UDP)
			return IPPROTO_UDP;
	}

	/* No csum offloading */
	return 0;
}

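/* DMA-map the skb's linear data and each page fragment and fill in the
 * scatter-gather list of the TX work request. The DMA handles and sizes are
 * saved in the mana_skb_head living in the skb headroom so that
 * mana_unmap_skb() can undo the mapping at completion time.
 */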
static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
			struct mana_tx_package *tp)
{
	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
	struct gdma_dev *gd = apc->ac->gdma_dev;
	struct gdma_context *gc;
	struct device *dev;
	skb_frag_t *frag;
	dma_addr_t da;
	int i;

	gc = gd->gdma_context;
	dev = gc->dev;
	da = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

	if (dma_mapping_error(dev, da))
		return -ENOMEM;

	ash->dma_handle[0] = da;
	ash->size[0] = skb_headlen(skb);

	tp->wqe_req.sgl[0].address = ash->dma_handle[0];
	tp->wqe_req.sgl[0].mem_key = gd->gpa_mkey;
	tp->wqe_req.sgl[0].size = ash->size[0];

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				      DMA_TO_DEVICE);

		if (dma_mapping_error(dev, da))
			goto frag_err;

		ash->dma_handle[i + 1] = da;
		ash->size[i + 1] = skb_frag_size(frag);

		tp->wqe_req.sgl[i + 1].address = ash->dma_handle[i + 1];
		tp->wqe_req.sgl[i + 1].mem_key = gd->gpa_mkey;
		tp->wqe_req.sgl[i + 1].size = ash->size[i + 1];
	}

	return 0;

frag_err:
	for (i = i - 1; i >= 0; i--)
		dma_unmap_page(dev, ash->dma_handle[i + 1], ash->size[i + 1],
			       DMA_TO_DEVICE);

	dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);

	return -ENOMEM;
}

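/* ndo_start_xmit handler: build the per-packet out-of-band area (vPort,
 * checksum/TSO offload fields), map the skb for DMA, post the work request
 * to the send queue and ring the doorbell. The netdev queue is stopped when
 * the SQ no longer has room for a maximum-sized WQE.
 */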
static int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
	struct mana_port_context *apc = netdev_priv(ndev);
	u16 txq_idx = skb_get_queue_mapping(skb);
	struct gdma_dev *gd = apc->ac->gdma_dev;
	bool ipv4 = false, ipv6 = false;
	struct mana_tx_package pkg = {};
	struct netdev_queue *net_txq;
	struct mana_stats *tx_stats;
	struct gdma_queue *gdma_sq;
	unsigned int csum_type;
	struct mana_txq *txq;
	struct mana_cq *cq;
	int err, len;

	if (unlikely(!apc->port_is_up))
		goto tx_drop;

	if (skb_cow_head(skb, MANA_HEADROOM))
		goto tx_drop_count;

	txq = &apc->tx_qp[txq_idx].txq;
	gdma_sq = txq->gdma_sq;
	cq = &apc->tx_qp[txq_idx].tx_cq;

	pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
	pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;

	if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
		pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
		pkt_fmt = MANA_LONG_PKT_FMT;
	} else {
		pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
	}

	pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;

	if (pkt_fmt == MANA_SHORT_PKT_FMT)
		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
	else
		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);

	pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
	pkg.wqe_req.flags = 0;
	pkg.wqe_req.client_data_unit = 0;

	pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
	WARN_ON_ONCE(pkg.wqe_req.num_sge > 30);

	if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
		pkg.wqe_req.sgl = pkg.sgl_array;
	} else {
		pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
					    sizeof(struct gdma_sge),
					    GFP_ATOMIC);
		if (!pkg.sgl_ptr)
			goto tx_drop_count;

		pkg.wqe_req.sgl = pkg.sgl_ptr;
	}

	if (skb->protocol == htons(ETH_P_IP))
		ipv4 = true;
	else if (skb->protocol == htons(ETH_P_IPV6))
		ipv6 = true;

	if (skb_is_gso(skb)) {
		pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
		pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;

		pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
		pkg.tx_oob.s_oob.comp_tcp_csum = 1;
		pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);

		pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size;
		pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
		if (ipv4) {
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0,
						   IPPROTO_TCP, 0);
		} else {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0,
						 IPPROTO_TCP, 0);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		csum_type = mana_checksum_info(skb);

		if (csum_type == IPPROTO_TCP) {
			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;

			pkg.tx_oob.s_oob.comp_tcp_csum = 1;
			pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);

		} else if (csum_type == IPPROTO_UDP) {
			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;

			pkg.tx_oob.s_oob.comp_udp_csum = 1;
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto free_sgl_ptr;
		}
	}

	if (mana_map_skb(skb, apc, &pkg))
		goto free_sgl_ptr;

	skb_queue_tail(&txq->pending_skbs, skb);

	len = skb->len;
	net_txq = netdev_get_tx_queue(ndev, txq_idx);

	err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
					(struct gdma_posted_wqe_info *)skb->cb);
	if (!mana_can_tx(gdma_sq)) {
		netif_tx_stop_queue(net_txq);
		apc->eth_stats.stop_queue++;
	}

	if (err) {
		(void)skb_dequeue_tail(&txq->pending_skbs);
		netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
		err = NETDEV_TX_BUSY;
		goto tx_busy;
	}

	err = NETDEV_TX_OK;
	atomic_inc(&txq->pending_sends);

	mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);

	/* skb may be freed after mana_gd_post_work_request. Do not use it. */
	skb = NULL;

	tx_stats = &txq->stats;
	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->packets++;
	tx_stats->bytes += len;
	u64_stats_update_end(&tx_stats->syncp);

tx_busy:
	if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
		netif_tx_wake_queue(net_txq);
		apc->eth_stats.wake_queue++;
	}

	kfree(pkg.sgl_ptr);
	return err;

free_sgl_ptr:
	kfree(pkg.sgl_ptr);
tx_drop_count:
	ndev->stats.tx_dropped++;
tx_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static void mana_get_stats64(struct net_device *ndev,
			     struct rtnl_link_stats64 *st)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;
	struct mana_stats *stats;
	unsigned int start;
	u64 packets, bytes;
	int q;

	if (!apc->port_is_up)
		return;

	netdev_stats_to_stats64(st, &ndev->stats);

	for (q = 0; q < num_queues; q++) {
		stats = &apc->rxqs[q]->stats;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		st->rx_packets += packets;
		st->rx_bytes += bytes;
	}

	for (q = 0; q < num_queues; q++) {
		stats = &apc->tx_qp[q].txq.stats;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		st->tx_packets += packets;
		st->tx_bytes += bytes;
	}
}

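/* Pick a TX queue from the RSS indirection table based on the skb hash, and
 * cache the choice in the socket when possible so later packets of the same
 * flow keep using the same queue.
 */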
static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb,
			     int old_q)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	u32 hash = skb_get_hash(skb);
	struct sock *sk = skb->sk;
	int txq;

	txq = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK];

	if (txq != old_q && sk && sk_fullsock(sk) &&
	    rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, txq);

	return txq;
}

static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
			     struct net_device *sb_dev)
{
	int txq;

	if (ndev->real_num_tx_queues == 1)
		return 0;

	txq = sk_tx_queue_get(skb->sk);

	if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) {
		if (skb_rx_queue_recorded(skb))
			txq = skb_get_rx_queue(skb);
		else
			txq = mana_get_tx_queue(ndev, skb, txq);
	}

	return txq;
}

static const struct net_device_ops mana_devops = {
	.ndo_open		= mana_open,
	.ndo_stop		= mana_close,
	.ndo_select_queue	= mana_select_queue,
	.ndo_start_xmit		= mana_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_get_stats64	= mana_get_stats64,
};

static void mana_cleanup_port_context(struct mana_port_context *apc)
{
	kfree(apc->rxqs);
	apc->rxqs = NULL;
}

static int mana_init_port_context(struct mana_port_context *apc)
{
	apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *),
			    GFP_KERNEL);

	return !apc->rxqs ? -ENOMEM : 0;
}

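/* Send a MANA management request over the GDMA channel and validate that the
 * response carries the same device ID and activity ID as the request.
 */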
static int mana_send_request(struct mana_context *ac, void *in_buf,
			     u32 in_len, void *out_buf, u32 out_len)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct gdma_resp_hdr *resp = out_buf;
	struct gdma_req_hdr *req = in_buf;
	struct device *dev = gc->dev;
	static atomic_t activity_id;
	int err;

	req->dev_id = gc->mana.dev_id;
	req->activity_id = atomic_inc_return(&activity_id);

	err = mana_gd_send_request(gc, in_len, in_buf, out_len,
				   out_buf);
	if (err || resp->status) {
		dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
			err, resp->status);
		return err ? err : -EPROTO;
	}

	if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
	    req->activity_id != resp->activity_id) {
		dev_err(dev, "Unexpected mana message response: %x,%x,%x,%x\n",
			req->dev_id.as_uint32, resp->dev_id.as_uint32,
			req->activity_id, resp->activity_id);
		return -EPROTO;
	}

	return 0;
}

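/* Sanity-check a management response header: the message type must match,
 * and the message version and size must be at least what the caller expects.
 */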
static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
				const enum mana_command_code expected_code,
				const u32 min_size)
{
	if (resp_hdr->response.msg_type != expected_code)
		return -EPROTO;

	if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
		return -EPROTO;

	if (resp_hdr->response.msg_size < min_size)
		return -EPROTO;

	return 0;
}

static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
				 u32 proto_minor_ver, u32 proto_micro_ver,
				 u16 *max_num_vports)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct mana_query_device_cfg_resp resp = {};
	struct mana_query_device_cfg_req req = {};
	struct device *dev = gc->dev;
	int err = 0;

	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
			     sizeof(req), sizeof(resp));
	req.proto_major_ver = proto_major_ver;
	req.proto_minor_ver = proto_minor_ver;
	req.proto_micro_ver = proto_micro_ver;

	err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
	if (err) {
		dev_err(dev, "Failed to query config: %d", err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		dev_err(dev, "Invalid query result: %d, 0x%x\n", err,
			resp.hdr.status);
		if (!err)
			err = -EPROTO;
		return err;
	}

	*max_num_vports = resp.max_num_vports;

	return 0;
}

static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
				u32 *max_sq, u32 *max_rq, u32 *num_indir_entry)
{
	struct mana_query_vport_cfg_resp resp = {};
	struct mana_query_vport_cfg_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
			     sizeof(req), sizeof(resp));

	req.vport_index = vport_index;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err)
		return err;

	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
				   sizeof(resp));
	if (err)
		return err;

	if (resp.hdr.status)
		return -EPROTO;

	*max_sq = resp.max_num_sq;
	*max_rq = resp.max_num_rq;
	*num_indir_entry = resp.num_indirection_ent;

	apc->port_handle = resp.vport;
	ether_addr_copy(apc->mac_addr, resp.mac_addr);

	return 0;
}

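/* Configure the vPort for TX: associate it with a protection domain and a
 * doorbell page, and record whether the short-form TX OOB may be used along
 * with the TX vPort offset returned by the hardware.
 */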
static int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
			  u32 doorbell_pg_id)
{
	struct mana_config_vport_resp resp = {};
	struct mana_config_vport_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
			     sizeof(req), sizeof(resp));
	req.vport = apc->port_handle;
	req.pdid = protection_dom_id;
	req.doorbell_pageid = doorbell_pg_id;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err);
		goto out;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
			   err, resp.hdr.status);
		if (!err)
			err = -EPROTO;

		goto out;
	}

	apc->tx_shortform_allowed = resp.short_form_allowed;
	apc->tx_vp_offset = resp.tx_vport_offset;
out:
	return err;
}

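/* Configure RX steering for the vPort: enable or disable RX, and optionally
 * update the default RX object, the RSS hash key and the indirection table
 * of RX object handles.
 */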
static int mana_cfg_vport_steering(struct mana_port_context *apc,
				   enum TRI_STATE rx,
				   bool update_default_rxobj, bool update_key,
				   bool update_tab)
{
	u16 num_entries = MANA_INDIRECT_TABLE_SIZE;
	struct mana_cfg_rx_steer_req *req = NULL;
	struct mana_cfg_rx_steer_resp resp = {};
	struct net_device *ndev = apc->ndev;
	mana_handle_t *req_indir_tab;
	u32 req_buf_size;
	int err;

	req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
	req = kzalloc(req_buf_size, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
			     sizeof(resp));

	req->vport = apc->port_handle;
	req->num_indir_entries = num_entries;
	req->indir_tab_offset = sizeof(*req);
	req->rx_enable = rx;
	req->rss_enable = apc->rss_state;
	req->update_default_rxobj = update_default_rxobj;
	req->update_hashkey = update_key;
	req->update_indir_tab = update_tab;
	req->default_rxobj = apc->default_rxobj;

	if (update_key)
		memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);

	if (update_tab) {
		req_indir_tab = (mana_handle_t *)(req + 1);
		memcpy(req_indir_tab, apc->rxobj_table,
		       req->num_indir_entries * sizeof(mana_handle_t));
	}

	err = mana_send_request(apc->ac, req, req_buf_size, &resp,
				sizeof(resp));
	if (err) {
		netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
		goto out;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
				   sizeof(resp));
	if (err) {
		netdev_err(ndev, "vPort RX configuration failed: %d\n", err);
		goto out;
	}

	if (resp.hdr.status) {
		netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
			   resp.hdr.status);
		err = -EPROTO;
	}
out:
	kfree(req);
	return err;
}

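/* Ask the hardware to create a work queue object (an SQ or RQ together with
 * its completion queue) on the given vPort, and return the object handle and
 * the hardware queue IDs.
 */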
static int mana_create_wq_obj(struct mana_port_context *apc,
			      mana_handle_t vport,
			      u32 wq_type, struct mana_obj_spec *wq_spec,
			      struct mana_obj_spec *cq_spec,
			      mana_handle_t *wq_obj)
{
	struct mana_create_wqobj_resp resp = {};
	struct mana_create_wqobj_req req = {};
	struct net_device *ndev = apc->ndev;
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
			     sizeof(req), sizeof(resp));
	req.vport = vport;
	req.wq_type = wq_type;
	req.wq_gdma_region = wq_spec->gdma_region;
	req.cq_gdma_region = cq_spec->gdma_region;
	req.wq_size = wq_spec->queue_size;
	req.cq_size = cq_spec->queue_size;
	req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
	req.cq_parent_qid = cq_spec->attached_eq;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(ndev, "Failed to create WQ object: %d\n", err);
		goto out;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
			   resp.hdr.status);
		if (!err)
			err = -EPROTO;
		goto out;
	}

	if (resp.wq_obj == INVALID_MANA_HANDLE) {
		netdev_err(ndev, "Got an invalid WQ object handle\n");
		err = -EPROTO;
		goto out;
	}

	*wq_obj = resp.wq_obj;
	wq_spec->queue_index = resp.wq_id;
	cq_spec->queue_index = resp.cq_id;

	return 0;
out:
	return err;
}

static void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
				mana_handle_t wq_obj)
{
	struct mana_destroy_wqobj_resp resp = {};
	struct mana_destroy_wqobj_req req = {};
	struct net_device *ndev = apc->ndev;
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
			     sizeof(req), sizeof(resp));
	req.wq_type = wq_type;
	req.wq_obj_handle = wq_obj;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
		return;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
				   sizeof(resp));
	if (err || resp.hdr.status)
		netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
			   resp.hdr.status);
}

static void mana_init_cqe_poll_buf(struct gdma_comp *cqe_poll_buf)
{
	int i;

	for (i = 0; i < CQE_POLLING_BUFFER; i++)
		memset(&cqe_poll_buf[i], 0, sizeof(struct gdma_comp));
}

static void mana_destroy_eq(struct gdma_context *gc,
			    struct mana_port_context *apc)
{
	struct gdma_queue *eq;
	int i;

	if (!apc->eqs)
		return;

	for (i = 0; i < apc->num_queues; i++) {
		eq = apc->eqs[i].eq;
		if (!eq)
			continue;

		mana_gd_destroy_queue(gc, eq);
	}

	kfree(apc->eqs);
	apc->eqs = NULL;
}

static int mana_create_eq(struct mana_port_context *apc)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;
	struct gdma_queue_spec spec = {};
	int err;
	int i;

	apc->eqs = kcalloc(apc->num_queues, sizeof(struct mana_eq),
			   GFP_KERNEL);
	if (!apc->eqs)
		return -ENOMEM;

	spec.type = GDMA_EQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = EQ_SIZE;
	spec.eq.callback = NULL;
	spec.eq.context = apc->eqs;
	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
	spec.eq.ndev = apc->ndev;

	for (i = 0; i < apc->num_queues; i++) {
		mana_init_cqe_poll_buf(apc->eqs[i].cqe_poll);

		err = mana_gd_create_mana_eq(gd, &spec, &apc->eqs[i].eq);
		if (err)
			goto out;
	}

	return 0;
out:
	mana_destroy_eq(gd->gdma_context, apc);
	return err;
}

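/* Advance the work queue tail by the number of basic units consumed by
 * completed WQEs, making that space available for new posts.
 */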
static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
{
	u32 used_space_old;
	u32 used_space_new;

	used_space_old = wq->head - wq->tail;
	used_space_new = wq->head - (wq->tail + num_units);

	if (WARN_ON_ONCE(used_space_new > used_space_old))
		return -ERANGE;

	wq->tail += num_units;
	return 0;
}

static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
{
	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
	struct device *dev = gc->dev;
	int i;

	dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);

	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
			       DMA_TO_DEVICE);
}

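/* Process TX completions: for each CQE, dequeue the corresponding pending
 * skb, unmap and free it, advance the SQ tail by the space its WQE used, and
 * wake the netdev TX queue if it was stopped and room is available again.
 */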
static void mana_poll_tx_cq(struct mana_cq *cq)
{
	struct gdma_queue *gdma_eq = cq->gdma_cq->cq.parent;
	struct gdma_comp *completions = cq->gdma_comp_buf;
	struct gdma_posted_wqe_info *wqe_info;
	unsigned int pkt_transmitted = 0;
	unsigned int wqe_unit_cnt = 0;
	struct mana_txq *txq = cq->txq;
	struct mana_port_context *apc;
	struct netdev_queue *net_txq;
	struct gdma_queue *gdma_wq;
	unsigned int avail_space;
	struct net_device *ndev;
	struct sk_buff *skb;
	bool txq_stopped;
	int comp_read;
	int i;

	ndev = txq->ndev;
	apc = netdev_priv(ndev);

	comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
				    CQE_POLLING_BUFFER);

	for (i = 0; i < comp_read; i++) {
		struct mana_tx_comp_oob *cqe_oob;

		if (WARN_ON_ONCE(!completions[i].is_sq))
			return;

		cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
		if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type !=
				 MANA_CQE_COMPLETION))
			return;

		switch (cqe_oob->cqe_hdr.cqe_type) {
		case CQE_TX_OKAY:
			break;

		case CQE_TX_SA_DROP:
		case CQE_TX_MTU_DROP:
		case CQE_TX_INVALID_OOB:
		case CQE_TX_INVALID_ETH_TYPE:
		case CQE_TX_HDR_PROCESSING_ERROR:
		case CQE_TX_VF_DISABLED:
		case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
		case CQE_TX_VPORT_DISABLED:
		case CQE_TX_VLAN_TAGGING_VIOLATION:
			WARN_ONCE(1, "TX: CQE error %d: ignored.\n",
				  cqe_oob->cqe_hdr.cqe_type);
			break;

		default:
			/* If the CQE type is unexpected, log an error, assert,
			 * and go through the error path.
			 */
			WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n",
				  cqe_oob->cqe_hdr.cqe_type);
			return;
		}

		if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))
			return;

		skb = skb_dequeue(&txq->pending_skbs);
		if (WARN_ON_ONCE(!skb))
			return;

		wqe_info = (struct gdma_posted_wqe_info *)skb->cb;
		wqe_unit_cnt += wqe_info->wqe_size_in_bu;

		mana_unmap_skb(skb, apc);

		napi_consume_skb(skb, gdma_eq->eq.budget);

		pkt_transmitted++;
	}

	if (WARN_ON_ONCE(wqe_unit_cnt == 0))
		return;

	mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);

	gdma_wq = txq->gdma_sq;
	avail_space = mana_gd_wq_avail_space(gdma_wq);

	/* Ensure tail updated before checking q stop */
	smp_mb();

	net_txq = txq->net_txq;
	txq_stopped = netif_tx_queue_stopped(net_txq);

	/* Ensure checking txq_stopped before apc->port_is_up. */
	smp_rmb();

	if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
		netif_tx_wake_queue(net_txq);
		apc->eth_stats.wake_queue++;
	}

	if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
		WARN_ON_ONCE(1);
}

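/* Repost the receive WQE for the buffer at the current index and ring the
 * doorbell so the hardware can use the buffer again, then advance the index.
 */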
static void mana_post_pkt_rxq(struct mana_rxq *rxq)
{
	struct mana_recv_buf_oob *recv_buf_oob;
	u32 curr_index;
	int err;

	curr_index = rxq->buf_index++;
	if (rxq->buf_index == rxq->num_rx_buf)
		rxq->buf_index = 0;

	recv_buf_oob = &rxq->rx_oobs[curr_index];

	err = mana_gd_post_and_ring(rxq->gdma_rq, &recv_buf_oob->wqe_req,
				    &recv_buf_oob->wqe_inf);
	if (WARN_ON_ONCE(err))
		return;

	WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
}

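/* Build an skb around a received buffer, fill in protocol, checksum and RSS
 * hash information from the completion OOB, and hand it to the stack via
 * GRO. A NULL buf_va means the packet is dropped.
 */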
static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
			struct mana_rxq *rxq)
{
	struct mana_stats *rx_stats = &rxq->stats;
	struct net_device *ndev = rxq->ndev;
	uint pkt_len = cqe->ppi[0].pkt_len;
	struct mana_port_context *apc;
	u16 rxq_idx = rxq->rxq_idx;
	struct napi_struct *napi;
	struct gdma_queue *eq;
	struct sk_buff *skb;
	u32 hash_value;

	apc = netdev_priv(ndev);
	eq = apc->eqs[rxq_idx].eq;
	eq->eq.work_done++;
	napi = &eq->eq.napi;

	if (!buf_va) {
		++ndev->stats.rx_dropped;
		return;
	}

	skb = build_skb(buf_va, PAGE_SIZE);

	if (!skb) {
		free_page((unsigned long)buf_va);
		++ndev->stats.rx_dropped;
		return;
	}

	skb_put(skb, pkt_len);
	skb->dev = napi->dev;

	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);
	skb_record_rx_queue(skb, rxq_idx);

	if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) {
		if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) {
		hash_value = cqe->ppi[0].pkt_hash;

		if (cqe->rx_hashtype & MANA_HASH_L4)
			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4);
		else
			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
	}

	napi_gro_receive(napi, skb);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += pkt_len;
	u64_stats_update_end(&rx_stats->syncp);
}

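/* Handle one RX CQE: allocate and DMA-map a replacement page, swap it into
 * the receive buffer slot, pass the old buffer up the stack, then move the
 * RQ tail and repost the WQE. If no replacement page can be obtained, the
 * packet is dropped and the existing buffer is reused.
 */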
static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
				struct gdma_comp *cqe)
{
	struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
	struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
	struct net_device *ndev = rxq->ndev;
	struct mana_recv_buf_oob *rxbuf_oob;
	struct device *dev = gc->dev;
	void *new_buf, *old_buf;
	struct page *new_page;
	u32 curr, pktlen;
	dma_addr_t da;

	switch (oob->cqe_hdr.cqe_type) {
	case CQE_RX_OKAY:
		break;

	case CQE_RX_TRUNCATED:
		netdev_err(ndev, "Dropped a truncated packet\n");
		return;

	case CQE_RX_COALESCED_4:
		netdev_err(ndev, "RX coalescing is unsupported\n");
		return;

	case CQE_RX_OBJECT_FENCE:
		netdev_err(ndev, "RX Fencing is unsupported\n");
		return;

	default:
		netdev_err(ndev, "Unknown RX CQE type = %d\n",
			   oob->cqe_hdr.cqe_type);
		return;
	}

	if (oob->cqe_hdr.cqe_type != CQE_RX_OKAY)
		return;

	pktlen = oob->ppi[0].pkt_len;

	if (pktlen == 0) {
		/* Data packets should never have a packet length of zero */
		netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
			   rxq->gdma_id, cq->gdma_id, rxq->rxobj);
		return;
	}

	curr = rxq->buf_index;
	rxbuf_oob = &rxq->rx_oobs[curr];
	WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);

	new_page = alloc_page(GFP_ATOMIC);

	if (new_page) {
		da = dma_map_page(dev, new_page, 0, rxq->datasize,
				  DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, da)) {
			__free_page(new_page);
			new_page = NULL;
		}
	}

	new_buf = new_page ? page_to_virt(new_page) : NULL;

	if (new_buf) {
		dma_unmap_page(dev, rxbuf_oob->buf_dma_addr, rxq->datasize,
			       DMA_FROM_DEVICE);

		old_buf = rxbuf_oob->buf_va;

		/* refresh the rxbuf_oob with the new page */
		rxbuf_oob->buf_va = new_buf;
		rxbuf_oob->buf_dma_addr = da;
		rxbuf_oob->sgl[0].address = rxbuf_oob->buf_dma_addr;
	} else {
		old_buf = NULL; /* drop the packet if no memory */
	}

	mana_rx_skb(old_buf, oob, rxq);

	mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);

	mana_post_pkt_rxq(rxq);
}

static void mana_poll_rx_cq(struct mana_cq *cq)
{
	struct gdma_comp *comp = cq->gdma_comp_buf;
	u32 comp_read, i;

	comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
	WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);

	for (i = 0; i < comp_read; i++) {
		if (WARN_ON_ONCE(comp[i].is_sq))
			return;

		/* verify recv cqe references the right rxq */
		if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
			return;

		mana_process_rx_cqe(cq->rxq, cq, &comp[i]);
	}
}

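/* Completion queue callback: dispatch to the RX or TX poll routine for this
 * CQ, then re-arm the CQ for further completions.
 */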
static void mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
{
	struct mana_cq *cq = context;

	WARN_ON_ONCE(cq->gdma_cq != gdma_queue);

	if (cq->type == MANA_CQ_TYPE_RX)
		mana_poll_rx_cq(cq);
	else
		mana_poll_tx_cq(cq);

	mana_gd_arm_cq(gdma_queue);
}

static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;

	if (!cq->gdma_cq)
		return;

	mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
}

static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;

	if (!txq->gdma_sq)
		return;

	mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
}

static void mana_destroy_txq(struct mana_port_context *apc)
{
	int i;

	if (!apc->tx_qp)
		return;

	for (i = 0; i < apc->num_queues; i++) {
		mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);

		mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);

		mana_deinit_txq(apc, &apc->tx_qp[i].txq);
	}

	kfree(apc->tx_qp);
	apc->tx_qp = NULL;
}

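/* Create one send queue and its completion queue per TX channel: allocate
 * the GDMA queues, create the hardware WQ object that ties them to the vPort
 * and an EQ, then register the CQ in the CQ table and arm it.
 */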
1134*ca9c54d2SDexuan Cui static int mana_create_txq(struct mana_port_context *apc,
1135*ca9c54d2SDexuan Cui 			   struct net_device *net)
1136*ca9c54d2SDexuan Cui {
1137*ca9c54d2SDexuan Cui 	struct gdma_dev *gd = apc->ac->gdma_dev;
1138*ca9c54d2SDexuan Cui 	struct mana_obj_spec wq_spec;
1139*ca9c54d2SDexuan Cui 	struct mana_obj_spec cq_spec;
1140*ca9c54d2SDexuan Cui 	struct gdma_queue_spec spec;
1141*ca9c54d2SDexuan Cui 	struct gdma_context *gc;
1142*ca9c54d2SDexuan Cui 	struct mana_txq *txq;
1143*ca9c54d2SDexuan Cui 	struct mana_cq *cq;
1144*ca9c54d2SDexuan Cui 	u32 txq_size;
1145*ca9c54d2SDexuan Cui 	u32 cq_size;
1146*ca9c54d2SDexuan Cui 	int err;
1147*ca9c54d2SDexuan Cui 	int i;
1148*ca9c54d2SDexuan Cui 
1149*ca9c54d2SDexuan Cui 	apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp),
1150*ca9c54d2SDexuan Cui 			     GFP_KERNEL);
1151*ca9c54d2SDexuan Cui 	if (!apc->tx_qp)
1152*ca9c54d2SDexuan Cui 		return -ENOMEM;
1153*ca9c54d2SDexuan Cui 
1154*ca9c54d2SDexuan Cui 	/*  The minimum size of the WQE is 32 bytes, hence
1155*ca9c54d2SDexuan Cui 	 *  MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
1156*ca9c54d2SDexuan Cui 	 *  the SQ can store. This value is then used to size other queues
1157*ca9c54d2SDexuan Cui 	 *  to prevent overflow.
1158*ca9c54d2SDexuan Cui 	 */
1159*ca9c54d2SDexuan Cui 	txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
1160*ca9c54d2SDexuan Cui 	BUILD_BUG_ON(!PAGE_ALIGNED(txq_size));
1161*ca9c54d2SDexuan Cui 
1162*ca9c54d2SDexuan Cui 	cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
1163*ca9c54d2SDexuan Cui 	cq_size = PAGE_ALIGN(cq_size);
1164*ca9c54d2SDexuan Cui 
1165*ca9c54d2SDexuan Cui 	gc = gd->gdma_context;
1166*ca9c54d2SDexuan Cui 
1167*ca9c54d2SDexuan Cui 	for (i = 0; i < apc->num_queues; i++) {
1168*ca9c54d2SDexuan Cui 		apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
1169*ca9c54d2SDexuan Cui 
1170*ca9c54d2SDexuan Cui 		/* Create SQ */
1171*ca9c54d2SDexuan Cui 		txq = &apc->tx_qp[i].txq;
1172*ca9c54d2SDexuan Cui 
1173*ca9c54d2SDexuan Cui 		u64_stats_init(&txq->stats.syncp);
1174*ca9c54d2SDexuan Cui 		txq->ndev = net;
1175*ca9c54d2SDexuan Cui 		txq->net_txq = netdev_get_tx_queue(net, i);
1176*ca9c54d2SDexuan Cui 		txq->vp_offset = apc->tx_vp_offset;
1177*ca9c54d2SDexuan Cui 		skb_queue_head_init(&txq->pending_skbs);
1178*ca9c54d2SDexuan Cui 
1179*ca9c54d2SDexuan Cui 		memset(&spec, 0, sizeof(spec));
1180*ca9c54d2SDexuan Cui 		spec.type = GDMA_SQ;
1181*ca9c54d2SDexuan Cui 		spec.monitor_avl_buf = true;
1182*ca9c54d2SDexuan Cui 		spec.queue_size = txq_size;
1183*ca9c54d2SDexuan Cui 		err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
1184*ca9c54d2SDexuan Cui 		if (err)
1185*ca9c54d2SDexuan Cui 			goto out;
1186*ca9c54d2SDexuan Cui 
1187*ca9c54d2SDexuan Cui 		/* Create SQ's CQ */
1188*ca9c54d2SDexuan Cui 		cq = &apc->tx_qp[i].tx_cq;
1189*ca9c54d2SDexuan Cui 		cq->gdma_comp_buf = apc->eqs[i].cqe_poll;
1190*ca9c54d2SDexuan Cui 		cq->type = MANA_CQ_TYPE_TX;
1191*ca9c54d2SDexuan Cui 
1192*ca9c54d2SDexuan Cui 		cq->txq = txq;
1193*ca9c54d2SDexuan Cui 
1194*ca9c54d2SDexuan Cui 		memset(&spec, 0, sizeof(spec));
1195*ca9c54d2SDexuan Cui 		spec.type = GDMA_CQ;
1196*ca9c54d2SDexuan Cui 		spec.monitor_avl_buf = false;
1197*ca9c54d2SDexuan Cui 		spec.queue_size = cq_size;
1198*ca9c54d2SDexuan Cui 		spec.cq.callback = mana_cq_handler;
1199*ca9c54d2SDexuan Cui 		spec.cq.parent_eq = apc->eqs[i].eq;
1200*ca9c54d2SDexuan Cui 		spec.cq.context = cq;
1201*ca9c54d2SDexuan Cui 		err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
1202*ca9c54d2SDexuan Cui 		if (err)
1203*ca9c54d2SDexuan Cui 			goto out;
1204*ca9c54d2SDexuan Cui 
1205*ca9c54d2SDexuan Cui 		memset(&wq_spec, 0, sizeof(wq_spec));
1206*ca9c54d2SDexuan Cui 		memset(&cq_spec, 0, sizeof(cq_spec));
1207*ca9c54d2SDexuan Cui 
1208*ca9c54d2SDexuan Cui 		wq_spec.gdma_region = txq->gdma_sq->mem_info.gdma_region;
1209*ca9c54d2SDexuan Cui 		wq_spec.queue_size = txq->gdma_sq->queue_size;
1210*ca9c54d2SDexuan Cui 
1211*ca9c54d2SDexuan Cui 		cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
1212*ca9c54d2SDexuan Cui 		cq_spec.queue_size = cq->gdma_cq->queue_size;
1213*ca9c54d2SDexuan Cui 		cq_spec.modr_ctx_id = 0;
1214*ca9c54d2SDexuan Cui 		cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
1215*ca9c54d2SDexuan Cui 
1216*ca9c54d2SDexuan Cui 		err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
1217*ca9c54d2SDexuan Cui 					 &wq_spec, &cq_spec,
1218*ca9c54d2SDexuan Cui 					 &apc->tx_qp[i].tx_object);
1219*ca9c54d2SDexuan Cui 
1220*ca9c54d2SDexuan Cui 		if (err)
1221*ca9c54d2SDexuan Cui 			goto out;
1222*ca9c54d2SDexuan Cui 
1223*ca9c54d2SDexuan Cui 		txq->gdma_sq->id = wq_spec.queue_index;
1224*ca9c54d2SDexuan Cui 		cq->gdma_cq->id = cq_spec.queue_index;
1225*ca9c54d2SDexuan Cui 
1226*ca9c54d2SDexuan Cui 		txq->gdma_sq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1227*ca9c54d2SDexuan Cui 		cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1228*ca9c54d2SDexuan Cui 
1229*ca9c54d2SDexuan Cui 		txq->gdma_txq_id = txq->gdma_sq->id;
1230*ca9c54d2SDexuan Cui 
1231*ca9c54d2SDexuan Cui 		cq->gdma_id = cq->gdma_cq->id;
1232*ca9c54d2SDexuan Cui 
1233*ca9c54d2SDexuan Cui 		if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
1234*ca9c54d2SDexuan Cui 			err = -EINVAL;
			goto out;
		}
1235*ca9c54d2SDexuan Cui 
1236*ca9c54d2SDexuan Cui 		gc->cq_table[cq->gdma_id] = cq->gdma_cq;
1237*ca9c54d2SDexuan Cui 
1238*ca9c54d2SDexuan Cui 		mana_gd_arm_cq(cq->gdma_cq);
1239*ca9c54d2SDexuan Cui 	}
1240*ca9c54d2SDexuan Cui 
1241*ca9c54d2SDexuan Cui 	return 0;
1242*ca9c54d2SDexuan Cui out:
1243*ca9c54d2SDexuan Cui 	mana_destroy_txq(apc);
1244*ca9c54d2SDexuan Cui 	return err;
1245*ca9c54d2SDexuan Cui }
1246*ca9c54d2SDexuan Cui 
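/* Wait for any in-progress NAPI poll on the EQ that serves this RX queue,
 * so the queue can be torn down safely afterwards.
 */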
1247*ca9c54d2SDexuan Cui static void mana_napi_sync_for_rx(struct mana_rxq *rxq)
1248*ca9c54d2SDexuan Cui {
1249*ca9c54d2SDexuan Cui 	struct net_device *ndev = rxq->ndev;
1250*ca9c54d2SDexuan Cui 	struct mana_port_context *apc;
1251*ca9c54d2SDexuan Cui 	u16 rxq_idx = rxq->rxq_idx;
1252*ca9c54d2SDexuan Cui 	struct napi_struct *napi;
1253*ca9c54d2SDexuan Cui 	struct gdma_queue *eq;
1254*ca9c54d2SDexuan Cui 
1255*ca9c54d2SDexuan Cui 	apc = netdev_priv(ndev);
1256*ca9c54d2SDexuan Cui 	eq = apc->eqs[rxq_idx].eq;
1257*ca9c54d2SDexuan Cui 	napi = &eq->eq.napi;
1258*ca9c54d2SDexuan Cui 
1259*ca9c54d2SDexuan Cui 	napi_synchronize(napi);
1260*ca9c54d2SDexuan Cui }
1261*ca9c54d2SDexuan Cui 
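/* Destroy one RX queue: optionally synchronize with NAPI first (when a live
 * port is being torn down), destroy the device-side WQ object, clean up the
 * CQ, unmap and free every receive buffer page, destroy the GDMA RQ, and
 * free the rxq structure.
 */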
1262*ca9c54d2SDexuan Cui static void mana_destroy_rxq(struct mana_port_context *apc,
1263*ca9c54d2SDexuan Cui 			     struct mana_rxq *rxq, bool validate_state)
1265*ca9c54d2SDexuan Cui {
1266*ca9c54d2SDexuan Cui 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1267*ca9c54d2SDexuan Cui 	struct mana_recv_buf_oob *rx_oob;
1268*ca9c54d2SDexuan Cui 	struct device *dev = gc->dev;
1269*ca9c54d2SDexuan Cui 	int i;
1270*ca9c54d2SDexuan Cui 
1271*ca9c54d2SDexuan Cui 	if (!rxq)
1272*ca9c54d2SDexuan Cui 		return;
1273*ca9c54d2SDexuan Cui 
1274*ca9c54d2SDexuan Cui 	if (validate_state)
1275*ca9c54d2SDexuan Cui 		mana_napi_sync_for_rx(rxq);
1276*ca9c54d2SDexuan Cui 
1277*ca9c54d2SDexuan Cui 	mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
1278*ca9c54d2SDexuan Cui 
1279*ca9c54d2SDexuan Cui 	mana_deinit_cq(apc, &rxq->rx_cq);
1280*ca9c54d2SDexuan Cui 
1281*ca9c54d2SDexuan Cui 	for (i = 0; i < rxq->num_rx_buf; i++) {
1282*ca9c54d2SDexuan Cui 		rx_oob = &rxq->rx_oobs[i];
1283*ca9c54d2SDexuan Cui 
1284*ca9c54d2SDexuan Cui 		if (!rx_oob->buf_va)
1285*ca9c54d2SDexuan Cui 			continue;
1286*ca9c54d2SDexuan Cui 
1287*ca9c54d2SDexuan Cui 		dma_unmap_page(dev, rx_oob->buf_dma_addr, rxq->datasize,
1288*ca9c54d2SDexuan Cui 			       DMA_FROM_DEVICE);
1289*ca9c54d2SDexuan Cui 
1290*ca9c54d2SDexuan Cui 		free_page((unsigned long)rx_oob->buf_va);
1291*ca9c54d2SDexuan Cui 		rx_oob->buf_va = NULL;
1292*ca9c54d2SDexuan Cui 	}
1293*ca9c54d2SDexuan Cui 
1294*ca9c54d2SDexuan Cui 	if (rxq->gdma_rq)
1295*ca9c54d2SDexuan Cui 		mana_gd_destroy_queue(gc, rxq->gdma_rq);
1296*ca9c54d2SDexuan Cui 
1297*ca9c54d2SDexuan Cui 	kfree(rxq);
1298*ca9c54d2SDexuan Cui }
1299*ca9c54d2SDexuan Cui 
1300*ca9c54d2SDexuan Cui #define MANA_WQE_HEADER_SIZE 16
1301*ca9c54d2SDexuan Cui #define MANA_WQE_SGE_SIZE 16
1302*ca9c54d2SDexuan Cui 
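/* Allocate and DMA-map one page per receive buffer, prepare the single-SGE
 * WQE request for each buffer, and add up the WQ and CQ space the RX queue
 * needs: one 32-byte-aligned WQE plus one completion entry per buffer.
 */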
1303*ca9c54d2SDexuan Cui static int mana_alloc_rx_wqe(struct mana_port_context *apc,
1304*ca9c54d2SDexuan Cui 			     struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size)
1305*ca9c54d2SDexuan Cui {
1306*ca9c54d2SDexuan Cui 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1307*ca9c54d2SDexuan Cui 	struct mana_recv_buf_oob *rx_oob;
1308*ca9c54d2SDexuan Cui 	struct device *dev = gc->dev;
1309*ca9c54d2SDexuan Cui 	struct page *page;
1310*ca9c54d2SDexuan Cui 	dma_addr_t da;
1311*ca9c54d2SDexuan Cui 	u32 buf_idx;
1312*ca9c54d2SDexuan Cui 
1313*ca9c54d2SDexuan Cui 	WARN_ON(rxq->datasize == 0 || rxq->datasize > PAGE_SIZE);
1314*ca9c54d2SDexuan Cui 
1315*ca9c54d2SDexuan Cui 	*rxq_size = 0;
1316*ca9c54d2SDexuan Cui 	*cq_size = 0;
1317*ca9c54d2SDexuan Cui 
1318*ca9c54d2SDexuan Cui 	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
1319*ca9c54d2SDexuan Cui 		rx_oob = &rxq->rx_oobs[buf_idx];
1320*ca9c54d2SDexuan Cui 		memset(rx_oob, 0, sizeof(*rx_oob));
1321*ca9c54d2SDexuan Cui 
1322*ca9c54d2SDexuan Cui 		page = alloc_page(GFP_KERNEL);
1323*ca9c54d2SDexuan Cui 		if (!page)
1324*ca9c54d2SDexuan Cui 			return -ENOMEM;
1325*ca9c54d2SDexuan Cui 
1326*ca9c54d2SDexuan Cui 		da = dma_map_page(dev, page, 0, rxq->datasize, DMA_FROM_DEVICE);
1327*ca9c54d2SDexuan Cui 
1328*ca9c54d2SDexuan Cui 		if (dma_mapping_error(dev, da)) {
1329*ca9c54d2SDexuan Cui 			__free_page(page);
1330*ca9c54d2SDexuan Cui 			return -ENOMEM;
1331*ca9c54d2SDexuan Cui 		}
1332*ca9c54d2SDexuan Cui 
1333*ca9c54d2SDexuan Cui 		rx_oob->buf_va = page_to_virt(page);
1334*ca9c54d2SDexuan Cui 		rx_oob->buf_dma_addr = da;
1335*ca9c54d2SDexuan Cui 
1336*ca9c54d2SDexuan Cui 		rx_oob->num_sge = 1;
1337*ca9c54d2SDexuan Cui 		rx_oob->sgl[0].address = rx_oob->buf_dma_addr;
1338*ca9c54d2SDexuan Cui 		rx_oob->sgl[0].size = rxq->datasize;
1339*ca9c54d2SDexuan Cui 		rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;
1340*ca9c54d2SDexuan Cui 
1341*ca9c54d2SDexuan Cui 		rx_oob->wqe_req.sgl = rx_oob->sgl;
1342*ca9c54d2SDexuan Cui 		rx_oob->wqe_req.num_sge = rx_oob->num_sge;
1343*ca9c54d2SDexuan Cui 		rx_oob->wqe_req.inline_oob_size = 0;
1344*ca9c54d2SDexuan Cui 		rx_oob->wqe_req.inline_oob_data = NULL;
1345*ca9c54d2SDexuan Cui 		rx_oob->wqe_req.flags = 0;
1346*ca9c54d2SDexuan Cui 		rx_oob->wqe_req.client_data_unit = 0;
1347*ca9c54d2SDexuan Cui 
1348*ca9c54d2SDexuan Cui 		*rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
1349*ca9c54d2SDexuan Cui 				   MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
1350*ca9c54d2SDexuan Cui 		*cq_size += COMP_ENTRY_SIZE;
1351*ca9c54d2SDexuan Cui 	}
1352*ca9c54d2SDexuan Cui 
1353*ca9c54d2SDexuan Cui 	return 0;
1354*ca9c54d2SDexuan Cui }
1355*ca9c54d2SDexuan Cui 
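/* Post every prepared receive WQE to the GDMA RQ and ring the doorbell so
 * the buffers become available for incoming packets.
 */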
1356*ca9c54d2SDexuan Cui static int mana_push_wqe(struct mana_rxq *rxq)
1357*ca9c54d2SDexuan Cui {
1358*ca9c54d2SDexuan Cui 	struct mana_recv_buf_oob *rx_oob;
1359*ca9c54d2SDexuan Cui 	u32 buf_idx;
1360*ca9c54d2SDexuan Cui 	int err;
1361*ca9c54d2SDexuan Cui 
1362*ca9c54d2SDexuan Cui 	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
1363*ca9c54d2SDexuan Cui 		rx_oob = &rxq->rx_oobs[buf_idx];
1364*ca9c54d2SDexuan Cui 
1365*ca9c54d2SDexuan Cui 		err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
1366*ca9c54d2SDexuan Cui 					    &rx_oob->wqe_inf);
1367*ca9c54d2SDexuan Cui 		if (err)
1368*ca9c54d2SDexuan Cui 			return -ENOSPC;
1369*ca9c54d2SDexuan Cui 	}
1370*ca9c54d2SDexuan Cui 
1371*ca9c54d2SDexuan Cui 	return 0;
1372*ca9c54d2SDexuan Cui }
1373*ca9c54d2SDexuan Cui 
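/* Create one RX queue: allocate the rxq and its receive buffers, create the
 * GDMA RQ and its CQ, register both with the device via mana_create_wq_obj(),
 * post the initial receive WQEs, and arm the CQ. Returns the new rxq, or
 * NULL on failure after undoing any partial setup.
 */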
1374*ca9c54d2SDexuan Cui static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
1375*ca9c54d2SDexuan Cui 					u32 rxq_idx, struct mana_eq *eq,
1376*ca9c54d2SDexuan Cui 					struct net_device *ndev)
1377*ca9c54d2SDexuan Cui {
1378*ca9c54d2SDexuan Cui 	struct gdma_dev *gd = apc->ac->gdma_dev;
1379*ca9c54d2SDexuan Cui 	struct mana_obj_spec wq_spec;
1380*ca9c54d2SDexuan Cui 	struct mana_obj_spec cq_spec;
1381*ca9c54d2SDexuan Cui 	struct gdma_queue_spec spec;
1382*ca9c54d2SDexuan Cui 	struct mana_cq *cq = NULL;
1383*ca9c54d2SDexuan Cui 	struct gdma_context *gc;
1384*ca9c54d2SDexuan Cui 	u32 cq_size, rq_size;
1385*ca9c54d2SDexuan Cui 	struct mana_rxq *rxq;
1386*ca9c54d2SDexuan Cui 	int err;
1387*ca9c54d2SDexuan Cui 
1388*ca9c54d2SDexuan Cui 	gc = gd->gdma_context;
1389*ca9c54d2SDexuan Cui 
1390*ca9c54d2SDexuan Cui 	rxq = kzalloc(sizeof(*rxq) +
1391*ca9c54d2SDexuan Cui 		      RX_BUFFERS_PER_QUEUE * sizeof(struct mana_recv_buf_oob),
1392*ca9c54d2SDexuan Cui 		      GFP_KERNEL);
1393*ca9c54d2SDexuan Cui 	if (!rxq)
1394*ca9c54d2SDexuan Cui 		return NULL;
1395*ca9c54d2SDexuan Cui 
1396*ca9c54d2SDexuan Cui 	rxq->ndev = ndev;
1397*ca9c54d2SDexuan Cui 	rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
1398*ca9c54d2SDexuan Cui 	rxq->rxq_idx = rxq_idx;
1399*ca9c54d2SDexuan Cui 	rxq->datasize = ALIGN(MAX_FRAME_SIZE, 64);
1400*ca9c54d2SDexuan Cui 	rxq->rxobj = INVALID_MANA_HANDLE;
1401*ca9c54d2SDexuan Cui 
1402*ca9c54d2SDexuan Cui 	err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
1403*ca9c54d2SDexuan Cui 	if (err)
1404*ca9c54d2SDexuan Cui 		goto out;
1405*ca9c54d2SDexuan Cui 
1406*ca9c54d2SDexuan Cui 	rq_size = PAGE_ALIGN(rq_size);
1407*ca9c54d2SDexuan Cui 	cq_size = PAGE_ALIGN(cq_size);
1408*ca9c54d2SDexuan Cui 
1409*ca9c54d2SDexuan Cui 	/* Create RQ */
1410*ca9c54d2SDexuan Cui 	memset(&spec, 0, sizeof(spec));
1411*ca9c54d2SDexuan Cui 	spec.type = GDMA_RQ;
1412*ca9c54d2SDexuan Cui 	spec.monitor_avl_buf = true;
1413*ca9c54d2SDexuan Cui 	spec.queue_size = rq_size;
1414*ca9c54d2SDexuan Cui 	err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
1415*ca9c54d2SDexuan Cui 	if (err)
1416*ca9c54d2SDexuan Cui 		goto out;
1417*ca9c54d2SDexuan Cui 
1418*ca9c54d2SDexuan Cui 	/* Create RQ's CQ */
1419*ca9c54d2SDexuan Cui 	cq = &rxq->rx_cq;
1420*ca9c54d2SDexuan Cui 	cq->gdma_comp_buf = eq->cqe_poll;
1421*ca9c54d2SDexuan Cui 	cq->type = MANA_CQ_TYPE_RX;
1422*ca9c54d2SDexuan Cui 	cq->rxq = rxq;
1423*ca9c54d2SDexuan Cui 
1424*ca9c54d2SDexuan Cui 	memset(&spec, 0, sizeof(spec));
1425*ca9c54d2SDexuan Cui 	spec.type = GDMA_CQ;
1426*ca9c54d2SDexuan Cui 	spec.monitor_avl_buf = false;
1427*ca9c54d2SDexuan Cui 	spec.queue_size = cq_size;
1428*ca9c54d2SDexuan Cui 	spec.cq.callback = mana_cq_handler;
1429*ca9c54d2SDexuan Cui 	spec.cq.parent_eq = eq->eq;
1430*ca9c54d2SDexuan Cui 	spec.cq.context = cq;
1431*ca9c54d2SDexuan Cui 	err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
1432*ca9c54d2SDexuan Cui 	if (err)
1433*ca9c54d2SDexuan Cui 		goto out;
1434*ca9c54d2SDexuan Cui 
1435*ca9c54d2SDexuan Cui 	memset(&wq_spec, 0, sizeof(wq_spec));
1436*ca9c54d2SDexuan Cui 	memset(&cq_spec, 0, sizeof(cq_spec));
1437*ca9c54d2SDexuan Cui 	wq_spec.gdma_region = rxq->gdma_rq->mem_info.gdma_region;
1438*ca9c54d2SDexuan Cui 	wq_spec.queue_size = rxq->gdma_rq->queue_size;
1439*ca9c54d2SDexuan Cui 
1440*ca9c54d2SDexuan Cui 	cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
1441*ca9c54d2SDexuan Cui 	cq_spec.queue_size = cq->gdma_cq->queue_size;
1442*ca9c54d2SDexuan Cui 	cq_spec.modr_ctx_id = 0;
1443*ca9c54d2SDexuan Cui 	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
1444*ca9c54d2SDexuan Cui 
1445*ca9c54d2SDexuan Cui 	err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
1446*ca9c54d2SDexuan Cui 				 &wq_spec, &cq_spec, &rxq->rxobj);
1447*ca9c54d2SDexuan Cui 	if (err)
1448*ca9c54d2SDexuan Cui 		goto out;
1449*ca9c54d2SDexuan Cui 
1450*ca9c54d2SDexuan Cui 	rxq->gdma_rq->id = wq_spec.queue_index;
1451*ca9c54d2SDexuan Cui 	cq->gdma_cq->id = cq_spec.queue_index;
1452*ca9c54d2SDexuan Cui 
1453*ca9c54d2SDexuan Cui 	rxq->gdma_rq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1454*ca9c54d2SDexuan Cui 	cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1455*ca9c54d2SDexuan Cui 
1456*ca9c54d2SDexuan Cui 	rxq->gdma_id = rxq->gdma_rq->id;
1457*ca9c54d2SDexuan Cui 	cq->gdma_id = cq->gdma_cq->id;
1458*ca9c54d2SDexuan Cui 
1459*ca9c54d2SDexuan Cui 	err = mana_push_wqe(rxq);
1460*ca9c54d2SDexuan Cui 	if (err)
1461*ca9c54d2SDexuan Cui 		goto out;
1462*ca9c54d2SDexuan Cui 
1463*ca9c54d2SDexuan Cui 	if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
1464*ca9c54d2SDexuan Cui 		err = -EINVAL;
		goto out;
	}
1465*ca9c54d2SDexuan Cui 
1466*ca9c54d2SDexuan Cui 	gc->cq_table[cq->gdma_id] = cq->gdma_cq;
1467*ca9c54d2SDexuan Cui 
1468*ca9c54d2SDexuan Cui 	mana_gd_arm_cq(cq->gdma_cq);
1469*ca9c54d2SDexuan Cui out:
1470*ca9c54d2SDexuan Cui 	if (!err)
1471*ca9c54d2SDexuan Cui 		return rxq;
1472*ca9c54d2SDexuan Cui 
1473*ca9c54d2SDexuan Cui 	netdev_err(ndev, "Failed to create RXQ: err = %d\n", err);
1474*ca9c54d2SDexuan Cui 
1475*ca9c54d2SDexuan Cui 	mana_destroy_rxq(apc, rxq, false);
1476*ca9c54d2SDexuan Cui 
1477*ca9c54d2SDexuan Cui 	/* rx_cq is cleaned up (and rxq freed) by mana_destroy_rxq() above. */
1479*ca9c54d2SDexuan Cui 
1480*ca9c54d2SDexuan Cui 	return NULL;
1481*ca9c54d2SDexuan Cui }
1482*ca9c54d2SDexuan Cui 
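/* Create one RX queue per configured queue, each attached to its own EQ,
 * and record the first queue's WQ object as the default RX object.
 */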
1483*ca9c54d2SDexuan Cui static int mana_add_rx_queues(struct mana_port_context *apc,
1484*ca9c54d2SDexuan Cui 			      struct net_device *ndev)
1485*ca9c54d2SDexuan Cui {
1486*ca9c54d2SDexuan Cui 	struct mana_rxq *rxq;
1487*ca9c54d2SDexuan Cui 	int err = 0;
1488*ca9c54d2SDexuan Cui 	int i;
1489*ca9c54d2SDexuan Cui 
1490*ca9c54d2SDexuan Cui 	for (i = 0; i < apc->num_queues; i++) {
1491*ca9c54d2SDexuan Cui 		rxq = mana_create_rxq(apc, i, &apc->eqs[i], ndev);
1492*ca9c54d2SDexuan Cui 		if (!rxq) {
1493*ca9c54d2SDexuan Cui 			err = -ENOMEM;
1494*ca9c54d2SDexuan Cui 			goto out;
1495*ca9c54d2SDexuan Cui 		}
1496*ca9c54d2SDexuan Cui 
1497*ca9c54d2SDexuan Cui 		u64_stats_init(&rxq->stats.syncp);
1498*ca9c54d2SDexuan Cui 
1499*ca9c54d2SDexuan Cui 		apc->rxqs[i] = rxq;
1500*ca9c54d2SDexuan Cui 	}
1501*ca9c54d2SDexuan Cui 
1502*ca9c54d2SDexuan Cui 	apc->default_rxobj = apc->rxqs[0]->rxobj;
1503*ca9c54d2SDexuan Cui out:
1504*ca9c54d2SDexuan Cui 	return err;
1505*ca9c54d2SDexuan Cui }
1506*ca9c54d2SDexuan Cui 
1507*ca9c54d2SDexuan Cui static void mana_destroy_vport(struct mana_port_context *apc)
1508*ca9c54d2SDexuan Cui {
1509*ca9c54d2SDexuan Cui 	struct mana_rxq *rxq;
1510*ca9c54d2SDexuan Cui 	u32 rxq_idx;
1511*ca9c54d2SDexuan Cui 
1512*ca9c54d2SDexuan Cui 	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
1513*ca9c54d2SDexuan Cui 		rxq = apc->rxqs[rxq_idx];
1514*ca9c54d2SDexuan Cui 		if (!rxq)
1515*ca9c54d2SDexuan Cui 			continue;
1516*ca9c54d2SDexuan Cui 
1517*ca9c54d2SDexuan Cui 		mana_destroy_rxq(apc, rxq, true);
1518*ca9c54d2SDexuan Cui 		apc->rxqs[rxq_idx] = NULL;
1519*ca9c54d2SDexuan Cui 	}
1520*ca9c54d2SDexuan Cui 
1521*ca9c54d2SDexuan Cui 	mana_destroy_txq(apc);
1522*ca9c54d2SDexuan Cui }
1523*ca9c54d2SDexuan Cui 
1524*ca9c54d2SDexuan Cui static int mana_create_vport(struct mana_port_context *apc,
1525*ca9c54d2SDexuan Cui 			     struct net_device *net)
1526*ca9c54d2SDexuan Cui {
1527*ca9c54d2SDexuan Cui 	struct gdma_dev *gd = apc->ac->gdma_dev;
1528*ca9c54d2SDexuan Cui 	int err;
1529*ca9c54d2SDexuan Cui 
1530*ca9c54d2SDexuan Cui 	apc->default_rxobj = INVALID_MANA_HANDLE;
1531*ca9c54d2SDexuan Cui 
1532*ca9c54d2SDexuan Cui 	err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
1533*ca9c54d2SDexuan Cui 	if (err)
1534*ca9c54d2SDexuan Cui 		return err;
1535*ca9c54d2SDexuan Cui 
1536*ca9c54d2SDexuan Cui 	return mana_create_txq(apc, net);
1537*ca9c54d2SDexuan Cui }
1538*ca9c54d2SDexuan Cui 
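/* Fill the RSS indirection table with the default even spread across the
 * configured queues, as computed by ethtool_rxfh_indir_default().
 */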
1539*ca9c54d2SDexuan Cui static void mana_rss_table_init(struct mana_port_context *apc)
1540*ca9c54d2SDexuan Cui {
1541*ca9c54d2SDexuan Cui 	int i;
1542*ca9c54d2SDexuan Cui 
1543*ca9c54d2SDexuan Cui 	for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
1544*ca9c54d2SDexuan Cui 		apc->indir_table[i] =
1545*ca9c54d2SDexuan Cui 			ethtool_rxfh_indir_default(i, apc->num_queues);
1546*ca9c54d2SDexuan Cui }
1547*ca9c54d2SDexuan Cui 
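/* Push the current RSS state to the device: refresh the rxobj table from the
 * indirection table when update_tab is set, then reconfigure vPort steering
 * with the (possibly updated) hash key and table. Called from
 * mana_alloc_queues() to enable RSS and from mana_dealloc_queues() to
 * disable it.
 */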
1548*ca9c54d2SDexuan Cui int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
1549*ca9c54d2SDexuan Cui 		    bool update_hash, bool update_tab)
1550*ca9c54d2SDexuan Cui {
1551*ca9c54d2SDexuan Cui 	u32 queue_idx;
1552*ca9c54d2SDexuan Cui 	int i;
1553*ca9c54d2SDexuan Cui 
1554*ca9c54d2SDexuan Cui 	if (update_tab) {
1555*ca9c54d2SDexuan Cui 		for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
1556*ca9c54d2SDexuan Cui 			queue_idx = apc->indir_table[i];
1557*ca9c54d2SDexuan Cui 			apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
1558*ca9c54d2SDexuan Cui 		}
1559*ca9c54d2SDexuan Cui 	}
1560*ca9c54d2SDexuan Cui 
1561*ca9c54d2SDexuan Cui 	return mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
1562*ca9c54d2SDexuan Cui }
1563*ca9c54d2SDexuan Cui 
1564*ca9c54d2SDexuan Cui static int mana_init_port(struct net_device *ndev)
1565*ca9c54d2SDexuan Cui {
1566*ca9c54d2SDexuan Cui 	struct mana_port_context *apc = netdev_priv(ndev);
1567*ca9c54d2SDexuan Cui 	u32 max_txq, max_rxq, max_queues;
1568*ca9c54d2SDexuan Cui 	int port_idx = apc->port_idx;
1569*ca9c54d2SDexuan Cui 	u32 num_indirect_entries;
1570*ca9c54d2SDexuan Cui 	int err;
1571*ca9c54d2SDexuan Cui 
1572*ca9c54d2SDexuan Cui 	err = mana_init_port_context(apc);
1573*ca9c54d2SDexuan Cui 	if (err)
1574*ca9c54d2SDexuan Cui 		return err;
1575*ca9c54d2SDexuan Cui 
1576*ca9c54d2SDexuan Cui 	err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
1577*ca9c54d2SDexuan Cui 				   &num_indirect_entries);
1578*ca9c54d2SDexuan Cui 	if (err) {
1579*ca9c54d2SDexuan Cui 		netdev_err(ndev, "Failed to query info for vPort %d\n",
			   port_idx);
1580*ca9c54d2SDexuan Cui 		goto reset_apc;
1581*ca9c54d2SDexuan Cui 	}
1582*ca9c54d2SDexuan Cui 
1583*ca9c54d2SDexuan Cui 	max_queues = min_t(u32, max_txq, max_rxq);
1584*ca9c54d2SDexuan Cui 	if (apc->max_queues > max_queues)
1585*ca9c54d2SDexuan Cui 		apc->max_queues = max_queues;
1586*ca9c54d2SDexuan Cui 
1587*ca9c54d2SDexuan Cui 	if (apc->num_queues > apc->max_queues)
1588*ca9c54d2SDexuan Cui 		apc->num_queues = apc->max_queues;
1589*ca9c54d2SDexuan Cui 
1590*ca9c54d2SDexuan Cui 	ether_addr_copy(ndev->dev_addr, apc->mac_addr);
1591*ca9c54d2SDexuan Cui 
1592*ca9c54d2SDexuan Cui 	return 0;
1593*ca9c54d2SDexuan Cui 
1594*ca9c54d2SDexuan Cui reset_apc:
1595*ca9c54d2SDexuan Cui 	kfree(apc->rxqs);
1596*ca9c54d2SDexuan Cui 	apc->rxqs = NULL;
1597*ca9c54d2SDexuan Cui 	return err;
1598*ca9c54d2SDexuan Cui }
1599*ca9c54d2SDexuan Cui 
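/* Bring up the data path for a port: create the EQs, the vPort with its TX
 * queues, and the RX queues; set the netdev's real TX/RX queue counts; and
 * program the RSS configuration. On failure, everything created so far is
 * destroyed.
 */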
1600*ca9c54d2SDexuan Cui int mana_alloc_queues(struct net_device *ndev)
1601*ca9c54d2SDexuan Cui {
1602*ca9c54d2SDexuan Cui 	struct mana_port_context *apc = netdev_priv(ndev);
1603*ca9c54d2SDexuan Cui 	struct gdma_dev *gd = apc->ac->gdma_dev;
1604*ca9c54d2SDexuan Cui 	int err;
1605*ca9c54d2SDexuan Cui 
1606*ca9c54d2SDexuan Cui 	err = mana_create_eq(apc);
1607*ca9c54d2SDexuan Cui 	if (err)
1608*ca9c54d2SDexuan Cui 		return err;
1609*ca9c54d2SDexuan Cui 
1610*ca9c54d2SDexuan Cui 	err = mana_create_vport(apc, ndev);
1611*ca9c54d2SDexuan Cui 	if (err)
1612*ca9c54d2SDexuan Cui 		goto destroy_eq;
1613*ca9c54d2SDexuan Cui 
1614*ca9c54d2SDexuan Cui 	err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
1615*ca9c54d2SDexuan Cui 	if (err)
1616*ca9c54d2SDexuan Cui 		goto destroy_vport;
1617*ca9c54d2SDexuan Cui 
1618*ca9c54d2SDexuan Cui 	err = mana_add_rx_queues(apc, ndev);
1619*ca9c54d2SDexuan Cui 	if (err)
1620*ca9c54d2SDexuan Cui 		goto destroy_vport;
1621*ca9c54d2SDexuan Cui 
1622*ca9c54d2SDexuan Cui 	apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
1623*ca9c54d2SDexuan Cui 
1624*ca9c54d2SDexuan Cui 	err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
1625*ca9c54d2SDexuan Cui 	if (err)
1626*ca9c54d2SDexuan Cui 		goto destroy_vport;
1627*ca9c54d2SDexuan Cui 
1628*ca9c54d2SDexuan Cui 	mana_rss_table_init(apc);
1629*ca9c54d2SDexuan Cui 
1630*ca9c54d2SDexuan Cui 	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
1631*ca9c54d2SDexuan Cui 	if (err)
1632*ca9c54d2SDexuan Cui 		goto destroy_vport;
1633*ca9c54d2SDexuan Cui 
1634*ca9c54d2SDexuan Cui 	return 0;
1635*ca9c54d2SDexuan Cui 
1636*ca9c54d2SDexuan Cui destroy_vport:
1637*ca9c54d2SDexuan Cui 	mana_destroy_vport(apc);
1638*ca9c54d2SDexuan Cui destroy_eq:
1639*ca9c54d2SDexuan Cui 	mana_destroy_eq(gd->gdma_context, apc);
1640*ca9c54d2SDexuan Cui 	return err;
1641*ca9c54d2SDexuan Cui }
1642*ca9c54d2SDexuan Cui 
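/* (Re)initialize the port and recreate its queues, then attach the netdev
 * and restore the carrier/TX state saved by the last detach. Must be called
 * while holding rtnl_lock.
 */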
1643*ca9c54d2SDexuan Cui int mana_attach(struct net_device *ndev)
1644*ca9c54d2SDexuan Cui {
1645*ca9c54d2SDexuan Cui 	struct mana_port_context *apc = netdev_priv(ndev);
1646*ca9c54d2SDexuan Cui 	int err;
1647*ca9c54d2SDexuan Cui 
1648*ca9c54d2SDexuan Cui 	ASSERT_RTNL();
1649*ca9c54d2SDexuan Cui 
1650*ca9c54d2SDexuan Cui 	err = mana_init_port(ndev);
1651*ca9c54d2SDexuan Cui 	if (err)
1652*ca9c54d2SDexuan Cui 		return err;
1653*ca9c54d2SDexuan Cui 
1654*ca9c54d2SDexuan Cui 	err = mana_alloc_queues(ndev);
1655*ca9c54d2SDexuan Cui 	if (err) {
1656*ca9c54d2SDexuan Cui 		kfree(apc->rxqs);
1657*ca9c54d2SDexuan Cui 		apc->rxqs = NULL;
1658*ca9c54d2SDexuan Cui 		return err;
1659*ca9c54d2SDexuan Cui 	}
1660*ca9c54d2SDexuan Cui 
1661*ca9c54d2SDexuan Cui 	netif_device_attach(ndev);
1662*ca9c54d2SDexuan Cui 
1663*ca9c54d2SDexuan Cui 	apc->port_is_up = apc->port_st_save;
1664*ca9c54d2SDexuan Cui 
1665*ca9c54d2SDexuan Cui 	/* Ensure port state updated before txq state */
1666*ca9c54d2SDexuan Cui 	smp_wmb();
1667*ca9c54d2SDexuan Cui 
1668*ca9c54d2SDexuan Cui 	if (apc->port_is_up) {
1669*ca9c54d2SDexuan Cui 		netif_carrier_on(ndev);
1670*ca9c54d2SDexuan Cui 		netif_tx_wake_all_queues(ndev);
1671*ca9c54d2SDexuan Cui 	}
1672*ca9c54d2SDexuan Cui 
1673*ca9c54d2SDexuan Cui 	return 0;
1674*ca9c54d2SDexuan Cui }
1675*ca9c54d2SDexuan Cui 
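/* Tear down the data path of a port that has already been marked down:
 * wait for in-flight TX packets to drain, disable RSS/steering on the
 * device, then destroy the vPort queues and the EQs.
 */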
1676*ca9c54d2SDexuan Cui static int mana_dealloc_queues(struct net_device *ndev)
1677*ca9c54d2SDexuan Cui {
1678*ca9c54d2SDexuan Cui 	struct mana_port_context *apc = netdev_priv(ndev);
1679*ca9c54d2SDexuan Cui 	struct mana_txq *txq;
1680*ca9c54d2SDexuan Cui 	int i, err;
1681*ca9c54d2SDexuan Cui 
1682*ca9c54d2SDexuan Cui 	if (apc->port_is_up)
1683*ca9c54d2SDexuan Cui 		return -EINVAL;
1684*ca9c54d2SDexuan Cui 
1685*ca9c54d2SDexuan Cui 	/* No packet can be transmitted now since apc->port_is_up is false.
1686*ca9c54d2SDexuan Cui 	 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
1687*ca9c54d2SDexuan Cui 	 * a txq because it may not see apc->port_is_up being cleared to
1688*ca9c54d2SDexuan Cui 	 * false in time, but that doesn't matter since mana_start_xmit()
1689*ca9c54d2SDexuan Cui 	 * drops any new packets due to apc->port_is_up being false.
1690*ca9c54d2SDexuan Cui 	 *
1691*ca9c54d2SDexuan Cui 	 * Drain all the in-flight TX packets.
1692*ca9c54d2SDexuan Cui 	 */
1693*ca9c54d2SDexuan Cui 	for (i = 0; i < apc->num_queues; i++) {
1694*ca9c54d2SDexuan Cui 		txq = &apc->tx_qp[i].txq;
1695*ca9c54d2SDexuan Cui 
1696*ca9c54d2SDexuan Cui 		while (atomic_read(&txq->pending_sends) > 0)
1697*ca9c54d2SDexuan Cui 			usleep_range(1000, 2000);
1698*ca9c54d2SDexuan Cui 	}
1699*ca9c54d2SDexuan Cui 
1700*ca9c54d2SDexuan Cui 	/* We're 100% sure the queues can no longer be woken up, because
1701*ca9c54d2SDexuan Cui 	 * we're sure now mana_poll_tx_cq() can't be running.
1702*ca9c54d2SDexuan Cui 	 */
1703*ca9c54d2SDexuan Cui 
1704*ca9c54d2SDexuan Cui 	apc->rss_state = TRI_STATE_FALSE;
1705*ca9c54d2SDexuan Cui 	err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
1706*ca9c54d2SDexuan Cui 	if (err) {
1707*ca9c54d2SDexuan Cui 		netdev_err(ndev, "Failed to disable vPort: %d\n", err);
1708*ca9c54d2SDexuan Cui 		return err;
1709*ca9c54d2SDexuan Cui 	}
1710*ca9c54d2SDexuan Cui 
1711*ca9c54d2SDexuan Cui 	/* TODO: Implement RX fencing */
1712*ca9c54d2SDexuan Cui 	ssleep(1);
1713*ca9c54d2SDexuan Cui 
1714*ca9c54d2SDexuan Cui 	mana_destroy_vport(apc);
1715*ca9c54d2SDexuan Cui 
1716*ca9c54d2SDexuan Cui 	mana_destroy_eq(apc->ac->gdma_dev->gdma_context, apc);
1717*ca9c54d2SDexuan Cui 
1718*ca9c54d2SDexuan Cui 	return 0;
1719*ca9c54d2SDexuan Cui }
1720*ca9c54d2SDexuan Cui 
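/* Stop the port: mark it down, quiesce TX, and free its queues if it was up.
 * When not invoked from the close path (from_close == false), also detach
 * the netdev and release the per-port context. Must be called while holding
 * rtnl_lock.
 */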
1721*ca9c54d2SDexuan Cui int mana_detach(struct net_device *ndev, bool from_close)
1722*ca9c54d2SDexuan Cui {
1723*ca9c54d2SDexuan Cui 	struct mana_port_context *apc = netdev_priv(ndev);
1724*ca9c54d2SDexuan Cui 	int err;
1725*ca9c54d2SDexuan Cui 
1726*ca9c54d2SDexuan Cui 	ASSERT_RTNL();
1727*ca9c54d2SDexuan Cui 
1728*ca9c54d2SDexuan Cui 	apc->port_st_save = apc->port_is_up;
1729*ca9c54d2SDexuan Cui 	apc->port_is_up = false;
1730*ca9c54d2SDexuan Cui 
1731*ca9c54d2SDexuan Cui 	/* Ensure port state updated before txq state */
1732*ca9c54d2SDexuan Cui 	smp_wmb();
1733*ca9c54d2SDexuan Cui 
1734*ca9c54d2SDexuan Cui 	netif_tx_disable(ndev);
1735*ca9c54d2SDexuan Cui 	netif_carrier_off(ndev);
1736*ca9c54d2SDexuan Cui 
1737*ca9c54d2SDexuan Cui 	if (apc->port_st_save) {
1738*ca9c54d2SDexuan Cui 		err = mana_dealloc_queues(ndev);
1739*ca9c54d2SDexuan Cui 		if (err)
1740*ca9c54d2SDexuan Cui 			return err;
1741*ca9c54d2SDexuan Cui 	}
1742*ca9c54d2SDexuan Cui 
1743*ca9c54d2SDexuan Cui 	if (!from_close) {
1744*ca9c54d2SDexuan Cui 		netif_device_detach(ndev);
1745*ca9c54d2SDexuan Cui 		mana_cleanup_port_context(apc);
1746*ca9c54d2SDexuan Cui 	}
1747*ca9c54d2SDexuan Cui 
1748*ca9c54d2SDexuan Cui 	return 0;
1749*ca9c54d2SDexuan Cui }
1750*ca9c54d2SDexuan Cui 
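/* Allocate and register one net device for a vPort: set up the per-port
 * context, netdev/ethtool ops, MTU limits and feature flags, initialize the
 * port (which queries the vPort configuration), and register the netdev.
 */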
1751*ca9c54d2SDexuan Cui static int mana_probe_port(struct mana_context *ac, int port_idx,
1752*ca9c54d2SDexuan Cui 			   struct net_device **ndev_storage)
1753*ca9c54d2SDexuan Cui {
1754*ca9c54d2SDexuan Cui 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
1755*ca9c54d2SDexuan Cui 	struct mana_port_context *apc;
1756*ca9c54d2SDexuan Cui 	struct net_device *ndev;
1757*ca9c54d2SDexuan Cui 	int err;
1758*ca9c54d2SDexuan Cui 
1759*ca9c54d2SDexuan Cui 	ndev = alloc_etherdev_mq(sizeof(struct mana_port_context),
1760*ca9c54d2SDexuan Cui 				 gc->max_num_queues);
1761*ca9c54d2SDexuan Cui 	if (!ndev)
1762*ca9c54d2SDexuan Cui 		return -ENOMEM;
1763*ca9c54d2SDexuan Cui 
1764*ca9c54d2SDexuan Cui 	*ndev_storage = ndev;
1765*ca9c54d2SDexuan Cui 
1766*ca9c54d2SDexuan Cui 	apc = netdev_priv(ndev);
1767*ca9c54d2SDexuan Cui 	apc->ac = ac;
1768*ca9c54d2SDexuan Cui 	apc->ndev = ndev;
1769*ca9c54d2SDexuan Cui 	apc->max_queues = gc->max_num_queues;
1770*ca9c54d2SDexuan Cui 	apc->num_queues = min_t(uint, gc->max_num_queues, MANA_MAX_NUM_QUEUES);
1771*ca9c54d2SDexuan Cui 	apc->port_handle = INVALID_MANA_HANDLE;
1772*ca9c54d2SDexuan Cui 	apc->port_idx = port_idx;
1773*ca9c54d2SDexuan Cui 
1774*ca9c54d2SDexuan Cui 	ndev->netdev_ops = &mana_devops;
1775*ca9c54d2SDexuan Cui 	ndev->ethtool_ops = &mana_ethtool_ops;
1776*ca9c54d2SDexuan Cui 	ndev->mtu = ETH_DATA_LEN;
1777*ca9c54d2SDexuan Cui 	ndev->max_mtu = ndev->mtu;
1778*ca9c54d2SDexuan Cui 	ndev->min_mtu = ndev->mtu;
1779*ca9c54d2SDexuan Cui 	ndev->needed_headroom = MANA_HEADROOM;
1780*ca9c54d2SDexuan Cui 	SET_NETDEV_DEV(ndev, gc->dev);
1781*ca9c54d2SDexuan Cui 
1782*ca9c54d2SDexuan Cui 	netif_carrier_off(ndev);
1783*ca9c54d2SDexuan Cui 
1784*ca9c54d2SDexuan Cui 	netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
1785*ca9c54d2SDexuan Cui 
1786*ca9c54d2SDexuan Cui 	err = mana_init_port(ndev);
1787*ca9c54d2SDexuan Cui 	if (err)
1788*ca9c54d2SDexuan Cui 		goto free_net;
1789*ca9c54d2SDexuan Cui 
1790*ca9c54d2SDexuan Cui 	netdev_lockdep_set_classes(ndev);
1791*ca9c54d2SDexuan Cui 
1792*ca9c54d2SDexuan Cui 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1793*ca9c54d2SDexuan Cui 	ndev->hw_features |= NETIF_F_RXCSUM;
1794*ca9c54d2SDexuan Cui 	ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
1795*ca9c54d2SDexuan Cui 	ndev->hw_features |= NETIF_F_RXHASH;
1796*ca9c54d2SDexuan Cui 	ndev->features = ndev->hw_features;
1797*ca9c54d2SDexuan Cui 	ndev->vlan_features = 0;
1798*ca9c54d2SDexuan Cui 
1799*ca9c54d2SDexuan Cui 	err = register_netdev(ndev);
1800*ca9c54d2SDexuan Cui 	if (err) {
1801*ca9c54d2SDexuan Cui 		netdev_err(ndev, "Unable to register netdev.\n");
1802*ca9c54d2SDexuan Cui 		goto reset_apc;
1803*ca9c54d2SDexuan Cui 	}
1804*ca9c54d2SDexuan Cui 
1805*ca9c54d2SDexuan Cui 	return 0;
1806*ca9c54d2SDexuan Cui 
1807*ca9c54d2SDexuan Cui reset_apc:
1808*ca9c54d2SDexuan Cui 	kfree(apc->rxqs);
1809*ca9c54d2SDexuan Cui 	apc->rxqs = NULL;
1810*ca9c54d2SDexuan Cui free_net:
1811*ca9c54d2SDexuan Cui 	*ndev_storage = NULL;
1812*ca9c54d2SDexuan Cui 	netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
1813*ca9c54d2SDexuan Cui 	free_netdev(ndev);
1814*ca9c54d2SDexuan Cui 	return err;
1815*ca9c54d2SDexuan Cui }
1816*ca9c54d2SDexuan Cui 
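/* Top-level probe: register the MANA device with the GDMA layer, query the
 * device configuration and port count, and create one net device per port.
 * On failure, mana_remove() undoes whatever was set up.
 */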
1817*ca9c54d2SDexuan Cui int mana_probe(struct gdma_dev *gd)
1818*ca9c54d2SDexuan Cui {
1819*ca9c54d2SDexuan Cui 	struct gdma_context *gc = gd->gdma_context;
1820*ca9c54d2SDexuan Cui 	struct device *dev = gc->dev;
1821*ca9c54d2SDexuan Cui 	struct mana_context *ac;
1822*ca9c54d2SDexuan Cui 	int err;
1823*ca9c54d2SDexuan Cui 	int i;
1824*ca9c54d2SDexuan Cui 
1825*ca9c54d2SDexuan Cui 	dev_info(dev,
1826*ca9c54d2SDexuan Cui 		 "Microsoft Azure Network Adapter protocol version: %d.%d.%d\n",
1827*ca9c54d2SDexuan Cui 		 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
1828*ca9c54d2SDexuan Cui 
1829*ca9c54d2SDexuan Cui 	err = mana_gd_register_device(gd);
1830*ca9c54d2SDexuan Cui 	if (err)
1831*ca9c54d2SDexuan Cui 		return err;
1832*ca9c54d2SDexuan Cui 
1833*ca9c54d2SDexuan Cui 	ac = kzalloc(sizeof(*ac), GFP_KERNEL);
1834*ca9c54d2SDexuan Cui 	if (!ac)
1835*ca9c54d2SDexuan Cui 		return -ENOMEM;
1836*ca9c54d2SDexuan Cui 
1837*ca9c54d2SDexuan Cui 	ac->gdma_dev = gd;
1838*ca9c54d2SDexuan Cui 	ac->num_ports = 1;
1839*ca9c54d2SDexuan Cui 	gd->driver_data = ac;
1840*ca9c54d2SDexuan Cui 
1841*ca9c54d2SDexuan Cui 	err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
1842*ca9c54d2SDexuan Cui 				    MANA_MICRO_VERSION, &ac->num_ports);
1843*ca9c54d2SDexuan Cui 	if (err)
1844*ca9c54d2SDexuan Cui 		goto out;
1845*ca9c54d2SDexuan Cui 
1846*ca9c54d2SDexuan Cui 	if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
1847*ca9c54d2SDexuan Cui 		ac->num_ports = MAX_PORTS_IN_MANA_DEV;
1848*ca9c54d2SDexuan Cui 
1849*ca9c54d2SDexuan Cui 	for (i = 0; i < ac->num_ports; i++) {
1850*ca9c54d2SDexuan Cui 		err = mana_probe_port(ac, i, &ac->ports[i]);
1851*ca9c54d2SDexuan Cui 		if (err)
1852*ca9c54d2SDexuan Cui 			break;
1853*ca9c54d2SDexuan Cui 	}
1854*ca9c54d2SDexuan Cui out:
1855*ca9c54d2SDexuan Cui 	if (err)
1856*ca9c54d2SDexuan Cui 		mana_remove(gd);
1857*ca9c54d2SDexuan Cui 
1858*ca9c54d2SDexuan Cui 	return err;
1859*ca9c54d2SDexuan Cui }
1860*ca9c54d2SDexuan Cui 
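/* Detach and unregister every port's net device under rtnl_lock, then
 * deregister from the GDMA layer and free the MANA context.
 */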
1861*ca9c54d2SDexuan Cui void mana_remove(struct gdma_dev *gd)
1862*ca9c54d2SDexuan Cui {
1863*ca9c54d2SDexuan Cui 	struct gdma_context *gc = gd->gdma_context;
1864*ca9c54d2SDexuan Cui 	struct mana_context *ac = gd->driver_data;
1865*ca9c54d2SDexuan Cui 	struct device *dev = gc->dev;
1866*ca9c54d2SDexuan Cui 	struct net_device *ndev;
1867*ca9c54d2SDexuan Cui 	int i;
1868*ca9c54d2SDexuan Cui 
1869*ca9c54d2SDexuan Cui 	for (i = 0; i < ac->num_ports; i++) {
1870*ca9c54d2SDexuan Cui 		ndev = ac->ports[i];
1871*ca9c54d2SDexuan Cui 		if (!ndev) {
1872*ca9c54d2SDexuan Cui 			if (i == 0)
1873*ca9c54d2SDexuan Cui 				dev_err(dev, "No net device to remove\n");
1874*ca9c54d2SDexuan Cui 			goto out;
1875*ca9c54d2SDexuan Cui 		}
1876*ca9c54d2SDexuan Cui 
1877*ca9c54d2SDexuan Cui 		/* All cleanup actions should stay after rtnl_lock(), otherwise
1878*ca9c54d2SDexuan Cui 		 * other functions may access partially cleaned up data.
1879*ca9c54d2SDexuan Cui 		 */
1880*ca9c54d2SDexuan Cui 		rtnl_lock();
1881*ca9c54d2SDexuan Cui 
1882*ca9c54d2SDexuan Cui 		mana_detach(ndev, false);
1883*ca9c54d2SDexuan Cui 
1884*ca9c54d2SDexuan Cui 		unregister_netdevice(ndev);
1885*ca9c54d2SDexuan Cui 
1886*ca9c54d2SDexuan Cui 		rtnl_unlock();
1887*ca9c54d2SDexuan Cui 
1888*ca9c54d2SDexuan Cui 		free_netdev(ndev);
1889*ca9c54d2SDexuan Cui 	}
1890*ca9c54d2SDexuan Cui out:
1891*ca9c54d2SDexuan Cui 	mana_gd_deregister_device(gd);
1892*ca9c54d2SDexuan Cui 	gd->driver_data = NULL;
1893*ca9c54d2SDexuan Cui 	gd->gdma_context = NULL;
1894*ca9c54d2SDexuan Cui 	kfree(ac);
1895*ca9c54d2SDexuan Cui }