// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <uapi/linux/bpf.h>

#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mm.h>

#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "mana.h"

/* Microsoft Azure Network Adapter (MANA) functions */

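/* Bring a port up: allocate the queues, mark the port as up before waking
 * the TX queues so the xmit path observes port_is_up, then report carrier.
 */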
static int mana_open(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	int err;

	err = mana_alloc_queues(ndev);
	if (err)
		return err;

	apc->port_is_up = true;

	/* Ensure port state updated before txq state */
	smp_wmb();

	netif_carrier_on(ndev);
	netif_tx_wake_all_queues(ndev);

	return 0;
}

static int mana_close(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	if (!apc->port_is_up)
		return 0;

	return mana_detach(ndev, true);
}

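/* The send queue is usable for transmit only when it still has room for one
 * more WQE of the maximum possible size.
 */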
static bool mana_can_tx(struct gdma_queue *wq)
{
	return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
}

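/* Return which L4 checksum (TCP or UDP) the hardware should compute for
 * this skb, or 0 if the checksum cannot be offloaded.
 */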
static unsigned int mana_checksum_info(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return IPPROTO_TCP;

		if (ip->protocol == IPPROTO_UDP)
			return IPPROTO_UDP;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6 = ipv6_hdr(skb);

		if (ip6->nexthdr == IPPROTO_TCP)
			return IPPROTO_TCP;

		if (ip6->nexthdr == IPPROTO_UDP)
			return IPPROTO_UDP;
	}

	/* No csum offloading */
	return 0;
}

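/* DMA-map the skb for transmit: the linear part goes into SGE 0 and each
 * page fragment into the following SGEs. The DMA handles and sizes are
 * saved in the skb headroom (struct mana_skb_head) so that mana_unmap_skb()
 * can undo the mappings when the TX completion arrives.
 */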
static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
			struct mana_tx_package *tp)
{
	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
	struct gdma_dev *gd = apc->ac->gdma_dev;
	struct gdma_context *gc;
	struct device *dev;
	skb_frag_t *frag;
	dma_addr_t da;
	int i;

	gc = gd->gdma_context;
	dev = gc->dev;
	da = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

	if (dma_mapping_error(dev, da))
		return -ENOMEM;

	ash->dma_handle[0] = da;
	ash->size[0] = skb_headlen(skb);

	tp->wqe_req.sgl[0].address = ash->dma_handle[0];
	tp->wqe_req.sgl[0].mem_key = gd->gpa_mkey;
	tp->wqe_req.sgl[0].size = ash->size[0];

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				      DMA_TO_DEVICE);

		if (dma_mapping_error(dev, da))
			goto frag_err;

		ash->dma_handle[i + 1] = da;
		ash->size[i + 1] = skb_frag_size(frag);

		tp->wqe_req.sgl[i + 1].address = ash->dma_handle[i + 1];
		tp->wqe_req.sgl[i + 1].mem_key = gd->gpa_mkey;
		tp->wqe_req.sgl[i + 1].size = ash->size[i + 1];
	}

	return 0;

frag_err:
	for (i = i - 1; i >= 0; i--)
		dma_unmap_page(dev, ash->dma_handle[i + 1], ash->size[i + 1],
			       DMA_TO_DEVICE);

	dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);

	return -ENOMEM;
}

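/* The transmit path: build the per-packet out-of-band (OOB) metadata and
 * scatter/gather list, program checksum/GSO offload, post the WQE and ring
 * the doorbell. Not static: the XDP_TX path transmits through this routine
 * as well.
 */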
int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
	struct mana_port_context *apc = netdev_priv(ndev);
	u16 txq_idx = skb_get_queue_mapping(skb);
	struct gdma_dev *gd = apc->ac->gdma_dev;
	bool ipv4 = false, ipv6 = false;
	struct mana_tx_package pkg = {};
	struct netdev_queue *net_txq;
	struct mana_stats_tx *tx_stats;
	struct gdma_queue *gdma_sq;
	unsigned int csum_type;
	struct mana_txq *txq;
	struct mana_cq *cq;
	int err, len;

	if (unlikely(!apc->port_is_up))
		goto tx_drop;

	if (skb_cow_head(skb, MANA_HEADROOM))
		goto tx_drop_count;

	txq = &apc->tx_qp[txq_idx].txq;
	gdma_sq = txq->gdma_sq;
	cq = &apc->tx_qp[txq_idx].tx_cq;

	pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
	pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;

	if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
		pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
		pkt_fmt = MANA_LONG_PKT_FMT;
	} else {
		pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
	}

	pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;

	if (pkt_fmt == MANA_SHORT_PKT_FMT)
		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
	else
		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);

	pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
	pkg.wqe_req.flags = 0;
	pkg.wqe_req.client_data_unit = 0;

	pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
	WARN_ON_ONCE(pkg.wqe_req.num_sge > 30);

	if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
		pkg.wqe_req.sgl = pkg.sgl_array;
	} else {
		pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
					    sizeof(struct gdma_sge),
					    GFP_ATOMIC);
		if (!pkg.sgl_ptr)
			goto tx_drop_count;

		pkg.wqe_req.sgl = pkg.sgl_ptr;
	}

	if (skb->protocol == htons(ETH_P_IP))
		ipv4 = true;
	else if (skb->protocol == htons(ETH_P_IPV6))
		ipv6 = true;

	if (skb_is_gso(skb)) {
		pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
		pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;

		pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
		pkg.tx_oob.s_oob.comp_tcp_csum = 1;
		pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);

		pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size;
		pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
		if (ipv4) {
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0,
						   IPPROTO_TCP, 0);
		} else {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0,
						 IPPROTO_TCP, 0);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		csum_type = mana_checksum_info(skb);

		if (csum_type == IPPROTO_TCP) {
			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;

			pkg.tx_oob.s_oob.comp_tcp_csum = 1;
			pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);

		} else if (csum_type == IPPROTO_UDP) {
			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;

			pkg.tx_oob.s_oob.comp_udp_csum = 1;
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto free_sgl_ptr;
		}
	}

	if (mana_map_skb(skb, apc, &pkg))
		goto free_sgl_ptr;

	skb_queue_tail(&txq->pending_skbs, skb);

	len = skb->len;
	net_txq = netdev_get_tx_queue(ndev, txq_idx);

	err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
					(struct gdma_posted_wqe_info *)skb->cb);
	if (!mana_can_tx(gdma_sq)) {
		netif_tx_stop_queue(net_txq);
		apc->eth_stats.stop_queue++;
	}

	if (err) {
		(void)skb_dequeue_tail(&txq->pending_skbs);
		netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
		err = NETDEV_TX_BUSY;
		goto tx_busy;
	}

	err = NETDEV_TX_OK;
	atomic_inc(&txq->pending_sends);

	mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);

	/* skb may be freed after mana_gd_post_work_request. Do not use it. */
	skb = NULL;

	tx_stats = &txq->stats;
	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->packets++;
	tx_stats->bytes += len;
	u64_stats_update_end(&tx_stats->syncp);

tx_busy:
	if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
		netif_tx_wake_queue(net_txq);
		apc->eth_stats.wake_queue++;
	}

	kfree(pkg.sgl_ptr);
	return err;

free_sgl_ptr:
	kfree(pkg.sgl_ptr);
tx_drop_count:
	ndev->stats.tx_dropped++;
tx_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static void mana_get_stats64(struct net_device *ndev,
			     struct rtnl_link_stats64 *st)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;
	struct mana_stats_rx *rx_stats;
	struct mana_stats_tx *tx_stats;
	unsigned int start;
	u64 packets, bytes;
	int q;

	if (!apc->port_is_up)
		return;

	netdev_stats_to_stats64(st, &ndev->stats);

	for (q = 0; q < num_queues; q++) {
		rx_stats = &apc->rxqs[q]->stats;

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
			packets = rx_stats->packets;
			bytes = rx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));

		st->rx_packets += packets;
		st->rx_bytes += bytes;
	}

	for (q = 0; q < num_queues; q++) {
		tx_stats = &apc->tx_qp[q].txq.stats;

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
			packets = tx_stats->packets;
			bytes = tx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));

		st->tx_packets += packets;
		st->tx_bytes += bytes;
	}
}

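/* Pick a TX queue using the RSS indirection table, and cache the choice in
 * the socket when it is safe to do so.
 */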
static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb,
			     int old_q)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	u32 hash = skb_get_hash(skb);
	struct sock *sk = skb->sk;
	int txq;

	txq = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK];

	if (txq != old_q && sk && sk_fullsock(sk) &&
	    rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, txq);

	return txq;
}

static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
			     struct net_device *sb_dev)
{
	int txq;

	if (ndev->real_num_tx_queues == 1)
		return 0;

	txq = sk_tx_queue_get(skb->sk);

	if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) {
		if (skb_rx_queue_recorded(skb))
			txq = skb_get_rx_queue(skb);
		else
			txq = mana_get_tx_queue(ndev, skb, txq);
	}

	return txq;
}

static const struct net_device_ops mana_devops = {
	.ndo_open		= mana_open,
	.ndo_stop		= mana_close,
	.ndo_select_queue	= mana_select_queue,
	.ndo_start_xmit		= mana_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_get_stats64	= mana_get_stats64,
	.ndo_bpf		= mana_bpf,
};

static void mana_cleanup_port_context(struct mana_port_context *apc)
{
	kfree(apc->rxqs);
	apc->rxqs = NULL;
}

static int mana_init_port_context(struct mana_port_context *apc)
{
	apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *),
			    GFP_KERNEL);

	return !apc->rxqs ? -ENOMEM : 0;
}

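/* Send a management request over the GDMA channel and sanity-check that the
 * response carries the same device and activity IDs as the request.
 */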
static int mana_send_request(struct mana_context *ac, void *in_buf,
			     u32 in_len, void *out_buf, u32 out_len)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct gdma_resp_hdr *resp = out_buf;
	struct gdma_req_hdr *req = in_buf;
	struct device *dev = gc->dev;
	static atomic_t activity_id;
	int err;

	req->dev_id = gc->mana.dev_id;
	req->activity_id = atomic_inc_return(&activity_id);

	err = mana_gd_send_request(gc, in_len, in_buf, out_len,
				   out_buf);
	if (err || resp->status) {
		dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
			err, resp->status);
		return err ? err : -EPROTO;
	}

	if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
	    req->activity_id != resp->activity_id) {
		dev_err(dev, "Unexpected mana message response: %x,%x,%x,%x\n",
			req->dev_id.as_uint32, resp->dev_id.as_uint32,
			req->activity_id, resp->activity_id);
		return -EPROTO;
	}

	return 0;
}

static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
				const enum mana_command_code expected_code,
				const u32 min_size)
{
	if (resp_hdr->response.msg_type != expected_code)
		return -EPROTO;

	if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
		return -EPROTO;

	if (resp_hdr->response.msg_size < min_size)
		return -EPROTO;

	return 0;
}

static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
				 u32 proto_minor_ver, u32 proto_micro_ver,
				 u16 *max_num_vports)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct mana_query_device_cfg_resp resp = {};
	struct mana_query_device_cfg_req req = {};
	struct device *dev = gc->dev;
	int err = 0;

	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
			     sizeof(req), sizeof(resp));
	req.proto_major_ver = proto_major_ver;
	req.proto_minor_ver = proto_minor_ver;
	req.proto_micro_ver = proto_micro_ver;

	err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
	if (err) {
		dev_err(dev, "Failed to query config: %d\n", err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		dev_err(dev, "Invalid query result: %d, 0x%x\n", err,
			resp.hdr.status);
		if (!err)
			err = -EPROTO;
		return err;
	}

	*max_num_vports = resp.max_num_vports;

	return 0;
}

static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
				u32 *max_sq, u32 *max_rq, u32 *num_indir_entry)
{
	struct mana_query_vport_cfg_resp resp = {};
	struct mana_query_vport_cfg_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
			     sizeof(req), sizeof(resp));

	req.vport_index = vport_index;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err)
		return err;

	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
				   sizeof(resp));
	if (err)
		return err;

	if (resp.hdr.status)
		return -EPROTO;

	*max_sq = resp.max_num_sq;
	*max_rq = resp.max_num_rq;
	*num_indir_entry = resp.num_indirection_ent;

	apc->port_handle = resp.vport;
	ether_addr_copy(apc->mac_addr, resp.mac_addr);

	return 0;
}

static int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
			  u32 doorbell_pg_id)
{
	struct mana_config_vport_resp resp = {};
	struct mana_config_vport_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
			     sizeof(req), sizeof(resp));
	req.vport = apc->port_handle;
	req.pdid = protection_dom_id;
	req.doorbell_pageid = doorbell_pg_id;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err);
		goto out;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
			   err, resp.hdr.status);
		if (!err)
			err = -EPROTO;

		goto out;
	}

	apc->tx_shortform_allowed = resp.short_form_allowed;
	apc->tx_vp_offset = resp.tx_vport_offset;
out:
	return err;
}

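/* Configure RX steering (RSS) for the vPort. The request is variable-sized:
 * the indirection table of RX object handles is copied right after the
 * fixed-size part of the request.
 */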
static int mana_cfg_vport_steering(struct mana_port_context *apc,
				   enum TRI_STATE rx,
				   bool update_default_rxobj, bool update_key,
				   bool update_tab)
{
	u16 num_entries = MANA_INDIRECT_TABLE_SIZE;
	struct mana_cfg_rx_steer_req *req = NULL;
	struct mana_cfg_rx_steer_resp resp = {};
	struct net_device *ndev = apc->ndev;
	mana_handle_t *req_indir_tab;
	u32 req_buf_size;
	int err;

	req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
	req = kzalloc(req_buf_size, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
			     sizeof(resp));

	req->vport = apc->port_handle;
	req->num_indir_entries = num_entries;
	req->indir_tab_offset = sizeof(*req);
	req->rx_enable = rx;
	req->rss_enable = apc->rss_state;
	req->update_default_rxobj = update_default_rxobj;
	req->update_hashkey = update_key;
	req->update_indir_tab = update_tab;
	req->default_rxobj = apc->default_rxobj;

	if (update_key)
		memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);

	if (update_tab) {
		req_indir_tab = (mana_handle_t *)(req + 1);
		memcpy(req_indir_tab, apc->rxobj_table,
		       req->num_indir_entries * sizeof(mana_handle_t));
	}

	err = mana_send_request(apc->ac, req, req_buf_size, &resp,
				sizeof(resp));
	if (err) {
		netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
		goto out;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
				   sizeof(resp));
	if (err) {
		netdev_err(ndev, "vPort RX configuration failed: %d\n", err);
		goto out;
	}

	if (resp.hdr.status) {
		netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
			   resp.hdr.status);
		err = -EPROTO;
	}
out:
	kfree(req);
	return err;
}

static int mana_create_wq_obj(struct mana_port_context *apc,
			      mana_handle_t vport,
			      u32 wq_type, struct mana_obj_spec *wq_spec,
			      struct mana_obj_spec *cq_spec,
			      mana_handle_t *wq_obj)
{
	struct mana_create_wqobj_resp resp = {};
	struct mana_create_wqobj_req req = {};
	struct net_device *ndev = apc->ndev;
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
			     sizeof(req), sizeof(resp));
	req.vport = vport;
	req.wq_type = wq_type;
	req.wq_gdma_region = wq_spec->gdma_region;
	req.cq_gdma_region = cq_spec->gdma_region;
	req.wq_size = wq_spec->queue_size;
	req.cq_size = cq_spec->queue_size;
	req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
	req.cq_parent_qid = cq_spec->attached_eq;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(ndev, "Failed to create WQ object: %d\n", err);
		goto out;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
			   resp.hdr.status);
		if (!err)
			err = -EPROTO;
		goto out;
	}

	if (resp.wq_obj == INVALID_MANA_HANDLE) {
		netdev_err(ndev, "Got an invalid WQ object handle\n");
		err = -EPROTO;
		goto out;
	}

	*wq_obj = resp.wq_obj;
	wq_spec->queue_index = resp.wq_id;
	cq_spec->queue_index = resp.cq_id;

	return 0;
out:
	return err;
}

static void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
				mana_handle_t wq_obj)
{
	struct mana_destroy_wqobj_resp resp = {};
	struct mana_destroy_wqobj_req req = {};
	struct net_device *ndev = apc->ndev;
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
			     sizeof(req), sizeof(resp));
	req.wq_type = wq_type;
	req.wq_obj_handle = wq_obj;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
		return;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
				   sizeof(resp));
	if (err || resp.hdr.status)
		netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
			   resp.hdr.status);
}

static void mana_destroy_eq(struct mana_context *ac)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct gdma_queue *eq;
	int i;

	if (!ac->eqs)
		return;

	for (i = 0; i < gc->max_num_queues; i++) {
		eq = ac->eqs[i].eq;
		if (!eq)
			continue;

		mana_gd_destroy_queue(gc, eq);
	}

	kfree(ac->eqs);
	ac->eqs = NULL;
}

static int mana_create_eq(struct mana_context *ac)
{
	struct gdma_dev *gd = ac->gdma_dev;
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_queue_spec spec = {};
	int err;
	int i;

	ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq),
			  GFP_KERNEL);
	if (!ac->eqs)
		return -ENOMEM;

	spec.type = GDMA_EQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = EQ_SIZE;
	spec.eq.callback = NULL;
	spec.eq.context = ac->eqs;
	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;

	for (i = 0; i < gc->max_num_queues; i++) {
		err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
		if (err)
			goto out;
	}

	return 0;
out:
	mana_destroy_eq(ac);
	return err;
}

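/* Fence an RQ: ask the hardware for a CQE_RX_OBJECT_FENCE completion on the
 * RQ and wait up to 10 seconds for it; once it arrives, all WQEs posted
 * earlier on that RQ have been processed.
 */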
static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
{
	struct mana_fence_rq_resp resp = {};
	struct mana_fence_rq_req req = {};
	int err;

	init_completion(&rxq->fence_event);

	mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
			     sizeof(req), sizeof(resp));
	req.wq_obj_handle = rxq->rxobj;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n",
			   rxq->rxq_idx, err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
			   rxq->rxq_idx, err, resp.hdr.status);
		if (!err)
			err = -EPROTO;

		return err;
	}

	if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) {
		netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n",
			   rxq->rxq_idx);
		return -ETIMEDOUT;
	}

	return 0;
}

static void mana_fence_rqs(struct mana_port_context *apc)
{
	unsigned int rxq_idx;
	struct mana_rxq *rxq;
	int err;

	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
		rxq = apc->rxqs[rxq_idx];
		err = mana_fence_rq(apc, rxq);

		/* In case of any error, sleep for a while instead of
		 * waiting on the fence completion.
		 */
		if (err)
			msleep(100);
	}
}

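/* Advance the WQ tail past the given number of WQE units. head and tail are
 * free-running u32 counters, so the unsigned subtractions below remain
 * correct across wrap-around.
 */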
static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
{
	u32 used_space_old;
	u32 used_space_new;

	used_space_old = wq->head - wq->tail;
	used_space_new = wq->head - (wq->tail + num_units);

	if (WARN_ON_ONCE(used_space_new > used_space_old))
		return -ERANGE;

	wq->tail += num_units;
	return 0;
}

static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
{
	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
	struct device *dev = gc->dev;
	int i;

	dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);

	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
			       DMA_TO_DEVICE);
}

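/* Reclaim completed TX WQEs: dequeue the pending skbs in posting order,
 * unmap and free them, advance the SQ tail by the completed WQE units, and
 * wake the netdev queue if it was stopped and enough space is now free.
 */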
static void mana_poll_tx_cq(struct mana_cq *cq)
{
	struct gdma_comp *completions = cq->gdma_comp_buf;
	struct gdma_posted_wqe_info *wqe_info;
	unsigned int pkt_transmitted = 0;
	unsigned int wqe_unit_cnt = 0;
	struct mana_txq *txq = cq->txq;
	struct mana_port_context *apc;
	struct netdev_queue *net_txq;
	struct gdma_queue *gdma_wq;
	unsigned int avail_space;
	struct net_device *ndev;
	struct sk_buff *skb;
	bool txq_stopped;
	int comp_read;
	int i;

	ndev = txq->ndev;
	apc = netdev_priv(ndev);

	comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
				    CQE_POLLING_BUFFER);

	if (comp_read < 1)
		return;

	for (i = 0; i < comp_read; i++) {
		struct mana_tx_comp_oob *cqe_oob;

		if (WARN_ON_ONCE(!completions[i].is_sq))
			return;

		cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
		if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type !=
				 MANA_CQE_COMPLETION))
			return;

		switch (cqe_oob->cqe_hdr.cqe_type) {
		case CQE_TX_OKAY:
			break;

		case CQE_TX_SA_DROP:
		case CQE_TX_MTU_DROP:
		case CQE_TX_INVALID_OOB:
		case CQE_TX_INVALID_ETH_TYPE:
		case CQE_TX_HDR_PROCESSING_ERROR:
		case CQE_TX_VF_DISABLED:
		case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
		case CQE_TX_VPORT_DISABLED:
		case CQE_TX_VLAN_TAGGING_VIOLATION:
			WARN_ONCE(1, "TX: CQE error %d: ignored.\n",
				  cqe_oob->cqe_hdr.cqe_type);
			break;

		default:
			/* If the CQE type is unexpected, log an error, assert,
			 * and go through the error path.
			 */
			WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n",
				  cqe_oob->cqe_hdr.cqe_type);
			return;
		}

		if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))
			return;

		skb = skb_dequeue(&txq->pending_skbs);
		if (WARN_ON_ONCE(!skb))
			return;

		wqe_info = (struct gdma_posted_wqe_info *)skb->cb;
		wqe_unit_cnt += wqe_info->wqe_size_in_bu;

		mana_unmap_skb(skb, apc);

		napi_consume_skb(skb, cq->budget);

		pkt_transmitted++;
	}

	if (WARN_ON_ONCE(wqe_unit_cnt == 0))
		return;

	mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);

	gdma_wq = txq->gdma_sq;
	avail_space = mana_gd_wq_avail_space(gdma_wq);

	/* Ensure tail updated before checking q stop */
	smp_mb();

	net_txq = txq->net_txq;
	txq_stopped = netif_tx_queue_stopped(net_txq);

	/* Ensure checking txq_stopped before apc->port_is_up. */
	smp_rmb();

	if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
		netif_tx_wake_queue(net_txq);
		apc->eth_stats.wake_queue++;
	}

	if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
		WARN_ON_ONCE(1);

	cq->work_done = pkt_transmitted;
}

static void mana_post_pkt_rxq(struct mana_rxq *rxq)
{
	struct mana_recv_buf_oob *recv_buf_oob;
	u32 curr_index;
	int err;

	curr_index = rxq->buf_index++;
	if (rxq->buf_index == rxq->num_rx_buf)
		rxq->buf_index = 0;

	recv_buf_oob = &rxq->rx_oobs[curr_index];

	err = mana_gd_post_and_ring(rxq->gdma_rq, &recv_buf_oob->wqe_req,
				    &recv_buf_oob->wqe_inf);
	if (WARN_ON_ONCE(err))
		return;

	WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
}

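/* Wrap the receive buffer in an skb. If XDP ran on the frame, honor the
 * data pointers of the xdp_buff (an XDP program may have adjusted them);
 * otherwise just reserve the standard XDP headroom and claim pkt_len bytes.
 */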
static struct sk_buff *mana_build_skb(void *buf_va, uint pkt_len,
				      struct xdp_buff *xdp)
{
	struct sk_buff *skb = build_skb(buf_va, PAGE_SIZE);

	if (!skb)
		return NULL;

	if (xdp->data_hard_start) {
		skb_reserve(skb, xdp->data - xdp->data_hard_start);
		skb_put(skb, xdp->data_end - xdp->data);
	} else {
		skb_reserve(skb, XDP_PACKET_HEADROOM);
		skb_put(skb, pkt_len);
	}

	return skb;
}

static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
			struct mana_rxq *rxq)
{
	struct mana_stats_rx *rx_stats = &rxq->stats;
	struct net_device *ndev = rxq->ndev;
	uint pkt_len = cqe->ppi[0].pkt_len;
	u16 rxq_idx = rxq->rxq_idx;
	struct napi_struct *napi;
	struct xdp_buff xdp = {};
	struct sk_buff *skb;
	u32 hash_value;
	u32 act;

	rxq->rx_cq.work_done++;
	napi = &rxq->rx_cq.napi;

	if (!buf_va) {
		++ndev->stats.rx_dropped;
		return;
	}

	act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);

	if (act != XDP_PASS && act != XDP_TX)
		goto drop_xdp;

	skb = mana_build_skb(buf_va, pkt_len, &xdp);

	if (!skb)
		goto drop;

	skb->dev = napi->dev;

	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);
	skb_record_rx_queue(skb, rxq_idx);

	if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) {
		if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) {
		hash_value = cqe->ppi[0].pkt_hash;

		if (cqe->rx_hashtype & MANA_HASH_L4)
			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4);
		else
			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
	}

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += pkt_len;

	if (act == XDP_TX)
		rx_stats->xdp_tx++;
	u64_stats_update_end(&rx_stats->syncp);

	if (act == XDP_TX) {
		skb_set_queue_mapping(skb, rxq_idx);
		mana_xdp_tx(skb, ndev);
		return;
	}

	napi_gro_receive(napi, skb);

	return;

drop_xdp:
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->xdp_drop++;
	u64_stats_update_end(&rx_stats->syncp);

drop:
	WARN_ON_ONCE(rxq->xdp_save_page);
	rxq->xdp_save_page = virt_to_page(buf_va);

	++ndev->stats.rx_dropped;

	return;
}

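/* Handle one RX CQE: swap a fresh page (or the page saved by a dropped XDP
 * frame) into the ring slot, pass the old buffer up via mana_rx_skb(), then
 * advance the RQ tail and repost a receive WQE.
 */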
static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
				struct gdma_comp *cqe)
{
	struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
	struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
	struct net_device *ndev = rxq->ndev;
	struct mana_recv_buf_oob *rxbuf_oob;
	struct device *dev = gc->dev;
	void *new_buf, *old_buf;
	struct page *new_page;
	u32 curr, pktlen;
	dma_addr_t da;

	switch (oob->cqe_hdr.cqe_type) {
	case CQE_RX_OKAY:
		break;

	case CQE_RX_TRUNCATED:
		++ndev->stats.rx_dropped;
		rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
		netdev_warn_once(ndev, "Dropped a truncated packet\n");
		goto drop;

	case CQE_RX_COALESCED_4:
		netdev_err(ndev, "RX coalescing is unsupported\n");
		return;

	case CQE_RX_OBJECT_FENCE:
		complete(&rxq->fence_event);
		return;

	default:
		netdev_err(ndev, "Unknown RX CQE type = %d\n",
			   oob->cqe_hdr.cqe_type);
		return;
	}

	if (oob->cqe_hdr.cqe_type != CQE_RX_OKAY)
		return;

	pktlen = oob->ppi[0].pkt_len;

	if (pktlen == 0) {
		/* Data packets should never have a packet length of zero */
		netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
			   rxq->gdma_id, cq->gdma_id, rxq->rxobj);
		return;
	}

	curr = rxq->buf_index;
	rxbuf_oob = &rxq->rx_oobs[curr];
	WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);

	/* Reuse XDP dropped page if available */
	if (rxq->xdp_save_page) {
		new_page = rxq->xdp_save_page;
		rxq->xdp_save_page = NULL;
	} else {
		new_page = alloc_page(GFP_ATOMIC);
	}

	if (new_page) {
		da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize,
				  DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, da)) {
			__free_page(new_page);
			new_page = NULL;
		}
	}

	new_buf = new_page ? page_to_virt(new_page) : NULL;

	if (new_buf) {
		dma_unmap_page(dev, rxbuf_oob->buf_dma_addr, rxq->datasize,
			       DMA_FROM_DEVICE);

		old_buf = rxbuf_oob->buf_va;

		/* refresh the rxbuf_oob with the new page */
		rxbuf_oob->buf_va = new_buf;
		rxbuf_oob->buf_dma_addr = da;
		rxbuf_oob->sgl[0].address = rxbuf_oob->buf_dma_addr;
	} else {
		old_buf = NULL; /* drop the packet if no memory */
	}

	mana_rx_skb(old_buf, oob, rxq);

drop:
	mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);

	mana_post_pkt_rxq(rxq);
}

static void mana_poll_rx_cq(struct mana_cq *cq)
{
	struct gdma_comp *comp = cq->gdma_comp_buf;
	int comp_read, i;

	comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
	WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);

	for (i = 0; i < comp_read; i++) {
		if (WARN_ON_ONCE(comp[i].is_sq))
			return;

		/* verify recv cqe references the right rxq */
		if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
			return;

		mana_process_rx_cqe(cq->rxq, cq, &comp[i]);
	}
}

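/* Common CQ processing for the NAPI poll path: drain the CQ, then re-arm it
 * only if the budget was not exhausted and NAPI completed, per the usual
 * NAPI contract.
 */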
1185ca9c54d2SDexuan Cui static void mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
1186ca9c54d2SDexuan Cui {
1187ca9c54d2SDexuan Cui 	struct mana_cq *cq = context;
1188e1b5683fSHaiyang Zhang 	u8 arm_bit;
1189ca9c54d2SDexuan Cui 
1190ca9c54d2SDexuan Cui 	WARN_ON_ONCE(cq->gdma_cq != gdma_queue);
1191ca9c54d2SDexuan Cui 
1192ca9c54d2SDexuan Cui 	if (cq->type == MANA_CQ_TYPE_RX)
1193ca9c54d2SDexuan Cui 		mana_poll_rx_cq(cq);
1194ca9c54d2SDexuan Cui 	else
1195ca9c54d2SDexuan Cui 		mana_poll_tx_cq(cq);
1196ca9c54d2SDexuan Cui 
1197e1b5683fSHaiyang Zhang 	if (cq->work_done < cq->budget &&
1198e1b5683fSHaiyang Zhang 	    napi_complete_done(&cq->napi, cq->work_done)) {
1199e1b5683fSHaiyang Zhang 		arm_bit = SET_ARM_BIT;
1200e1b5683fSHaiyang Zhang 	} else {
1201e1b5683fSHaiyang Zhang 		arm_bit = 0;
1202e1b5683fSHaiyang Zhang 	}
1203e1b5683fSHaiyang Zhang 
1204e1b5683fSHaiyang Zhang 	mana_gd_ring_cq(gdma_queue, arm_bit);
1205e1b5683fSHaiyang Zhang }
1206e1b5683fSHaiyang Zhang 
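/* NAPI poll callback. cq->work_done is accumulated while completions are
 * processed and can exceed the budget, since mana_poll_rx_cq() drains a
 * fixed-size batch of CQEs; the return value is therefore clamped to honor
 * NAPI's contract that a poll never reports more than its budget.
 */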
1207e1b5683fSHaiyang Zhang static int mana_poll(struct napi_struct *napi, int budget)
1208e1b5683fSHaiyang Zhang {
1209e1b5683fSHaiyang Zhang 	struct mana_cq *cq = container_of(napi, struct mana_cq, napi);
1210e1b5683fSHaiyang Zhang 
1211e1b5683fSHaiyang Zhang 	cq->work_done = 0;
1212e1b5683fSHaiyang Zhang 	cq->budget = budget;
1213e1b5683fSHaiyang Zhang 
1214e1b5683fSHaiyang Zhang 	mana_cq_handler(cq, cq->gdma_cq);
1215e1b5683fSHaiyang Zhang 
1216e1b5683fSHaiyang Zhang 	return min(cq->work_done, budget);
1217e1b5683fSHaiyang Zhang }
1218e1b5683fSHaiyang Zhang 
1219e1b5683fSHaiyang Zhang static void mana_schedule_napi(void *context, struct gdma_queue *gdma_queue)
1220e1b5683fSHaiyang Zhang {
1221e1b5683fSHaiyang Zhang 	struct mana_cq *cq = context;
1222e1b5683fSHaiyang Zhang 
1223e1b5683fSHaiyang Zhang 	napi_schedule_irqoff(&cq->napi);
1224ca9c54d2SDexuan Cui }
1225ca9c54d2SDexuan Cui 
1226ca9c54d2SDexuan Cui static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
1227ca9c54d2SDexuan Cui {
1228ca9c54d2SDexuan Cui 	struct gdma_dev *gd = apc->ac->gdma_dev;
1229ca9c54d2SDexuan Cui 
1230ca9c54d2SDexuan Cui 	if (!cq->gdma_cq)
1231ca9c54d2SDexuan Cui 		return;
1232ca9c54d2SDexuan Cui 
1233ca9c54d2SDexuan Cui 	mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
1234ca9c54d2SDexuan Cui }
1235ca9c54d2SDexuan Cui 
1236ca9c54d2SDexuan Cui static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
1237ca9c54d2SDexuan Cui {
1238ca9c54d2SDexuan Cui 	struct gdma_dev *gd = apc->ac->gdma_dev;
1239ca9c54d2SDexuan Cui 
1240ca9c54d2SDexuan Cui 	if (!txq->gdma_sq)
1241ca9c54d2SDexuan Cui 		return;
1242ca9c54d2SDexuan Cui 
1243ca9c54d2SDexuan Cui 	mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
1244ca9c54d2SDexuan Cui }
1245ca9c54d2SDexuan Cui 
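/* Teardown order matters: quiesce and delete NAPI first so no poller can
 * still touch the queues, then destroy the hardware WQ object, and only
 * then free the GDMA CQ and SQ it referenced.
 */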
1246ca9c54d2SDexuan Cui static void mana_destroy_txq(struct mana_port_context *apc)
1247ca9c54d2SDexuan Cui {
1248e1b5683fSHaiyang Zhang 	struct napi_struct *napi;
1249ca9c54d2SDexuan Cui 	int i;
1250ca9c54d2SDexuan Cui 
1251ca9c54d2SDexuan Cui 	if (!apc->tx_qp)
1252ca9c54d2SDexuan Cui 		return;
1253ca9c54d2SDexuan Cui 
1254ca9c54d2SDexuan Cui 	for (i = 0; i < apc->num_queues; i++) {
1255e1b5683fSHaiyang Zhang 		napi = &apc->tx_qp[i].tx_cq.napi;
1256e1b5683fSHaiyang Zhang 		napi_synchronize(napi);
1257e1b5683fSHaiyang Zhang 		napi_disable(napi);
1258e1b5683fSHaiyang Zhang 		netif_napi_del(napi);
1259e1b5683fSHaiyang Zhang 
1260ca9c54d2SDexuan Cui 		mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
1261ca9c54d2SDexuan Cui 
1262ca9c54d2SDexuan Cui 		mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
1263ca9c54d2SDexuan Cui 
1264ca9c54d2SDexuan Cui 		mana_deinit_txq(apc, &apc->tx_qp[i].txq);
1265ca9c54d2SDexuan Cui 	}
1266ca9c54d2SDexuan Cui 
1267ca9c54d2SDexuan Cui 	kfree(apc->tx_qp);
1268ca9c54d2SDexuan Cui 	apc->tx_qp = NULL;
1269ca9c54d2SDexuan Cui }
1270ca9c54d2SDexuan Cui 
1271ca9c54d2SDexuan Cui static int mana_create_txq(struct mana_port_context *apc,
1272ca9c54d2SDexuan Cui 			   struct net_device *net)
1273ca9c54d2SDexuan Cui {
12741e2d0824SHaiyang Zhang 	struct mana_context *ac = apc->ac;
12751e2d0824SHaiyang Zhang 	struct gdma_dev *gd = ac->gdma_dev;
1276ca9c54d2SDexuan Cui 	struct mana_obj_spec wq_spec;
1277ca9c54d2SDexuan Cui 	struct mana_obj_spec cq_spec;
1278ca9c54d2SDexuan Cui 	struct gdma_queue_spec spec;
1279ca9c54d2SDexuan Cui 	struct gdma_context *gc;
1280ca9c54d2SDexuan Cui 	struct mana_txq *txq;
1281ca9c54d2SDexuan Cui 	struct mana_cq *cq;
1282ca9c54d2SDexuan Cui 	u32 txq_size;
1283ca9c54d2SDexuan Cui 	u32 cq_size;
1284ca9c54d2SDexuan Cui 	int err;
1285ca9c54d2SDexuan Cui 	int i;
1286ca9c54d2SDexuan Cui 
1287ca9c54d2SDexuan Cui 	apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp),
1288ca9c54d2SDexuan Cui 			     GFP_KERNEL);
1289ca9c54d2SDexuan Cui 	if (!apc->tx_qp)
1290ca9c54d2SDexuan Cui 		return -ENOMEM;
1291ca9c54d2SDexuan Cui 
1292ca9c54d2SDexuan Cui 	/* The minimum size of the WQE is 32 bytes, hence
1293ca9c54d2SDexuan Cui 	 *  MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
1294ca9c54d2SDexuan Cui 	 *  the SQ can store. This value is then used to size other queues
1295ca9c54d2SDexuan Cui 	 *  to prevent overflow.
1296ca9c54d2SDexuan Cui 	 */
1297ca9c54d2SDexuan Cui 	txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
1298ca9c54d2SDexuan Cui 	BUILD_BUG_ON(!PAGE_ALIGNED(txq_size));
1299ca9c54d2SDexuan Cui 
1300ca9c54d2SDexuan Cui 	cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
1301ca9c54d2SDexuan Cui 	cq_size = PAGE_ALIGN(cq_size);
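	/* Worked example, assuming the driver defaults of
	 * MAX_SEND_BUFFERS_PER_QUEUE == 256 and COMP_ENTRY_SIZE == 64:
	 * txq_size = 256 * 32 = 8192 bytes, which is page-aligned on
	 * 4K-page systems (the BUILD_BUG_ON above), and
	 * cq_size = 256 * 64 = 16384 bytes.
	 */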
1302ca9c54d2SDexuan Cui 
1303ca9c54d2SDexuan Cui 	gc = gd->gdma_context;
1304ca9c54d2SDexuan Cui 
1305ca9c54d2SDexuan Cui 	for (i = 0; i < apc->num_queues; i++) {
1306ca9c54d2SDexuan Cui 		apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
1307ca9c54d2SDexuan Cui 
1308ca9c54d2SDexuan Cui 		/* Create SQ */
1309ca9c54d2SDexuan Cui 		txq = &apc->tx_qp[i].txq;
1310ca9c54d2SDexuan Cui 
1311ca9c54d2SDexuan Cui 		u64_stats_init(&txq->stats.syncp);
1312ca9c54d2SDexuan Cui 		txq->ndev = net;
1313ca9c54d2SDexuan Cui 		txq->net_txq = netdev_get_tx_queue(net, i);
1314ca9c54d2SDexuan Cui 		txq->vp_offset = apc->tx_vp_offset;
1315ca9c54d2SDexuan Cui 		skb_queue_head_init(&txq->pending_skbs);
1316ca9c54d2SDexuan Cui 
1317ca9c54d2SDexuan Cui 		memset(&spec, 0, sizeof(spec));
1318ca9c54d2SDexuan Cui 		spec.type = GDMA_SQ;
1319ca9c54d2SDexuan Cui 		spec.monitor_avl_buf = true;
1320ca9c54d2SDexuan Cui 		spec.queue_size = txq_size;
1321ca9c54d2SDexuan Cui 		err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
1322ca9c54d2SDexuan Cui 		if (err)
1323ca9c54d2SDexuan Cui 			goto out;
1324ca9c54d2SDexuan Cui 
1325ca9c54d2SDexuan Cui 		/* Create SQ's CQ */
1326ca9c54d2SDexuan Cui 		cq = &apc->tx_qp[i].tx_cq;
1327ca9c54d2SDexuan Cui 		cq->type = MANA_CQ_TYPE_TX;
1328ca9c54d2SDexuan Cui 
1329ca9c54d2SDexuan Cui 		cq->txq = txq;
1330ca9c54d2SDexuan Cui 
1331ca9c54d2SDexuan Cui 		memset(&spec, 0, sizeof(spec));
1332ca9c54d2SDexuan Cui 		spec.type = GDMA_CQ;
1333ca9c54d2SDexuan Cui 		spec.monitor_avl_buf = false;
1334ca9c54d2SDexuan Cui 		spec.queue_size = cq_size;
1335e1b5683fSHaiyang Zhang 		spec.cq.callback = mana_schedule_napi;
13361e2d0824SHaiyang Zhang 		spec.cq.parent_eq = ac->eqs[i].eq;
1337ca9c54d2SDexuan Cui 		spec.cq.context = cq;
1338ca9c54d2SDexuan Cui 		err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
1339ca9c54d2SDexuan Cui 		if (err)
1340ca9c54d2SDexuan Cui 			goto out;
1341ca9c54d2SDexuan Cui 
1342ca9c54d2SDexuan Cui 		memset(&wq_spec, 0, sizeof(wq_spec));
1343ca9c54d2SDexuan Cui 		memset(&cq_spec, 0, sizeof(cq_spec));
1344ca9c54d2SDexuan Cui 
1345ca9c54d2SDexuan Cui 		wq_spec.gdma_region = txq->gdma_sq->mem_info.gdma_region;
1346ca9c54d2SDexuan Cui 		wq_spec.queue_size = txq->gdma_sq->queue_size;
1347ca9c54d2SDexuan Cui 
1348ca9c54d2SDexuan Cui 		cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
1349ca9c54d2SDexuan Cui 		cq_spec.queue_size = cq->gdma_cq->queue_size;
1350ca9c54d2SDexuan Cui 		cq_spec.modr_ctx_id = 0;
1351ca9c54d2SDexuan Cui 		cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
1352ca9c54d2SDexuan Cui 
1353ca9c54d2SDexuan Cui 		err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
1354ca9c54d2SDexuan Cui 					 &wq_spec, &cq_spec,
1355ca9c54d2SDexuan Cui 					 &apc->tx_qp[i].tx_object);
1356ca9c54d2SDexuan Cui 
1357ca9c54d2SDexuan Cui 		if (err)
1358ca9c54d2SDexuan Cui 			goto out;
1359ca9c54d2SDexuan Cui 
1360ca9c54d2SDexuan Cui 		txq->gdma_sq->id = wq_spec.queue_index;
1361ca9c54d2SDexuan Cui 		cq->gdma_cq->id = cq_spec.queue_index;
1362ca9c54d2SDexuan Cui 
1363ca9c54d2SDexuan Cui 		txq->gdma_sq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1364ca9c54d2SDexuan Cui 		cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1365ca9c54d2SDexuan Cui 
1366ca9c54d2SDexuan Cui 		txq->gdma_txq_id = txq->gdma_sq->id;
1367ca9c54d2SDexuan Cui 
1368ca9c54d2SDexuan Cui 		cq->gdma_id = cq->gdma_cq->id;
1369ca9c54d2SDexuan Cui 
1370b9078845SChristophe JAILLET 		if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
1371b9078845SChristophe JAILLET 			err = -EINVAL;
1372b9078845SChristophe JAILLET 			goto out;
1373b9078845SChristophe JAILLET 		}
1374ca9c54d2SDexuan Cui 
1375ca9c54d2SDexuan Cui 		gc->cq_table[cq->gdma_id] = cq->gdma_cq;
1376ca9c54d2SDexuan Cui 
1377e1b5683fSHaiyang Zhang 		netif_tx_napi_add(net, &cq->napi, mana_poll, NAPI_POLL_WEIGHT);
1378e1b5683fSHaiyang Zhang 		napi_enable(&cq->napi);
1379e1b5683fSHaiyang Zhang 
1380e1b5683fSHaiyang Zhang 		mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
1381ca9c54d2SDexuan Cui 	}
1382ca9c54d2SDexuan Cui 
1383ca9c54d2SDexuan Cui 	return 0;
1384ca9c54d2SDexuan Cui out:
1385ca9c54d2SDexuan Cui 	mana_destroy_txq(apc);
1386ca9c54d2SDexuan Cui 	return err;
1387ca9c54d2SDexuan Cui }
1388ca9c54d2SDexuan Cui 
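/* validate_state is true when tearing down a live port: in that case, wait
 * for any in-flight NAPI poll to finish before disabling NAPI. The rest of
 * the teardown mirrors mana_create_rxq() in reverse.
 */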
1389ca9c54d2SDexuan Cui static void mana_destroy_rxq(struct mana_port_context *apc,
1390ca9c54d2SDexuan Cui 			     struct mana_rxq *rxq, bool validate_state)
1392ca9c54d2SDexuan Cui {
1393ca9c54d2SDexuan Cui 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1394ca9c54d2SDexuan Cui 	struct mana_recv_buf_oob *rx_oob;
1395ca9c54d2SDexuan Cui 	struct device *dev = gc->dev;
1396e1b5683fSHaiyang Zhang 	struct napi_struct *napi;
1397ca9c54d2SDexuan Cui 	int i;
1398ca9c54d2SDexuan Cui 
1399ca9c54d2SDexuan Cui 	if (!rxq)
1400ca9c54d2SDexuan Cui 		return;
1401ca9c54d2SDexuan Cui 
1402e1b5683fSHaiyang Zhang 	napi = &rxq->rx_cq.napi;
1403e1b5683fSHaiyang Zhang 
1404ca9c54d2SDexuan Cui 	if (validate_state)
1405e1b5683fSHaiyang Zhang 		napi_synchronize(napi);
1406e1b5683fSHaiyang Zhang 
1407e1b5683fSHaiyang Zhang 	napi_disable(napi);
1408ed5356b5SHaiyang Zhang 
1409ed5356b5SHaiyang Zhang 	xdp_rxq_info_unreg(&rxq->xdp_rxq);
1410ed5356b5SHaiyang Zhang 
1411e1b5683fSHaiyang Zhang 	netif_napi_del(napi);
1412ca9c54d2SDexuan Cui 
1413ca9c54d2SDexuan Cui 	mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
1414ca9c54d2SDexuan Cui 
1415ca9c54d2SDexuan Cui 	mana_deinit_cq(apc, &rxq->rx_cq);
1416ca9c54d2SDexuan Cui 
1417a6bf5703SHaiyang Zhang 	if (rxq->xdp_save_page)
1418a6bf5703SHaiyang Zhang 		__free_page(rxq->xdp_save_page);
1419a6bf5703SHaiyang Zhang 
1420ca9c54d2SDexuan Cui 	for (i = 0; i < rxq->num_rx_buf; i++) {
1421ca9c54d2SDexuan Cui 		rx_oob = &rxq->rx_oobs[i];
1422ca9c54d2SDexuan Cui 
1423ca9c54d2SDexuan Cui 		if (!rx_oob->buf_va)
1424ca9c54d2SDexuan Cui 			continue;
1425ca9c54d2SDexuan Cui 
1426ca9c54d2SDexuan Cui 		dma_unmap_page(dev, rx_oob->buf_dma_addr, rxq->datasize,
1427ca9c54d2SDexuan Cui 			       DMA_FROM_DEVICE);
1428ca9c54d2SDexuan Cui 
1429ca9c54d2SDexuan Cui 		free_page((unsigned long)rx_oob->buf_va);
1430ca9c54d2SDexuan Cui 		rx_oob->buf_va = NULL;
1431ca9c54d2SDexuan Cui 	}
1432ca9c54d2SDexuan Cui 
1433ca9c54d2SDexuan Cui 	if (rxq->gdma_rq)
1434ca9c54d2SDexuan Cui 		mana_gd_destroy_queue(gc, rxq->gdma_rq);
1435ca9c54d2SDexuan Cui 
1436ca9c54d2SDexuan Cui 	kfree(rxq);
1437ca9c54d2SDexuan Cui }
1438ca9c54d2SDexuan Cui 
1439ca9c54d2SDexuan Cui #define MANA_WQE_HEADER_SIZE 16
1440ca9c54d2SDexuan Cui #define MANA_WQE_SGE_SIZE 16
1441ca9c54d2SDexuan Cui 
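/* Each RX WQE is a 16-byte header plus one 16-byte SGE, rounded up to the
 * 32-byte WQE granularity: ALIGN(16 + 16 * 1, 32) == 32 bytes per posted
 * buffer, which mana_alloc_rx_wqe() accumulates into *rxq_size.
 */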
1442ca9c54d2SDexuan Cui static int mana_alloc_rx_wqe(struct mana_port_context *apc,
1443ca9c54d2SDexuan Cui 			     struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size)
1444ca9c54d2SDexuan Cui {
1445ca9c54d2SDexuan Cui 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1446ca9c54d2SDexuan Cui 	struct mana_recv_buf_oob *rx_oob;
1447ca9c54d2SDexuan Cui 	struct device *dev = gc->dev;
1448ca9c54d2SDexuan Cui 	struct page *page;
1449ca9c54d2SDexuan Cui 	dma_addr_t da;
1450ca9c54d2SDexuan Cui 	u32 buf_idx;
1451ca9c54d2SDexuan Cui 
1452ca9c54d2SDexuan Cui 	WARN_ON(rxq->datasize == 0 || rxq->datasize > PAGE_SIZE);
1453ca9c54d2SDexuan Cui 
1454ca9c54d2SDexuan Cui 	*rxq_size = 0;
1455ca9c54d2SDexuan Cui 	*cq_size = 0;
1456ca9c54d2SDexuan Cui 
1457ca9c54d2SDexuan Cui 	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
1458ca9c54d2SDexuan Cui 		rx_oob = &rxq->rx_oobs[buf_idx];
1459ca9c54d2SDexuan Cui 		memset(rx_oob, 0, sizeof(*rx_oob));
1460ca9c54d2SDexuan Cui 
1461ca9c54d2SDexuan Cui 		page = alloc_page(GFP_KERNEL);
1462ca9c54d2SDexuan Cui 		if (!page)
1463ca9c54d2SDexuan Cui 			return -ENOMEM;
1464ca9c54d2SDexuan Cui 
1465ed5356b5SHaiyang Zhang 		da = dma_map_page(dev, page, XDP_PACKET_HEADROOM, rxq->datasize,
1466ed5356b5SHaiyang Zhang 				  DMA_FROM_DEVICE);
1467ca9c54d2SDexuan Cui 
1468ca9c54d2SDexuan Cui 		if (dma_mapping_error(dev, da)) {
1469ca9c54d2SDexuan Cui 			__free_page(page);
1470ca9c54d2SDexuan Cui 			return -ENOMEM;
1471ca9c54d2SDexuan Cui 		}
1472ca9c54d2SDexuan Cui 
1473ca9c54d2SDexuan Cui 		rx_oob->buf_va = page_to_virt(page);
1474ca9c54d2SDexuan Cui 		rx_oob->buf_dma_addr = da;
1475ca9c54d2SDexuan Cui 
1476ca9c54d2SDexuan Cui 		rx_oob->num_sge = 1;
1477ca9c54d2SDexuan Cui 		rx_oob->sgl[0].address = rx_oob->buf_dma_addr;
1478ca9c54d2SDexuan Cui 		rx_oob->sgl[0].size = rxq->datasize;
1479ca9c54d2SDexuan Cui 		rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;
1480ca9c54d2SDexuan Cui 
1481ca9c54d2SDexuan Cui 		rx_oob->wqe_req.sgl = rx_oob->sgl;
1482ca9c54d2SDexuan Cui 		rx_oob->wqe_req.num_sge = rx_oob->num_sge;
1483ca9c54d2SDexuan Cui 		rx_oob->wqe_req.inline_oob_size = 0;
1484ca9c54d2SDexuan Cui 		rx_oob->wqe_req.inline_oob_data = NULL;
1485ca9c54d2SDexuan Cui 		rx_oob->wqe_req.flags = 0;
1486ca9c54d2SDexuan Cui 		rx_oob->wqe_req.client_data_unit = 0;
1487ca9c54d2SDexuan Cui 
1488ca9c54d2SDexuan Cui 		*rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
1489ca9c54d2SDexuan Cui 				   MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
1490ca9c54d2SDexuan Cui 		*cq_size += COMP_ENTRY_SIZE;
1491ca9c54d2SDexuan Cui 	}
1492ca9c54d2SDexuan Cui 
1493ca9c54d2SDexuan Cui 	return 0;
1494ca9c54d2SDexuan Cui }
1495ca9c54d2SDexuan Cui 
1496ca9c54d2SDexuan Cui static int mana_push_wqe(struct mana_rxq *rxq)
1497ca9c54d2SDexuan Cui {
1498ca9c54d2SDexuan Cui 	struct mana_recv_buf_oob *rx_oob;
1499ca9c54d2SDexuan Cui 	u32 buf_idx;
1500ca9c54d2SDexuan Cui 	int err;
1501ca9c54d2SDexuan Cui 
1502ca9c54d2SDexuan Cui 	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
1503ca9c54d2SDexuan Cui 		rx_oob = &rxq->rx_oobs[buf_idx];
1504ca9c54d2SDexuan Cui 
1505ca9c54d2SDexuan Cui 		err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
1506ca9c54d2SDexuan Cui 					    &rx_oob->wqe_inf);
1507ca9c54d2SDexuan Cui 		if (err)
1508ca9c54d2SDexuan Cui 			return -ENOSPC;
1509ca9c54d2SDexuan Cui 	}
1510ca9c54d2SDexuan Cui 
1511ca9c54d2SDexuan Cui 	return 0;
1512ca9c54d2SDexuan Cui }
1513ca9c54d2SDexuan Cui 
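/* RX buffers are DMA-mapped at offset XDP_PACKET_HEADROOM into their page,
 * leaving headroom that an attached XDP program may use. datasize is rounded
 * up to a 64-byte boundary (presumably a hardware alignment requirement).
 */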
1514ca9c54d2SDexuan Cui static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
1515ca9c54d2SDexuan Cui 					u32 rxq_idx, struct mana_eq *eq,
1516ca9c54d2SDexuan Cui 					struct net_device *ndev)
1517ca9c54d2SDexuan Cui {
1518ca9c54d2SDexuan Cui 	struct gdma_dev *gd = apc->ac->gdma_dev;
1519ca9c54d2SDexuan Cui 	struct mana_obj_spec wq_spec;
1520ca9c54d2SDexuan Cui 	struct mana_obj_spec cq_spec;
1521ca9c54d2SDexuan Cui 	struct gdma_queue_spec spec;
1522ca9c54d2SDexuan Cui 	struct mana_cq *cq = NULL;
1523ca9c54d2SDexuan Cui 	struct gdma_context *gc;
1524ca9c54d2SDexuan Cui 	u32 cq_size, rq_size;
1525ca9c54d2SDexuan Cui 	struct mana_rxq *rxq;
1526ca9c54d2SDexuan Cui 	int err;
1527ca9c54d2SDexuan Cui 
1528ca9c54d2SDexuan Cui 	gc = gd->gdma_context;
1529ca9c54d2SDexuan Cui 
1530ea89c862SGustavo A. R. Silva 	rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE),
1531ca9c54d2SDexuan Cui 		      GFP_KERNEL);
1532ca9c54d2SDexuan Cui 	if (!rxq)
1533ca9c54d2SDexuan Cui 		return NULL;
1534ca9c54d2SDexuan Cui 
1535ca9c54d2SDexuan Cui 	rxq->ndev = ndev;
1536ca9c54d2SDexuan Cui 	rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
1537ca9c54d2SDexuan Cui 	rxq->rxq_idx = rxq_idx;
1538ca9c54d2SDexuan Cui 	rxq->datasize = ALIGN(MAX_FRAME_SIZE, 64);
1539ca9c54d2SDexuan Cui 	rxq->rxobj = INVALID_MANA_HANDLE;
1540ca9c54d2SDexuan Cui 
1541ca9c54d2SDexuan Cui 	err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
1542ca9c54d2SDexuan Cui 	if (err)
1543ca9c54d2SDexuan Cui 		goto out;
1544ca9c54d2SDexuan Cui 
1545ca9c54d2SDexuan Cui 	rq_size = PAGE_ALIGN(rq_size);
1546ca9c54d2SDexuan Cui 	cq_size = PAGE_ALIGN(cq_size);
1547ca9c54d2SDexuan Cui 
1548ca9c54d2SDexuan Cui 	/* Create RQ */
1549ca9c54d2SDexuan Cui 	memset(&spec, 0, sizeof(spec));
1550ca9c54d2SDexuan Cui 	spec.type = GDMA_RQ;
1551ca9c54d2SDexuan Cui 	spec.monitor_avl_buf = true;
1552ca9c54d2SDexuan Cui 	spec.queue_size = rq_size;
1553ca9c54d2SDexuan Cui 	err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
1554ca9c54d2SDexuan Cui 	if (err)
1555ca9c54d2SDexuan Cui 		goto out;
1556ca9c54d2SDexuan Cui 
1557ca9c54d2SDexuan Cui 	/* Create RQ's CQ */
1558ca9c54d2SDexuan Cui 	cq = &rxq->rx_cq;
1559ca9c54d2SDexuan Cui 	cq->type = MANA_CQ_TYPE_RX;
1560ca9c54d2SDexuan Cui 	cq->rxq = rxq;
1561ca9c54d2SDexuan Cui 
1562ca9c54d2SDexuan Cui 	memset(&spec, 0, sizeof(spec));
1563ca9c54d2SDexuan Cui 	spec.type = GDMA_CQ;
1564ca9c54d2SDexuan Cui 	spec.monitor_avl_buf = false;
1565ca9c54d2SDexuan Cui 	spec.queue_size = cq_size;
1566e1b5683fSHaiyang Zhang 	spec.cq.callback = mana_schedule_napi;
1567ca9c54d2SDexuan Cui 	spec.cq.parent_eq = eq->eq;
1568ca9c54d2SDexuan Cui 	spec.cq.context = cq;
1569ca9c54d2SDexuan Cui 	err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
1570ca9c54d2SDexuan Cui 	if (err)
1571ca9c54d2SDexuan Cui 		goto out;
1572ca9c54d2SDexuan Cui 
1573ca9c54d2SDexuan Cui 	memset(&wq_spec, 0, sizeof(wq_spec));
1574ca9c54d2SDexuan Cui 	memset(&cq_spec, 0, sizeof(cq_spec));
1575ca9c54d2SDexuan Cui 	wq_spec.gdma_region = rxq->gdma_rq->mem_info.gdma_region;
1576ca9c54d2SDexuan Cui 	wq_spec.queue_size = rxq->gdma_rq->queue_size;
1577ca9c54d2SDexuan Cui 
1578ca9c54d2SDexuan Cui 	cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
1579ca9c54d2SDexuan Cui 	cq_spec.queue_size = cq->gdma_cq->queue_size;
1580ca9c54d2SDexuan Cui 	cq_spec.modr_ctx_id = 0;
1581ca9c54d2SDexuan Cui 	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
1582ca9c54d2SDexuan Cui 
1583ca9c54d2SDexuan Cui 	err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
1584ca9c54d2SDexuan Cui 				 &wq_spec, &cq_spec, &rxq->rxobj);
1585ca9c54d2SDexuan Cui 	if (err)
1586ca9c54d2SDexuan Cui 		goto out;
1587ca9c54d2SDexuan Cui 
1588ca9c54d2SDexuan Cui 	rxq->gdma_rq->id = wq_spec.queue_index;
1589ca9c54d2SDexuan Cui 	cq->gdma_cq->id = cq_spec.queue_index;
1590ca9c54d2SDexuan Cui 
1591ca9c54d2SDexuan Cui 	rxq->gdma_rq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1592ca9c54d2SDexuan Cui 	cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1593ca9c54d2SDexuan Cui 
1594ca9c54d2SDexuan Cui 	rxq->gdma_id = rxq->gdma_rq->id;
1595ca9c54d2SDexuan Cui 	cq->gdma_id = cq->gdma_cq->id;
1596ca9c54d2SDexuan Cui 
1597ca9c54d2SDexuan Cui 	err = mana_push_wqe(rxq);
1598ca9c54d2SDexuan Cui 	if (err)
1599ca9c54d2SDexuan Cui 		goto out;
1600ca9c54d2SDexuan Cui 
1601be049936SHaiyang Zhang 	if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
1602be049936SHaiyang Zhang 		err = -EINVAL;
1603ca9c54d2SDexuan Cui 		goto out;
1604be049936SHaiyang Zhang 	}
1605ca9c54d2SDexuan Cui 
1606ca9c54d2SDexuan Cui 	gc->cq_table[cq->gdma_id] = cq->gdma_cq;
1607ca9c54d2SDexuan Cui 
1608e1b5683fSHaiyang Zhang 	netif_napi_add(ndev, &cq->napi, mana_poll, 1);
1609ed5356b5SHaiyang Zhang 
1610ed5356b5SHaiyang Zhang 	WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
1611ed5356b5SHaiyang Zhang 				 cq->napi.napi_id));
1612ed5356b5SHaiyang Zhang 	WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
1613ed5356b5SHaiyang Zhang 					   MEM_TYPE_PAGE_SHARED, NULL));
1614ed5356b5SHaiyang Zhang 
1615e1b5683fSHaiyang Zhang 	napi_enable(&cq->napi);
1616e1b5683fSHaiyang Zhang 
1617e1b5683fSHaiyang Zhang 	mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
1618ca9c54d2SDexuan Cui out:
1619ca9c54d2SDexuan Cui 	if (!err)
1620ca9c54d2SDexuan Cui 		return rxq;
1621ca9c54d2SDexuan Cui 
1622ca9c54d2SDexuan Cui 	netdev_err(ndev, "Failed to create RXQ: err = %d\n", err);
1623ca9c54d2SDexuan Cui 
	/* mana_destroy_rxq() also deinits the embedded rx_cq and frees rxq
	 * itself, so a separate mana_deinit_cq(apc, cq) call here would read
	 * freed memory (cq points into rxq).
	 */
1624ca9c54d2SDexuan Cui 	mana_destroy_rxq(apc, rxq, false);
1628ca9c54d2SDexuan Cui 
1629ca9c54d2SDexuan Cui 	return NULL;
1630ca9c54d2SDexuan Cui }
1631ca9c54d2SDexuan Cui 
1632ca9c54d2SDexuan Cui static int mana_add_rx_queues(struct mana_port_context *apc,
1633ca9c54d2SDexuan Cui 			      struct net_device *ndev)
1634ca9c54d2SDexuan Cui {
16351e2d0824SHaiyang Zhang 	struct mana_context *ac = apc->ac;
1636ca9c54d2SDexuan Cui 	struct mana_rxq *rxq;
1637ca9c54d2SDexuan Cui 	int err = 0;
1638ca9c54d2SDexuan Cui 	int i;
1639ca9c54d2SDexuan Cui 
1640ca9c54d2SDexuan Cui 	for (i = 0; i < apc->num_queues; i++) {
16411e2d0824SHaiyang Zhang 		rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
1642ca9c54d2SDexuan Cui 		if (!rxq) {
1643ca9c54d2SDexuan Cui 			err = -ENOMEM;
1644ca9c54d2SDexuan Cui 			goto out;
1645ca9c54d2SDexuan Cui 		}
1646ca9c54d2SDexuan Cui 
1647ca9c54d2SDexuan Cui 		u64_stats_init(&rxq->stats.syncp);
1648ca9c54d2SDexuan Cui 
1649ca9c54d2SDexuan Cui 		apc->rxqs[i] = rxq;
1650ca9c54d2SDexuan Cui 	}
1651ca9c54d2SDexuan Cui 
1652ca9c54d2SDexuan Cui 	apc->default_rxobj = apc->rxqs[0]->rxobj;
1653ca9c54d2SDexuan Cui out:
1654ca9c54d2SDexuan Cui 	return err;
1655ca9c54d2SDexuan Cui }
1656ca9c54d2SDexuan Cui 
1657ca9c54d2SDexuan Cui static void mana_destroy_vport(struct mana_port_context *apc)
1658ca9c54d2SDexuan Cui {
1659ca9c54d2SDexuan Cui 	struct mana_rxq *rxq;
1660ca9c54d2SDexuan Cui 	u32 rxq_idx;
1661ca9c54d2SDexuan Cui 
1662ca9c54d2SDexuan Cui 	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
1663ca9c54d2SDexuan Cui 		rxq = apc->rxqs[rxq_idx];
1664ca9c54d2SDexuan Cui 		if (!rxq)
1665ca9c54d2SDexuan Cui 			continue;
1666ca9c54d2SDexuan Cui 
1667ca9c54d2SDexuan Cui 		mana_destroy_rxq(apc, rxq, true);
1668ca9c54d2SDexuan Cui 		apc->rxqs[rxq_idx] = NULL;
1669ca9c54d2SDexuan Cui 	}
1670ca9c54d2SDexuan Cui 
1671ca9c54d2SDexuan Cui 	mana_destroy_txq(apc);
1672ca9c54d2SDexuan Cui }
1673ca9c54d2SDexuan Cui 
1674ca9c54d2SDexuan Cui static int mana_create_vport(struct mana_port_context *apc,
1675ca9c54d2SDexuan Cui 			     struct net_device *net)
1676ca9c54d2SDexuan Cui {
1677ca9c54d2SDexuan Cui 	struct gdma_dev *gd = apc->ac->gdma_dev;
1678ca9c54d2SDexuan Cui 	int err;
1679ca9c54d2SDexuan Cui 
1680ca9c54d2SDexuan Cui 	apc->default_rxobj = INVALID_MANA_HANDLE;
1681ca9c54d2SDexuan Cui 
1682ca9c54d2SDexuan Cui 	err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
1683ca9c54d2SDexuan Cui 	if (err)
1684ca9c54d2SDexuan Cui 		return err;
1685ca9c54d2SDexuan Cui 
1686ca9c54d2SDexuan Cui 	return mana_create_txq(apc, net);
1687ca9c54d2SDexuan Cui }
1688ca9c54d2SDexuan Cui 
1689ca9c54d2SDexuan Cui static void mana_rss_table_init(struct mana_port_context *apc)
1690ca9c54d2SDexuan Cui {
1691ca9c54d2SDexuan Cui 	int i;
1692ca9c54d2SDexuan Cui 
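	/* ethtool_rxfh_indir_default(i, n) is simply i % n, so with e.g.
	 * four queues the table cycles 0, 1, 2, 3, 0, 1, ... spreading
	 * flows evenly across the RXQs.
	 */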
1693ca9c54d2SDexuan Cui 	for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
1694ca9c54d2SDexuan Cui 		apc->indir_table[i] =
1695ca9c54d2SDexuan Cui 			ethtool_rxfh_indir_default(i, apc->num_queues);
1696ca9c54d2SDexuan Cui }
1697ca9c54d2SDexuan Cui 
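/* Push the RSS configuration (RX enable state, hash key, indirection table)
 * to the vPort. After a steering update, mana_fence_rqs() waits for the RQs
 * to be fenced, presumably so the caller may assume the hardware no longer
 * delivers packets based on the old configuration.
 */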
1698ca9c54d2SDexuan Cui int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
1699ca9c54d2SDexuan Cui 		    bool update_hash, bool update_tab)
1700ca9c54d2SDexuan Cui {
1701ca9c54d2SDexuan Cui 	u32 queue_idx;
17026cc74443SDexuan Cui 	int err;
1703ca9c54d2SDexuan Cui 	int i;
1704ca9c54d2SDexuan Cui 
1705ca9c54d2SDexuan Cui 	if (update_tab) {
1706ca9c54d2SDexuan Cui 		for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
1707ca9c54d2SDexuan Cui 			queue_idx = apc->indir_table[i];
1708ca9c54d2SDexuan Cui 			apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
1709ca9c54d2SDexuan Cui 		}
1710ca9c54d2SDexuan Cui 	}
1711ca9c54d2SDexuan Cui 
17126cc74443SDexuan Cui 	err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
17136cc74443SDexuan Cui 	if (err)
17146cc74443SDexuan Cui 		return err;
17156cc74443SDexuan Cui 
17166cc74443SDexuan Cui 	mana_fence_rqs(apc);
17176cc74443SDexuan Cui 
17186cc74443SDexuan Cui 	return 0;
1719ca9c54d2SDexuan Cui }
1720ca9c54d2SDexuan Cui 
1721ca9c54d2SDexuan Cui static int mana_init_port(struct net_device *ndev)
1722ca9c54d2SDexuan Cui {
1723ca9c54d2SDexuan Cui 	struct mana_port_context *apc = netdev_priv(ndev);
1724ca9c54d2SDexuan Cui 	u32 max_txq, max_rxq, max_queues;
1725ca9c54d2SDexuan Cui 	int port_idx = apc->port_idx;
1726ca9c54d2SDexuan Cui 	u32 num_indirect_entries;
1727ca9c54d2SDexuan Cui 	int err;
1728ca9c54d2SDexuan Cui 
1729ca9c54d2SDexuan Cui 	err = mana_init_port_context(apc);
1730ca9c54d2SDexuan Cui 	if (err)
1731ca9c54d2SDexuan Cui 		return err;
1732ca9c54d2SDexuan Cui 
1733ca9c54d2SDexuan Cui 	err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
1734ca9c54d2SDexuan Cui 				   &num_indirect_entries);
1735ca9c54d2SDexuan Cui 	if (err) {
17366c7ea696SDexuan Cui 		netdev_err(ndev, "Failed to query info for vPort %d\n",
17376c7ea696SDexuan Cui 			   port_idx);
1738ca9c54d2SDexuan Cui 		goto reset_apc;
1739ca9c54d2SDexuan Cui 	}
1740ca9c54d2SDexuan Cui 
1741ca9c54d2SDexuan Cui 	max_queues = min_t(u32, max_txq, max_rxq);
1742ca9c54d2SDexuan Cui 	if (apc->max_queues > max_queues)
1743ca9c54d2SDexuan Cui 		apc->max_queues = max_queues;
1744ca9c54d2SDexuan Cui 
1745ca9c54d2SDexuan Cui 	if (apc->num_queues > apc->max_queues)
1746ca9c54d2SDexuan Cui 		apc->num_queues = apc->max_queues;
1747ca9c54d2SDexuan Cui 
1748f3956ebbSJakub Kicinski 	eth_hw_addr_set(ndev, apc->mac_addr);
1749ca9c54d2SDexuan Cui 
1750ca9c54d2SDexuan Cui 	return 0;
1751ca9c54d2SDexuan Cui 
1752ca9c54d2SDexuan Cui reset_apc:
1753ca9c54d2SDexuan Cui 	kfree(apc->rxqs);
1754ca9c54d2SDexuan Cui 	apc->rxqs = NULL;
1755ca9c54d2SDexuan Cui 	return err;
1756ca9c54d2SDexuan Cui }
1757ca9c54d2SDexuan Cui 
1758ca9c54d2SDexuan Cui int mana_alloc_queues(struct net_device *ndev)
1759ca9c54d2SDexuan Cui {
1760ca9c54d2SDexuan Cui 	struct mana_port_context *apc = netdev_priv(ndev);
1761ca9c54d2SDexuan Cui 	int err;
1762ca9c54d2SDexuan Cui 
1763ca9c54d2SDexuan Cui 	err = mana_create_vport(apc, ndev);
1764ca9c54d2SDexuan Cui 	if (err)
17651e2d0824SHaiyang Zhang 		return err;
1766ca9c54d2SDexuan Cui 
1767ca9c54d2SDexuan Cui 	err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
1768ca9c54d2SDexuan Cui 	if (err)
1769ca9c54d2SDexuan Cui 		goto destroy_vport;
1770ca9c54d2SDexuan Cui 
1771ca9c54d2SDexuan Cui 	err = mana_add_rx_queues(apc, ndev);
1772ca9c54d2SDexuan Cui 	if (err)
1773ca9c54d2SDexuan Cui 		goto destroy_vport;
1774ca9c54d2SDexuan Cui 
1775ca9c54d2SDexuan Cui 	apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
1776ca9c54d2SDexuan Cui 
1777ca9c54d2SDexuan Cui 	err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
1778ca9c54d2SDexuan Cui 	if (err)
1779ca9c54d2SDexuan Cui 		goto destroy_vport;
1780ca9c54d2SDexuan Cui 
1781ca9c54d2SDexuan Cui 	mana_rss_table_init(apc);
1782ca9c54d2SDexuan Cui 
1783ca9c54d2SDexuan Cui 	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
1784ca9c54d2SDexuan Cui 	if (err)
1785ca9c54d2SDexuan Cui 		goto destroy_vport;
1786ca9c54d2SDexuan Cui 
1787ed5356b5SHaiyang Zhang 	mana_chn_setxdp(apc, mana_xdp_get(apc));
1788ed5356b5SHaiyang Zhang 
1789ca9c54d2SDexuan Cui 	return 0;
1790ca9c54d2SDexuan Cui 
1791ca9c54d2SDexuan Cui destroy_vport:
1792ca9c54d2SDexuan Cui 	mana_destroy_vport(apc);
1793ca9c54d2SDexuan Cui 	return err;
1794ca9c54d2SDexuan Cui }
1795ca9c54d2SDexuan Cui 
1796ca9c54d2SDexuan Cui int mana_attach(struct net_device *ndev)
1797ca9c54d2SDexuan Cui {
1798ca9c54d2SDexuan Cui 	struct mana_port_context *apc = netdev_priv(ndev);
1799ca9c54d2SDexuan Cui 	int err;
1800ca9c54d2SDexuan Cui 
1801ca9c54d2SDexuan Cui 	ASSERT_RTNL();
1802ca9c54d2SDexuan Cui 
1803ca9c54d2SDexuan Cui 	err = mana_init_port(ndev);
1804ca9c54d2SDexuan Cui 	if (err)
1805ca9c54d2SDexuan Cui 		return err;
1806ca9c54d2SDexuan Cui 
1807a137c069SHaiyang Zhang 	if (apc->port_st_save) {
1808ca9c54d2SDexuan Cui 		err = mana_alloc_queues(ndev);
1809ca9c54d2SDexuan Cui 		if (err) {
1810a137c069SHaiyang Zhang 			mana_cleanup_port_context(apc);
1811ca9c54d2SDexuan Cui 			return err;
1812ca9c54d2SDexuan Cui 		}
1813a137c069SHaiyang Zhang 	}
1814ca9c54d2SDexuan Cui 
1815ca9c54d2SDexuan Cui 	apc->port_is_up = apc->port_st_save;
1816ca9c54d2SDexuan Cui 
1817ca9c54d2SDexuan Cui 	/* Ensure port state updated before txq state */
1818ca9c54d2SDexuan Cui 	smp_wmb();
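	/* This pairs with the reader side in the TX path (e.g. the smp_rmb()
	 * in mana_poll_tx_cq(), not shown in this excerpt), which is assumed
	 * to look roughly like:
	 *
	 *	if (netif_tx_queue_stopped(net_txq) && apc->port_is_up &&
	 *	    mana_can_tx(gdma_wq)) {
	 *		smp_rmb();
	 *		netif_tx_wake_queue(net_txq);
	 *	}
	 */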
1819ca9c54d2SDexuan Cui 
1820a137c069SHaiyang Zhang 	if (apc->port_is_up)
1821ca9c54d2SDexuan Cui 		netif_carrier_on(ndev);
1822a137c069SHaiyang Zhang 
1823a137c069SHaiyang Zhang 	netif_device_attach(ndev);
1824ca9c54d2SDexuan Cui 
1825ca9c54d2SDexuan Cui 	return 0;
1826ca9c54d2SDexuan Cui }
1827ca9c54d2SDexuan Cui 
1828ca9c54d2SDexuan Cui static int mana_dealloc_queues(struct net_device *ndev)
1829ca9c54d2SDexuan Cui {
1830ca9c54d2SDexuan Cui 	struct mana_port_context *apc = netdev_priv(ndev);
1831ca9c54d2SDexuan Cui 	struct mana_txq *txq;
1832ca9c54d2SDexuan Cui 	int i, err;
1833ca9c54d2SDexuan Cui 
1834ca9c54d2SDexuan Cui 	if (apc->port_is_up)
1835ca9c54d2SDexuan Cui 		return -EINVAL;
1836ca9c54d2SDexuan Cui 
1837ed5356b5SHaiyang Zhang 	mana_chn_setxdp(apc, NULL);
1838ed5356b5SHaiyang Zhang 
1839ca9c54d2SDexuan Cui 	/* No packet can be transmitted now since apc->port_is_up is false.
1840ca9c54d2SDexuan Cui 	 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
1841ca9c54d2SDexuan Cui 	 * a txq because it may not see apc->port_is_up being cleared to
1842ca9c54d2SDexuan Cui 	 * false in time, but that doesn't matter since mana_start_xmit()
1843ca9c54d2SDexuan Cui 	 * drops any new packets due to apc->port_is_up being false.
1844ca9c54d2SDexuan Cui 	 *
1845ca9c54d2SDexuan Cui 	 * Drain all the in-flight TX packets.
1846ca9c54d2SDexuan Cui 	 */
1847ca9c54d2SDexuan Cui 	for (i = 0; i < apc->num_queues; i++) {
1848ca9c54d2SDexuan Cui 		txq = &apc->tx_qp[i].txq;
1849ca9c54d2SDexuan Cui 
1850ca9c54d2SDexuan Cui 		while (atomic_read(&txq->pending_sends) > 0)
1851ca9c54d2SDexuan Cui 			usleep_range(1000, 2000);
1852ca9c54d2SDexuan Cui 	}
1853ca9c54d2SDexuan Cui 
1854ca9c54d2SDexuan Cui 	/* The queues can no longer be woken up, because we are now sure
1855ca9c54d2SDexuan Cui 	 * that mana_poll_tx_cq() cannot be running.
1856ca9c54d2SDexuan Cui 	 */
1857ca9c54d2SDexuan Cui 
1858ca9c54d2SDexuan Cui 	apc->rss_state = TRI_STATE_FALSE;
1859ca9c54d2SDexuan Cui 	err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
1860ca9c54d2SDexuan Cui 	if (err) {
1861ca9c54d2SDexuan Cui 		netdev_err(ndev, "Failed to disable vPort: %d\n", err);
1862ca9c54d2SDexuan Cui 		return err;
1863ca9c54d2SDexuan Cui 	}
1864ca9c54d2SDexuan Cui 
1865ca9c54d2SDexuan Cui 	mana_destroy_vport(apc);
1866ca9c54d2SDexuan Cui 
1867ca9c54d2SDexuan Cui 	return 0;
1868ca9c54d2SDexuan Cui }
1869ca9c54d2SDexuan Cui 
1870ca9c54d2SDexuan Cui int mana_detach(struct net_device *ndev, bool from_close)
1871ca9c54d2SDexuan Cui {
1872ca9c54d2SDexuan Cui 	struct mana_port_context *apc = netdev_priv(ndev);
1873ca9c54d2SDexuan Cui 	int err;
1874ca9c54d2SDexuan Cui 
1875ca9c54d2SDexuan Cui 	ASSERT_RTNL();
1876ca9c54d2SDexuan Cui 
1877ca9c54d2SDexuan Cui 	apc->port_st_save = apc->port_is_up;
1878ca9c54d2SDexuan Cui 	apc->port_is_up = false;
1879ca9c54d2SDexuan Cui 
1880ca9c54d2SDexuan Cui 	/* Ensure port state updated before txq state */
1881ca9c54d2SDexuan Cui 	smp_wmb();
1882ca9c54d2SDexuan Cui 
1883ca9c54d2SDexuan Cui 	netif_tx_disable(ndev);
1884ca9c54d2SDexuan Cui 	netif_carrier_off(ndev);
1885ca9c54d2SDexuan Cui 
1886ca9c54d2SDexuan Cui 	if (apc->port_st_save) {
1887ca9c54d2SDexuan Cui 		err = mana_dealloc_queues(ndev);
1888ca9c54d2SDexuan Cui 		if (err)
1889ca9c54d2SDexuan Cui 			return err;
1890ca9c54d2SDexuan Cui 	}
1891ca9c54d2SDexuan Cui 
1892ca9c54d2SDexuan Cui 	if (!from_close) {
1893ca9c54d2SDexuan Cui 		netif_device_detach(ndev);
1894ca9c54d2SDexuan Cui 		mana_cleanup_port_context(apc);
1895ca9c54d2SDexuan Cui 	}
1896ca9c54d2SDexuan Cui 
1897ca9c54d2SDexuan Cui 	return 0;
1898ca9c54d2SDexuan Cui }
1899ca9c54d2SDexuan Cui 
1900ca9c54d2SDexuan Cui static int mana_probe_port(struct mana_context *ac, int port_idx,
1901ca9c54d2SDexuan Cui 			   struct net_device **ndev_storage)
1902ca9c54d2SDexuan Cui {
1903ca9c54d2SDexuan Cui 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
1904ca9c54d2SDexuan Cui 	struct mana_port_context *apc;
1905ca9c54d2SDexuan Cui 	struct net_device *ndev;
1906ca9c54d2SDexuan Cui 	int err;
1907ca9c54d2SDexuan Cui 
1908ca9c54d2SDexuan Cui 	ndev = alloc_etherdev_mq(sizeof(struct mana_port_context),
1909ca9c54d2SDexuan Cui 				 gc->max_num_queues);
1910ca9c54d2SDexuan Cui 	if (!ndev)
1911ca9c54d2SDexuan Cui 		return -ENOMEM;
1912ca9c54d2SDexuan Cui 
1913ca9c54d2SDexuan Cui 	*ndev_storage = ndev;
1914ca9c54d2SDexuan Cui 
1915ca9c54d2SDexuan Cui 	apc = netdev_priv(ndev);
1916ca9c54d2SDexuan Cui 	apc->ac = ac;
1917ca9c54d2SDexuan Cui 	apc->ndev = ndev;
1918ca9c54d2SDexuan Cui 	apc->max_queues = gc->max_num_queues;
19191e2d0824SHaiyang Zhang 	apc->num_queues = gc->max_num_queues;
1920ca9c54d2SDexuan Cui 	apc->port_handle = INVALID_MANA_HANDLE;
1921ca9c54d2SDexuan Cui 	apc->port_idx = port_idx;
1922ca9c54d2SDexuan Cui 
1923ca9c54d2SDexuan Cui 	ndev->netdev_ops = &mana_devops;
1924ca9c54d2SDexuan Cui 	ndev->ethtool_ops = &mana_ethtool_ops;
1925ca9c54d2SDexuan Cui 	ndev->mtu = ETH_DATA_LEN;
1926ca9c54d2SDexuan Cui 	ndev->max_mtu = ndev->mtu;
1927ca9c54d2SDexuan Cui 	ndev->min_mtu = ndev->mtu;
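	/* With min_mtu == max_mtu == ETH_DATA_LEN, the MTU is effectively
	 * pinned at 1500: the networking core rejects any change outside
	 * the [min_mtu, max_mtu] range.
	 */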
1928ca9c54d2SDexuan Cui 	ndev->needed_headroom = MANA_HEADROOM;
1929ca9c54d2SDexuan Cui 	SET_NETDEV_DEV(ndev, gc->dev);
1930ca9c54d2SDexuan Cui 
1931ca9c54d2SDexuan Cui 	netif_carrier_off(ndev);
1932ca9c54d2SDexuan Cui 
1933ca9c54d2SDexuan Cui 	netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
1934ca9c54d2SDexuan Cui 
1935ca9c54d2SDexuan Cui 	err = mana_init_port(ndev);
1936ca9c54d2SDexuan Cui 	if (err)
1937ca9c54d2SDexuan Cui 		goto free_net;
1938ca9c54d2SDexuan Cui 
1939ca9c54d2SDexuan Cui 	netdev_lockdep_set_classes(ndev);
1940ca9c54d2SDexuan Cui 
1941ca9c54d2SDexuan Cui 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1942ca9c54d2SDexuan Cui 	ndev->hw_features |= NETIF_F_RXCSUM;
1943ca9c54d2SDexuan Cui 	ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
1944ca9c54d2SDexuan Cui 	ndev->hw_features |= NETIF_F_RXHASH;
1945ca9c54d2SDexuan Cui 	ndev->features = ndev->hw_features;
1946ca9c54d2SDexuan Cui 	ndev->vlan_features = 0;
1947ca9c54d2SDexuan Cui 
1948ca9c54d2SDexuan Cui 	err = register_netdev(ndev);
1949ca9c54d2SDexuan Cui 	if (err) {
1950ca9c54d2SDexuan Cui 		netdev_err(ndev, "Unable to register netdev.\n");
1951ca9c54d2SDexuan Cui 		goto reset_apc;
1952ca9c54d2SDexuan Cui 	}
1953ca9c54d2SDexuan Cui 
1954ca9c54d2SDexuan Cui 	return 0;
1955ca9c54d2SDexuan Cui 
1956ca9c54d2SDexuan Cui reset_apc:
1957ca9c54d2SDexuan Cui 	kfree(apc->rxqs);
1958ca9c54d2SDexuan Cui 	apc->rxqs = NULL;
1959ca9c54d2SDexuan Cui free_net:
1960ca9c54d2SDexuan Cui 	*ndev_storage = NULL;
1961ca9c54d2SDexuan Cui 	netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
1962ca9c54d2SDexuan Cui 	free_netdev(ndev);
1963ca9c54d2SDexuan Cui 	return err;
1964ca9c54d2SDexuan Cui }
1965ca9c54d2SDexuan Cui 
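/* Called both at initial probe (resuming == false) and on resume
 * (resuming == true). On resume, the existing mana_context and registered
 * netdevs are kept: the ports are simply re-attached, and a changed vPort
 * count is treated as a protocol error.
 */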
1966635096a8SDexuan Cui int mana_probe(struct gdma_dev *gd, bool resuming)
1967ca9c54d2SDexuan Cui {
1968ca9c54d2SDexuan Cui 	struct gdma_context *gc = gd->gdma_context;
1969635096a8SDexuan Cui 	struct mana_context *ac = gd->driver_data;
1970ca9c54d2SDexuan Cui 	struct device *dev = gc->dev;
1971635096a8SDexuan Cui 	u16 num_ports = 0;
1972ca9c54d2SDexuan Cui 	int err;
1973ca9c54d2SDexuan Cui 	int i;
1974ca9c54d2SDexuan Cui 
1975ca9c54d2SDexuan Cui 	dev_info(dev,
1976ca9c54d2SDexuan Cui 		 "Microsoft Azure Network Adapter protocol version: %d.%d.%d\n",
1977ca9c54d2SDexuan Cui 		 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
1978ca9c54d2SDexuan Cui 
1979ca9c54d2SDexuan Cui 	err = mana_gd_register_device(gd);
1980ca9c54d2SDexuan Cui 	if (err)
1981ca9c54d2SDexuan Cui 		return err;
1982ca9c54d2SDexuan Cui 
1983635096a8SDexuan Cui 	if (!resuming) {
1984ca9c54d2SDexuan Cui 		ac = kzalloc(sizeof(*ac), GFP_KERNEL);
1985ca9c54d2SDexuan Cui 		if (!ac) {
			mana_gd_deregister_device(gd);
1986ca9c54d2SDexuan Cui 			return -ENOMEM;
		}
1987ca9c54d2SDexuan Cui 
1988ca9c54d2SDexuan Cui 		ac->gdma_dev = gd;
1989ca9c54d2SDexuan Cui 		gd->driver_data = ac;
1990635096a8SDexuan Cui 	}
1991ca9c54d2SDexuan Cui 
19921e2d0824SHaiyang Zhang 	err = mana_create_eq(ac);
19931e2d0824SHaiyang Zhang 	if (err)
19941e2d0824SHaiyang Zhang 		goto out;
19951e2d0824SHaiyang Zhang 
1996ca9c54d2SDexuan Cui 	err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
1997635096a8SDexuan Cui 				    MANA_MICRO_VERSION, &num_ports);
1998ca9c54d2SDexuan Cui 	if (err)
1999ca9c54d2SDexuan Cui 		goto out;
2000ca9c54d2SDexuan Cui 
2001635096a8SDexuan Cui 	if (!resuming) {
2002635096a8SDexuan Cui 		ac->num_ports = num_ports;
2003635096a8SDexuan Cui 	} else {
2004635096a8SDexuan Cui 		if (ac->num_ports != num_ports) {
2005635096a8SDexuan Cui 			dev_err(dev, "The number of vPorts changed: %d->%d\n",
2006635096a8SDexuan Cui 				ac->num_ports, num_ports);
2007635096a8SDexuan Cui 			err = -EPROTO;
2008635096a8SDexuan Cui 			goto out;
2009635096a8SDexuan Cui 		}
2010635096a8SDexuan Cui 	}
2011635096a8SDexuan Cui 
2012635096a8SDexuan Cui 	if (ac->num_ports == 0)
2013635096a8SDexuan Cui 		dev_err(dev, "Failed to detect any vPort\n");
2014635096a8SDexuan Cui 
2015ca9c54d2SDexuan Cui 	if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
2016ca9c54d2SDexuan Cui 		ac->num_ports = MAX_PORTS_IN_MANA_DEV;
2017ca9c54d2SDexuan Cui 
2018635096a8SDexuan Cui 	if (!resuming) {
2019ca9c54d2SDexuan Cui 		for (i = 0; i < ac->num_ports; i++) {
2020ca9c54d2SDexuan Cui 			err = mana_probe_port(ac, i, &ac->ports[i]);
2021ca9c54d2SDexuan Cui 			if (err)
2022ca9c54d2SDexuan Cui 				break;
2023ca9c54d2SDexuan Cui 		}
2024635096a8SDexuan Cui 	} else {
2025635096a8SDexuan Cui 		for (i = 0; i < ac->num_ports; i++) {
2026635096a8SDexuan Cui 			rtnl_lock();
2027635096a8SDexuan Cui 			err = mana_attach(ac->ports[i]);
2028635096a8SDexuan Cui 			rtnl_unlock();
2029635096a8SDexuan Cui 			if (err)
2030635096a8SDexuan Cui 				break;
2031635096a8SDexuan Cui 		}
2032635096a8SDexuan Cui 	}
2033ca9c54d2SDexuan Cui out:
2034ca9c54d2SDexuan Cui 	if (err)
2035635096a8SDexuan Cui 		mana_remove(gd, false);
2036ca9c54d2SDexuan Cui 
2037ca9c54d2SDexuan Cui 	return err;
2038ca9c54d2SDexuan Cui }
2039ca9c54d2SDexuan Cui 
2040635096a8SDexuan Cui void mana_remove(struct gdma_dev *gd, bool suspending)
2041ca9c54d2SDexuan Cui {
2042ca9c54d2SDexuan Cui 	struct gdma_context *gc = gd->gdma_context;
2043ca9c54d2SDexuan Cui 	struct mana_context *ac = gd->driver_data;
2044ca9c54d2SDexuan Cui 	struct device *dev = gc->dev;
2045ca9c54d2SDexuan Cui 	struct net_device *ndev;
2046635096a8SDexuan Cui 	int err;
2047ca9c54d2SDexuan Cui 	int i;
2048ca9c54d2SDexuan Cui 
2049ca9c54d2SDexuan Cui 	for (i = 0; i < ac->num_ports; i++) {
2050ca9c54d2SDexuan Cui 		ndev = ac->ports[i];
2051ca9c54d2SDexuan Cui 		if (!ndev) {
2052ca9c54d2SDexuan Cui 			if (i == 0)
2053ca9c54d2SDexuan Cui 				dev_err(dev, "No net device to remove\n");
2054ca9c54d2SDexuan Cui 			goto out;
2055ca9c54d2SDexuan Cui 		}
2056ca9c54d2SDexuan Cui 
2057ca9c54d2SDexuan Cui 		/* All cleanup actions must stay after rtnl_lock(); otherwise
2058ca9c54d2SDexuan Cui 		 * other functions may access partially cleaned-up data.
2059ca9c54d2SDexuan Cui 		 */
2060ca9c54d2SDexuan Cui 		rtnl_lock();
2061ca9c54d2SDexuan Cui 
2062635096a8SDexuan Cui 		err = mana_detach(ndev, false);
2063635096a8SDexuan Cui 		if (err)
2064635096a8SDexuan Cui 			netdev_err(ndev, "Failed to detach vPort %d: %d\n",
2065635096a8SDexuan Cui 				   i, err);
2066635096a8SDexuan Cui 
2067635096a8SDexuan Cui 		if (suspending) {
2068635096a8SDexuan Cui 			/* No need to unregister the ndev. */
2069635096a8SDexuan Cui 			rtnl_unlock();
2070635096a8SDexuan Cui 			continue;
2071635096a8SDexuan Cui 		}
2072ca9c54d2SDexuan Cui 
2073ca9c54d2SDexuan Cui 		unregister_netdevice(ndev);
2074ca9c54d2SDexuan Cui 
2075ca9c54d2SDexuan Cui 		rtnl_unlock();
2076ca9c54d2SDexuan Cui 
2077ca9c54d2SDexuan Cui 		free_netdev(ndev);
2078ca9c54d2SDexuan Cui 	}
20791e2d0824SHaiyang Zhang 
20801e2d0824SHaiyang Zhang 	mana_destroy_eq(ac);
20811e2d0824SHaiyang Zhang 
2082ca9c54d2SDexuan Cui out:
2083ca9c54d2SDexuan Cui 	mana_gd_deregister_device(gd);
2084635096a8SDexuan Cui 
2085635096a8SDexuan Cui 	if (suspending)
2086635096a8SDexuan Cui 		return;
2087635096a8SDexuan Cui 
2088ca9c54d2SDexuan Cui 	gd->driver_data = NULL;
2089ca9c54d2SDexuan Cui 	gd->gdma_context = NULL;
2090ca9c54d2SDexuan Cui 	kfree(ac);
2091ca9c54d2SDexuan Cui }