// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <uapi/linux/bpf.h>

#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/pci.h>

#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>

#include <net/mana/mana.h>
#include <net/mana/mana_auxiliary.h>

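/* IDA handing out unique ids for MANA auxiliary devices (assumed to be the
 * auxiliary-bus clients, e.g. an RDMA driver; see mana_auxiliary.h).
 */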
static DEFINE_IDA(mana_adev_ida);

static int mana_adev_idx_alloc(void)
{
	return ida_alloc(&mana_adev_ida, GFP_KERNEL);
}

static void mana_adev_idx_free(int idx)
{
	ida_free(&mana_adev_ida, idx);
}

/* Microsoft Azure Network Adapter (MANA) functions */

static int mana_open(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	int err;

	err = mana_alloc_queues(ndev);
	if (err)
		return err;

	apc->port_is_up = true;

	/* Ensure port state updated before txq state */
	smp_wmb();

	netif_carrier_on(ndev);
	netif_tx_wake_all_queues(ndev);

	return 0;
}

static int mana_close(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	if (!apc->port_is_up)
		return 0;

	return mana_detach(ndev, true);
}

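/* A send queue can accept another packet only if it still has room for a
 * maximum-size TX WQE.
 */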
static bool mana_can_tx(struct gdma_queue *wq)
{
	return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
}

static unsigned int mana_checksum_info(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return IPPROTO_TCP;

		if (ip->protocol == IPPROTO_UDP)
			return IPPROTO_UDP;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6 = ipv6_hdr(skb);

		if (ip6->nexthdr == IPPROTO_TCP)
			return IPPROTO_TCP;

		if (ip6->nexthdr == IPPROTO_UDP)
			return IPPROTO_UDP;
	}

	/* No csum offloading */
	return 0;
}

static void mana_add_sge(struct mana_tx_package *tp, struct mana_skb_head *ash,
			 int sg_i, dma_addr_t da, int sge_len, u32 gpa_mkey)
{
	ash->dma_handle[sg_i] = da;
	ash->size[sg_i] = sge_len;

	tp->wqe_req.sgl[sg_i].address = da;
	tp->wqe_req.sgl[sg_i].mem_key = gpa_mkey;
	tp->wqe_req.sgl[sg_i].size = sge_len;
}

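/* DMA-map the skb into tp->wqe_req.sgl, recording each mapping in the
 * mana_skb_head kept at skb->head: SGE0 (plus SGE1 for a long GSO head)
 * covers the linear part, followed by one SGE per page fragment.
 */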
static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
			struct mana_tx_package *tp, int gso_hs)
{
	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
	int hsg = 1; /* num of SGEs of linear part */
	struct gdma_dev *gd = apc->ac->gdma_dev;
	int skb_hlen = skb_headlen(skb);
	int sge0_len, sge1_len = 0;
	struct gdma_context *gc;
	struct device *dev;
	skb_frag_t *frag;
	dma_addr_t da;
	int sg_i;
	int i;

	gc = gd->gdma_context;
	dev = gc->dev;

	if (gso_hs && gso_hs < skb_hlen) {
		sge0_len = gso_hs;
		sge1_len = skb_hlen - gso_hs;
	} else {
		sge0_len = skb_hlen;
	}

	da = dma_map_single(dev, skb->data, sge0_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, da))
		return -ENOMEM;

	mana_add_sge(tp, ash, 0, da, sge0_len, gd->gpa_mkey);

	if (sge1_len) {
		sg_i = 1;
		da = dma_map_single(dev, skb->data + sge0_len, sge1_len,
				    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, da))
			goto frag_err;

		mana_add_sge(tp, ash, sg_i, da, sge1_len, gd->gpa_mkey);
		hsg = 2;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		sg_i = hsg + i;

		frag = &skb_shinfo(skb)->frags[i];
		da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (dma_mapping_error(dev, da))
			goto frag_err;

		mana_add_sge(tp, ash, sg_i, da, skb_frag_size(frag),
			     gd->gpa_mkey);
	}

	return 0;

frag_err:
	for (i = sg_i - 1; i >= hsg; i--)
		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
			       DMA_TO_DEVICE);

	for (i = hsg - 1; i >= 0; i--)
		dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
				 DMA_TO_DEVICE);

	return -ENOMEM;
}

/* Handle the case when the GSO SKB linear length is too large.
 * The MANA NIC requires GSO packets to put only the packet header in SGE0.
 * So we need 2 SGEs for an skb linear part that contains more than the
 * header.
 * Return a positive value for the number of SGEs, or a negative value
 * for an error.
 */
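/* Example (hypothetical sizes): for a GSO skb with gso_hs == 54 and
 * skb_headlen() == 200, mana_map_skb() puts the 54 header bytes in SGE0 and
 * the remaining 146 linear bytes in SGE1, so num_sge becomes 2 + nr_frags.
 */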
static int mana_fix_skb_head(struct net_device *ndev, struct sk_buff *skb,
			     int gso_hs)
{
	int num_sge = 1 + skb_shinfo(skb)->nr_frags;
	int skb_hlen = skb_headlen(skb);

	if (gso_hs < skb_hlen) {
		num_sge++;
	} else if (gso_hs > skb_hlen) {
		if (net_ratelimit())
			netdev_err(ndev,
				   "TX nonlinear head: hs:%d, skb_hlen:%d\n",
				   gso_hs, skb_hlen);

		return -EINVAL;
	}

	return num_sge;
}

/* Get the GSO packet's header size */
static int mana_get_gso_hs(struct sk_buff *skb)
{
	int gso_hs;

	if (skb->encapsulation) {
		gso_hs = skb_inner_tcp_all_headers(skb);
	} else {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
			gso_hs = skb_transport_offset(skb) +
				 sizeof(struct udphdr);
		} else {
			gso_hs = skb_tcp_all_headers(skb);
		}
	}

	return gso_hs;
}

netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
	struct mana_port_context *apc = netdev_priv(ndev);
	int gso_hs = 0; /* zero for non-GSO pkts */
	u16 txq_idx = skb_get_queue_mapping(skb);
	struct gdma_dev *gd = apc->ac->gdma_dev;
	bool ipv4 = false, ipv6 = false;
	struct mana_tx_package pkg = {};
	struct netdev_queue *net_txq;
	struct mana_stats_tx *tx_stats;
	struct gdma_queue *gdma_sq;
	unsigned int csum_type;
	struct mana_txq *txq;
	struct mana_cq *cq;
	int err, len;

	if (unlikely(!apc->port_is_up))
		goto tx_drop;

	if (skb_cow_head(skb, MANA_HEADROOM))
		goto tx_drop_count;

	txq = &apc->tx_qp[txq_idx].txq;
	gdma_sq = txq->gdma_sq;
	cq = &apc->tx_qp[txq_idx].tx_cq;
	tx_stats = &txq->stats;

	pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
	pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;

	if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
		pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
		pkt_fmt = MANA_LONG_PKT_FMT;
	} else {
		pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
	}

	if (skb_vlan_tag_present(skb)) {
		pkt_fmt = MANA_LONG_PKT_FMT;
		pkg.tx_oob.l_oob.inject_vlan_pri_tag = 1;
		pkg.tx_oob.l_oob.pcp = skb_vlan_tag_get_prio(skb);
		pkg.tx_oob.l_oob.dei = skb_vlan_tag_get_cfi(skb);
		pkg.tx_oob.l_oob.vlan_id = skb_vlan_tag_get_id(skb);
	}

	pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;

	if (pkt_fmt == MANA_SHORT_PKT_FMT) {
		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->short_pkt_fmt++;
		u64_stats_update_end(&tx_stats->syncp);
	} else {
		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->long_pkt_fmt++;
		u64_stats_update_end(&tx_stats->syncp);
	}

	pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
	pkg.wqe_req.flags = 0;
	pkg.wqe_req.client_data_unit = 0;

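	/* Default SGE layout: one SGE for the linear part plus one per page
	 * frag. The GSO path below may add one more if the linear part has
	 * to be split (see mana_fix_skb_head()).
	 */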
	pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;

	if (skb->protocol == htons(ETH_P_IP))
		ipv4 = true;
	else if (skb->protocol == htons(ETH_P_IPV6))
		ipv6 = true;

	if (skb_is_gso(skb)) {
		int num_sge;

		gso_hs = mana_get_gso_hs(skb);

		num_sge = mana_fix_skb_head(ndev, skb, gso_hs);
		if (num_sge > 0)
			pkg.wqe_req.num_sge = num_sge;
		else
			goto tx_drop_count;

		u64_stats_update_begin(&tx_stats->syncp);
		if (skb->encapsulation) {
			tx_stats->tso_inner_packets++;
			tx_stats->tso_inner_bytes += skb->len - gso_hs;
		} else {
			tx_stats->tso_packets++;
			tx_stats->tso_bytes += skb->len - gso_hs;
		}
		u64_stats_update_end(&tx_stats->syncp);

		pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
		pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;

		pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
		pkg.tx_oob.s_oob.comp_tcp_csum = 1;
		pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);

		pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size;
		pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
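		/* For TSO, seed the TCP checksum with the pseudo-header sum
		 * computed over a zero length; the hardware completes the
		 * checksum for each generated segment.
		 */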
		if (ipv4) {
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0,
						   IPPROTO_TCP, 0);
		} else {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0,
						 IPPROTO_TCP, 0);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		csum_type = mana_checksum_info(skb);

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->csum_partial++;
		u64_stats_update_end(&tx_stats->syncp);

		if (csum_type == IPPROTO_TCP) {
			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;

			pkg.tx_oob.s_oob.comp_tcp_csum = 1;
			pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);

		} else if (csum_type == IPPROTO_UDP) {
			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;

			pkg.tx_oob.s_oob.comp_udp_csum = 1;
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto tx_drop_count;
		}
	}

	WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);

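	/* Use the SGL array embedded in the tx package when it is large
	 * enough; otherwise allocate one (GFP_ATOMIC: this is the xmit path).
	 */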
	if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
		pkg.wqe_req.sgl = pkg.sgl_array;
	} else {
		pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
					    sizeof(struct gdma_sge),
					    GFP_ATOMIC);
		if (!pkg.sgl_ptr)
			goto tx_drop_count;

		pkg.wqe_req.sgl = pkg.sgl_ptr;
	}

	if (mana_map_skb(skb, apc, &pkg, gso_hs)) {
		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->mana_map_err++;
		u64_stats_update_end(&tx_stats->syncp);
		goto free_sgl_ptr;
	}

	skb_queue_tail(&txq->pending_skbs, skb);

	len = skb->len;
	net_txq = netdev_get_tx_queue(ndev, txq_idx);

	err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
					(struct gdma_posted_wqe_info *)skb->cb);
	if (!mana_can_tx(gdma_sq)) {
		netif_tx_stop_queue(net_txq);
		apc->eth_stats.stop_queue++;
	}

	if (err) {
		(void)skb_dequeue_tail(&txq->pending_skbs);
		netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
		err = NETDEV_TX_BUSY;
		goto tx_busy;
	}

	err = NETDEV_TX_OK;
	atomic_inc(&txq->pending_sends);

	mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);

	/* skb may be freed after mana_gd_post_work_request. Do not use it. */
	skb = NULL;

	tx_stats = &txq->stats;
	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->packets++;
	tx_stats->bytes += len;
	u64_stats_update_end(&tx_stats->syncp);

tx_busy:
	if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
		netif_tx_wake_queue(net_txq);
		apc->eth_stats.wake_queue++;
	}

	kfree(pkg.sgl_ptr);
	return err;

free_sgl_ptr:
	kfree(pkg.sgl_ptr);
tx_drop_count:
	ndev->stats.tx_dropped++;
tx_drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static void mana_get_stats64(struct net_device *ndev,
			     struct rtnl_link_stats64 *st)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;
	struct mana_stats_rx *rx_stats;
	struct mana_stats_tx *tx_stats;
	unsigned int start;
	u64 packets, bytes;
	int q;

	if (!apc->port_is_up)
		return;

	netdev_stats_to_stats64(st, &ndev->stats);

	for (q = 0; q < num_queues; q++) {
		rx_stats = &apc->rxqs[q]->stats;

		do {
			start = u64_stats_fetch_begin(&rx_stats->syncp);
			packets = rx_stats->packets;
			bytes = rx_stats->bytes;
		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));

		st->rx_packets += packets;
		st->rx_bytes += bytes;
	}

	for (q = 0; q < num_queues; q++) {
		tx_stats = &apc->tx_qp[q].txq.stats;

		do {
			start = u64_stats_fetch_begin(&tx_stats->syncp);
			packets = tx_stats->packets;
			bytes = tx_stats->bytes;
		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));

		st->tx_packets += packets;
		st->tx_bytes += bytes;
	}
}

static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb,
			     int old_q)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	u32 hash = skb_get_hash(skb);
	struct sock *sk = skb->sk;
	int txq;

	txq = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK];

	if (txq != old_q && sk && sk_fullsock(sk) &&
	    rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, txq);

	return txq;
}

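/* ndo_select_queue: reuse the socket's cached TX queue when it is valid,
 * else the recorded RX queue, else hash into the RSS indirection table via
 * mana_get_tx_queue().
 */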
static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
			     struct net_device *sb_dev)
{
	int txq;

	if (ndev->real_num_tx_queues == 1)
		return 0;

	txq = sk_tx_queue_get(skb->sk);

	if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) {
		if (skb_rx_queue_recorded(skb))
			txq = skb_get_rx_queue(skb);
		else
			txq = mana_get_tx_queue(ndev, skb, txq);
	}

	return txq;
}

/* Release pre-allocated RX buffers */
static void mana_pre_dealloc_rxbufs(struct mana_port_context *mpc)
{
	struct device *dev;
	int i;

	dev = mpc->ac->gdma_dev->gdma_context->dev;

	if (!mpc->rxbufs_pre)
		goto out1;

	if (!mpc->das_pre)
		goto out2;

	while (mpc->rxbpre_total) {
		i = --mpc->rxbpre_total;
		dma_unmap_single(dev, mpc->das_pre[i], mpc->rxbpre_datasize,
				 DMA_FROM_DEVICE);
		put_page(virt_to_head_page(mpc->rxbufs_pre[i]));
	}

	kfree(mpc->das_pre);
	mpc->das_pre = NULL;

out2:
	kfree(mpc->rxbufs_pre);
	mpc->rxbufs_pre = NULL;

out1:
	mpc->rxbpre_datasize = 0;
	mpc->rxbpre_alloc_size = 0;
	mpc->rxbpre_headroom = 0;
}

/* Get a buffer from the pre-allocated RX buffers */
static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da)
{
	struct net_device *ndev = rxq->ndev;
	struct mana_port_context *mpc;
	void *va;

	mpc = netdev_priv(ndev);

	if (!mpc->rxbufs_pre || !mpc->das_pre || !mpc->rxbpre_total) {
		netdev_err(ndev, "No RX pre-allocated bufs\n");
		return NULL;
	}

	/* Check sizes to catch unexpected coding errors */
	if (mpc->rxbpre_datasize != rxq->datasize) {
		netdev_err(ndev, "rxbpre_datasize mismatch: %u: %u\n",
			   mpc->rxbpre_datasize, rxq->datasize);
		return NULL;
	}

	if (mpc->rxbpre_alloc_size != rxq->alloc_size) {
		netdev_err(ndev, "rxbpre_alloc_size mismatch: %u: %u\n",
			   mpc->rxbpre_alloc_size, rxq->alloc_size);
		return NULL;
	}

	if (mpc->rxbpre_headroom != rxq->headroom) {
		netdev_err(ndev, "rxbpre_headroom mismatch: %u: %u\n",
			   mpc->rxbpre_headroom, rxq->headroom);
		return NULL;
	}

	mpc->rxbpre_total--;

	*da = mpc->das_pre[mpc->rxbpre_total];
	va = mpc->rxbufs_pre[mpc->rxbpre_total];
	mpc->rxbufs_pre[mpc->rxbpre_total] = NULL;

	/* Deallocate the array after all buffers are gone */
	if (!mpc->rxbpre_total)
		mana_pre_dealloc_rxbufs(mpc);

	return va;
}

/* Get RX buffer's data size, alloc size, XDP headroom based on MTU */
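/* Example (illustrative numbers, assuming a standard 1500-byte MTU within
 * MANA_XDP_MTU_MAX): headroom = XDP_PACKET_HEADROOM, alloc_size = 1500 +
 * MANA_RXBUF_PAD + headroom, datasize = 1500 + ETH_HLEN = 1514.
 */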
static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
			       u32 *headroom)
{
	if (mtu > MANA_XDP_MTU_MAX)
		*headroom = 0; /* no support for XDP */
	else
		*headroom = XDP_PACKET_HEADROOM;

	*alloc_size = mtu + MANA_RXBUF_PAD + *headroom;

	*datasize = mtu + ETH_HLEN;
}

static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
{
	struct device *dev;
	struct page *page;
	dma_addr_t da;
	int num_rxb;
	void *va;
	int i;

	mana_get_rxbuf_cfg(new_mtu, &mpc->rxbpre_datasize,
			   &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom);

	dev = mpc->ac->gdma_dev->gdma_context->dev;

	num_rxb = mpc->num_queues * RX_BUFFERS_PER_QUEUE;

	WARN(mpc->rxbufs_pre, "mana rxbufs_pre exists\n");
	mpc->rxbufs_pre = kmalloc_array(num_rxb, sizeof(void *), GFP_KERNEL);
	if (!mpc->rxbufs_pre)
		goto error;

	mpc->das_pre = kmalloc_array(num_rxb, sizeof(dma_addr_t), GFP_KERNEL);
	if (!mpc->das_pre)
		goto error;

	mpc->rxbpre_total = 0;

	for (i = 0; i < num_rxb; i++) {
		if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
			va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
			if (!va)
				goto error;

			page = virt_to_head_page(va);
			/* Check if the frag falls back to single page */
			if (compound_order(page) <
			    get_order(mpc->rxbpre_alloc_size)) {
				put_page(page);
				goto error;
			}
		} else {
			page = dev_alloc_page();
			if (!page)
				goto error;

			va = page_to_virt(page);
		}

		da = dma_map_single(dev, va + mpc->rxbpre_headroom,
				    mpc->rxbpre_datasize, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, da)) {
			put_page(virt_to_head_page(va));
			goto error;
		}

		mpc->rxbufs_pre[i] = va;
		mpc->das_pre[i] = da;
		mpc->rxbpre_total = i + 1;
	}

	return 0;

error:
	mana_pre_dealloc_rxbufs(mpc);
	return -ENOMEM;
}

static int mana_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct mana_port_context *mpc = netdev_priv(ndev);
	unsigned int old_mtu = ndev->mtu;
	int err;

	/* Pre-allocate buffers to prevent failure in mana_attach later */
	err = mana_pre_alloc_rxbufs(mpc, new_mtu);
	if (err) {
		netdev_err(ndev, "Insufficient memory for new MTU\n");
		return err;
	}

	err = mana_detach(ndev, false);
	if (err) {
		netdev_err(ndev, "mana_detach failed: %d\n", err);
		goto out;
	}

	ndev->mtu = new_mtu;

	err = mana_attach(ndev);
	if (err) {
		netdev_err(ndev, "mana_attach failed: %d\n", err);
		ndev->mtu = old_mtu;
	}

out:
	mana_pre_dealloc_rxbufs(mpc);
	return err;
}

static const struct net_device_ops mana_devops = {
	.ndo_open		= mana_open,
	.ndo_stop		= mana_close,
	.ndo_select_queue	= mana_select_queue,
	.ndo_start_xmit		= mana_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_get_stats64	= mana_get_stats64,
	.ndo_bpf		= mana_bpf,
	.ndo_xdp_xmit		= mana_xdp_xmit,
	.ndo_change_mtu		= mana_change_mtu,
};

static void mana_cleanup_port_context(struct mana_port_context *apc)
{
	kfree(apc->rxqs);
	apc->rxqs = NULL;
}

static int mana_init_port_context(struct mana_port_context *apc)
{
	apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *),
			    GFP_KERNEL);

	return !apc->rxqs ? -ENOMEM : 0;
}

static int mana_send_request(struct mana_context *ac, void *in_buf,
			     u32 in_len, void *out_buf, u32 out_len)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct gdma_resp_hdr *resp = out_buf;
	struct gdma_req_hdr *req = in_buf;
	struct device *dev = gc->dev;
	static atomic_t activity_id;
	int err;

	req->dev_id = gc->mana.dev_id;
	req->activity_id = atomic_inc_return(&activity_id);

	err = mana_gd_send_request(gc, in_len, in_buf, out_len,
				   out_buf);
	if (err || resp->status) {
		dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
			err, resp->status);
		return err ? err : -EPROTO;
	}

	if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
	    req->activity_id != resp->activity_id) {
		dev_err(dev, "Unexpected mana message response: %x,%x,%x,%x\n",
			req->dev_id.as_uint32, resp->dev_id.as_uint32,
			req->activity_id, resp->activity_id);
		return -EPROTO;
	}

	return 0;
}

static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
				const enum mana_command_code expected_code,
				const u32 min_size)
{
	if (resp_hdr->response.msg_type != expected_code)
		return -EPROTO;

	if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
		return -EPROTO;

	if (resp_hdr->response.msg_size < min_size)
		return -EPROTO;

	return 0;
}

static int mana_pf_register_hw_vport(struct mana_port_context *apc)
{
	struct mana_register_hw_vport_resp resp = {};
	struct mana_register_hw_vport_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_HW_PORT,
			     sizeof(req), sizeof(resp));
	req.attached_gfid = 1;
	req.is_pf_default_vport = 1;
	req.allow_all_ether_types = 1;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_HW_PORT,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n",
			   err, resp.hdr.status);
		return err ? err : -EPROTO;
	}

	apc->port_handle = resp.hw_vport_handle;
	return 0;
}

static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
{
	struct mana_deregister_hw_vport_resp resp = {};
	struct mana_deregister_hw_vport_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_HW_PORT,
			     sizeof(req), sizeof(resp));
	req.hw_vport_handle = apc->port_handle;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
			   err);
		return;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_HW_PORT,
				   sizeof(resp));
	if (err || resp.hdr.status)
		netdev_err(apc->ndev,
			   "Failed to deregister hw vPort: %d, 0x%x\n",
			   err, resp.hdr.status);
}

static int mana_pf_register_filter(struct mana_port_context *apc)
{
	struct mana_register_filter_resp resp = {};
	struct mana_register_filter_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_FILTER,
			     sizeof(req), sizeof(resp));
	req.vport = apc->port_handle;
	memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN);

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to register filter: %d\n", err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_FILTER,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n",
			   err, resp.hdr.status);
		return err ? err : -EPROTO;
	}

	apc->pf_filter_handle = resp.filter_handle;
	return 0;
}

static void mana_pf_deregister_filter(struct mana_port_context *apc)
{
	struct mana_deregister_filter_resp resp = {};
	struct mana_deregister_filter_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_FILTER,
			     sizeof(req), sizeof(resp));
	req.filter_handle = apc->pf_filter_handle;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
			   err);
		return;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_FILTER,
				   sizeof(resp));
	if (err || resp.hdr.status)
		netdev_err(apc->ndev,
			   "Failed to deregister filter: %d, 0x%x\n",
			   err, resp.hdr.status);
}

static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
				 u32 proto_minor_ver, u32 proto_micro_ver,
				 u16 *max_num_vports)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct mana_query_device_cfg_resp resp = {};
	struct mana_query_device_cfg_req req = {};
	struct device *dev = gc->dev;
	int err = 0;

	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
			     sizeof(req), sizeof(resp));

	req.hdr.resp.msg_version = GDMA_MESSAGE_V2;

	req.proto_major_ver = proto_major_ver;
	req.proto_minor_ver = proto_minor_ver;
	req.proto_micro_ver = proto_micro_ver;

	err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
	if (err) {
		dev_err(dev, "Failed to query config: %d", err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		dev_err(dev, "Invalid query result: %d, 0x%x\n", err,
			resp.hdr.status);
		if (!err)
			err = -EPROTO;
		return err;
	}

	*max_num_vports = resp.max_num_vports;

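	/* A V2 response also reports the adapter's maximum MTU; otherwise
	 * fall back to the standard Ethernet frame length.
	 */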
	if (resp.hdr.response.msg_version == GDMA_MESSAGE_V2)
		gc->adapter_mtu = resp.adapter_mtu;
	else
		gc->adapter_mtu = ETH_FRAME_LEN;

	return 0;
}

static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
				u32 *max_sq, u32 *max_rq, u32 *num_indir_entry)
{
	struct mana_query_vport_cfg_resp resp = {};
	struct mana_query_vport_cfg_req req = {};
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
			     sizeof(req), sizeof(resp));

	req.vport_index = vport_index;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err)
		return err;

	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
				   sizeof(resp));
	if (err)
		return err;

	if (resp.hdr.status)
		return -EPROTO;

	*max_sq = resp.max_num_sq;
	*max_rq = resp.max_num_rq;
	*num_indir_entry = resp.num_indirection_ent;

	apc->port_handle = resp.vport;
	ether_addr_copy(apc->mac_addr, resp.mac_addr);

	return 0;
}

void mana_uncfg_vport(struct mana_port_context *apc)
{
	mutex_lock(&apc->vport_mutex);
	apc->vport_use_count--;
	WARN_ON(apc->vport_use_count < 0);
	mutex_unlock(&apc->vport_mutex);
}
EXPORT_SYMBOL_NS(mana_uncfg_vport, NET_MANA);

int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
		   u32 doorbell_pg_id)
{
	struct mana_config_vport_resp resp = {};
	struct mana_config_vport_req req = {};
	int err;

	/* This function is used to program the Ethernet port in the hardware
	 * table. It can be called from the Ethernet driver or the RDMA driver.
	 *
	 * For Ethernet usage, the hardware supports only one active user on a
	 * physical port. The driver checks the port usage before programming
	 * the hardware when creating the RAW QP (RDMA driver) or exposing the
	 * device to the kernel NET layer (Ethernet driver).
	 *
	 * Because the RDMA driver doesn't know in advance which QP type the
	 * user will create, it exposes the device with all its ports. The user
	 * may not be able to create a RAW QP on a port if this port is already
	 * in use by the Ethernet driver from the kernel.
	 *
	 * This physical port limitation only applies to the RAW QP. For RC QP,
	 * the hardware doesn't have this limitation. The user can create RC
	 * QPs on a physical port up to the hardware limits independent of the
	 * Ethernet usage on the same port.
	 */
	mutex_lock(&apc->vport_mutex);
	if (apc->vport_use_count > 0) {
		mutex_unlock(&apc->vport_mutex);
		return -EBUSY;
	}
	apc->vport_use_count++;
	mutex_unlock(&apc->vport_mutex);

	mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
			     sizeof(req), sizeof(resp));
	req.vport = apc->port_handle;
	req.pdid = protection_dom_id;
	req.doorbell_pageid = doorbell_pg_id;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err);
		goto out;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
			   err, resp.hdr.status);
		if (!err)
			err = -EPROTO;

		goto out;
	}

	apc->tx_shortform_allowed = resp.short_form_allowed;
	apc->tx_vp_offset = resp.tx_vport_offset;

	netdev_info(apc->ndev, "Configured vPort %llu PD %u DB %u\n",
		    apc->port_handle, protection_dom_id, doorbell_pg_id);
out:
	if (err)
		mana_uncfg_vport(apc);

	return err;
}
EXPORT_SYMBOL_NS(mana_cfg_vport, NET_MANA);

static int mana_cfg_vport_steering(struct mana_port_context *apc,
				   enum TRI_STATE rx,
				   bool update_default_rxobj, bool update_key,
				   bool update_tab)
{
	u16 num_entries = MANA_INDIRECT_TABLE_SIZE;
	struct mana_cfg_rx_steer_req_v2 *req;
	struct mana_cfg_rx_steer_resp resp = {};
	struct net_device *ndev = apc->ndev;
	mana_handle_t *req_indir_tab;
	u32 req_buf_size;
	int err;

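	/* The indirection table is carried right after the fixed-size request
	 * header (indir_tab_offset == sizeof(*req) below).
	 */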
1065ca9c54d2SDexuan Cui 	req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
1066ca9c54d2SDexuan Cui 	req = kzalloc(req_buf_size, GFP_KERNEL);
1067ca9c54d2SDexuan Cui 	if (!req)
1068ca9c54d2SDexuan Cui 		return -ENOMEM;
1069ca9c54d2SDexuan Cui 
1070ca9c54d2SDexuan Cui 	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
1071ca9c54d2SDexuan Cui 			     sizeof(resp));
1072ca9c54d2SDexuan Cui 
107321453285SLong Li 	req->hdr.req.msg_version = GDMA_MESSAGE_V2;
107421453285SLong Li 
1075ca9c54d2SDexuan Cui 	req->vport = apc->port_handle;
1076ca9c54d2SDexuan Cui 	req->num_indir_entries = num_entries;
1077ca9c54d2SDexuan Cui 	req->indir_tab_offset = sizeof(*req);
1078ca9c54d2SDexuan Cui 	req->rx_enable = rx;
1079ca9c54d2SDexuan Cui 	req->rss_enable = apc->rss_state;
1080ca9c54d2SDexuan Cui 	req->update_default_rxobj = update_default_rxobj;
1081ca9c54d2SDexuan Cui 	req->update_hashkey = update_key;
1082ca9c54d2SDexuan Cui 	req->update_indir_tab = update_tab;
1083ca9c54d2SDexuan Cui 	req->default_rxobj = apc->default_rxobj;
108421453285SLong Li 	req->cqe_coalescing_enable = 0;
1085ca9c54d2SDexuan Cui 
1086ca9c54d2SDexuan Cui 	if (update_key)
1087ca9c54d2SDexuan Cui 		memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
1088ca9c54d2SDexuan Cui 
1089ca9c54d2SDexuan Cui 	if (update_tab) {
1090ca9c54d2SDexuan Cui 		req_indir_tab = (mana_handle_t *)(req + 1);
1091ca9c54d2SDexuan Cui 		memcpy(req_indir_tab, apc->rxobj_table,
1092ca9c54d2SDexuan Cui 		       req->num_indir_entries * sizeof(mana_handle_t));
1093ca9c54d2SDexuan Cui 	}
1094ca9c54d2SDexuan Cui 
1095ca9c54d2SDexuan Cui 	err = mana_send_request(apc->ac, req, req_buf_size, &resp,
1096ca9c54d2SDexuan Cui 				sizeof(resp));
1097ca9c54d2SDexuan Cui 	if (err) {
1098ca9c54d2SDexuan Cui 		netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
1099ca9c54d2SDexuan Cui 		goto out;
1100ca9c54d2SDexuan Cui 	}
1101ca9c54d2SDexuan Cui 
1102ca9c54d2SDexuan Cui 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
1103ca9c54d2SDexuan Cui 				   sizeof(resp));
1104ca9c54d2SDexuan Cui 	if (err) {
1105ca9c54d2SDexuan Cui 		netdev_err(ndev, "vPort RX configuration failed: %d\n", err);
1106ca9c54d2SDexuan Cui 		goto out;
1107ca9c54d2SDexuan Cui 	}
1108ca9c54d2SDexuan Cui 
1109ca9c54d2SDexuan Cui 	if (resp.hdr.status) {
1110ca9c54d2SDexuan Cui 		netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
1111ca9c54d2SDexuan Cui 			   resp.hdr.status);
1112ca9c54d2SDexuan Cui 		err = -EPROTO;
1113ca9c54d2SDexuan Cui 	}
1114b5c1c985SLong Li 
1115b5c1c985SLong Li 	netdev_info(ndev, "Configured steering vPort %llu entries %u\n",
1116b5c1c985SLong Li 		    apc->port_handle, num_entries);
1117ca9c54d2SDexuan Cui out:
1118ca9c54d2SDexuan Cui 	kfree(req);
1119ca9c54d2SDexuan Cui 	return err;
1120ca9c54d2SDexuan Cui }
1121ca9c54d2SDexuan Cui 
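/* Create a hardware WQ/CQ object pair on a vPort. The caller passes the
 * GDMA memory regions and sizes for both queues in wq_spec/cq_spec; on
 * success the firmware-assigned queue ids are written back into the specs
 * and *wq_obj holds the opaque handle used to destroy the object later.
 */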
int mana_create_wq_obj(struct mana_port_context *apc,
		       mana_handle_t vport,
		       u32 wq_type, struct mana_obj_spec *wq_spec,
		       struct mana_obj_spec *cq_spec,
		       mana_handle_t *wq_obj)
{
	struct mana_create_wqobj_resp resp = {};
	struct mana_create_wqobj_req req = {};
	struct net_device *ndev = apc->ndev;
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
			     sizeof(req), sizeof(resp));
	req.vport = vport;
	req.wq_type = wq_type;
	req.wq_gdma_region = wq_spec->gdma_region;
	req.cq_gdma_region = cq_spec->gdma_region;
	req.wq_size = wq_spec->queue_size;
	req.cq_size = cq_spec->queue_size;
	req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
	req.cq_parent_qid = cq_spec->attached_eq;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(ndev, "Failed to create WQ object: %d\n", err);
		goto out;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
				   sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
			   resp.hdr.status);
		if (!err)
			err = -EPROTO;
		goto out;
	}

	if (resp.wq_obj == INVALID_MANA_HANDLE) {
		netdev_err(ndev, "Got an invalid WQ object handle\n");
		err = -EPROTO;
		goto out;
	}

	*wq_obj = resp.wq_obj;
	wq_spec->queue_index = resp.wq_id;
	cq_spec->queue_index = resp.cq_id;

	return 0;
out:
	return err;
}
EXPORT_SYMBOL_NS(mana_create_wq_obj, NET_MANA);

void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
			 mana_handle_t wq_obj)
{
	struct mana_destroy_wqobj_resp resp = {};
	struct mana_destroy_wqobj_req req = {};
	struct net_device *ndev = apc->ndev;
	int err;

	mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
			     sizeof(req), sizeof(resp));
	req.wq_type = wq_type;
	req.wq_obj_handle = wq_obj;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
		return;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
				   sizeof(resp));
	if (err || resp.hdr.status)
		netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
			   resp.hdr.status);
}
EXPORT_SYMBOL_NS(mana_destroy_wq_obj, NET_MANA);

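/* The adapter keeps one EQ per hardware queue, up to gc->max_num_queues;
 * the TX and RX completion queues for queue i are attached to ac->eqs[i].
 */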
static void mana_destroy_eq(struct mana_context *ac)
{
	struct gdma_context *gc = ac->gdma_dev->gdma_context;
	struct gdma_queue *eq;
	int i;

	if (!ac->eqs)
		return;

	for (i = 0; i < gc->max_num_queues; i++) {
		eq = ac->eqs[i].eq;
		if (!eq)
			continue;

		mana_gd_destroy_queue(gc, eq);
	}

	kfree(ac->eqs);
	ac->eqs = NULL;
}

static int mana_create_eq(struct mana_context *ac)
{
	struct gdma_dev *gd = ac->gdma_dev;
	struct gdma_context *gc = gd->gdma_context;
	struct gdma_queue_spec spec = {};
	int err;
	int i;

	ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq),
			  GFP_KERNEL);
	if (!ac->eqs)
		return -ENOMEM;

	spec.type = GDMA_EQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = EQ_SIZE;
	spec.eq.callback = NULL;
	spec.eq.context = ac->eqs;
	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;

	for (i = 0; i < gc->max_num_queues; i++) {
		err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
		if (err)
			goto out;
	}

	return 0;
out:
	mana_destroy_eq(ac);
	return err;
}

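/* Fence an RQ: post a MANA_FENCE_RQ request and wait (up to 10 seconds)
 * for the hardware to echo a CQE_RX_OBJECT_FENCE completion on the RQ's
 * CQ; mana_process_rx_cqe() completes rxq->fence_event when it sees that
 * CQE. This lets callers quiesce an RQ, e.g. before reconfiguring it.
 */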
static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
{
	struct mana_fence_rq_resp resp = {};
	struct mana_fence_rq_req req = {};
	int err;

	init_completion(&rxq->fence_event);

	mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
			     sizeof(req), sizeof(resp));
	req.wq_obj_handle = rxq->rxobj;

	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
				sizeof(resp));
	if (err) {
		netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n",
			   rxq->rxq_idx, err);
		return err;
	}

	err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
	if (err || resp.hdr.status) {
		netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
			   rxq->rxq_idx, err, resp.hdr.status);
		if (!err)
			err = -EPROTO;

		return err;
	}

	if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) {
		netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n",
			   rxq->rxq_idx);
		return -ETIMEDOUT;
	}

	return 0;
}

static void mana_fence_rqs(struct mana_port_context *apc)
{
	unsigned int rxq_idx;
	struct mana_rxq *rxq;
	int err;

	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
		rxq = apc->rxqs[rxq_idx];
		err = mana_fence_rq(apc, rxq);

		/* In case of any error, use sleep instead. */
		if (err)
			msleep(100);
	}
}

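/* Advance the WQ tail by num_units work-queue units. head and tail are
 * free-running u32 counters, so the used-space computations below stay
 * correct across counter wraparound; the WARN catches a tail that would
 * overtake the head.
 */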
static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
{
	u32 used_space_old;
	u32 used_space_new;

	used_space_old = wq->head - wq->tail;
	used_space_new = wq->head - (wq->tail + num_units);

	if (WARN_ON_ONCE(used_space_new > used_space_old))
		return -ERANGE;

	wq->tail += num_units;
	return 0;
}

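/* Undo the DMA mappings recorded in the mana_skb_head at skb->head.
 * The linear part of a GSO skb may have been mapped as two SGEs (the
 * headers plus the remainder), so hsg is computed here the same way the
 * transmit path mapped it before unmapping the page fragments that follow.
 */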
static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
{
	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
	struct device *dev = gc->dev;
	int hsg, i;

	/* Number of SGEs of linear part */
	hsg = (skb_is_gso(skb) && skb_headlen(skb) > ash->size[0]) ? 2 : 1;

	for (i = 0; i < hsg; i++)
		dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
				 DMA_TO_DEVICE);

	for (i = hsg; i < skb_shinfo(skb)->nr_frags + hsg; i++)
		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
			       DMA_TO_DEVICE);
}

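/* Drain TX completions for one CQ: reclaim the DMA mappings and skbs of
 * every completed WQE, move the SQ tail forward by the consumed WQE units,
 * and wake the netdev TX queue if it was stopped for lack of WQ space.
 * The smp_mb()/smp_rmb() below are presumably paired with barriers in the
 * transmit path so the queue cannot stay stopped once space is available.
 */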
static void mana_poll_tx_cq(struct mana_cq *cq)
{
	struct gdma_comp *completions = cq->gdma_comp_buf;
	struct gdma_posted_wqe_info *wqe_info;
	unsigned int pkt_transmitted = 0;
	unsigned int wqe_unit_cnt = 0;
	struct mana_txq *txq = cq->txq;
	struct mana_port_context *apc;
	struct netdev_queue *net_txq;
	struct gdma_queue *gdma_wq;
	unsigned int avail_space;
	struct net_device *ndev;
	struct sk_buff *skb;
	bool txq_stopped;
	int comp_read;
	int i;

	ndev = txq->ndev;
	apc = netdev_priv(ndev);

	comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
				    CQE_POLLING_BUFFER);

	if (comp_read < 1)
		return;

	for (i = 0; i < comp_read; i++) {
		struct mana_tx_comp_oob *cqe_oob;

		if (WARN_ON_ONCE(!completions[i].is_sq))
			return;

		cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
		if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type !=
				 MANA_CQE_COMPLETION))
			return;

		switch (cqe_oob->cqe_hdr.cqe_type) {
		case CQE_TX_OKAY:
			break;

		case CQE_TX_SA_DROP:
		case CQE_TX_MTU_DROP:
		case CQE_TX_INVALID_OOB:
		case CQE_TX_INVALID_ETH_TYPE:
		case CQE_TX_HDR_PROCESSING_ERROR:
		case CQE_TX_VF_DISABLED:
		case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
		case CQE_TX_VPORT_DISABLED:
		case CQE_TX_VLAN_TAGGING_VIOLATION:
			if (net_ratelimit())
				netdev_err(ndev, "TX: CQE error %d\n",
					   cqe_oob->cqe_hdr.cqe_type);

			apc->eth_stats.tx_cqe_err++;
			break;

		default:
			/* If the CQE type is unknown, log an error,
			 * and still free the SKB, update tail, etc.
			 */
			if (net_ratelimit())
				netdev_err(ndev, "TX: unknown CQE type %d\n",
					   cqe_oob->cqe_hdr.cqe_type);

			apc->eth_stats.tx_cqe_unknown_type++;
			break;
		}

		if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))
			return;

		skb = skb_dequeue(&txq->pending_skbs);
		if (WARN_ON_ONCE(!skb))
			return;

		wqe_info = (struct gdma_posted_wqe_info *)skb->cb;
		wqe_unit_cnt += wqe_info->wqe_size_in_bu;

		mana_unmap_skb(skb, apc);

		napi_consume_skb(skb, cq->budget);

		pkt_transmitted++;
	}

	if (WARN_ON_ONCE(wqe_unit_cnt == 0))
		return;

	mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);

	gdma_wq = txq->gdma_sq;
	avail_space = mana_gd_wq_avail_space(gdma_wq);

	/* Ensure tail updated before checking q stop */
	smp_mb();

	net_txq = txq->net_txq;
	txq_stopped = netif_tx_queue_stopped(net_txq);

	/* Ensure checking txq_stopped before apc->port_is_up. */
	smp_rmb();

	if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
		netif_tx_wake_queue(net_txq);
		apc->eth_stats.wake_queue++;
	}

	if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
		WARN_ON_ONCE(1);

	cq->work_done = pkt_transmitted;
}

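/* Repost the next receive WQE slot. The work request is posted without
 * ringing the doorbell here; mana_poll_rx_cq() rings the RQ doorbell once
 * per poll batch instead, which cuts down on doorbell traffic.
 */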
static void mana_post_pkt_rxq(struct mana_rxq *rxq)
{
	struct mana_recv_buf_oob *recv_buf_oob;
	u32 curr_index;
	int err;

	curr_index = rxq->buf_index++;
	if (rxq->buf_index == rxq->num_rx_buf)
		rxq->buf_index = 0;

	recv_buf_oob = &rxq->rx_oobs[curr_index];

	err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req,
					&recv_buf_oob->wqe_inf);
	if (WARN_ON_ONCE(err))
		return;

	WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
}

static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va,
				      uint pkt_len, struct xdp_buff *xdp)
{
	struct sk_buff *skb = napi_build_skb(buf_va, rxq->alloc_size);

	if (!skb)
		return NULL;

	if (xdp->data_hard_start) {
		skb_reserve(skb, xdp->data - xdp->data_hard_start);
		skb_put(skb, xdp->data_end - xdp->data);
		return skb;
	}

	skb_reserve(skb, rxq->headroom);
	skb_put(skb, pkt_len);

	return skb;
}

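/* Deliver one received buffer: run the XDP program first, then (for
 * XDP_PASS/XDP_TX) build an skb around the buffer, fill in checksum, RSS
 * hash and VLAN metadata from the CQE, and hand the skb to GRO or to the
 * XDP transmit path. Buffers that came from the page pool are marked for
 * recycling; a dropped non-pool buffer is saved in rxq->xdp_save_va for
 * reuse.
 */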
static void mana_rx_skb(void *buf_va, bool from_pool,
			struct mana_rxcomp_oob *cqe, struct mana_rxq *rxq)
{
	struct mana_stats_rx *rx_stats = &rxq->stats;
	struct net_device *ndev = rxq->ndev;
	uint pkt_len = cqe->ppi[0].pkt_len;
	u16 rxq_idx = rxq->rxq_idx;
	struct napi_struct *napi;
	struct xdp_buff xdp = {};
	struct sk_buff *skb;
	u32 hash_value;
	u32 act;

	rxq->rx_cq.work_done++;
	napi = &rxq->rx_cq.napi;

	if (!buf_va) {
		++ndev->stats.rx_dropped;
		return;
	}

	act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);

	if (act == XDP_REDIRECT && !rxq->xdp_rc)
		return;

	if (act != XDP_PASS && act != XDP_TX)
		goto drop_xdp;

	skb = mana_build_skb(rxq, buf_va, pkt_len, &xdp);

	if (!skb)
		goto drop;

	if (from_pool)
		skb_mark_for_recycle(skb);

	skb->dev = napi->dev;

	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);
	skb_record_rx_queue(skb, rxq_idx);

	if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) {
		if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) {
		hash_value = cqe->ppi[0].pkt_hash;

		if (cqe->rx_hashtype & MANA_HASH_L4)
			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4);
		else
			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
	}

	if (cqe->rx_vlantag_present) {
		u16 vlan_tci = cqe->rx_vlan_id;

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
	}

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += pkt_len;

	if (act == XDP_TX)
		rx_stats->xdp_tx++;
	u64_stats_update_end(&rx_stats->syncp);

	if (act == XDP_TX) {
		skb_set_queue_mapping(skb, rxq_idx);
		mana_xdp_tx(skb, ndev);
		return;
	}

	napi_gro_receive(napi, skb);

	return;

drop_xdp:
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->xdp_drop++;
	u64_stats_update_end(&rx_stats->syncp);

drop:
	if (from_pool) {
		page_pool_recycle_direct(rxq->page_pool,
					 virt_to_head_page(buf_va));
	} else {
		WARN_ON_ONCE(rxq->xdp_save_va);
		/* Save for reuse */
		rxq->xdp_save_va = buf_va;
	}

	++ndev->stats.rx_dropped;

	return;
}

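/* Allocate one receive buffer, trying the cheapest source first: reuse a
 * buffer saved after an XDP drop; for buffers larger than a page, fall
 * back to napi_alloc_frag()/netdev_alloc_frag() (rejecting a frag that
 * silently fell back to a single page smaller than alloc_size); otherwise
 * take a page from the RXQ's page pool. On success the buffer is DMA
 * mapped and *from_pool tells the caller how to release it.
 */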
static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
			     dma_addr_t *da, bool *from_pool, bool is_napi)
{
	struct page *page;
	void *va;

	*from_pool = false;

	/* Reuse XDP dropped page if available */
	if (rxq->xdp_save_va) {
		va = rxq->xdp_save_va;
		rxq->xdp_save_va = NULL;
	} else if (rxq->alloc_size > PAGE_SIZE) {
		if (is_napi)
			va = napi_alloc_frag(rxq->alloc_size);
		else
			va = netdev_alloc_frag(rxq->alloc_size);

		if (!va)
			return NULL;

		page = virt_to_head_page(va);
		/* Check if the frag falls back to single page */
		if (compound_order(page) < get_order(rxq->alloc_size)) {
			put_page(page);
			return NULL;
		}
	} else {
		page = page_pool_dev_alloc_pages(rxq->page_pool);
		if (!page)
			return NULL;

		*from_pool = true;
		va = page_to_virt(page);
	}

	*da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
			     DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *da)) {
		if (*from_pool)
			page_pool_put_full_page(rxq->page_pool, page, false);
		else
			put_page(virt_to_head_page(va));

		return NULL;
	}

	return va;
}

/* Allocate frag for rx buffer, and save the old buf */
static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
			       struct mana_recv_buf_oob *rxoob, void **old_buf,
			       bool *old_fp)
{
	bool from_pool;
	dma_addr_t da;
	void *va;

	va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true);
	if (!va)
		return;

	dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
			 DMA_FROM_DEVICE);
	*old_buf = rxoob->buf_va;
	*old_fp = rxoob->from_pool;

	rxoob->buf_va = va;
	rxoob->sgl[0].address = da;
	rxoob->from_pool = from_pool;
}

static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
				struct gdma_comp *cqe)
{
	struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
	struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
	struct net_device *ndev = rxq->ndev;
	struct mana_recv_buf_oob *rxbuf_oob;
	struct mana_port_context *apc;
	struct device *dev = gc->dev;
	void *old_buf = NULL;
	u32 curr, pktlen;
	bool old_fp;

	apc = netdev_priv(ndev);

	switch (oob->cqe_hdr.cqe_type) {
	case CQE_RX_OKAY:
		break;

	case CQE_RX_TRUNCATED:
		++ndev->stats.rx_dropped;
		rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
		netdev_warn_once(ndev, "Dropped a truncated packet\n");
		goto drop;

	case CQE_RX_COALESCED_4:
		netdev_err(ndev, "RX coalescing is unsupported\n");
		apc->eth_stats.rx_coalesced_err++;
		return;

	case CQE_RX_OBJECT_FENCE:
		complete(&rxq->fence_event);
		return;

	default:
		netdev_err(ndev, "Unknown RX CQE type = %d\n",
			   oob->cqe_hdr.cqe_type);
		apc->eth_stats.rx_cqe_unknown_type++;
		return;
	}

	pktlen = oob->ppi[0].pkt_len;

	if (pktlen == 0) {
		/* Data packets should never have a packet length of zero */
		netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
			   rxq->gdma_id, cq->gdma_id, rxq->rxobj);
		return;
	}

	curr = rxq->buf_index;
	rxbuf_oob = &rxq->rx_oobs[curr];
	WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);

	mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf, &old_fp);

	/* Unsuccessful refill will have old_buf == NULL.
	 * In this case, mana_rx_skb() will drop the packet.
	 */
	mana_rx_skb(old_buf, old_fp, oob, rxq);

drop:
	mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);

	mana_post_pkt_rxq(rxq);
}

static void mana_poll_rx_cq(struct mana_cq *cq)
{
	struct gdma_comp *comp = cq->gdma_comp_buf;
	struct mana_rxq *rxq = cq->rxq;
	int comp_read, i;

	comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
	WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);

	rxq->xdp_flush = false;

	for (i = 0; i < comp_read; i++) {
		if (WARN_ON_ONCE(comp[i].is_sq))
			return;

		/* verify recv cqe references the right rxq */
		if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
			return;

		mana_process_rx_cqe(rxq, cq, &comp[i]);
	}

	if (comp_read > 0) {
		struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;

		mana_gd_wq_ring_doorbell(gc, rxq->gdma_rq);
	}

	if (rxq->xdp_flush)
		xdp_do_flush();
}

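/* NAPI-driven completion handling. The EQ callback (mana_schedule_napi())
 * schedules NAPI; mana_poll() invokes this handler, which drains the CQ
 * and re-arms it only when the budget was not exhausted and
 * napi_complete_done() succeeded, so no completion interrupt is lost.
 */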
static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
{
	struct mana_cq *cq = context;
	u8 arm_bit;
	int w;

	WARN_ON_ONCE(cq->gdma_cq != gdma_queue);

	if (cq->type == MANA_CQ_TYPE_RX)
		mana_poll_rx_cq(cq);
	else
		mana_poll_tx_cq(cq);

	w = cq->work_done;

	if (w < cq->budget &&
	    napi_complete_done(&cq->napi, w)) {
		arm_bit = SET_ARM_BIT;
	} else {
		arm_bit = 0;
	}

	mana_gd_ring_cq(gdma_queue, arm_bit);

	return w;
}

static int mana_poll(struct napi_struct *napi, int budget)
{
	struct mana_cq *cq = container_of(napi, struct mana_cq, napi);
	int w;

	cq->work_done = 0;
	cq->budget = budget;

	w = mana_cq_handler(cq, cq->gdma_cq);

	return min(w, budget);
}

static void mana_schedule_napi(void *context, struct gdma_queue *gdma_queue)
{
	struct mana_cq *cq = context;

	napi_schedule_irqoff(&cq->napi);
}

static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;

	if (!cq->gdma_cq)
		return;

	mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
}

static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
{
	struct gdma_dev *gd = apc->ac->gdma_dev;

	if (!txq->gdma_sq)
		return;

	mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
}

static void mana_destroy_txq(struct mana_port_context *apc)
{
	struct napi_struct *napi;
	int i;

	if (!apc->tx_qp)
		return;

	for (i = 0; i < apc->num_queues; i++) {
		napi = &apc->tx_qp[i].tx_cq.napi;
		napi_synchronize(napi);
		napi_disable(napi);
		netif_napi_del(napi);

		mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);

		mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);

		mana_deinit_txq(apc, &apc->tx_qp[i].txq);
	}

	kfree(apc->tx_qp);
	apc->tx_qp = NULL;
}

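/* For each of the apc->num_queues TX queues: create a GDMA SQ and its CQ,
 * register the pair as a WQ object on the vPort (which assigns the final
 * queue ids), hook the CQ into NAPI, and arm the CQ for interrupts.
 */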
static int mana_create_txq(struct mana_port_context *apc,
			   struct net_device *net)
{
	struct mana_context *ac = apc->ac;
	struct gdma_dev *gd = ac->gdma_dev;
	struct mana_obj_spec wq_spec;
	struct mana_obj_spec cq_spec;
	struct gdma_queue_spec spec;
	struct gdma_context *gc;
	struct mana_txq *txq;
	struct mana_cq *cq;
	u32 txq_size;
	u32 cq_size;
	int err;
	int i;

	apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp),
			     GFP_KERNEL);
	if (!apc->tx_qp)
		return -ENOMEM;

	/*  The minimum size of the WQE is 32 bytes, hence
	 *  MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
	 *  the SQ can store. This value is then used to size other queues
	 *  to prevent overflow.
	 */
	txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
	BUILD_BUG_ON(!PAGE_ALIGNED(txq_size));

	cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
	cq_size = PAGE_ALIGN(cq_size);

	gc = gd->gdma_context;

	for (i = 0; i < apc->num_queues; i++) {
		apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;

		/* Create SQ */
		txq = &apc->tx_qp[i].txq;

		u64_stats_init(&txq->stats.syncp);
		txq->ndev = net;
		txq->net_txq = netdev_get_tx_queue(net, i);
		txq->vp_offset = apc->tx_vp_offset;
		skb_queue_head_init(&txq->pending_skbs);

		memset(&spec, 0, sizeof(spec));
		spec.type = GDMA_SQ;
		spec.monitor_avl_buf = true;
		spec.queue_size = txq_size;
		err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
		if (err)
			goto out;

		/* Create SQ's CQ */
		cq = &apc->tx_qp[i].tx_cq;
		cq->type = MANA_CQ_TYPE_TX;

		cq->txq = txq;

		memset(&spec, 0, sizeof(spec));
		spec.type = GDMA_CQ;
		spec.monitor_avl_buf = false;
		spec.queue_size = cq_size;
		spec.cq.callback = mana_schedule_napi;
		spec.cq.parent_eq = ac->eqs[i].eq;
		spec.cq.context = cq;
		err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
		if (err)
			goto out;

		memset(&wq_spec, 0, sizeof(wq_spec));
		memset(&cq_spec, 0, sizeof(cq_spec));

		wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
		wq_spec.queue_size = txq->gdma_sq->queue_size;

		cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
		cq_spec.queue_size = cq->gdma_cq->queue_size;
		cq_spec.modr_ctx_id = 0;
		cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;

		err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
					 &wq_spec, &cq_spec,
					 &apc->tx_qp[i].tx_object);

		if (err)
			goto out;

		txq->gdma_sq->id = wq_spec.queue_index;
		cq->gdma_cq->id = cq_spec.queue_index;

		txq->gdma_sq->mem_info.dma_region_handle =
			GDMA_INVALID_DMA_REGION;
		cq->gdma_cq->mem_info.dma_region_handle =
			GDMA_INVALID_DMA_REGION;

		txq->gdma_txq_id = txq->gdma_sq->id;

		cq->gdma_id = cq->gdma_cq->id;

		if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
			err = -EINVAL;
			goto out;
		}

		gc->cq_table[cq->gdma_id] = cq->gdma_cq;

		netif_napi_add_tx(net, &cq->napi, mana_poll);
		napi_enable(&cq->napi);

		mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
	}

	return 0;
out:
	mana_destroy_txq(apc);
	return err;
}

static void mana_destroy_rxq(struct mana_port_context *apc,
			     struct mana_rxq *rxq, bool validate_state)
{
	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
	struct mana_recv_buf_oob *rx_oob;
	struct device *dev = gc->dev;
	struct napi_struct *napi;
	struct page *page;
	int i;

	if (!rxq)
		return;

	napi = &rxq->rx_cq.napi;

	if (validate_state)
		napi_synchronize(napi);

	napi_disable(napi);

	xdp_rxq_info_unreg(&rxq->xdp_rxq);

	netif_napi_del(napi);

	mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);

	mana_deinit_cq(apc, &rxq->rx_cq);

	if (rxq->xdp_save_va)
		put_page(virt_to_head_page(rxq->xdp_save_va));

	for (i = 0; i < rxq->num_rx_buf; i++) {
		rx_oob = &rxq->rx_oobs[i];

		if (!rx_oob->buf_va)
			continue;

		dma_unmap_single(dev, rx_oob->sgl[0].address,
				 rx_oob->sgl[0].size, DMA_FROM_DEVICE);

		page = virt_to_head_page(rx_oob->buf_va);

		if (rx_oob->from_pool)
			page_pool_put_full_page(rxq->page_pool, page, false);
		else
			put_page(page);

		rx_oob->buf_va = NULL;
	}

	page_pool_destroy(rxq->page_pool);

	if (rxq->gdma_rq)
		mana_gd_destroy_queue(gc, rxq->gdma_rq);

	kfree(rxq);
}

static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
			    struct mana_rxq *rxq, struct device *dev)
{
	struct mana_port_context *mpc = netdev_priv(rxq->ndev);
	bool from_pool = false;
	dma_addr_t da;
	void *va;

	if (mpc->rxbufs_pre)
		va = mana_get_rxbuf_pre(rxq, &da);
	else
		va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false);

	if (!va)
		return -ENOMEM;

	rx_oob->buf_va = va;
	rx_oob->from_pool = from_pool;

	rx_oob->sgl[0].address = da;
	rx_oob->sgl[0].size = rxq->datasize;
	rx_oob->sgl[0].mem_key = mem_key;

	return 0;
}

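/* Each RX WQE carries a 16-byte header plus 16 bytes per SGE, rounded up
 * to a 32-byte multiple; mana_alloc_rx_wqe() below uses these constants
 * to compute the RQ and CQ sizes needed for a full ring of receive
 * buffers.
 */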
#define MANA_WQE_HEADER_SIZE 16
#define MANA_WQE_SGE_SIZE 16

static int mana_alloc_rx_wqe(struct mana_port_context *apc,
			     struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size)
{
	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
	struct mana_recv_buf_oob *rx_oob;
	struct device *dev = gc->dev;
	u32 buf_idx;
	int ret;

	WARN_ON(rxq->datasize == 0);

	*rxq_size = 0;
	*cq_size = 0;

	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
		rx_oob = &rxq->rx_oobs[buf_idx];
		memset(rx_oob, 0, sizeof(*rx_oob));

		rx_oob->num_sge = 1;

		ret = mana_fill_rx_oob(rx_oob, apc->ac->gdma_dev->gpa_mkey, rxq,
				       dev);
		if (ret)
			return ret;

		rx_oob->wqe_req.sgl = rx_oob->sgl;
		rx_oob->wqe_req.num_sge = rx_oob->num_sge;
		rx_oob->wqe_req.inline_oob_size = 0;
		rx_oob->wqe_req.inline_oob_data = NULL;
		rx_oob->wqe_req.flags = 0;
		rx_oob->wqe_req.client_data_unit = 0;

		*rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
				   MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
		*cq_size += COMP_ENTRY_SIZE;
	}

	return 0;
}

static int mana_push_wqe(struct mana_rxq *rxq)
{
	struct mana_recv_buf_oob *rx_oob;
	u32 buf_idx;
	int err;

	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
		rx_oob = &rxq->rx_oobs[buf_idx];

		err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
					    &rx_oob->wqe_inf);
		if (err)
			return -ENOSPC;
	}

	return 0;
}

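/* Create a page pool for the RXQ, sized to the receive ring, allocated on
 * the device's NUMA node, and bound to the queue's NAPI context so pages
 * can be recycled directly from the softirq fast path.
 */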
mana_create_page_pool(struct mana_rxq * rxq,struct gdma_context * gc)2132b1d13f7aSHaiyang Zhang static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
2133b1d13f7aSHaiyang Zhang {
2134b1d13f7aSHaiyang Zhang 	struct page_pool_params pprm = {};
2135b1d13f7aSHaiyang Zhang 	int ret;
2136b1d13f7aSHaiyang Zhang 
2137b1d13f7aSHaiyang Zhang 	pprm.pool_size = RX_BUFFERS_PER_QUEUE;
2138b1d13f7aSHaiyang Zhang 	pprm.nid = gc->numa_node;
2139b1d13f7aSHaiyang Zhang 	pprm.napi = &rxq->rx_cq.napi;
2140b1d13f7aSHaiyang Zhang 
2141b1d13f7aSHaiyang Zhang 	rxq->page_pool = page_pool_create(&pprm);
2142b1d13f7aSHaiyang Zhang 
2143b1d13f7aSHaiyang Zhang 	if (IS_ERR(rxq->page_pool)) {
2144b1d13f7aSHaiyang Zhang 		ret = PTR_ERR(rxq->page_pool);
2145b1d13f7aSHaiyang Zhang 		rxq->page_pool = NULL;
2146b1d13f7aSHaiyang Zhang 		return ret;
2147b1d13f7aSHaiyang Zhang 	}
2148b1d13f7aSHaiyang Zhang 
2149b1d13f7aSHaiyang Zhang 	return 0;
2150b1d13f7aSHaiyang Zhang }
2151b1d13f7aSHaiyang Zhang 
mana_create_rxq(struct mana_port_context * apc,u32 rxq_idx,struct mana_eq * eq,struct net_device * ndev)2152ca9c54d2SDexuan Cui static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
2153ca9c54d2SDexuan Cui 					u32 rxq_idx, struct mana_eq *eq,
2154ca9c54d2SDexuan Cui 					struct net_device *ndev)
2155ca9c54d2SDexuan Cui {
2156ca9c54d2SDexuan Cui 	struct gdma_dev *gd = apc->ac->gdma_dev;
2157ca9c54d2SDexuan Cui 	struct mana_obj_spec wq_spec;
2158ca9c54d2SDexuan Cui 	struct mana_obj_spec cq_spec;
2159ca9c54d2SDexuan Cui 	struct gdma_queue_spec spec;
2160ca9c54d2SDexuan Cui 	struct mana_cq *cq = NULL;
2161ca9c54d2SDexuan Cui 	struct gdma_context *gc;
2162ca9c54d2SDexuan Cui 	u32 cq_size, rq_size;
2163ca9c54d2SDexuan Cui 	struct mana_rxq *rxq;
2164ca9c54d2SDexuan Cui 	int err;
2165ca9c54d2SDexuan Cui 
2166ca9c54d2SDexuan Cui 	gc = gd->gdma_context;
2167ca9c54d2SDexuan Cui 
2168ea89c862SGustavo A. R. Silva 	rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE),
2169ca9c54d2SDexuan Cui 		      GFP_KERNEL);
2170ca9c54d2SDexuan Cui 	if (!rxq)
2171ca9c54d2SDexuan Cui 		return NULL;
2172ca9c54d2SDexuan Cui 
2173ca9c54d2SDexuan Cui 	rxq->ndev = ndev;
2174ca9c54d2SDexuan Cui 	rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
2175ca9c54d2SDexuan Cui 	rxq->rxq_idx = rxq_idx;
2176ca9c54d2SDexuan Cui 	rxq->rxobj = INVALID_MANA_HANDLE;
2177ca9c54d2SDexuan Cui 
217880f6215bSHaiyang Zhang 	mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size,
217980f6215bSHaiyang Zhang 			   &rxq->headroom);
2180a2917b23SHaiyang Zhang 
2181b1d13f7aSHaiyang Zhang 	/* Create page pool for RX queue */
2182b1d13f7aSHaiyang Zhang 	err = mana_create_page_pool(rxq, gc);
2183b1d13f7aSHaiyang Zhang 	if (err) {
2184b1d13f7aSHaiyang Zhang 		netdev_err(ndev, "Create page pool err:%d\n", err);
2185b1d13f7aSHaiyang Zhang 		goto out;
2186b1d13f7aSHaiyang Zhang 	}
2187b1d13f7aSHaiyang Zhang 
2188ca9c54d2SDexuan Cui 	err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
2189ca9c54d2SDexuan Cui 	if (err)
2190ca9c54d2SDexuan Cui 		goto out;
2191ca9c54d2SDexuan Cui 
2192ca9c54d2SDexuan Cui 	rq_size = PAGE_ALIGN(rq_size);
2193ca9c54d2SDexuan Cui 	cq_size = PAGE_ALIGN(cq_size);
2194ca9c54d2SDexuan Cui 
2195ca9c54d2SDexuan Cui 	/* Create RQ */
2196ca9c54d2SDexuan Cui 	memset(&spec, 0, sizeof(spec));
2197ca9c54d2SDexuan Cui 	spec.type = GDMA_RQ;
2198ca9c54d2SDexuan Cui 	spec.monitor_avl_buf = true;
2199ca9c54d2SDexuan Cui 	spec.queue_size = rq_size;
2200ca9c54d2SDexuan Cui 	err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
2201ca9c54d2SDexuan Cui 	if (err)
2202ca9c54d2SDexuan Cui 		goto out;
2203ca9c54d2SDexuan Cui 
2204ca9c54d2SDexuan Cui 	/* Create RQ's CQ */
2205ca9c54d2SDexuan Cui 	cq = &rxq->rx_cq;
2206ca9c54d2SDexuan Cui 	cq->type = MANA_CQ_TYPE_RX;
2207ca9c54d2SDexuan Cui 	cq->rxq = rxq;
2208ca9c54d2SDexuan Cui 
2209ca9c54d2SDexuan Cui 	memset(&spec, 0, sizeof(spec));
2210ca9c54d2SDexuan Cui 	spec.type = GDMA_CQ;
2211ca9c54d2SDexuan Cui 	spec.monitor_avl_buf = false;
2212ca9c54d2SDexuan Cui 	spec.queue_size = cq_size;
2213e1b5683fSHaiyang Zhang 	spec.cq.callback = mana_schedule_napi;
2214ca9c54d2SDexuan Cui 	spec.cq.parent_eq = eq->eq;
2215ca9c54d2SDexuan Cui 	spec.cq.context = cq;
2216ca9c54d2SDexuan Cui 	err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2217ca9c54d2SDexuan Cui 	if (err)
2218ca9c54d2SDexuan Cui 		goto out;
2219ca9c54d2SDexuan Cui 
2220ca9c54d2SDexuan Cui 	memset(&wq_spec, 0, sizeof(wq_spec));
2221ca9c54d2SDexuan Cui 	memset(&cq_spec, 0, sizeof(cq_spec));
222228c66cfaSAjay Sharma 	wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
2223ca9c54d2SDexuan Cui 	wq_spec.queue_size = rxq->gdma_rq->queue_size;
2224ca9c54d2SDexuan Cui 
222528c66cfaSAjay Sharma 	cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
2226ca9c54d2SDexuan Cui 	cq_spec.queue_size = cq->gdma_cq->queue_size;
2227ca9c54d2SDexuan Cui 	cq_spec.modr_ctx_id = 0;
2228ca9c54d2SDexuan Cui 	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2229ca9c54d2SDexuan Cui 
2230ca9c54d2SDexuan Cui 	err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
2231ca9c54d2SDexuan Cui 				 &wq_spec, &cq_spec, &rxq->rxobj);
2232ca9c54d2SDexuan Cui 	if (err)
2233ca9c54d2SDexuan Cui 		goto out;
2234ca9c54d2SDexuan Cui 
2235ca9c54d2SDexuan Cui 	rxq->gdma_rq->id = wq_spec.queue_index;
2236ca9c54d2SDexuan Cui 	cq->gdma_cq->id = cq_spec.queue_index;
2237ca9c54d2SDexuan Cui 
223828c66cfaSAjay Sharma 	rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
223928c66cfaSAjay Sharma 	cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2240ca9c54d2SDexuan Cui 
2241ca9c54d2SDexuan Cui 	rxq->gdma_id = rxq->gdma_rq->id;
2242ca9c54d2SDexuan Cui 	cq->gdma_id = cq->gdma_cq->id;
2243ca9c54d2SDexuan Cui 
2244ca9c54d2SDexuan Cui 	err = mana_push_wqe(rxq);
2245ca9c54d2SDexuan Cui 	if (err)
2246ca9c54d2SDexuan Cui 		goto out;
2247ca9c54d2SDexuan Cui 
2248be049936SHaiyang Zhang 	if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
2249be049936SHaiyang Zhang 		err = -EINVAL;
2250ca9c54d2SDexuan Cui 		goto out;
2251be049936SHaiyang Zhang 	}
2252ca9c54d2SDexuan Cui 
2253ca9c54d2SDexuan Cui 	gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2254ca9c54d2SDexuan Cui 
2255b707b89fSJakub Kicinski 	netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1);
2256ed5356b5SHaiyang Zhang 
2257ed5356b5SHaiyang Zhang 	WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
2258ed5356b5SHaiyang Zhang 				 cq->napi.napi_id));
2259b1d13f7aSHaiyang Zhang 	WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
2260b1d13f7aSHaiyang Zhang 					   rxq->page_pool));
2261ed5356b5SHaiyang Zhang 
2262e1b5683fSHaiyang Zhang 	napi_enable(&cq->napi);
2263e1b5683fSHaiyang Zhang 
2264e1b5683fSHaiyang Zhang 	mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
2265ca9c54d2SDexuan Cui out:
2266ca9c54d2SDexuan Cui 	if (!err)
2267ca9c54d2SDexuan Cui 		return rxq;
2268ca9c54d2SDexuan Cui 
2269ca9c54d2SDexuan Cui 	netdev_err(ndev, "Failed to create RXQ: err = %d\n", err);
2270ca9c54d2SDexuan Cui 
2271ca9c54d2SDexuan Cui 	mana_destroy_rxq(apc, rxq, false);
2272ca9c54d2SDexuan Cui 
2273ca9c54d2SDexuan Cui 	if (cq)
2274ca9c54d2SDexuan Cui 		mana_deinit_cq(apc, cq);
2275ca9c54d2SDexuan Cui 
2276ca9c54d2SDexuan Cui 	return NULL;
2277ca9c54d2SDexuan Cui }
2278ca9c54d2SDexuan Cui 
2279ca9c54d2SDexuan Cui static int mana_add_rx_queues(struct mana_port_context *apc,
2280ca9c54d2SDexuan Cui 			      struct net_device *ndev)
2281ca9c54d2SDexuan Cui {
22821e2d0824SHaiyang Zhang 	struct mana_context *ac = apc->ac;
2283ca9c54d2SDexuan Cui 	struct mana_rxq *rxq;
2284ca9c54d2SDexuan Cui 	int err = 0;
2285ca9c54d2SDexuan Cui 	int i;
2286ca9c54d2SDexuan Cui 
2287ca9c54d2SDexuan Cui 	for (i = 0; i < apc->num_queues; i++) {
22881e2d0824SHaiyang Zhang 		rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
2289ca9c54d2SDexuan Cui 		if (!rxq) {
2290ca9c54d2SDexuan Cui 			err = -ENOMEM;
2291ca9c54d2SDexuan Cui 			goto out;
2292ca9c54d2SDexuan Cui 		}
2293ca9c54d2SDexuan Cui 
2294ca9c54d2SDexuan Cui 		u64_stats_init(&rxq->stats.syncp);
2295ca9c54d2SDexuan Cui 
2296ca9c54d2SDexuan Cui 		apc->rxqs[i] = rxq;
2297ca9c54d2SDexuan Cui 	}
2298ca9c54d2SDexuan Cui 
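	/* The first queue's RX object is the default steering target. */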
2299ca9c54d2SDexuan Cui 	apc->default_rxobj = apc->rxqs[0]->rxobj;
2300ca9c54d2SDexuan Cui out:
2301ca9c54d2SDexuan Cui 	return err;
2302ca9c54d2SDexuan Cui }
2303ca9c54d2SDexuan Cui 
2304ca9c54d2SDexuan Cui static void mana_destroy_vport(struct mana_port_context *apc)
2305ca9c54d2SDexuan Cui {
23061566e7d6SDexuan Cui 	struct gdma_dev *gd = apc->ac->gdma_dev;
2307ca9c54d2SDexuan Cui 	struct mana_rxq *rxq;
2308ca9c54d2SDexuan Cui 	u32 rxq_idx;
2309ca9c54d2SDexuan Cui 
2310ca9c54d2SDexuan Cui 	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
2311ca9c54d2SDexuan Cui 		rxq = apc->rxqs[rxq_idx];
2312ca9c54d2SDexuan Cui 		if (!rxq)
2313ca9c54d2SDexuan Cui 			continue;
2314ca9c54d2SDexuan Cui 
2315ca9c54d2SDexuan Cui 		mana_destroy_rxq(apc, rxq, true);
2316ca9c54d2SDexuan Cui 		apc->rxqs[rxq_idx] = NULL;
2317ca9c54d2SDexuan Cui 	}
2318ca9c54d2SDexuan Cui 
2319ca9c54d2SDexuan Cui 	mana_destroy_txq(apc);
2320b5c1c985SLong Li 	mana_uncfg_vport(apc);
23211566e7d6SDexuan Cui 
23221566e7d6SDexuan Cui 	if (gd->gdma_context->is_pf)
23231566e7d6SDexuan Cui 		mana_pf_deregister_hw_vport(apc);
2324ca9c54d2SDexuan Cui }
2325ca9c54d2SDexuan Cui 
2326ca9c54d2SDexuan Cui static int mana_create_vport(struct mana_port_context *apc,
2327ca9c54d2SDexuan Cui 			     struct net_device *net)
2328ca9c54d2SDexuan Cui {
2329ca9c54d2SDexuan Cui 	struct gdma_dev *gd = apc->ac->gdma_dev;
2330ca9c54d2SDexuan Cui 	int err;
2331ca9c54d2SDexuan Cui 
2332ca9c54d2SDexuan Cui 	apc->default_rxobj = INVALID_MANA_HANDLE;
2333ca9c54d2SDexuan Cui 
23341566e7d6SDexuan Cui 	if (gd->gdma_context->is_pf) {
23351566e7d6SDexuan Cui 		err = mana_pf_register_hw_vport(apc);
23361566e7d6SDexuan Cui 		if (err)
23371566e7d6SDexuan Cui 			return err;
23381566e7d6SDexuan Cui 	}
23391566e7d6SDexuan Cui 
2340ca9c54d2SDexuan Cui 	err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
2341ca9c54d2SDexuan Cui 	if (err)
2342ca9c54d2SDexuan Cui 		return err;
2343ca9c54d2SDexuan Cui 
2344ca9c54d2SDexuan Cui 	return mana_create_txq(apc, net);
2345ca9c54d2SDexuan Cui }
2346ca9c54d2SDexuan Cui 
2347ca9c54d2SDexuan Cui static void mana_rss_table_init(struct mana_port_context *apc)
2348ca9c54d2SDexuan Cui {
2349ca9c54d2SDexuan Cui 	int i;
2350ca9c54d2SDexuan Cui 
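	/* Fill the indirection table with ethtool's default layout, which
	 * spreads the entries round-robin across the active queues.
	 */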
2351ca9c54d2SDexuan Cui 	for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
2352ca9c54d2SDexuan Cui 		apc->indir_table[i] =
2353ca9c54d2SDexuan Cui 			ethtool_rxfh_indir_default(i, apc->num_queues);
2354ca9c54d2SDexuan Cui }
2355ca9c54d2SDexuan Cui 
2356ca9c54d2SDexuan Cui int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
2357ca9c54d2SDexuan Cui 		    bool update_hash, bool update_tab)
2358ca9c54d2SDexuan Cui {
2359ca9c54d2SDexuan Cui 	u32 queue_idx;
23606cc74443SDexuan Cui 	int err;
2361ca9c54d2SDexuan Cui 	int i;
2362ca9c54d2SDexuan Cui 
2363ca9c54d2SDexuan Cui 	if (update_tab) {
2364ca9c54d2SDexuan Cui 		for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
2365ca9c54d2SDexuan Cui 			queue_idx = apc->indir_table[i];
2366ca9c54d2SDexuan Cui 			apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
2367ca9c54d2SDexuan Cui 		}
2368ca9c54d2SDexuan Cui 	}
2369ca9c54d2SDexuan Cui 
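	/* Push the steering configuration to the hardware, then fence the
	 * RQs so the update has fully taken effect before returning.
	 */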
23706cc74443SDexuan Cui 	err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
23716cc74443SDexuan Cui 	if (err)
23726cc74443SDexuan Cui 		return err;
23736cc74443SDexuan Cui 
23746cc74443SDexuan Cui 	mana_fence_rqs(apc);
23756cc74443SDexuan Cui 
23766cc74443SDexuan Cui 	return 0;
2377ca9c54d2SDexuan Cui }
2378ca9c54d2SDexuan Cui 
2379ac3899c6SShradha Gupta void mana_query_gf_stats(struct mana_port_context *apc)
2380ac3899c6SShradha Gupta {
2381ac3899c6SShradha Gupta 	struct mana_query_gf_stat_resp resp = {};
2382ac3899c6SShradha Gupta 	struct mana_query_gf_stat_req req = {};
2383ac3899c6SShradha Gupta 	struct net_device *ndev = apc->ndev;
2384ac3899c6SShradha Gupta 	int err;
2385ac3899c6SShradha Gupta 
2386ac3899c6SShradha Gupta 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_GF_STAT,
2387ac3899c6SShradha Gupta 			     sizeof(req), sizeof(resp));
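	/* Select which host counters (hc_*) the device should report. */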
2388ac3899c6SShradha Gupta 	req.req_stats = STATISTICS_FLAGS_HC_TX_BYTES |
2389ac3899c6SShradha Gupta 			STATISTICS_FLAGS_HC_TX_UCAST_PACKETS |
2390ac3899c6SShradha Gupta 			STATISTICS_FLAGS_HC_TX_UCAST_BYTES |
2391ac3899c6SShradha Gupta 			STATISTICS_FLAGS_HC_TX_MCAST_PACKETS |
2392ac3899c6SShradha Gupta 			STATISTICS_FLAGS_HC_TX_MCAST_BYTES |
2393ac3899c6SShradha Gupta 			STATISTICS_FLAGS_HC_TX_BCAST_PACKETS |
2394ac3899c6SShradha Gupta 			STATISTICS_FLAGS_HC_TX_BCAST_BYTES;
2395ac3899c6SShradha Gupta 
2396ac3899c6SShradha Gupta 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
2397ac3899c6SShradha Gupta 				sizeof(resp));
2398ac3899c6SShradha Gupta 	if (err) {
2399ac3899c6SShradha Gupta 		netdev_err(ndev, "Failed to query GF stats: %d\n", err);
2400ac3899c6SShradha Gupta 		return;
2401ac3899c6SShradha Gupta 	}
2402ac3899c6SShradha Gupta 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_GF_STAT,
2403ac3899c6SShradha Gupta 				   sizeof(resp));
2404ac3899c6SShradha Gupta 	if (err || resp.hdr.status) {
2405ac3899c6SShradha Gupta 		netdev_err(ndev, "Failed to query GF stats: %d, 0x%x\n", err,
2406ac3899c6SShradha Gupta 			   resp.hdr.status);
2407ac3899c6SShradha Gupta 		return;
2408ac3899c6SShradha Gupta 	}
2409ac3899c6SShradha Gupta 
2410ac3899c6SShradha Gupta 	apc->eth_stats.hc_tx_bytes = resp.hc_tx_bytes;
2411ac3899c6SShradha Gupta 	apc->eth_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts;
2412ac3899c6SShradha Gupta 	apc->eth_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes;
2413ac3899c6SShradha Gupta 	apc->eth_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts;
2414ac3899c6SShradha Gupta 	apc->eth_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes;
2415ac3899c6SShradha Gupta 	apc->eth_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts;
2416ac3899c6SShradha Gupta 	apc->eth_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes;
2417ac3899c6SShradha Gupta }
2418ac3899c6SShradha Gupta 
2419ca9c54d2SDexuan Cui static int mana_init_port(struct net_device *ndev)
2420ca9c54d2SDexuan Cui {
2421ca9c54d2SDexuan Cui 	struct mana_port_context *apc = netdev_priv(ndev);
2422ca9c54d2SDexuan Cui 	u32 max_txq, max_rxq, max_queues;
2423ca9c54d2SDexuan Cui 	int port_idx = apc->port_idx;
2424ca9c54d2SDexuan Cui 	u32 num_indirect_entries;
2425ca9c54d2SDexuan Cui 	int err;
2426ca9c54d2SDexuan Cui 
2427ca9c54d2SDexuan Cui 	err = mana_init_port_context(apc);
2428ca9c54d2SDexuan Cui 	if (err)
2429ca9c54d2SDexuan Cui 		return err;
2430ca9c54d2SDexuan Cui 
2431ca9c54d2SDexuan Cui 	err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
2432ca9c54d2SDexuan Cui 				   &num_indirect_entries);
2433ca9c54d2SDexuan Cui 	if (err) {
24346c7ea696SDexuan Cui 		netdev_err(ndev, "Failed to query info for vPort %d\n",
24356c7ea696SDexuan Cui 			   port_idx);
2436ca9c54d2SDexuan Cui 		goto reset_apc;
2437ca9c54d2SDexuan Cui 	}
2438ca9c54d2SDexuan Cui 
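	/* The usable queue count is limited by both the TX and RX maximums. */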
2439ca9c54d2SDexuan Cui 	max_queues = min_t(u32, max_txq, max_rxq);
2440ca9c54d2SDexuan Cui 	if (apc->max_queues > max_queues)
2441ca9c54d2SDexuan Cui 		apc->max_queues = max_queues;
2442ca9c54d2SDexuan Cui 
2443ca9c54d2SDexuan Cui 	if (apc->num_queues > apc->max_queues)
2444ca9c54d2SDexuan Cui 		apc->num_queues = apc->max_queues;
2445ca9c54d2SDexuan Cui 
2446f3956ebbSJakub Kicinski 	eth_hw_addr_set(ndev, apc->mac_addr);
2447ca9c54d2SDexuan Cui 
2448ca9c54d2SDexuan Cui 	return 0;
2449ca9c54d2SDexuan Cui 
2450ca9c54d2SDexuan Cui reset_apc:
2451ca9c54d2SDexuan Cui 	kfree(apc->rxqs);
2452ca9c54d2SDexuan Cui 	apc->rxqs = NULL;
2453ca9c54d2SDexuan Cui 	return err;
2454ca9c54d2SDexuan Cui }
2455ca9c54d2SDexuan Cui 
2456ca9c54d2SDexuan Cui int mana_alloc_queues(struct net_device *ndev)
2457ca9c54d2SDexuan Cui {
2458ca9c54d2SDexuan Cui 	struct mana_port_context *apc = netdev_priv(ndev);
24591566e7d6SDexuan Cui 	struct gdma_dev *gd = apc->ac->gdma_dev;
2460ca9c54d2SDexuan Cui 	int err;
2461ca9c54d2SDexuan Cui 
2462ca9c54d2SDexuan Cui 	err = mana_create_vport(apc, ndev);
2463ca9c54d2SDexuan Cui 	if (err)
24641e2d0824SHaiyang Zhang 		return err;
2465ca9c54d2SDexuan Cui 
2466ca9c54d2SDexuan Cui 	err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
2467ca9c54d2SDexuan Cui 	if (err)
2468ca9c54d2SDexuan Cui 		goto destroy_vport;
2469ca9c54d2SDexuan Cui 
2470ca9c54d2SDexuan Cui 	err = mana_add_rx_queues(apc, ndev);
2471ca9c54d2SDexuan Cui 	if (err)
2472ca9c54d2SDexuan Cui 		goto destroy_vport;
2473ca9c54d2SDexuan Cui 
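	/* RSS is only meaningful when more than one queue is in use. */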
2474ca9c54d2SDexuan Cui 	apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
2475ca9c54d2SDexuan Cui 
2476ca9c54d2SDexuan Cui 	err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
2477ca9c54d2SDexuan Cui 	if (err)
2478ca9c54d2SDexuan Cui 		goto destroy_vport;
2479ca9c54d2SDexuan Cui 
2480ca9c54d2SDexuan Cui 	mana_rss_table_init(apc);
2481ca9c54d2SDexuan Cui 
2482ca9c54d2SDexuan Cui 	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
2483ca9c54d2SDexuan Cui 	if (err)
2484ca9c54d2SDexuan Cui 		goto destroy_vport;
2485ca9c54d2SDexuan Cui 
24861566e7d6SDexuan Cui 	if (gd->gdma_context->is_pf) {
24871566e7d6SDexuan Cui 		err = mana_pf_register_filter(apc);
24881566e7d6SDexuan Cui 		if (err)
24891566e7d6SDexuan Cui 			goto destroy_vport;
24901566e7d6SDexuan Cui 	}
24911566e7d6SDexuan Cui 
2492ed5356b5SHaiyang Zhang 	mana_chn_setxdp(apc, mana_xdp_get(apc));
2493ed5356b5SHaiyang Zhang 
2494ca9c54d2SDexuan Cui 	return 0;
2495ca9c54d2SDexuan Cui 
2496ca9c54d2SDexuan Cui destroy_vport:
2497ca9c54d2SDexuan Cui 	mana_destroy_vport(apc);
2498ca9c54d2SDexuan Cui 	return err;
2499ca9c54d2SDexuan Cui }
2500ca9c54d2SDexuan Cui 
2501ca9c54d2SDexuan Cui int mana_attach(struct net_device *ndev)
2502ca9c54d2SDexuan Cui {
2503ca9c54d2SDexuan Cui 	struct mana_port_context *apc = netdev_priv(ndev);
2504ca9c54d2SDexuan Cui 	int err;
2505ca9c54d2SDexuan Cui 
2506ca9c54d2SDexuan Cui 	ASSERT_RTNL();
2507ca9c54d2SDexuan Cui 
2508ca9c54d2SDexuan Cui 	err = mana_init_port(ndev);
2509ca9c54d2SDexuan Cui 	if (err)
2510ca9c54d2SDexuan Cui 		return err;
2511ca9c54d2SDexuan Cui 
2512a137c069SHaiyang Zhang 	if (apc->port_st_save) {
2513ca9c54d2SDexuan Cui 		err = mana_alloc_queues(ndev);
2514ca9c54d2SDexuan Cui 		if (err) {
2515a137c069SHaiyang Zhang 			mana_cleanup_port_context(apc);
2516ca9c54d2SDexuan Cui 			return err;
2517ca9c54d2SDexuan Cui 		}
2518a137c069SHaiyang Zhang 	}
2519ca9c54d2SDexuan Cui 
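	/* Restore the up/down state saved by mana_detach(). */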
2520ca9c54d2SDexuan Cui 	apc->port_is_up = apc->port_st_save;
2521ca9c54d2SDexuan Cui 
2522ca9c54d2SDexuan Cui 	/* Ensure port state updated before txq state */
2523ca9c54d2SDexuan Cui 	smp_wmb();
2524ca9c54d2SDexuan Cui 
2525a137c069SHaiyang Zhang 	if (apc->port_is_up)
2526ca9c54d2SDexuan Cui 		netif_carrier_on(ndev);
2527a137c069SHaiyang Zhang 
2528a137c069SHaiyang Zhang 	netif_device_attach(ndev);
2529ca9c54d2SDexuan Cui 
2530ca9c54d2SDexuan Cui 	return 0;
2531ca9c54d2SDexuan Cui }
2532ca9c54d2SDexuan Cui 
2533ca9c54d2SDexuan Cui static int mana_dealloc_queues(struct net_device *ndev)
2534ca9c54d2SDexuan Cui {
2535ca9c54d2SDexuan Cui 	struct mana_port_context *apc = netdev_priv(ndev);
2536a7dfeda6SSouradeep Chakrabarti 	unsigned long timeout = jiffies + 120 * HZ;
25371566e7d6SDexuan Cui 	struct gdma_dev *gd = apc->ac->gdma_dev;
2538ca9c54d2SDexuan Cui 	struct mana_txq *txq;
2539a7dfeda6SSouradeep Chakrabarti 	struct sk_buff *skb;
2540ca9c54d2SDexuan Cui 	int i, err;
2541a7dfeda6SSouradeep Chakrabarti 	u32 tsleep;
2542ca9c54d2SDexuan Cui 
2543ca9c54d2SDexuan Cui 	if (apc->port_is_up)
2544ca9c54d2SDexuan Cui 		return -EINVAL;
2545ca9c54d2SDexuan Cui 
2546ed5356b5SHaiyang Zhang 	mana_chn_setxdp(apc, NULL);
2547ed5356b5SHaiyang Zhang 
25481566e7d6SDexuan Cui 	if (gd->gdma_context->is_pf)
25491566e7d6SDexuan Cui 		mana_pf_deregister_filter(apc);
25501566e7d6SDexuan Cui 
2551ca9c54d2SDexuan Cui 	/* No packet can be transmitted now since apc->port_is_up is false.
2552ca9c54d2SDexuan Cui 	 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
2553ca9c54d2SDexuan Cui 	 * a txq because it may not see apc->port_is_up being cleared to
2554ca9c54d2SDexuan Cui 	 * false in time, but that is harmless: mana_start_xmit() drops any
2555ca9c54d2SDexuan Cui 	 * new packets while apc->port_is_up is false.
2556ca9c54d2SDexuan Cui 	 *
2557a7dfeda6SSouradeep Chakrabarti 	 * Drain all the in-flight TX packets, with a total timeout of
2558a7dfeda6SSouradeep Chakrabarti 	 * 120 seconds across all the queues. The timeout breaks the
2559a7dfeda6SSouradeep Chakrabarti 	 * wait loop if the hardware stops responding; the value of 120
2560a7dfeda6SSouradeep Chakrabarti 	 * was chosen to accommodate the maximum number of queues.
2561a7dfeda6SSouradeep Chakrabarti 	 */
2563a7dfeda6SSouradeep Chakrabarti 
2564ca9c54d2SDexuan Cui 	for (i = 0; i < apc->num_queues; i++) {
2565ca9c54d2SDexuan Cui 		txq = &apc->tx_qp[i].txq;
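		/* Wait for this queue to drain, backing off exponentially
		 * from ~1 ms between checks.
		 */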
2566a7dfeda6SSouradeep Chakrabarti 		tsleep = 1000;
2567a7dfeda6SSouradeep Chakrabarti 		while (atomic_read(&txq->pending_sends) > 0 &&
2568a7dfeda6SSouradeep Chakrabarti 		       time_before(jiffies, timeout)) {
2569a7dfeda6SSouradeep Chakrabarti 			usleep_range(tsleep, tsleep + 1000);
2570a7dfeda6SSouradeep Chakrabarti 			tsleep <<= 1;
2571a7dfeda6SSouradeep Chakrabarti 		}
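		/* Still pending after the timeout: reset the function (FLR)
		 * so the skbs still outstanding can be reclaimed safely
		 * below.
		 */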
2572a7dfeda6SSouradeep Chakrabarti 		if (atomic_read(&txq->pending_sends)) {
2573a7dfeda6SSouradeep Chakrabarti 			err = pcie_flr(to_pci_dev(gd->gdma_context->dev));
2574a7dfeda6SSouradeep Chakrabarti 			if (err) {
2575a7dfeda6SSouradeep Chakrabarti 				netdev_err(ndev, "flr failed %d with %d pkts pending in txq %u\n",
2576a7dfeda6SSouradeep Chakrabarti 					   err, atomic_read(&txq->pending_sends),
2577a7dfeda6SSouradeep Chakrabarti 					   txq->gdma_txq_id);
2578a7dfeda6SSouradeep Chakrabarti 			}
2579a7dfeda6SSouradeep Chakrabarti 			break;
2580a7dfeda6SSouradeep Chakrabarti 		}
2581ca9c54d2SDexuan Cui 	}
2582ca9c54d2SDexuan Cui 
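	/* Free any skbs that the hardware never completed. */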
2583a7dfeda6SSouradeep Chakrabarti 	for (i = 0; i < apc->num_queues; i++) {
2584a7dfeda6SSouradeep Chakrabarti 		txq = &apc->tx_qp[i].txq;
2585a7dfeda6SSouradeep Chakrabarti 		while ((skb = skb_dequeue(&txq->pending_skbs))) {
2586a7dfeda6SSouradeep Chakrabarti 			mana_unmap_skb(skb, apc);
2587a7dfeda6SSouradeep Chakrabarti 			dev_kfree_skb_any(skb);
2588a7dfeda6SSouradeep Chakrabarti 		}
2589a7dfeda6SSouradeep Chakrabarti 		atomic_set(&txq->pending_sends, 0);
2590a7dfeda6SSouradeep Chakrabarti 	}
2591ca9c54d2SDexuan Cui 	/* At this point the queues can no longer be woken up, because
2592ca9c54d2SDexuan Cui 	 * mana_poll_tx_cq() can no longer be running.
2593ca9c54d2SDexuan Cui 	 */
2594ca9c54d2SDexuan Cui 
2595ca9c54d2SDexuan Cui 	apc->rss_state = TRI_STATE_FALSE;
2596ca9c54d2SDexuan Cui 	err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
2597ca9c54d2SDexuan Cui 	if (err) {
2598ca9c54d2SDexuan Cui 		netdev_err(ndev, "Failed to disable vPort: %d\n", err);
2599ca9c54d2SDexuan Cui 		return err;
2600ca9c54d2SDexuan Cui 	}
2601ca9c54d2SDexuan Cui 
2602ca9c54d2SDexuan Cui 	mana_destroy_vport(apc);
2603ca9c54d2SDexuan Cui 
2604ca9c54d2SDexuan Cui 	return 0;
2605ca9c54d2SDexuan Cui }
2606ca9c54d2SDexuan Cui 
2607ca9c54d2SDexuan Cui int mana_detach(struct net_device *ndev, bool from_close)
2608ca9c54d2SDexuan Cui {
2609ca9c54d2SDexuan Cui 	struct mana_port_context *apc = netdev_priv(ndev);
2610ca9c54d2SDexuan Cui 	int err;
2611ca9c54d2SDexuan Cui 
2612ca9c54d2SDexuan Cui 	ASSERT_RTNL();
2613ca9c54d2SDexuan Cui 
2614ca9c54d2SDexuan Cui 	apc->port_st_save = apc->port_is_up;
2615ca9c54d2SDexuan Cui 	apc->port_is_up = false;
2616ca9c54d2SDexuan Cui 
2617ca9c54d2SDexuan Cui 	/* Ensure port state updated before txq state */
2618ca9c54d2SDexuan Cui 	smp_wmb();
2619ca9c54d2SDexuan Cui 
2620ca9c54d2SDexuan Cui 	netif_tx_disable(ndev);
2621ca9c54d2SDexuan Cui 	netif_carrier_off(ndev);
2622ca9c54d2SDexuan Cui 
2623ca9c54d2SDexuan Cui 	if (apc->port_st_save) {
2624ca9c54d2SDexuan Cui 		err = mana_dealloc_queues(ndev);
2625ca9c54d2SDexuan Cui 		if (err)
2626ca9c54d2SDexuan Cui 			return err;
2627ca9c54d2SDexuan Cui 	}
2628ca9c54d2SDexuan Cui 
2629ca9c54d2SDexuan Cui 	if (!from_close) {
2630ca9c54d2SDexuan Cui 		netif_device_detach(ndev);
2631ca9c54d2SDexuan Cui 		mana_cleanup_port_context(apc);
2632ca9c54d2SDexuan Cui 	}
2633ca9c54d2SDexuan Cui 
2634ca9c54d2SDexuan Cui 	return 0;
2635ca9c54d2SDexuan Cui }
2636ca9c54d2SDexuan Cui 
2637ca9c54d2SDexuan Cui static int mana_probe_port(struct mana_context *ac, int port_idx,
2638ca9c54d2SDexuan Cui 			   struct net_device **ndev_storage)
2639ca9c54d2SDexuan Cui {
2640ca9c54d2SDexuan Cui 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
2641ca9c54d2SDexuan Cui 	struct mana_port_context *apc;
2642ca9c54d2SDexuan Cui 	struct net_device *ndev;
2643ca9c54d2SDexuan Cui 	int err;
2644ca9c54d2SDexuan Cui 
2645ca9c54d2SDexuan Cui 	ndev = alloc_etherdev_mq(sizeof(struct mana_port_context),
2646ca9c54d2SDexuan Cui 				 gc->max_num_queues);
2647ca9c54d2SDexuan Cui 	if (!ndev)
2648ca9c54d2SDexuan Cui 		return -ENOMEM;
2649ca9c54d2SDexuan Cui 
2650ca9c54d2SDexuan Cui 	*ndev_storage = ndev;
2651ca9c54d2SDexuan Cui 
2652ca9c54d2SDexuan Cui 	apc = netdev_priv(ndev);
2653ca9c54d2SDexuan Cui 	apc->ac = ac;
2654ca9c54d2SDexuan Cui 	apc->ndev = ndev;
2655ca9c54d2SDexuan Cui 	apc->max_queues = gc->max_num_queues;
26561e2d0824SHaiyang Zhang 	apc->num_queues = gc->max_num_queues;
2657ca9c54d2SDexuan Cui 	apc->port_handle = INVALID_MANA_HANDLE;
26581566e7d6SDexuan Cui 	apc->pf_filter_handle = INVALID_MANA_HANDLE;
2659ca9c54d2SDexuan Cui 	apc->port_idx = port_idx;
2660ca9c54d2SDexuan Cui 
2661b5c1c985SLong Li 	mutex_init(&apc->vport_mutex);
2662b5c1c985SLong Li 	apc->vport_use_count = 0;
2663b5c1c985SLong Li 
2664ca9c54d2SDexuan Cui 	ndev->netdev_ops = &mana_devops;
2665ca9c54d2SDexuan Cui 	ndev->ethtool_ops = &mana_ethtool_ops;
2666ca9c54d2SDexuan Cui 	ndev->mtu = ETH_DATA_LEN;
266780f6215bSHaiyang Zhang 	ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;
266880f6215bSHaiyang Zhang 	ndev->min_mtu = ETH_MIN_MTU;
2669ca9c54d2SDexuan Cui 	ndev->needed_headroom = MANA_HEADROOM;
2670d44089e5SLong Li 	ndev->dev_port = port_idx;
2671ca9c54d2SDexuan Cui 	SET_NETDEV_DEV(ndev, gc->dev);
2672ca9c54d2SDexuan Cui 
2673ca9c54d2SDexuan Cui 	netif_carrier_off(ndev);
2674ca9c54d2SDexuan Cui 
2675ca9c54d2SDexuan Cui 	netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
2676ca9c54d2SDexuan Cui 
2677ca9c54d2SDexuan Cui 	err = mana_init_port(ndev);
2678ca9c54d2SDexuan Cui 	if (err)
2679ca9c54d2SDexuan Cui 		goto free_net;
2680ca9c54d2SDexuan Cui 
2681ca9c54d2SDexuan Cui 	netdev_lockdep_set_classes(ndev);
2682ca9c54d2SDexuan Cui 
2683ca9c54d2SDexuan Cui 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2684ca9c54d2SDexuan Cui 	ndev->hw_features |= NETIF_F_RXCSUM;
2685ca9c54d2SDexuan Cui 	ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
2686ca9c54d2SDexuan Cui 	ndev->hw_features |= NETIF_F_RXHASH;
2687b803d1fdSHaiyang Zhang 	ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_TX |
2688b803d1fdSHaiyang Zhang 			 NETIF_F_HW_VLAN_CTAG_RX;
2689b803d1fdSHaiyang Zhang 	ndev->vlan_features = ndev->features;
269066c0e13aSMarek Majtyka 	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
269166c0e13aSMarek Majtyka 			     NETDEV_XDP_ACT_NDO_XMIT;
2692ca9c54d2SDexuan Cui 
2693ca9c54d2SDexuan Cui 	err = register_netdev(ndev);
2694ca9c54d2SDexuan Cui 	if (err) {
2695ca9c54d2SDexuan Cui 		netdev_err(ndev, "Unable to register netdev.\n");
2696ca9c54d2SDexuan Cui 		goto reset_apc;
2697ca9c54d2SDexuan Cui 	}
2698ca9c54d2SDexuan Cui 
2699ca9c54d2SDexuan Cui 	return 0;
2700ca9c54d2SDexuan Cui 
2701ca9c54d2SDexuan Cui reset_apc:
2702ca9c54d2SDexuan Cui 	kfree(apc->rxqs);
2703ca9c54d2SDexuan Cui 	apc->rxqs = NULL;
2704ca9c54d2SDexuan Cui free_net:
2705ca9c54d2SDexuan Cui 	*ndev_storage = NULL;
2706ca9c54d2SDexuan Cui 	netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
2707ca9c54d2SDexuan Cui 	free_netdev(ndev);
2708ca9c54d2SDexuan Cui 	return err;
2709ca9c54d2SDexuan Cui }
2710ca9c54d2SDexuan Cui 
2711a69839d4SLong Li static void adev_release(struct device *dev)
2712a69839d4SLong Li {
2713a69839d4SLong Li 	struct mana_adev *madev = container_of(dev, struct mana_adev, adev.dev);
2714a69839d4SLong Li 
2715a69839d4SLong Li 	kfree(madev);
2716a69839d4SLong Li }
2717a69839d4SLong Li 
2718a69839d4SLong Li static void remove_adev(struct gdma_dev *gd)
2719a69839d4SLong Li {
2720a69839d4SLong Li 	struct auxiliary_device *adev = gd->adev;
2721a69839d4SLong Li 	int id = adev->id;
2722a69839d4SLong Li 
2723a69839d4SLong Li 	auxiliary_device_delete(adev);
2724a69839d4SLong Li 	auxiliary_device_uninit(adev);
2725a69839d4SLong Li 
2726a69839d4SLong Li 	mana_adev_idx_free(id);
2727a69839d4SLong Li 	gd->adev = NULL;
2728a69839d4SLong Li }
2729a69839d4SLong Li 
2730a69839d4SLong Li static int add_adev(struct gdma_dev *gd)
2731a69839d4SLong Li {
2732a69839d4SLong Li 	struct auxiliary_device *adev;
2733a69839d4SLong Li 	struct mana_adev *madev;
2734a69839d4SLong Li 	int ret;
2735a69839d4SLong Li 
2736a69839d4SLong Li 	madev = kzalloc(sizeof(*madev), GFP_KERNEL);
2737a69839d4SLong Li 	if (!madev)
2738a69839d4SLong Li 		return -ENOMEM;
2739a69839d4SLong Li 
2740a69839d4SLong Li 	adev = &madev->adev;
2741a69839d4SLong Li 	ret = mana_adev_idx_alloc();
2742a69839d4SLong Li 	if (ret < 0)
2743a69839d4SLong Li 		goto idx_fail;
2744a69839d4SLong Li 	adev->id = ret;
2745a69839d4SLong Li 
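	/* Expose this device on the auxiliary bus so the MANA RDMA driver
	 * can bind to it.
	 */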
2746a69839d4SLong Li 	adev->name = "rdma";
2747a69839d4SLong Li 	adev->dev.parent = gd->gdma_context->dev;
2748a69839d4SLong Li 	adev->dev.release = adev_release;
2749a69839d4SLong Li 	madev->mdev = gd;
2750a69839d4SLong Li 
2751a69839d4SLong Li 	ret = auxiliary_device_init(adev);
2752a69839d4SLong Li 	if (ret)
2753a69839d4SLong Li 		goto init_fail;
2754a69839d4SLong Li 
2755a69839d4SLong Li 	ret = auxiliary_device_add(adev);
2756a69839d4SLong Li 	if (ret)
2757a69839d4SLong Li 		goto add_fail;
2758a69839d4SLong Li 
2759a69839d4SLong Li 	gd->adev = adev;
2760a69839d4SLong Li 	return 0;
2761a69839d4SLong Li 
2762a69839d4SLong Li add_fail:
2763a69839d4SLong Li 	auxiliary_device_uninit(adev);
2764a69839d4SLong Li 
2765a69839d4SLong Li init_fail:
2766a69839d4SLong Li 	mana_adev_idx_free(adev->id);
2767a69839d4SLong Li 
2768a69839d4SLong Li idx_fail:
2769a69839d4SLong Li 	kfree(madev);
2770a69839d4SLong Li 
2771a69839d4SLong Li 	return ret;
2772a69839d4SLong Li }
2773a69839d4SLong Li 
2774635096a8SDexuan Cui int mana_probe(struct gdma_dev *gd, bool resuming)
2775ca9c54d2SDexuan Cui {
2776ca9c54d2SDexuan Cui 	struct gdma_context *gc = gd->gdma_context;
2777635096a8SDexuan Cui 	struct mana_context *ac = gd->driver_data;
2778ca9c54d2SDexuan Cui 	struct device *dev = gc->dev;
2779635096a8SDexuan Cui 	u16 num_ports = 0;
2780ca9c54d2SDexuan Cui 	int err;
2781ca9c54d2SDexuan Cui 	int i;
2782ca9c54d2SDexuan Cui 
2783ca9c54d2SDexuan Cui 	dev_info(dev,
2784ca9c54d2SDexuan Cui 		 "Microsoft Azure Network Adapter protocol version: %d.%d.%d\n",
2785ca9c54d2SDexuan Cui 		 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
2786ca9c54d2SDexuan Cui 
2787ca9c54d2SDexuan Cui 	err = mana_gd_register_device(gd);
2788ca9c54d2SDexuan Cui 	if (err)
2789ca9c54d2SDexuan Cui 		return err;
2790ca9c54d2SDexuan Cui 
2791635096a8SDexuan Cui 	if (!resuming) {
2792ca9c54d2SDexuan Cui 		ac = kzalloc(sizeof(*ac), GFP_KERNEL);
2793ca9c54d2SDexuan Cui 		if (!ac)
2794ca9c54d2SDexuan Cui 			return -ENOMEM;
2795ca9c54d2SDexuan Cui 
2796ca9c54d2SDexuan Cui 		ac->gdma_dev = gd;
2797ca9c54d2SDexuan Cui 		gd->driver_data = ac;
2798635096a8SDexuan Cui 	}
2799ca9c54d2SDexuan Cui 
28001e2d0824SHaiyang Zhang 	err = mana_create_eq(ac);
28011e2d0824SHaiyang Zhang 	if (err)
28021e2d0824SHaiyang Zhang 		goto out;
28031e2d0824SHaiyang Zhang 
2804ca9c54d2SDexuan Cui 	err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
2805635096a8SDexuan Cui 				    MANA_MICRO_VERSION, &num_ports);
2806ca9c54d2SDexuan Cui 	if (err)
2807ca9c54d2SDexuan Cui 		goto out;
2808ca9c54d2SDexuan Cui 
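	/* On resume, the port count reported by the device must match the
	 * count seen at the original probe.
	 */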
2809635096a8SDexuan Cui 	if (!resuming) {
2810635096a8SDexuan Cui 		ac->num_ports = num_ports;
2811635096a8SDexuan Cui 	} else {
2812635096a8SDexuan Cui 		if (ac->num_ports != num_ports) {
2813635096a8SDexuan Cui 			dev_err(dev, "The number of vPorts changed: %d->%d\n",
2814635096a8SDexuan Cui 				ac->num_ports, num_ports);
2815635096a8SDexuan Cui 			err = -EPROTO;
2816635096a8SDexuan Cui 			goto out;
2817635096a8SDexuan Cui 		}
2818635096a8SDexuan Cui 	}
2819635096a8SDexuan Cui 
2820635096a8SDexuan Cui 	if (ac->num_ports == 0)
2821635096a8SDexuan Cui 		dev_err(dev, "Failed to detect any vPort\n");
2822635096a8SDexuan Cui 
2823ca9c54d2SDexuan Cui 	if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
2824ca9c54d2SDexuan Cui 		ac->num_ports = MAX_PORTS_IN_MANA_DEV;
2825ca9c54d2SDexuan Cui 
2826635096a8SDexuan Cui 	if (!resuming) {
2827ca9c54d2SDexuan Cui 		for (i = 0; i < ac->num_ports; i++) {
2828ca9c54d2SDexuan Cui 			err = mana_probe_port(ac, i, &ac->ports[i]);
2829ca9c54d2SDexuan Cui 			if (err)
2830ca9c54d2SDexuan Cui 				break;
2831ca9c54d2SDexuan Cui 		}
2832635096a8SDexuan Cui 	} else {
2833635096a8SDexuan Cui 		for (i = 0; i < ac->num_ports; i++) {
2834635096a8SDexuan Cui 			rtnl_lock();
2835635096a8SDexuan Cui 			err = mana_attach(ac->ports[i]);
2836635096a8SDexuan Cui 			rtnl_unlock();
2837635096a8SDexuan Cui 			if (err)
2838635096a8SDexuan Cui 				break;
2839635096a8SDexuan Cui 		}
2840635096a8SDexuan Cui 	}
2841a69839d4SLong Li 
2842a69839d4SLong Li 	err = add_adev(gd);
2843ca9c54d2SDexuan Cui out:
2844ca9c54d2SDexuan Cui 	if (err)
2845635096a8SDexuan Cui 		mana_remove(gd, false);
2846ca9c54d2SDexuan Cui 
2847ca9c54d2SDexuan Cui 	return err;
2848ca9c54d2SDexuan Cui }
2849ca9c54d2SDexuan Cui 
2850635096a8SDexuan Cui void mana_remove(struct gdma_dev *gd, bool suspending)
2851ca9c54d2SDexuan Cui {
2852ca9c54d2SDexuan Cui 	struct gdma_context *gc = gd->gdma_context;
2853ca9c54d2SDexuan Cui 	struct mana_context *ac = gd->driver_data;
2854ca9c54d2SDexuan Cui 	struct device *dev = gc->dev;
2855ca9c54d2SDexuan Cui 	struct net_device *ndev;
2856635096a8SDexuan Cui 	int err;
2857ca9c54d2SDexuan Cui 	int i;
2858ca9c54d2SDexuan Cui 
2859a69839d4SLong Li 	/* adev currently doesn't support suspending, always remove it */
2860a69839d4SLong Li 	if (gd->adev)
2861a69839d4SLong Li 		remove_adev(gd);
2862a69839d4SLong Li 
2863ca9c54d2SDexuan Cui 	for (i = 0; i < ac->num_ports; i++) {
2864ca9c54d2SDexuan Cui 		ndev = ac->ports[i];
2865ca9c54d2SDexuan Cui 		if (!ndev) {
2866ca9c54d2SDexuan Cui 			if (i == 0)
2867ca9c54d2SDexuan Cui 				dev_err(dev, "No net device to remove\n");
2868ca9c54d2SDexuan Cui 			goto out;
2869ca9c54d2SDexuan Cui 		}
2870ca9c54d2SDexuan Cui 
2871ca9c54d2SDexuan Cui 		/* All cleanup actions should stay after rtnl_lock(), otherwise
2872ca9c54d2SDexuan Cui 		 * other functions may access partially cleaned up data.
2873ca9c54d2SDexuan Cui 		 */
2874ca9c54d2SDexuan Cui 		rtnl_lock();
2875ca9c54d2SDexuan Cui 
2876635096a8SDexuan Cui 		err = mana_detach(ndev, false);
2877635096a8SDexuan Cui 		if (err)
2878635096a8SDexuan Cui 			netdev_err(ndev, "Failed to detach vPort %d: %d\n",
2879635096a8SDexuan Cui 				   i, err);
2880635096a8SDexuan Cui 
2881635096a8SDexuan Cui 		if (suspending) {
2882635096a8SDexuan Cui 			/* No need to unregister the ndev. */
2883635096a8SDexuan Cui 			rtnl_unlock();
2884635096a8SDexuan Cui 			continue;
2885635096a8SDexuan Cui 		}
2886ca9c54d2SDexuan Cui 
2887ca9c54d2SDexuan Cui 		unregister_netdevice(ndev);
2888ca9c54d2SDexuan Cui 
2889ca9c54d2SDexuan Cui 		rtnl_unlock();
2890ca9c54d2SDexuan Cui 
2891ca9c54d2SDexuan Cui 		free_netdev(ndev);
2892ca9c54d2SDexuan Cui 	}
28931e2d0824SHaiyang Zhang 
28941e2d0824SHaiyang Zhang 	mana_destroy_eq(ac);
2895ca9c54d2SDexuan Cui out:
2896ca9c54d2SDexuan Cui 	mana_gd_deregister_device(gd);
2897635096a8SDexuan Cui 
2898635096a8SDexuan Cui 	if (suspending)
2899635096a8SDexuan Cui 		return;
2900635096a8SDexuan Cui 
2901ca9c54d2SDexuan Cui 	gd->driver_data = NULL;
2902ca9c54d2SDexuan Cui 	gd->gdma_context = NULL;
2903ca9c54d2SDexuan Cui 	kfree(ac);
2904ca9c54d2SDexuan Cui }