1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2021, Microsoft Corporation. */
3 
4 #include <uapi/linux/bpf.h>
5 
6 #include <linux/inetdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/ethtool.h>
9 #include <linux/mm.h>
10 
11 #include <net/checksum.h>
12 #include <net/ip6_checksum.h>
13 
14 #include "mana.h"
15 
16 /* Microsoft Azure Network Adapter (MANA) functions */
17 
18 static int mana_open(struct net_device *ndev)
19 {
20 	struct mana_port_context *apc = netdev_priv(ndev);
21 	int err;
22 
23 	err = mana_alloc_queues(ndev);
24 	if (err)
25 		return err;
26 
27 	apc->port_is_up = true;
28 
	/* Ensure port state is updated before the txq state; this pairs with
	 * the smp_rmb() in mana_poll_tx_cq().
	 */
30 	smp_wmb();
31 
32 	netif_carrier_on(ndev);
33 	netif_tx_wake_all_queues(ndev);
34 
35 	return 0;
36 }
37 
38 static int mana_close(struct net_device *ndev)
39 {
40 	struct mana_port_context *apc = netdev_priv(ndev);
41 
42 	if (!apc->port_is_up)
43 		return 0;
44 
45 	return mana_detach(ndev, true);
46 }
47 
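/* Return true only if the SQ still has room for a maximum-size WQE, so the
 * TX queue is only kept running when the largest possible packet fits.
 */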
48 static bool mana_can_tx(struct gdma_queue *wq)
49 {
50 	return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
51 }
52 
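/* Return the L4 protocol (IPPROTO_TCP or IPPROTO_UDP) that hardware
 * checksum offload should target, or 0 if the packet is neither.
 */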
53 static unsigned int mana_checksum_info(struct sk_buff *skb)
54 {
55 	if (skb->protocol == htons(ETH_P_IP)) {
56 		struct iphdr *ip = ip_hdr(skb);
57 
58 		if (ip->protocol == IPPROTO_TCP)
59 			return IPPROTO_TCP;
60 
61 		if (ip->protocol == IPPROTO_UDP)
62 			return IPPROTO_UDP;
63 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
64 		struct ipv6hdr *ip6 = ipv6_hdr(skb);
65 
66 		if (ip6->nexthdr == IPPROTO_TCP)
67 			return IPPROTO_TCP;
68 
69 		if (ip6->nexthdr == IPPROTO_UDP)
70 			return IPPROTO_UDP;
71 	}
72 
73 	/* No csum offloading */
74 	return 0;
75 }
76 
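/* DMA-map the skb for transmit: SGE 0 covers the linear data, SGEs
 * 1..nr_frags cover the page fragments. The handles and sizes are saved in
 * the mana_skb_head at skb->head so mana_unmap_skb() can undo the mappings
 * once the completion arrives.
 */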
77 static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
78 			struct mana_tx_package *tp)
79 {
80 	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
81 	struct gdma_dev *gd = apc->ac->gdma_dev;
82 	struct gdma_context *gc;
83 	struct device *dev;
84 	skb_frag_t *frag;
85 	dma_addr_t da;
86 	int i;
87 
88 	gc = gd->gdma_context;
89 	dev = gc->dev;
90 	da = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
91 
92 	if (dma_mapping_error(dev, da))
93 		return -ENOMEM;
94 
95 	ash->dma_handle[0] = da;
96 	ash->size[0] = skb_headlen(skb);
97 
98 	tp->wqe_req.sgl[0].address = ash->dma_handle[0];
99 	tp->wqe_req.sgl[0].mem_key = gd->gpa_mkey;
100 	tp->wqe_req.sgl[0].size = ash->size[0];
101 
102 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
103 		frag = &skb_shinfo(skb)->frags[i];
104 		da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
105 				      DMA_TO_DEVICE);
106 
107 		if (dma_mapping_error(dev, da))
108 			goto frag_err;
109 
110 		ash->dma_handle[i + 1] = da;
111 		ash->size[i + 1] = skb_frag_size(frag);
112 
113 		tp->wqe_req.sgl[i + 1].address = ash->dma_handle[i + 1];
114 		tp->wqe_req.sgl[i + 1].mem_key = gd->gpa_mkey;
115 		tp->wqe_req.sgl[i + 1].size = ash->size[i + 1];
116 	}
117 
118 	return 0;
119 
120 frag_err:
121 	for (i = i - 1; i >= 0; i--)
122 		dma_unmap_page(dev, ash->dma_handle[i + 1], ash->size[i + 1],
123 			       DMA_TO_DEVICE);
124 
125 	dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);
126 
127 	return -ENOMEM;
128 }
129 
130 int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
131 {
132 	enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
133 	struct mana_port_context *apc = netdev_priv(ndev);
134 	u16 txq_idx = skb_get_queue_mapping(skb);
135 	struct gdma_dev *gd = apc->ac->gdma_dev;
136 	bool ipv4 = false, ipv6 = false;
137 	struct mana_tx_package pkg = {};
138 	struct netdev_queue *net_txq;
139 	struct mana_stats *tx_stats;
140 	struct gdma_queue *gdma_sq;
141 	unsigned int csum_type;
142 	struct mana_txq *txq;
143 	struct mana_cq *cq;
144 	int err, len;
145 
146 	if (unlikely(!apc->port_is_up))
147 		goto tx_drop;
148 
149 	if (skb_cow_head(skb, MANA_HEADROOM))
150 		goto tx_drop_count;
151 
152 	txq = &apc->tx_qp[txq_idx].txq;
153 	gdma_sq = txq->gdma_sq;
154 	cq = &apc->tx_qp[txq_idx].tx_cq;
155 
156 	pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
157 	pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
158 
159 	if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
160 		pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
161 		pkt_fmt = MANA_LONG_PKT_FMT;
162 	} else {
163 		pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
164 	}
165 
166 	pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
167 
168 	if (pkt_fmt == MANA_SHORT_PKT_FMT)
169 		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
170 	else
171 		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
172 
173 	pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
174 	pkg.wqe_req.flags = 0;
175 	pkg.wqe_req.client_data_unit = 0;
176 
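	/* One SGE for the linear part plus one per fragment. The limit of 30
	 * SGEs per WQE checked below is assumed to be the hardware maximum.
	 */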
177 	pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
178 	WARN_ON_ONCE(pkg.wqe_req.num_sge > 30);
179 
180 	if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
181 		pkg.wqe_req.sgl = pkg.sgl_array;
182 	} else {
183 		pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
184 					    sizeof(struct gdma_sge),
185 					    GFP_ATOMIC);
186 		if (!pkg.sgl_ptr)
187 			goto tx_drop_count;
188 
189 		pkg.wqe_req.sgl = pkg.sgl_ptr;
190 	}
191 
192 	if (skb->protocol == htons(ETH_P_IP))
193 		ipv4 = true;
194 	else if (skb->protocol == htons(ETH_P_IPV6))
195 		ipv6 = true;
196 
197 	if (skb_is_gso(skb)) {
198 		pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
199 		pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
200 
201 		pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
202 		pkg.tx_oob.s_oob.comp_tcp_csum = 1;
203 		pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
204 
205 		pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size;
206 		pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
207 		if (ipv4) {
208 			ip_hdr(skb)->tot_len = 0;
209 			ip_hdr(skb)->check = 0;
210 			tcp_hdr(skb)->check =
211 				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
212 						   ip_hdr(skb)->daddr, 0,
213 						   IPPROTO_TCP, 0);
214 		} else {
215 			ipv6_hdr(skb)->payload_len = 0;
216 			tcp_hdr(skb)->check =
217 				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
218 						 &ipv6_hdr(skb)->daddr, 0,
219 						 IPPROTO_TCP, 0);
220 		}
221 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
222 		csum_type = mana_checksum_info(skb);
223 
224 		if (csum_type == IPPROTO_TCP) {
225 			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
226 			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
227 
228 			pkg.tx_oob.s_oob.comp_tcp_csum = 1;
229 			pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
230 
231 		} else if (csum_type == IPPROTO_UDP) {
232 			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
233 			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
234 
235 			pkg.tx_oob.s_oob.comp_udp_csum = 1;
236 		} else {
237 			/* Can't do offload of this type of checksum */
238 			if (skb_checksum_help(skb))
239 				goto free_sgl_ptr;
240 		}
241 	}
242 
243 	if (mana_map_skb(skb, apc, &pkg))
244 		goto free_sgl_ptr;
245 
246 	skb_queue_tail(&txq->pending_skbs, skb);
247 
248 	len = skb->len;
249 	net_txq = netdev_get_tx_queue(ndev, txq_idx);
250 
251 	err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
252 					(struct gdma_posted_wqe_info *)skb->cb);
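	/* Stop the queue if the SQ can no longer hold a maximum-size WQE;
	 * mana_poll_tx_cq() (or the tx_busy path below) wakes it up again
	 * once enough space is available.
	 */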
253 	if (!mana_can_tx(gdma_sq)) {
254 		netif_tx_stop_queue(net_txq);
255 		apc->eth_stats.stop_queue++;
256 	}
257 
258 	if (err) {
259 		(void)skb_dequeue_tail(&txq->pending_skbs);
260 		netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
261 		err = NETDEV_TX_BUSY;
262 		goto tx_busy;
263 	}
264 
265 	err = NETDEV_TX_OK;
266 	atomic_inc(&txq->pending_sends);
267 
268 	mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);
269 
270 	/* skb may be freed after mana_gd_post_work_request. Do not use it. */
271 	skb = NULL;
272 
273 	tx_stats = &txq->stats;
274 	u64_stats_update_begin(&tx_stats->syncp);
275 	tx_stats->packets++;
276 	tx_stats->bytes += len;
277 	u64_stats_update_end(&tx_stats->syncp);
278 
279 tx_busy:
280 	if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
281 		netif_tx_wake_queue(net_txq);
282 		apc->eth_stats.wake_queue++;
283 	}
284 
285 	kfree(pkg.sgl_ptr);
286 	return err;
287 
288 free_sgl_ptr:
289 	kfree(pkg.sgl_ptr);
290 tx_drop_count:
291 	ndev->stats.tx_dropped++;
292 tx_drop:
293 	dev_kfree_skb_any(skb);
294 	return NETDEV_TX_OK;
295 }
296 
297 static void mana_get_stats64(struct net_device *ndev,
298 			     struct rtnl_link_stats64 *st)
299 {
300 	struct mana_port_context *apc = netdev_priv(ndev);
301 	unsigned int num_queues = apc->num_queues;
302 	struct mana_stats *stats;
303 	unsigned int start;
304 	u64 packets, bytes;
305 	int q;
306 
307 	if (!apc->port_is_up)
308 		return;
309 
310 	netdev_stats_to_stats64(st, &ndev->stats);
311 
312 	for (q = 0; q < num_queues; q++) {
313 		stats = &apc->rxqs[q]->stats;
314 
315 		do {
316 			start = u64_stats_fetch_begin_irq(&stats->syncp);
317 			packets = stats->packets;
318 			bytes = stats->bytes;
319 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
320 
321 		st->rx_packets += packets;
322 		st->rx_bytes += bytes;
323 	}
324 
325 	for (q = 0; q < num_queues; q++) {
326 		stats = &apc->tx_qp[q].txq.stats;
327 
328 		do {
329 			start = u64_stats_fetch_begin_irq(&stats->syncp);
330 			packets = stats->packets;
331 			bytes = stats->bytes;
332 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
333 
334 		st->tx_packets += packets;
335 		st->tx_bytes += bytes;
336 	}
337 }
338 
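/* Map the skb hash through the RSS indirection table to pick a TX queue,
 * and cache the result in the socket so later packets on the same flow
 * keep using the same queue.
 */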
339 static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb,
340 			     int old_q)
341 {
342 	struct mana_port_context *apc = netdev_priv(ndev);
343 	u32 hash = skb_get_hash(skb);
344 	struct sock *sk = skb->sk;
345 	int txq;
346 
347 	txq = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK];
348 
349 	if (txq != old_q && sk && sk_fullsock(sk) &&
350 	    rcu_access_pointer(sk->sk_dst_cache))
351 		sk_tx_queue_set(sk, txq);
352 
353 	return txq;
354 }
355 
356 static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
357 			     struct net_device *sb_dev)
358 {
359 	int txq;
360 
361 	if (ndev->real_num_tx_queues == 1)
362 		return 0;
363 
364 	txq = sk_tx_queue_get(skb->sk);
365 
366 	if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) {
367 		if (skb_rx_queue_recorded(skb))
368 			txq = skb_get_rx_queue(skb);
369 		else
370 			txq = mana_get_tx_queue(ndev, skb, txq);
371 	}
372 
373 	return txq;
374 }
375 
376 static const struct net_device_ops mana_devops = {
377 	.ndo_open		= mana_open,
378 	.ndo_stop		= mana_close,
379 	.ndo_select_queue	= mana_select_queue,
380 	.ndo_start_xmit		= mana_start_xmit,
381 	.ndo_validate_addr	= eth_validate_addr,
382 	.ndo_get_stats64	= mana_get_stats64,
383 	.ndo_bpf		= mana_bpf,
384 };
385 
386 static void mana_cleanup_port_context(struct mana_port_context *apc)
387 {
388 	kfree(apc->rxqs);
389 	apc->rxqs = NULL;
390 }
391 
392 static int mana_init_port_context(struct mana_port_context *apc)
393 {
394 	apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *),
395 			    GFP_KERNEL);
396 
397 	return !apc->rxqs ? -ENOMEM : 0;
398 }
399 
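/* Send a MANA management command over the GDMA channel and verify that the
 * response carries the same dev_id and activity_id as the request.
 */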
400 static int mana_send_request(struct mana_context *ac, void *in_buf,
401 			     u32 in_len, void *out_buf, u32 out_len)
402 {
403 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
404 	struct gdma_resp_hdr *resp = out_buf;
405 	struct gdma_req_hdr *req = in_buf;
406 	struct device *dev = gc->dev;
407 	static atomic_t activity_id;
408 	int err;
409 
410 	req->dev_id = gc->mana.dev_id;
411 	req->activity_id = atomic_inc_return(&activity_id);
412 
413 	err = mana_gd_send_request(gc, in_len, in_buf, out_len,
414 				   out_buf);
415 	if (err || resp->status) {
416 		dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
417 			err, resp->status);
418 		return err ? err : -EPROTO;
419 	}
420 
421 	if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
422 	    req->activity_id != resp->activity_id) {
423 		dev_err(dev, "Unexpected mana message response: %x,%x,%x,%x\n",
424 			req->dev_id.as_uint32, resp->dev_id.as_uint32,
425 			req->activity_id, resp->activity_id);
426 		return -EPROTO;
427 	}
428 
429 	return 0;
430 }
431 
432 static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
433 				const enum mana_command_code expected_code,
434 				const u32 min_size)
435 {
436 	if (resp_hdr->response.msg_type != expected_code)
437 		return -EPROTO;
438 
439 	if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
440 		return -EPROTO;
441 
442 	if (resp_hdr->response.msg_size < min_size)
443 		return -EPROTO;
444 
445 	return 0;
446 }
447 
448 static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
449 				 u32 proto_minor_ver, u32 proto_micro_ver,
450 				 u16 *max_num_vports)
451 {
452 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
453 	struct mana_query_device_cfg_resp resp = {};
454 	struct mana_query_device_cfg_req req = {};
455 	struct device *dev = gc->dev;
456 	int err = 0;
457 
458 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
459 			     sizeof(req), sizeof(resp));
460 	req.proto_major_ver = proto_major_ver;
461 	req.proto_minor_ver = proto_minor_ver;
462 	req.proto_micro_ver = proto_micro_ver;
463 
464 	err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
465 	if (err) {
		dev_err(dev, "Failed to query config: %d\n", err);
467 		return err;
468 	}
469 
470 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
471 				   sizeof(resp));
472 	if (err || resp.hdr.status) {
473 		dev_err(dev, "Invalid query result: %d, 0x%x\n", err,
474 			resp.hdr.status);
475 		if (!err)
476 			err = -EPROTO;
477 		return err;
478 	}
479 
480 	*max_num_vports = resp.max_num_vports;
481 
482 	return 0;
483 }
484 
485 static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
486 				u32 *max_sq, u32 *max_rq, u32 *num_indir_entry)
487 {
488 	struct mana_query_vport_cfg_resp resp = {};
489 	struct mana_query_vport_cfg_req req = {};
490 	int err;
491 
492 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
493 			     sizeof(req), sizeof(resp));
494 
495 	req.vport_index = vport_index;
496 
497 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
498 				sizeof(resp));
499 	if (err)
500 		return err;
501 
502 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
503 				   sizeof(resp));
504 	if (err)
505 		return err;
506 
507 	if (resp.hdr.status)
508 		return -EPROTO;
509 
510 	*max_sq = resp.max_num_sq;
511 	*max_rq = resp.max_num_rq;
512 	*num_indir_entry = resp.num_indirection_ent;
513 
514 	apc->port_handle = resp.vport;
515 	ether_addr_copy(apc->mac_addr, resp.mac_addr);
516 
517 	return 0;
518 }
519 
520 static int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
521 			  u32 doorbell_pg_id)
522 {
523 	struct mana_config_vport_resp resp = {};
524 	struct mana_config_vport_req req = {};
525 	int err;
526 
527 	mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
528 			     sizeof(req), sizeof(resp));
529 	req.vport = apc->port_handle;
530 	req.pdid = protection_dom_id;
531 	req.doorbell_pageid = doorbell_pg_id;
532 
533 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
534 				sizeof(resp));
535 	if (err) {
536 		netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err);
537 		goto out;
538 	}
539 
540 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
541 				   sizeof(resp));
542 	if (err || resp.hdr.status) {
543 		netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
544 			   err, resp.hdr.status);
545 		if (!err)
546 			err = -EPROTO;
547 
548 		goto out;
549 	}
550 
551 	apc->tx_shortform_allowed = resp.short_form_allowed;
552 	apc->tx_vp_offset = resp.tx_vport_offset;
553 out:
554 	return err;
555 }
556 
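/* Configure RX steering for the vPort. The RSS hash key and the
 * indirection table (appended right after the request structure) are only
 * sent when the corresponding update flags are set.
 */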
557 static int mana_cfg_vport_steering(struct mana_port_context *apc,
558 				   enum TRI_STATE rx,
559 				   bool update_default_rxobj, bool update_key,
560 				   bool update_tab)
561 {
562 	u16 num_entries = MANA_INDIRECT_TABLE_SIZE;
563 	struct mana_cfg_rx_steer_req *req = NULL;
564 	struct mana_cfg_rx_steer_resp resp = {};
565 	struct net_device *ndev = apc->ndev;
566 	mana_handle_t *req_indir_tab;
567 	u32 req_buf_size;
568 	int err;
569 
570 	req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
571 	req = kzalloc(req_buf_size, GFP_KERNEL);
572 	if (!req)
573 		return -ENOMEM;
574 
575 	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
576 			     sizeof(resp));
577 
578 	req->vport = apc->port_handle;
579 	req->num_indir_entries = num_entries;
580 	req->indir_tab_offset = sizeof(*req);
581 	req->rx_enable = rx;
582 	req->rss_enable = apc->rss_state;
583 	req->update_default_rxobj = update_default_rxobj;
584 	req->update_hashkey = update_key;
585 	req->update_indir_tab = update_tab;
586 	req->default_rxobj = apc->default_rxobj;
587 
588 	if (update_key)
589 		memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
590 
591 	if (update_tab) {
592 		req_indir_tab = (mana_handle_t *)(req + 1);
593 		memcpy(req_indir_tab, apc->rxobj_table,
594 		       req->num_indir_entries * sizeof(mana_handle_t));
595 	}
596 
597 	err = mana_send_request(apc->ac, req, req_buf_size, &resp,
598 				sizeof(resp));
599 	if (err) {
600 		netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
601 		goto out;
602 	}
603 
604 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
605 				   sizeof(resp));
606 	if (err) {
607 		netdev_err(ndev, "vPort RX configuration failed: %d\n", err);
608 		goto out;
609 	}
610 
611 	if (resp.hdr.status) {
612 		netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
613 			   resp.hdr.status);
614 		err = -EPROTO;
615 	}
616 out:
617 	kfree(req);
618 	return err;
619 }
620 
621 static int mana_create_wq_obj(struct mana_port_context *apc,
622 			      mana_handle_t vport,
623 			      u32 wq_type, struct mana_obj_spec *wq_spec,
624 			      struct mana_obj_spec *cq_spec,
625 			      mana_handle_t *wq_obj)
626 {
627 	struct mana_create_wqobj_resp resp = {};
628 	struct mana_create_wqobj_req req = {};
629 	struct net_device *ndev = apc->ndev;
630 	int err;
631 
632 	mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
633 			     sizeof(req), sizeof(resp));
634 	req.vport = vport;
635 	req.wq_type = wq_type;
636 	req.wq_gdma_region = wq_spec->gdma_region;
637 	req.cq_gdma_region = cq_spec->gdma_region;
638 	req.wq_size = wq_spec->queue_size;
639 	req.cq_size = cq_spec->queue_size;
640 	req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
641 	req.cq_parent_qid = cq_spec->attached_eq;
642 
643 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
644 				sizeof(resp));
645 	if (err) {
646 		netdev_err(ndev, "Failed to create WQ object: %d\n", err);
647 		goto out;
648 	}
649 
650 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
651 				   sizeof(resp));
652 	if (err || resp.hdr.status) {
653 		netdev_err(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
654 			   resp.hdr.status);
655 		if (!err)
656 			err = -EPROTO;
657 		goto out;
658 	}
659 
660 	if (resp.wq_obj == INVALID_MANA_HANDLE) {
661 		netdev_err(ndev, "Got an invalid WQ object handle\n");
662 		err = -EPROTO;
663 		goto out;
664 	}
665 
666 	*wq_obj = resp.wq_obj;
667 	wq_spec->queue_index = resp.wq_id;
668 	cq_spec->queue_index = resp.cq_id;
669 
670 	return 0;
671 out:
672 	return err;
673 }
674 
675 static void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
676 				mana_handle_t wq_obj)
677 {
678 	struct mana_destroy_wqobj_resp resp = {};
679 	struct mana_destroy_wqobj_req req = {};
680 	struct net_device *ndev = apc->ndev;
681 	int err;
682 
683 	mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
684 			     sizeof(req), sizeof(resp));
685 	req.wq_type = wq_type;
686 	req.wq_obj_handle = wq_obj;
687 
688 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
689 				sizeof(resp));
690 	if (err) {
691 		netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
692 		return;
693 	}
694 
695 	err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
696 				   sizeof(resp));
697 	if (err || resp.hdr.status)
698 		netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
699 			   resp.hdr.status);
700 }
701 
702 static void mana_destroy_eq(struct mana_context *ac)
703 {
704 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
705 	struct gdma_queue *eq;
706 	int i;
707 
708 	if (!ac->eqs)
709 		return;
710 
711 	for (i = 0; i < gc->max_num_queues; i++) {
712 		eq = ac->eqs[i].eq;
713 		if (!eq)
714 			continue;
715 
716 		mana_gd_destroy_queue(gc, eq);
717 	}
718 
719 	kfree(ac->eqs);
720 	ac->eqs = NULL;
721 }
722 
723 static int mana_create_eq(struct mana_context *ac)
724 {
725 	struct gdma_dev *gd = ac->gdma_dev;
726 	struct gdma_context *gc = gd->gdma_context;
727 	struct gdma_queue_spec spec = {};
728 	int err;
729 	int i;
730 
731 	ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq),
732 			  GFP_KERNEL);
733 	if (!ac->eqs)
734 		return -ENOMEM;
735 
736 	spec.type = GDMA_EQ;
737 	spec.monitor_avl_buf = false;
738 	spec.queue_size = EQ_SIZE;
739 	spec.eq.callback = NULL;
740 	spec.eq.context = ac->eqs;
741 	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
742 
743 	for (i = 0; i < gc->max_num_queues; i++) {
744 		err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
745 		if (err)
746 			goto out;
747 	}
748 
749 	return 0;
750 out:
751 	mana_destroy_eq(ac);
752 	return err;
753 }
754 
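/* Fence the RQ: when the request completes, the hardware delivers a
 * CQE_RX_OBJECT_FENCE completion on the RQ, which wakes fence_event (see
 * mana_process_rx_cqe()).
 */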
755 static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
756 {
757 	struct mana_fence_rq_resp resp = {};
758 	struct mana_fence_rq_req req = {};
759 	int err;
760 
761 	init_completion(&rxq->fence_event);
762 
763 	mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
764 			     sizeof(req), sizeof(resp));
765 	req.wq_obj_handle =  rxq->rxobj;
766 
767 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
768 				sizeof(resp));
769 	if (err) {
770 		netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n",
771 			   rxq->rxq_idx, err);
772 		return err;
773 	}
774 
775 	err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
776 	if (err || resp.hdr.status) {
777 		netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
778 			   rxq->rxq_idx, err, resp.hdr.status);
779 		if (!err)
780 			err = -EPROTO;
781 
782 		return err;
783 	}
784 
785 	if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) {
786 		netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n",
787 			   rxq->rxq_idx);
788 		return -ETIMEDOUT;
789 	}
790 
791 	return 0;
792 }
793 
794 static void mana_fence_rqs(struct mana_port_context *apc)
795 {
796 	unsigned int rxq_idx;
797 	struct mana_rxq *rxq;
798 	int err;
799 
800 	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
801 		rxq = apc->rxqs[rxq_idx];
802 		err = mana_fence_rq(apc, rxq);
803 
		/* If fencing failed, fall back to sleeping briefly so any
		 * in-flight RX on this queue can drain.
		 */
805 		if (err)
806 			msleep(100);
807 	}
808 }
809 
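/* Advance the WQ tail by num_units. head and tail are free-running
 * counters, so the unsigned comparison below stays valid across
 * wraparound.
 */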
810 static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
811 {
812 	u32 used_space_old;
813 	u32 used_space_new;
814 
815 	used_space_old = wq->head - wq->tail;
816 	used_space_new = wq->head - (wq->tail + num_units);
817 
818 	if (WARN_ON_ONCE(used_space_new > used_space_old))
819 		return -ERANGE;
820 
821 	wq->tail += num_units;
822 	return 0;
823 }
824 
825 static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
826 {
827 	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
828 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
829 	struct device *dev = gc->dev;
830 	int i;
831 
832 	dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);
833 
834 	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
835 		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
836 			       DMA_TO_DEVICE);
837 }
838 
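/* Drain TX completions: unmap and free each completed skb, move the SQ
 * tail forward by the consumed WQE units, and wake the netdev queue if it
 * was stopped and enough space has become available again.
 */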
839 static void mana_poll_tx_cq(struct mana_cq *cq)
840 {
841 	struct gdma_comp *completions = cq->gdma_comp_buf;
842 	struct gdma_posted_wqe_info *wqe_info;
843 	unsigned int pkt_transmitted = 0;
844 	unsigned int wqe_unit_cnt = 0;
845 	struct mana_txq *txq = cq->txq;
846 	struct mana_port_context *apc;
847 	struct netdev_queue *net_txq;
848 	struct gdma_queue *gdma_wq;
849 	unsigned int avail_space;
850 	struct net_device *ndev;
851 	struct sk_buff *skb;
852 	bool txq_stopped;
853 	int comp_read;
854 	int i;
855 
856 	ndev = txq->ndev;
857 	apc = netdev_priv(ndev);
858 
859 	comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
860 				    CQE_POLLING_BUFFER);
861 
862 	if (comp_read < 1)
863 		return;
864 
865 	for (i = 0; i < comp_read; i++) {
866 		struct mana_tx_comp_oob *cqe_oob;
867 
868 		if (WARN_ON_ONCE(!completions[i].is_sq))
869 			return;
870 
871 		cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
872 		if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type !=
873 				 MANA_CQE_COMPLETION))
874 			return;
875 
876 		switch (cqe_oob->cqe_hdr.cqe_type) {
877 		case CQE_TX_OKAY:
878 			break;
879 
880 		case CQE_TX_SA_DROP:
881 		case CQE_TX_MTU_DROP:
882 		case CQE_TX_INVALID_OOB:
883 		case CQE_TX_INVALID_ETH_TYPE:
884 		case CQE_TX_HDR_PROCESSING_ERROR:
885 		case CQE_TX_VF_DISABLED:
886 		case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
887 		case CQE_TX_VPORT_DISABLED:
888 		case CQE_TX_VLAN_TAGGING_VIOLATION:
889 			WARN_ONCE(1, "TX: CQE error %d: ignored.\n",
890 				  cqe_oob->cqe_hdr.cqe_type);
891 			break;
892 
893 		default:
			/* If the CQE type is unexpected, log a one-time
			 * warning and bail out of the completion loop.
			 */
897 			WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n",
898 				  cqe_oob->cqe_hdr.cqe_type);
899 			return;
900 		}
901 
902 		if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))
903 			return;
904 
905 		skb = skb_dequeue(&txq->pending_skbs);
906 		if (WARN_ON_ONCE(!skb))
907 			return;
908 
909 		wqe_info = (struct gdma_posted_wqe_info *)skb->cb;
910 		wqe_unit_cnt += wqe_info->wqe_size_in_bu;
911 
912 		mana_unmap_skb(skb, apc);
913 
914 		napi_consume_skb(skb, cq->budget);
915 
916 		pkt_transmitted++;
917 	}
918 
919 	if (WARN_ON_ONCE(wqe_unit_cnt == 0))
920 		return;
921 
922 	mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);
923 
924 	gdma_wq = txq->gdma_sq;
925 	avail_space = mana_gd_wq_avail_space(gdma_wq);
926 
927 	/* Ensure tail updated before checking q stop */
928 	smp_mb();
929 
930 	net_txq = txq->net_txq;
931 	txq_stopped = netif_tx_queue_stopped(net_txq);
932 
933 	/* Ensure checking txq_stopped before apc->port_is_up. */
934 	smp_rmb();
935 
936 	if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
937 		netif_tx_wake_queue(net_txq);
938 		apc->eth_stats.wake_queue++;
939 	}
940 
941 	if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
942 		WARN_ON_ONCE(1);
943 
944 	cq->work_done = pkt_transmitted;
945 }
946 
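/* Re-post the receive WQE for the current buffer slot so the hardware can
 * use it again; buf_index wraps around after num_rx_buf slots.
 */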
947 static void mana_post_pkt_rxq(struct mana_rxq *rxq)
948 {
949 	struct mana_recv_buf_oob *recv_buf_oob;
950 	u32 curr_index;
951 	int err;
952 
953 	curr_index = rxq->buf_index++;
954 	if (rxq->buf_index == rxq->num_rx_buf)
955 		rxq->buf_index = 0;
956 
957 	recv_buf_oob = &rxq->rx_oobs[curr_index];
958 
959 	err = mana_gd_post_and_ring(rxq->gdma_rq, &recv_buf_oob->wqe_req,
960 				    &recv_buf_oob->wqe_inf);
961 	if (WARN_ON_ONCE(err))
962 		return;
963 
964 	WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
965 }
966 
967 static struct sk_buff *mana_build_skb(void *buf_va, uint pkt_len,
968 				      struct xdp_buff *xdp)
969 {
970 	struct sk_buff *skb = build_skb(buf_va, PAGE_SIZE);
971 
972 	if (!skb)
973 		return NULL;
974 
975 	if (xdp->data_hard_start) {
976 		skb_reserve(skb, xdp->data - xdp->data_hard_start);
977 		skb_put(skb, xdp->data_end - xdp->data);
978 	} else {
979 		skb_reserve(skb, XDP_PACKET_HEADROOM);
980 		skb_put(skb, pkt_len);
981 	}
982 
983 	return skb;
984 }
985 
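/* Deliver one received buffer: run XDP first, then build an skb, fill in
 * checksum and RSS hash info from the CQE and hand it to GRO. XDP_TX
 * packets are instead re-queued for transmit via mana_xdp_tx().
 */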
986 static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
987 			struct mana_rxq *rxq)
988 {
989 	struct mana_stats *rx_stats = &rxq->stats;
990 	struct net_device *ndev = rxq->ndev;
991 	uint pkt_len = cqe->ppi[0].pkt_len;
992 	u16 rxq_idx = rxq->rxq_idx;
993 	struct napi_struct *napi;
994 	struct xdp_buff xdp = {};
995 	struct sk_buff *skb;
996 	u32 hash_value;
997 	u32 act;
998 
999 	rxq->rx_cq.work_done++;
1000 	napi = &rxq->rx_cq.napi;
1001 
1002 	if (!buf_va) {
1003 		++ndev->stats.rx_dropped;
1004 		return;
1005 	}
1006 
1007 	act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);
1008 
1009 	if (act != XDP_PASS && act != XDP_TX)
1010 		goto drop;
1011 
1012 	skb = mana_build_skb(buf_va, pkt_len, &xdp);
1013 
1014 	if (!skb)
1015 		goto drop;
1016 
1017 	skb->dev = napi->dev;
1018 
1019 	skb->protocol = eth_type_trans(skb, ndev);
1020 	skb_checksum_none_assert(skb);
1021 	skb_record_rx_queue(skb, rxq_idx);
1022 
1023 	if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) {
1024 		if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed)
1025 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1026 	}
1027 
1028 	if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) {
1029 		hash_value = cqe->ppi[0].pkt_hash;
1030 
1031 		if (cqe->rx_hashtype & MANA_HASH_L4)
1032 			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4);
1033 		else
1034 			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
1035 	}
1036 
1037 	if (act == XDP_TX) {
1038 		skb_set_queue_mapping(skb, rxq_idx);
1039 		mana_xdp_tx(skb, ndev);
1040 		return;
1041 	}
1042 
1043 	napi_gro_receive(napi, skb);
1044 
1045 	u64_stats_update_begin(&rx_stats->syncp);
1046 	rx_stats->packets++;
1047 	rx_stats->bytes += pkt_len;
1048 	u64_stats_update_end(&rx_stats->syncp);
1049 	return;
1050 
1051 drop:
1052 	free_page((unsigned long)buf_va);
1053 	++ndev->stats.rx_dropped;
1054 	return;
1055 }
1056 
1057 static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
1058 				struct gdma_comp *cqe)
1059 {
1060 	struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
1061 	struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
1062 	struct net_device *ndev = rxq->ndev;
1063 	struct mana_recv_buf_oob *rxbuf_oob;
1064 	struct device *dev = gc->dev;
1065 	void *new_buf, *old_buf;
1066 	struct page *new_page;
1067 	u32 curr, pktlen;
1068 	dma_addr_t da;
1069 
1070 	switch (oob->cqe_hdr.cqe_type) {
1071 	case CQE_RX_OKAY:
1072 		break;
1073 
1074 	case CQE_RX_TRUNCATED:
1075 		netdev_err(ndev, "Dropped a truncated packet\n");
1076 		return;
1077 
1078 	case CQE_RX_COALESCED_4:
1079 		netdev_err(ndev, "RX coalescing is unsupported\n");
1080 		return;
1081 
1082 	case CQE_RX_OBJECT_FENCE:
1083 		complete(&rxq->fence_event);
1084 		return;
1085 
1086 	default:
1087 		netdev_err(ndev, "Unknown RX CQE type = %d\n",
1088 			   oob->cqe_hdr.cqe_type);
1089 		return;
1090 	}
1091 
1092 	if (oob->cqe_hdr.cqe_type != CQE_RX_OKAY)
1093 		return;
1094 
1095 	pktlen = oob->ppi[0].pkt_len;
1096 
1097 	if (pktlen == 0) {
		/* Data packets should never have a packet length of zero. */
1099 		netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
1100 			   rxq->gdma_id, cq->gdma_id, rxq->rxobj);
1101 		return;
1102 	}
1103 
1104 	curr = rxq->buf_index;
1105 	rxbuf_oob = &rxq->rx_oobs[curr];
1106 	WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
1107 
1108 	new_page = alloc_page(GFP_ATOMIC);
1109 
1110 	if (new_page) {
1111 		da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize,
1112 				  DMA_FROM_DEVICE);
1113 
1114 		if (dma_mapping_error(dev, da)) {
1115 			__free_page(new_page);
1116 			new_page = NULL;
1117 		}
1118 	}
1119 
1120 	new_buf = new_page ? page_to_virt(new_page) : NULL;
1121 
1122 	if (new_buf) {
1123 		dma_unmap_page(dev, rxbuf_oob->buf_dma_addr, rxq->datasize,
1124 			       DMA_FROM_DEVICE);
1125 
1126 		old_buf = rxbuf_oob->buf_va;
1127 
1128 		/* refresh the rxbuf_oob with the new page */
1129 		rxbuf_oob->buf_va = new_buf;
1130 		rxbuf_oob->buf_dma_addr = da;
1131 		rxbuf_oob->sgl[0].address = rxbuf_oob->buf_dma_addr;
1132 	} else {
1133 		old_buf = NULL; /* drop the packet if no memory */
1134 	}
1135 
1136 	mana_rx_skb(old_buf, oob, rxq);
1137 
1138 	mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
1139 
1140 	mana_post_pkt_rxq(rxq);
1141 }
1142 
1143 static void mana_poll_rx_cq(struct mana_cq *cq)
1144 {
1145 	struct gdma_comp *comp = cq->gdma_comp_buf;
1146 	int comp_read, i;
1147 
1148 	comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
1149 	WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
1150 
1151 	for (i = 0; i < comp_read; i++) {
1152 		if (WARN_ON_ONCE(comp[i].is_sq))
1153 			return;
1154 
1155 		/* verify recv cqe references the right rxq */
1156 		if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
1157 			return;
1158 
1159 		mana_process_rx_cqe(cq->rxq, cq, &comp[i]);
1160 	}
1161 }
1162 
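/* Shared CQ handler driven by NAPI: polls the RX or TX CQ and re-arms the
 * CQ only when there is budget left (work_done < budget) and NAPI
 * completed.
 */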
1163 static void mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
1164 {
1165 	struct mana_cq *cq = context;
1166 	u8 arm_bit;
1167 
1168 	WARN_ON_ONCE(cq->gdma_cq != gdma_queue);
1169 
1170 	if (cq->type == MANA_CQ_TYPE_RX)
1171 		mana_poll_rx_cq(cq);
1172 	else
1173 		mana_poll_tx_cq(cq);
1174 
1175 	if (cq->work_done < cq->budget &&
1176 	    napi_complete_done(&cq->napi, cq->work_done)) {
1177 		arm_bit = SET_ARM_BIT;
1178 	} else {
1179 		arm_bit = 0;
1180 	}
1181 
1182 	mana_gd_ring_cq(gdma_queue, arm_bit);
1183 }
1184 
1185 static int mana_poll(struct napi_struct *napi, int budget)
1186 {
1187 	struct mana_cq *cq = container_of(napi, struct mana_cq, napi);
1188 
1189 	cq->work_done = 0;
1190 	cq->budget = budget;
1191 
1192 	mana_cq_handler(cq, cq->gdma_cq);
1193 
1194 	return min(cq->work_done, budget);
1195 }
1196 
1197 static void mana_schedule_napi(void *context, struct gdma_queue *gdma_queue)
1198 {
1199 	struct mana_cq *cq = context;
1200 
1201 	napi_schedule_irqoff(&cq->napi);
1202 }
1203 
1204 static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
1205 {
1206 	struct gdma_dev *gd = apc->ac->gdma_dev;
1207 
1208 	if (!cq->gdma_cq)
1209 		return;
1210 
1211 	mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
1212 }
1213 
1214 static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
1215 {
1216 	struct gdma_dev *gd = apc->ac->gdma_dev;
1217 
1218 	if (!txq->gdma_sq)
1219 		return;
1220 
1221 	mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
1222 }
1223 
1224 static void mana_destroy_txq(struct mana_port_context *apc)
1225 {
1226 	struct napi_struct *napi;
1227 	int i;
1228 
1229 	if (!apc->tx_qp)
1230 		return;
1231 
1232 	for (i = 0; i < apc->num_queues; i++) {
1233 		napi = &apc->tx_qp[i].tx_cq.napi;
1234 		napi_synchronize(napi);
1235 		napi_disable(napi);
1236 		netif_napi_del(napi);
1237 
1238 		mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
1239 
1240 		mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
1241 
1242 		mana_deinit_txq(apc, &apc->tx_qp[i].txq);
1243 	}
1244 
1245 	kfree(apc->tx_qp);
1246 	apc->tx_qp = NULL;
1247 }
1248 
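/* Create one SQ/CQ pair per TX queue, register each pair with the hardware
 * as a WQ object and hook the CQ into NAPI.
 */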
1249 static int mana_create_txq(struct mana_port_context *apc,
1250 			   struct net_device *net)
1251 {
1252 	struct mana_context *ac = apc->ac;
1253 	struct gdma_dev *gd = ac->gdma_dev;
1254 	struct mana_obj_spec wq_spec;
1255 	struct mana_obj_spec cq_spec;
1256 	struct gdma_queue_spec spec;
1257 	struct gdma_context *gc;
1258 	struct mana_txq *txq;
1259 	struct mana_cq *cq;
1260 	u32 txq_size;
1261 	u32 cq_size;
1262 	int err;
1263 	int i;
1264 
1265 	apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp),
1266 			     GFP_KERNEL);
1267 	if (!apc->tx_qp)
1268 		return -ENOMEM;
1269 
1270 	/*  The minimum size of the WQE is 32 bytes, hence
1271 	 *  MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
1272 	 *  the SQ can store. This value is then used to size other queues
1273 	 *  to prevent overflow.
1274 	 */
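	/* For example, 256 send buffers would give a 256 * 32 = 8192-byte SQ,
	 * which satisfies the PAGE_ALIGNED check below (with 4 KB pages).
	 */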
1275 	txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
1276 	BUILD_BUG_ON(!PAGE_ALIGNED(txq_size));
1277 
1278 	cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
1279 	cq_size = PAGE_ALIGN(cq_size);
1280 
1281 	gc = gd->gdma_context;
1282 
1283 	for (i = 0; i < apc->num_queues; i++) {
1284 		apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
1285 
1286 		/* Create SQ */
1287 		txq = &apc->tx_qp[i].txq;
1288 
1289 		u64_stats_init(&txq->stats.syncp);
1290 		txq->ndev = net;
1291 		txq->net_txq = netdev_get_tx_queue(net, i);
1292 		txq->vp_offset = apc->tx_vp_offset;
1293 		skb_queue_head_init(&txq->pending_skbs);
1294 
1295 		memset(&spec, 0, sizeof(spec));
1296 		spec.type = GDMA_SQ;
1297 		spec.monitor_avl_buf = true;
1298 		spec.queue_size = txq_size;
1299 		err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
1300 		if (err)
1301 			goto out;
1302 
1303 		/* Create SQ's CQ */
1304 		cq = &apc->tx_qp[i].tx_cq;
1305 		cq->type = MANA_CQ_TYPE_TX;
1306 
1307 		cq->txq = txq;
1308 
1309 		memset(&spec, 0, sizeof(spec));
1310 		spec.type = GDMA_CQ;
1311 		spec.monitor_avl_buf = false;
1312 		spec.queue_size = cq_size;
1313 		spec.cq.callback = mana_schedule_napi;
1314 		spec.cq.parent_eq = ac->eqs[i].eq;
1315 		spec.cq.context = cq;
1316 		err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
1317 		if (err)
1318 			goto out;
1319 
1320 		memset(&wq_spec, 0, sizeof(wq_spec));
1321 		memset(&cq_spec, 0, sizeof(cq_spec));
1322 
1323 		wq_spec.gdma_region = txq->gdma_sq->mem_info.gdma_region;
1324 		wq_spec.queue_size = txq->gdma_sq->queue_size;
1325 
1326 		cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
1327 		cq_spec.queue_size = cq->gdma_cq->queue_size;
1328 		cq_spec.modr_ctx_id = 0;
1329 		cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
1330 
1331 		err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
1332 					 &wq_spec, &cq_spec,
1333 					 &apc->tx_qp[i].tx_object);
1334 
1335 		if (err)
1336 			goto out;
1337 
1338 		txq->gdma_sq->id = wq_spec.queue_index;
1339 		cq->gdma_cq->id = cq_spec.queue_index;
1340 
1341 		txq->gdma_sq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1342 		cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1343 
1344 		txq->gdma_txq_id = txq->gdma_sq->id;
1345 
1346 		cq->gdma_id = cq->gdma_cq->id;
1347 
1348 		if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
1349 			err = -EINVAL;
1350 			goto out;
1351 		}
1352 
1353 		gc->cq_table[cq->gdma_id] = cq->gdma_cq;
1354 
1355 		netif_tx_napi_add(net, &cq->napi, mana_poll, NAPI_POLL_WEIGHT);
1356 		napi_enable(&cq->napi);
1357 
1358 		mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
1359 	}
1360 
1361 	return 0;
1362 out:
1363 	mana_destroy_txq(apc);
1364 	return err;
1365 }
1366 
1367 static void mana_destroy_rxq(struct mana_port_context *apc,
1368 			     struct mana_rxq *rxq, bool validate_state)
1369 
1370 {
1371 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1372 	struct mana_recv_buf_oob *rx_oob;
1373 	struct device *dev = gc->dev;
1374 	struct napi_struct *napi;
1375 	int i;
1376 
1377 	if (!rxq)
1378 		return;
1379 
1380 	napi = &rxq->rx_cq.napi;
1381 
1382 	if (validate_state)
1383 		napi_synchronize(napi);
1384 
1385 	napi_disable(napi);
1386 
1387 	xdp_rxq_info_unreg(&rxq->xdp_rxq);
1388 
1389 	netif_napi_del(napi);
1390 
1391 	mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
1392 
1393 	mana_deinit_cq(apc, &rxq->rx_cq);
1394 
1395 	for (i = 0; i < rxq->num_rx_buf; i++) {
1396 		rx_oob = &rxq->rx_oobs[i];
1397 
1398 		if (!rx_oob->buf_va)
1399 			continue;
1400 
1401 		dma_unmap_page(dev, rx_oob->buf_dma_addr, rxq->datasize,
1402 			       DMA_FROM_DEVICE);
1403 
1404 		free_page((unsigned long)rx_oob->buf_va);
1405 		rx_oob->buf_va = NULL;
1406 	}
1407 
1408 	if (rxq->gdma_rq)
1409 		mana_gd_destroy_queue(gc, rxq->gdma_rq);
1410 
1411 	kfree(rxq);
1412 }
1413 
1414 #define MANA_WQE_HEADER_SIZE 16
1415 #define MANA_WQE_SGE_SIZE 16
1416 
1417 static int mana_alloc_rx_wqe(struct mana_port_context *apc,
1418 			     struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size)
1419 {
1420 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1421 	struct mana_recv_buf_oob *rx_oob;
1422 	struct device *dev = gc->dev;
1423 	struct page *page;
1424 	dma_addr_t da;
1425 	u32 buf_idx;
1426 
1427 	WARN_ON(rxq->datasize == 0 || rxq->datasize > PAGE_SIZE);
1428 
1429 	*rxq_size = 0;
1430 	*cq_size = 0;
1431 
1432 	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
1433 		rx_oob = &rxq->rx_oobs[buf_idx];
1434 		memset(rx_oob, 0, sizeof(*rx_oob));
1435 
1436 		page = alloc_page(GFP_KERNEL);
1437 		if (!page)
1438 			return -ENOMEM;
1439 
1440 		da = dma_map_page(dev, page, XDP_PACKET_HEADROOM, rxq->datasize,
1441 				  DMA_FROM_DEVICE);
1442 
1443 		if (dma_mapping_error(dev, da)) {
1444 			__free_page(page);
1445 			return -ENOMEM;
1446 		}
1447 
1448 		rx_oob->buf_va = page_to_virt(page);
1449 		rx_oob->buf_dma_addr = da;
1450 
1451 		rx_oob->num_sge = 1;
1452 		rx_oob->sgl[0].address = rx_oob->buf_dma_addr;
1453 		rx_oob->sgl[0].size = rxq->datasize;
1454 		rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;
1455 
1456 		rx_oob->wqe_req.sgl = rx_oob->sgl;
1457 		rx_oob->wqe_req.num_sge = rx_oob->num_sge;
1458 		rx_oob->wqe_req.inline_oob_size = 0;
1459 		rx_oob->wqe_req.inline_oob_data = NULL;
1460 		rx_oob->wqe_req.flags = 0;
1461 		rx_oob->wqe_req.client_data_unit = 0;
1462 
1463 		*rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
1464 				   MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
1465 		*cq_size += COMP_ENTRY_SIZE;
1466 	}
1467 
1468 	return 0;
1469 }
1470 
1471 static int mana_push_wqe(struct mana_rxq *rxq)
1472 {
1473 	struct mana_recv_buf_oob *rx_oob;
1474 	u32 buf_idx;
1475 	int err;
1476 
1477 	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
1478 		rx_oob = &rxq->rx_oobs[buf_idx];
1479 
1480 		err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
1481 					    &rx_oob->wqe_inf);
1482 		if (err)
1483 			return -ENOSPC;
1484 	}
1485 
1486 	return 0;
1487 }
1488 
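/* Create an RX queue: allocate one page per receive buffer, create the RQ
 * and its CQ, register them as a WQ object, post the initial receive WQEs
 * and enable NAPI and XDP on the queue.
 */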
1489 static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
1490 					u32 rxq_idx, struct mana_eq *eq,
1491 					struct net_device *ndev)
1492 {
1493 	struct gdma_dev *gd = apc->ac->gdma_dev;
1494 	struct mana_obj_spec wq_spec;
1495 	struct mana_obj_spec cq_spec;
1496 	struct gdma_queue_spec spec;
1497 	struct mana_cq *cq = NULL;
1498 	struct gdma_context *gc;
1499 	u32 cq_size, rq_size;
1500 	struct mana_rxq *rxq;
1501 	int err;
1502 
1503 	gc = gd->gdma_context;
1504 
1505 	rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE),
1506 		      GFP_KERNEL);
1507 	if (!rxq)
1508 		return NULL;
1509 
1510 	rxq->ndev = ndev;
1511 	rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
1512 	rxq->rxq_idx = rxq_idx;
1513 	rxq->datasize = ALIGN(MAX_FRAME_SIZE, 64);
1514 	rxq->rxobj = INVALID_MANA_HANDLE;
1515 
1516 	err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
1517 	if (err)
1518 		goto out;
1519 
1520 	rq_size = PAGE_ALIGN(rq_size);
1521 	cq_size = PAGE_ALIGN(cq_size);
1522 
1523 	/* Create RQ */
1524 	memset(&spec, 0, sizeof(spec));
1525 	spec.type = GDMA_RQ;
1526 	spec.monitor_avl_buf = true;
1527 	spec.queue_size = rq_size;
1528 	err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
1529 	if (err)
1530 		goto out;
1531 
1532 	/* Create RQ's CQ */
1533 	cq = &rxq->rx_cq;
1534 	cq->type = MANA_CQ_TYPE_RX;
1535 	cq->rxq = rxq;
1536 
1537 	memset(&spec, 0, sizeof(spec));
1538 	spec.type = GDMA_CQ;
1539 	spec.monitor_avl_buf = false;
1540 	spec.queue_size = cq_size;
1541 	spec.cq.callback = mana_schedule_napi;
1542 	spec.cq.parent_eq = eq->eq;
1543 	spec.cq.context = cq;
1544 	err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
1545 	if (err)
1546 		goto out;
1547 
1548 	memset(&wq_spec, 0, sizeof(wq_spec));
1549 	memset(&cq_spec, 0, sizeof(cq_spec));
1550 	wq_spec.gdma_region = rxq->gdma_rq->mem_info.gdma_region;
1551 	wq_spec.queue_size = rxq->gdma_rq->queue_size;
1552 
1553 	cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
1554 	cq_spec.queue_size = cq->gdma_cq->queue_size;
1555 	cq_spec.modr_ctx_id = 0;
1556 	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
1557 
1558 	err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
1559 				 &wq_spec, &cq_spec, &rxq->rxobj);
1560 	if (err)
1561 		goto out;
1562 
1563 	rxq->gdma_rq->id = wq_spec.queue_index;
1564 	cq->gdma_cq->id = cq_spec.queue_index;
1565 
1566 	rxq->gdma_rq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1567 	cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
1568 
1569 	rxq->gdma_id = rxq->gdma_rq->id;
1570 	cq->gdma_id = cq->gdma_cq->id;
1571 
1572 	err = mana_push_wqe(rxq);
1573 	if (err)
1574 		goto out;
1575 
1576 	if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
1577 		err = -EINVAL;
1578 		goto out;
1579 	}
1580 
1581 	gc->cq_table[cq->gdma_id] = cq->gdma_cq;
1582 
1583 	netif_napi_add(ndev, &cq->napi, mana_poll, 1);
1584 
1585 	WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
1586 				 cq->napi.napi_id));
1587 	WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
1588 					   MEM_TYPE_PAGE_SHARED, NULL));
1589 
1590 	napi_enable(&cq->napi);
1591 
1592 	mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
1593 out:
1594 	if (!err)
1595 		return rxq;
1596 
1597 	netdev_err(ndev, "Failed to create RXQ: err = %d\n", err);
1598 
1599 	mana_destroy_rxq(apc, rxq, false);
1600 
1601 	if (cq)
1602 		mana_deinit_cq(apc, cq);
1603 
1604 	return NULL;
1605 }
1606 
1607 static int mana_add_rx_queues(struct mana_port_context *apc,
1608 			      struct net_device *ndev)
1609 {
1610 	struct mana_context *ac = apc->ac;
1611 	struct mana_rxq *rxq;
1612 	int err = 0;
1613 	int i;
1614 
1615 	for (i = 0; i < apc->num_queues; i++) {
1616 		rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
1617 		if (!rxq) {
1618 			err = -ENOMEM;
1619 			goto out;
1620 		}
1621 
1622 		u64_stats_init(&rxq->stats.syncp);
1623 
1624 		apc->rxqs[i] = rxq;
1625 	}
1626 
1627 	apc->default_rxobj = apc->rxqs[0]->rxobj;
1628 out:
1629 	return err;
1630 }
1631 
1632 static void mana_destroy_vport(struct mana_port_context *apc)
1633 {
1634 	struct mana_rxq *rxq;
1635 	u32 rxq_idx;
1636 
1637 	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
1638 		rxq = apc->rxqs[rxq_idx];
1639 		if (!rxq)
1640 			continue;
1641 
1642 		mana_destroy_rxq(apc, rxq, true);
1643 		apc->rxqs[rxq_idx] = NULL;
1644 	}
1645 
1646 	mana_destroy_txq(apc);
1647 }
1648 
1649 static int mana_create_vport(struct mana_port_context *apc,
1650 			     struct net_device *net)
1651 {
1652 	struct gdma_dev *gd = apc->ac->gdma_dev;
1653 	int err;
1654 
1655 	apc->default_rxobj = INVALID_MANA_HANDLE;
1656 
1657 	err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
1658 	if (err)
1659 		return err;
1660 
1661 	return mana_create_txq(apc, net);
1662 }
1663 
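/* Fill the RSS indirection table with the default round-robin spread
 * across the configured number of queues.
 */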
1664 static void mana_rss_table_init(struct mana_port_context *apc)
1665 {
1666 	int i;
1667 
1668 	for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
1669 		apc->indir_table[i] =
1670 			ethtool_rxfh_indir_default(i, apc->num_queues);
1671 }
1672 
1673 int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
1674 		    bool update_hash, bool update_tab)
1675 {
1676 	u32 queue_idx;
1677 	int err;
1678 	int i;
1679 
1680 	if (update_tab) {
1681 		for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
1682 			queue_idx = apc->indir_table[i];
1683 			apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
1684 		}
1685 	}
1686 
1687 	err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
1688 	if (err)
1689 		return err;
1690 
1691 	mana_fence_rqs(apc);
1692 
1693 	return 0;
1694 }
1695 
1696 static int mana_init_port(struct net_device *ndev)
1697 {
1698 	struct mana_port_context *apc = netdev_priv(ndev);
1699 	u32 max_txq, max_rxq, max_queues;
1700 	int port_idx = apc->port_idx;
1701 	u32 num_indirect_entries;
1702 	int err;
1703 
1704 	err = mana_init_port_context(apc);
1705 	if (err)
1706 		return err;
1707 
1708 	err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
1709 				   &num_indirect_entries);
1710 	if (err) {
1711 		netdev_err(ndev, "Failed to query info for vPort %d\n",
1712 			   port_idx);
1713 		goto reset_apc;
1714 	}
1715 
1716 	max_queues = min_t(u32, max_txq, max_rxq);
1717 	if (apc->max_queues > max_queues)
1718 		apc->max_queues = max_queues;
1719 
1720 	if (apc->num_queues > apc->max_queues)
1721 		apc->num_queues = apc->max_queues;
1722 
1723 	eth_hw_addr_set(ndev, apc->mac_addr);
1724 
1725 	return 0;
1726 
1727 reset_apc:
1728 	kfree(apc->rxqs);
1729 	apc->rxqs = NULL;
1730 	return err;
1731 }
1732 
1733 int mana_alloc_queues(struct net_device *ndev)
1734 {
1735 	struct mana_port_context *apc = netdev_priv(ndev);
1736 	int err;
1737 
1738 	err = mana_create_vport(apc, ndev);
1739 	if (err)
1740 		return err;
1741 
1742 	err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
1743 	if (err)
1744 		goto destroy_vport;
1745 
1746 	err = mana_add_rx_queues(apc, ndev);
1747 	if (err)
1748 		goto destroy_vport;
1749 
1750 	apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
1751 
1752 	err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
1753 	if (err)
1754 		goto destroy_vport;
1755 
1756 	mana_rss_table_init(apc);
1757 
1758 	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
1759 	if (err)
1760 		goto destroy_vport;
1761 
1762 	mana_chn_setxdp(apc, mana_xdp_get(apc));
1763 
1764 	return 0;
1765 
1766 destroy_vport:
1767 	mana_destroy_vport(apc);
1768 	return err;
1769 }
1770 
1771 int mana_attach(struct net_device *ndev)
1772 {
1773 	struct mana_port_context *apc = netdev_priv(ndev);
1774 	int err;
1775 
1776 	ASSERT_RTNL();
1777 
1778 	err = mana_init_port(ndev);
1779 	if (err)
1780 		return err;
1781 
1782 	if (apc->port_st_save) {
1783 		err = mana_alloc_queues(ndev);
1784 		if (err) {
1785 			mana_cleanup_port_context(apc);
1786 			return err;
1787 		}
1788 	}
1789 
1790 	apc->port_is_up = apc->port_st_save;
1791 
1792 	/* Ensure port state updated before txq state */
1793 	smp_wmb();
1794 
1795 	if (apc->port_is_up)
1796 		netif_carrier_on(ndev);
1797 
1798 	netif_device_attach(ndev);
1799 
1800 	return 0;
1801 }
1802 
1803 static int mana_dealloc_queues(struct net_device *ndev)
1804 {
1805 	struct mana_port_context *apc = netdev_priv(ndev);
1806 	struct mana_txq *txq;
1807 	int i, err;
1808 
1809 	if (apc->port_is_up)
1810 		return -EINVAL;
1811 
1812 	mana_chn_setxdp(apc, NULL);
1813 
	/* No packet can be transmitted now since apc->port_is_up is false.
	 * There is still a tiny chance that mana_poll_tx_cq() re-enables a
	 * txq because it has not yet observed apc->port_is_up being cleared,
	 * but that is harmless: mana_start_xmit() drops any new packets
	 * while apc->port_is_up is false.
	 *
	 * Drain all the in-flight TX packets.
	 */
1822 	for (i = 0; i < apc->num_queues; i++) {
1823 		txq = &apc->tx_qp[i].txq;
1824 
1825 		while (atomic_read(&txq->pending_sends) > 0)
1826 			usleep_range(1000, 2000);
1827 	}
1828 
	/* At this point the queues can no longer be woken up, because all
	 * pending sends have drained and mana_poll_tx_cq() has nothing left
	 * to process.
	 */
1832 
1833 	apc->rss_state = TRI_STATE_FALSE;
1834 	err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
1835 	if (err) {
1836 		netdev_err(ndev, "Failed to disable vPort: %d\n", err);
1837 		return err;
1838 	}
1839 
1840 	mana_destroy_vport(apc);
1841 
1842 	return 0;
1843 }
1844 
1845 int mana_detach(struct net_device *ndev, bool from_close)
1846 {
1847 	struct mana_port_context *apc = netdev_priv(ndev);
1848 	int err;
1849 
1850 	ASSERT_RTNL();
1851 
1852 	apc->port_st_save = apc->port_is_up;
1853 	apc->port_is_up = false;
1854 
1855 	/* Ensure port state updated before txq state */
1856 	smp_wmb();
1857 
1858 	netif_tx_disable(ndev);
1859 	netif_carrier_off(ndev);
1860 
1861 	if (apc->port_st_save) {
1862 		err = mana_dealloc_queues(ndev);
1863 		if (err)
1864 			return err;
1865 	}
1866 
1867 	if (!from_close) {
1868 		netif_device_detach(ndev);
1869 		mana_cleanup_port_context(apc);
1870 	}
1871 
1872 	return 0;
1873 }
1874 
1875 static int mana_probe_port(struct mana_context *ac, int port_idx,
1876 			   struct net_device **ndev_storage)
1877 {
1878 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
1879 	struct mana_port_context *apc;
1880 	struct net_device *ndev;
1881 	int err;
1882 
1883 	ndev = alloc_etherdev_mq(sizeof(struct mana_port_context),
1884 				 gc->max_num_queues);
1885 	if (!ndev)
1886 		return -ENOMEM;
1887 
1888 	*ndev_storage = ndev;
1889 
1890 	apc = netdev_priv(ndev);
1891 	apc->ac = ac;
1892 	apc->ndev = ndev;
1893 	apc->max_queues = gc->max_num_queues;
1894 	apc->num_queues = gc->max_num_queues;
1895 	apc->port_handle = INVALID_MANA_HANDLE;
1896 	apc->port_idx = port_idx;
1897 
1898 	ndev->netdev_ops = &mana_devops;
1899 	ndev->ethtool_ops = &mana_ethtool_ops;
1900 	ndev->mtu = ETH_DATA_LEN;
1901 	ndev->max_mtu = ndev->mtu;
1902 	ndev->min_mtu = ndev->mtu;
1903 	ndev->needed_headroom = MANA_HEADROOM;
1904 	SET_NETDEV_DEV(ndev, gc->dev);
1905 
1906 	netif_carrier_off(ndev);
1907 
1908 	netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
1909 
1910 	err = mana_init_port(ndev);
1911 	if (err)
1912 		goto free_net;
1913 
1914 	netdev_lockdep_set_classes(ndev);
1915 
1916 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1917 	ndev->hw_features |= NETIF_F_RXCSUM;
1918 	ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
1919 	ndev->hw_features |= NETIF_F_RXHASH;
1920 	ndev->features = ndev->hw_features;
1921 	ndev->vlan_features = 0;
1922 
1923 	err = register_netdev(ndev);
1924 	if (err) {
1925 		netdev_err(ndev, "Unable to register netdev.\n");
1926 		goto reset_apc;
1927 	}
1928 
1929 	return 0;
1930 
1931 reset_apc:
1932 	kfree(apc->rxqs);
1933 	apc->rxqs = NULL;
1934 free_net:
1935 	*ndev_storage = NULL;
1936 	netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
1937 	free_netdev(ndev);
1938 	return err;
1939 }
1940 
1941 int mana_probe(struct gdma_dev *gd, bool resuming)
1942 {
1943 	struct gdma_context *gc = gd->gdma_context;
1944 	struct mana_context *ac = gd->driver_data;
1945 	struct device *dev = gc->dev;
1946 	u16 num_ports = 0;
1947 	int err;
1948 	int i;
1949 
1950 	dev_info(dev,
1951 		 "Microsoft Azure Network Adapter protocol version: %d.%d.%d\n",
1952 		 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
1953 
1954 	err = mana_gd_register_device(gd);
1955 	if (err)
1956 		return err;
1957 
1958 	if (!resuming) {
1959 		ac = kzalloc(sizeof(*ac), GFP_KERNEL);
1960 		if (!ac)
1961 			return -ENOMEM;
1962 
1963 		ac->gdma_dev = gd;
1964 		gd->driver_data = ac;
1965 	}
1966 
1967 	err = mana_create_eq(ac);
1968 	if (err)
1969 		goto out;
1970 
1971 	err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
1972 				    MANA_MICRO_VERSION, &num_ports);
1973 	if (err)
1974 		goto out;
1975 
1976 	if (!resuming) {
1977 		ac->num_ports = num_ports;
1978 	} else {
1979 		if (ac->num_ports != num_ports) {
1980 			dev_err(dev, "The number of vPorts changed: %d->%d\n",
1981 				ac->num_ports, num_ports);
1982 			err = -EPROTO;
1983 			goto out;
1984 		}
1985 	}
1986 
1987 	if (ac->num_ports == 0)
1988 		dev_err(dev, "Failed to detect any vPort\n");
1989 
1990 	if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
1991 		ac->num_ports = MAX_PORTS_IN_MANA_DEV;
1992 
1993 	if (!resuming) {
1994 		for (i = 0; i < ac->num_ports; i++) {
1995 			err = mana_probe_port(ac, i, &ac->ports[i]);
1996 			if (err)
1997 				break;
1998 		}
1999 	} else {
2000 		for (i = 0; i < ac->num_ports; i++) {
2001 			rtnl_lock();
2002 			err = mana_attach(ac->ports[i]);
2003 			rtnl_unlock();
2004 			if (err)
2005 				break;
2006 		}
2007 	}
2008 out:
2009 	if (err)
2010 		mana_remove(gd, false);
2011 
2012 	return err;
2013 }
2014 
2015 void mana_remove(struct gdma_dev *gd, bool suspending)
2016 {
2017 	struct gdma_context *gc = gd->gdma_context;
2018 	struct mana_context *ac = gd->driver_data;
2019 	struct device *dev = gc->dev;
2020 	struct net_device *ndev;
2021 	int err;
2022 	int i;
2023 
2024 	for (i = 0; i < ac->num_ports; i++) {
2025 		ndev = ac->ports[i];
2026 		if (!ndev) {
2027 			if (i == 0)
2028 				dev_err(dev, "No net device to remove\n");
2029 			goto out;
2030 		}
2031 
2032 		/* All cleanup actions should stay after rtnl_lock(), otherwise
2033 		 * other functions may access partially cleaned up data.
2034 		 */
2035 		rtnl_lock();
2036 
2037 		err = mana_detach(ndev, false);
2038 		if (err)
2039 			netdev_err(ndev, "Failed to detach vPort %d: %d\n",
2040 				   i, err);
2041 
2042 		if (suspending) {
2043 			/* No need to unregister the ndev. */
2044 			rtnl_unlock();
2045 			continue;
2046 		}
2047 
2048 		unregister_netdevice(ndev);
2049 
2050 		rtnl_unlock();
2051 
2052 		free_netdev(ndev);
2053 	}
2054 
2055 	mana_destroy_eq(ac);
2056 
2057 out:
2058 	mana_gd_deregister_device(gd);
2059 
2060 	if (suspending)
2061 		return;
2062 
2063 	gd->driver_data = NULL;
2064 	gd->gdma_context = NULL;
2065 	kfree(ac);
2066 }
2067