// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <net/tso.h>
#include <linux/bitfield.h>

#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
#include "cn10k.h"

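/* Note: the NIX_LF_*_OP_* statistics registers below are read via an
 * atomic 64-bit add; the target queue index is carried in the upper
 * word of the operand ((u64)qidx << 32) and the value returned by the
 * add is the selected queue's counter.
 */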
static void otx2_nix_rq_op_stats(struct queue_stats *stats,
				 struct otx2_nic *pfvf, int qidx)
{
	u64 incr = (u64)qidx << 32;
	u64 *ptr;

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS);
	stats->bytes = otx2_atomic64_add(incr, ptr);

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS);
	stats->pkts = otx2_atomic64_add(incr, ptr);
}

static void otx2_nix_sq_op_stats(struct queue_stats *stats,
				 struct otx2_nic *pfvf, int qidx)
{
	u64 incr = (u64)qidx << 32;
	u64 *ptr;

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS);
	stats->bytes = otx2_atomic64_add(incr, ptr);

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS);
	stats->pkts = otx2_atomic64_add(incr, ptr);
}

void otx2_update_lmac_stats(struct otx2_nic *pfvf)
{
	struct msg_req *req;

	if (!netif_running(pfvf->netdev))
		return;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_stats(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return;
	}

	otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
}

void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf)
{
	struct msg_req *req;

	if (!netif_running(pfvf->netdev))
		return;
	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_fec_stats(&pfvf->mbox);
	if (req)
		otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
}

int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx)
{
	struct otx2_rcv_queue *rq = &pfvf->qset.rq[qidx];

	if (!pfvf->qset.rq)
		return 0;

	otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx);
	return 1;
}

int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx)
{
	struct otx2_snd_queue *sq = &pfvf->qset.sq[qidx];

	if (!pfvf->qset.sq)
		return 0;

	if (qidx >= pfvf->hw.non_qos_queues) {
		if (!test_bit(qidx - pfvf->hw.non_qos_queues, pfvf->qos.qos_sq_bmap))
			return 0;
	}

	otx2_nix_sq_op_stats(&sq->stats, pfvf, qidx);
	return 1;
}

void otx2_get_dev_stats(struct otx2_nic *pfvf)
{
	struct otx2_dev_stats *dev_stats = &pfvf->hw.dev_stats;

	dev_stats->rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
	dev_stats->rx_drops = OTX2_GET_RX_STATS(RX_DROP);
	dev_stats->rx_bcast_frames = OTX2_GET_RX_STATS(RX_BCAST);
	dev_stats->rx_mcast_frames = OTX2_GET_RX_STATS(RX_MCAST);
	dev_stats->rx_ucast_frames = OTX2_GET_RX_STATS(RX_UCAST);
	dev_stats->rx_frames = dev_stats->rx_bcast_frames +
			       dev_stats->rx_mcast_frames +
			       dev_stats->rx_ucast_frames;

	dev_stats->tx_bytes = OTX2_GET_TX_STATS(TX_OCTS);
	dev_stats->tx_drops = OTX2_GET_TX_STATS(TX_DROP);
	dev_stats->tx_bcast_frames = OTX2_GET_TX_STATS(TX_BCAST);
	dev_stats->tx_mcast_frames = OTX2_GET_TX_STATS(TX_MCAST);
	dev_stats->tx_ucast_frames = OTX2_GET_TX_STATS(TX_UCAST);
	dev_stats->tx_frames = dev_stats->tx_bcast_frames +
			       dev_stats->tx_mcast_frames +
			       dev_stats->tx_ucast_frames;
}

void otx2_get_stats64(struct net_device *netdev,
		      struct rtnl_link_stats64 *stats)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct otx2_dev_stats *dev_stats;

	otx2_get_dev_stats(pfvf);

	dev_stats = &pfvf->hw.dev_stats;
	stats->rx_bytes = dev_stats->rx_bytes;
	stats->rx_packets = dev_stats->rx_frames;
	stats->rx_dropped = dev_stats->rx_drops;
	stats->multicast = dev_stats->rx_mcast_frames;

	stats->tx_bytes = dev_stats->tx_bytes;
	stats->tx_packets = dev_stats->tx_frames;
	stats->tx_dropped = dev_stats->tx_drops;
}
EXPORT_SYMBOL(otx2_get_stats64);

/* Sync MAC address with RVU AF */
static int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac)
{
	struct nix_set_mac_addr *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_set_mac_addr(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	ether_addr_copy(req->mac_addr, mac);

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf,
				struct net_device *netdev)
{
	struct nix_get_mac_addr_rsp *rsp;
	struct mbox_msghdr *msghdr;
	struct msg_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_get_mac_addr(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	msghdr = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(msghdr)) {
		mutex_unlock(&pfvf->mbox.lock);
		return PTR_ERR(msghdr);
	}
	rsp = (struct nix_get_mac_addr_rsp *)msghdr;
	eth_hw_addr_set(netdev, rsp->mac_addr);
	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

int otx2_set_mac_address(struct net_device *netdev, void *p)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) {
		eth_hw_addr_set(netdev, addr->sa_data);
		/* update dmac field in vlan offload rule */
		if (netif_running(netdev) &&
		    pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
			otx2_install_rxvlan_offload_flow(pfvf);
		/* update dmac address in ntuple and DMAC filter list */
		if (pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
			otx2_dmacflt_update_pfmac_flow(pfvf);
	} else {
		return -EPERM;
	}

	return 0;
}
EXPORT_SYMBOL(otx2_set_mac_address);

int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
{
	struct nix_frs_cfg *req;
	u16 maxlen;
	int err;

	maxlen = otx2_get_max_mtu(pfvf) + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->maxlen = pfvf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;

	/* Use max receive length supported by hardware for loopback devices */
	if (is_otx2_lbkvf(pfvf->pdev))
		req->maxlen = maxlen;

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

int otx2_config_pause_frm(struct otx2_nic *pfvf)
{
	struct cgx_pause_frm_cfg *req;
	int err;

	if (is_otx2_lbkvf(pfvf->pdev))
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto unlock;
	}

	req->rx_pause = !!(pfvf->flags & OTX2_FLAG_RX_PAUSE_ENABLED);
	req->tx_pause = !!(pfvf->flags & OTX2_FLAG_TX_PAUSE_ENABLED);
	req->set = 1;

	err = otx2_sync_mbox_msg(&pfvf->mbox);
unlock:
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}
EXPORT_SYMBOL(otx2_config_pause_frm);

int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	struct nix_rss_flowkey_cfg_rsp *rsp;
	struct nix_rss_flowkey_cfg *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_rss_flowkey_cfg(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}
	req->mcam_index = -1; /* Default or reserved index */
	req->flowkey_cfg = rss->flowkey_cfg;
	req->group = DEFAULT_RSS_CONTEXT_GROUP;

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto fail;

	rsp = (struct nix_rss_flowkey_cfg_rsp *)
			otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp)) {
		err = PTR_ERR(rsp);
		goto fail;
	}

	pfvf->hw.flowkey_alg_idx = rsp->alg_idx;
fail:
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	const int index = rss->rss_size * ctx_id;
	struct mbox *mbox = &pfvf->mbox;
	struct otx2_rss_ctx *rss_ctx;
	struct nix_aq_enq_req *aq;
	int idx, err;

	mutex_lock(&mbox->lock);
	rss_ctx = rss->rss_ctx[ctx_id];
	/* Get memory to put this msg */
	for (idx = 0; idx < rss->rss_size; idx++) {
		aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
		if (!aq) {
			/* The shared memory buffer can be full.
			 * Flush it and retry
			 */
			err = otx2_sync_mbox_msg(mbox);
			if (err) {
				mutex_unlock(&mbox->lock);
				return err;
			}
			aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
			if (!aq) {
				mutex_unlock(&mbox->lock);
				return -ENOMEM;
			}
		}

		aq->rss.rq = rss_ctx->ind_tbl[idx];

		/* Fill AQ info */
		aq->qidx = index + idx;
		aq->ctype = NIX_AQ_CTYPE_RSS;
		aq->op = NIX_AQ_INSTOP_INIT;
	}
	err = otx2_sync_mbox_msg(mbox);
	mutex_unlock(&mbox->lock);
	return err;
}

void otx2_set_rss_key(struct otx2_nic *pfvf)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	u64 *key = (u64 *)&rss->key[4];
	int idx;

	/* 352bit or 44byte key needs to be configured as below
	 * NIX_LF_RX_SECRETX0 = key<351:288>
	 * NIX_LF_RX_SECRETX1 = key<287:224>
	 * NIX_LF_RX_SECRETX2 = key<223:160>
	 * NIX_LF_RX_SECRETX3 = key<159:96>
	 * NIX_LF_RX_SECRETX4 = key<95:32>
	 * NIX_LF_RX_SECRETX5<63:32> = key<31:0>
	 */
	otx2_write64(pfvf, NIX_LF_RX_SECRETX(5),
		     (u64)(*((u32 *)&rss->key)) << 32);
	idx = sizeof(rss->key) / sizeof(u64);
	while (idx > 0) {
		idx--;
		otx2_write64(pfvf, NIX_LF_RX_SECRETX(idx), *key++);
	}
}

int otx2_rss_init(struct otx2_nic *pfvf)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	struct otx2_rss_ctx *rss_ctx;
	int idx, ret = 0;

	rss->rss_size = sizeof(*rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);

	/* Init RSS key if it is not set up already */
	if (!rss->enable)
		netdev_rss_key_fill(rss->key, sizeof(rss->key));
	otx2_set_rss_key(pfvf);

	if (!netif_is_rxfh_configured(pfvf->netdev)) {
		/* Set RSS group 0 as default indirection table */
		rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP] = kzalloc(rss->rss_size,
								  GFP_KERNEL);
		if (!rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP])
			return -ENOMEM;

		rss_ctx = rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP];
		for (idx = 0; idx < rss->rss_size; idx++)
			rss_ctx->ind_tbl[idx] =
				ethtool_rxfh_indir_default(idx,
							   pfvf->hw.rx_queues);
	}
	ret = otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP);
	if (ret)
		return ret;

	/* Flowkey or hash config to be used for generating flow tag */
	rss->flowkey_cfg = rss->enable ? rss->flowkey_cfg :
			   NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6 |
			   NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP |
			   NIX_FLOW_KEY_TYPE_SCTP | NIX_FLOW_KEY_TYPE_VLAN |
			   NIX_FLOW_KEY_TYPE_IPV4_PROTO;

	ret = otx2_set_flowkey_cfg(pfvf);
	if (ret)
		return ret;

	rss->enable = true;
	return 0;
}

/* Setup UDP segmentation algorithm in HW */
static void otx2_setup_udp_segmentation(struct nix_lso_format_cfg *lso, bool v4)
{
	struct nix_lso_format *field;

	field = (struct nix_lso_format *)&lso->fields[0];
	lso->field_mask = GENMASK(18, 0);

	/* IP's Length field */
	field->layer = NIX_TXLAYER_OL3;
	/* In IPv4 the Total Length field is at offset 2 bytes; for IPv6
	 * the Payload Length field is at offset 4.
	 */
	field->offset = v4 ? 2 : 4;
	field->sizem1 = 1; /* i.e. 2 bytes */
	field->alg = NIX_LSOALG_ADD_PAYLEN;
	field++;

	/* No ID field in IPv6 header */
	if (v4) {
		/* Increment IPID */
		field->layer = NIX_TXLAYER_OL3;
		field->offset = 4;
		field->sizem1 = 1; /* i.e. 2 bytes */
		field->alg = NIX_LSOALG_ADD_SEGNUM;
		field++;
	}

	/* Update length in UDP header */
	field->layer = NIX_TXLAYER_OL4;
	field->offset = 4;
	field->sizem1 = 1;
	field->alg = NIX_LSOALG_ADD_PAYLEN;
}

/* Setup segmentation algorithms in HW and retrieve algorithm index */
void otx2_setup_segmentation(struct otx2_nic *pfvf)
{
	struct nix_lso_format_cfg_rsp *rsp;
	struct nix_lso_format_cfg *lso;
	struct otx2_hw *hw = &pfvf->hw;
	int err;

	mutex_lock(&pfvf->mbox.lock);

	/* UDPv4 segmentation */
	lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox);
	if (!lso)
		goto fail;

	/* Setup UDP/IP header fields that HW should update per segment */
	otx2_setup_udp_segmentation(lso, true);

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto fail;

	rsp = (struct nix_lso_format_cfg_rsp *)
			otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr);
	if (IS_ERR(rsp))
		goto fail;

	hw->lso_udpv4_idx = rsp->lso_format_idx;

	/* UDPv6 segmentation */
	lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox);
	if (!lso)
		goto fail;

	/* Setup UDP/IP header fields that HW should update per segment */
	otx2_setup_udp_segmentation(lso, false);

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto fail;

	rsp = (struct nix_lso_format_cfg_rsp *)
			otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr);
	if (IS_ERR(rsp))
		goto fail;

	hw->lso_udpv6_idx = rsp->lso_format_idx;
	mutex_unlock(&pfvf->mbox.lock);
	return;
fail:
	mutex_unlock(&pfvf->mbox.lock);
	netdev_info(pfvf->netdev,
		    "Failed to get LSO index for UDP GSO offload, disabling\n");
	pfvf->netdev->hw_features &= ~NETIF_F_GSO_UDP_L4;
}

void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
{
	/* Configure CQE interrupt coalescing parameters
	 *
	 * HW triggers an irq when ECOUNT > cq_ecount_wait, hence
	 * set 1 less than cq_ecount_wait. And cq_time_wait is in
	 * usecs, convert that to 100ns count.
	 */
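	/* E.g. cq_time_wait = 50 usec is programmed as 50 * 10 = 500
	 * (in 100ns units) into the time-wait field written below.
	 */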
	otx2_write64(pfvf, NIX_LF_CINTX_WAIT(qidx),
		     ((u64)(pfvf->hw.cq_time_wait * 10) << 48) |
		     ((u64)pfvf->hw.cq_qcount_wait << 32) |
		     (pfvf->hw.cq_ecount_wait - 1));
}

static int otx2_alloc_pool_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
			       dma_addr_t *dma)
{
	unsigned int offset = 0;
	struct page *page;
	size_t sz;

	sz = SKB_DATA_ALIGN(pool->rbsize);
	sz = ALIGN(sz, OTX2_ALIGN);

	page = page_pool_alloc_frag(pool->page_pool, &offset, sz, GFP_ATOMIC);
	if (unlikely(!page))
		return -ENOMEM;

	*dma = page_pool_get_dma_addr(page) + offset;
	return 0;
}

static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
			     dma_addr_t *dma)
{
	u8 *buf;

	if (pool->page_pool)
		return otx2_alloc_pool_buf(pfvf, pool, dma);

	buf = napi_alloc_frag_align(pool->rbsize, OTX2_ALIGN);
	if (unlikely(!buf))
		return -ENOMEM;

	*dma = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
				    DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (unlikely(dma_mapping_error(pfvf->dev, *dma))) {
		page_frag_free(buf);
		return -ENOMEM;
	}

	return 0;
}

int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
		    dma_addr_t *dma)
{
	int ret;

	local_bh_disable();
	ret = __otx2_alloc_rbuf(pfvf, pool, dma);
	local_bh_enable();
	return ret;
}

int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
		      dma_addr_t *dma)
{
	if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma))) {
		struct refill_work *work;
		struct delayed_work *dwork;

		work = &pfvf->refill_wrk[cq->cq_idx];
		dwork = &work->pool_refill_work;
		/* Schedule a task if no other task is running */
		if (!cq->refill_task_sched) {
			cq->refill_task_sched = true;
			schedule_delayed_work(dwork,
					      msecs_to_jiffies(100));
		}
		return -ENOMEM;
	}
	return 0;
}

void otx2_tx_timeout(struct net_device *netdev, unsigned int txq)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);

	schedule_work(&pfvf->reset_task);
}
EXPORT_SYMBOL(otx2_tx_timeout);

void otx2_get_mac_from_af(struct net_device *netdev)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	int err;

	err = otx2_hw_get_mac_addr(pfvf, netdev);
	if (err)
		dev_warn(pfvf->dev, "Failed to read mac from hardware\n");

	/* If AF doesn't provide a valid MAC, generate a random one */
	if (!is_valid_ether_addr(netdev->dev_addr))
		eth_hw_addr_random(netdev);
}
EXPORT_SYMBOL(otx2_get_mac_from_af);

int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for_pfc)
{
	u16 (*schq_list)[MAX_TXSCHQ_PER_FUNC];
	struct otx2_hw *hw = &pfvf->hw;
	struct nix_txschq_config *req;
	u64 schq, parent;
	u64 dwrr_val;

	dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);

	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
	if (!req)
		return -ENOMEM;

	req->lvl = lvl;
	req->num_regs = 1;

	schq_list = hw->txschq_list;
#ifdef CONFIG_DCB
	if (txschq_for_pfc)
		schq_list = pfvf->pfc_schq_list;
#endif

	schq = schq_list[lvl][prio];
	/* Set topology etc. configuration */
	if (lvl == NIX_TXSCH_LVL_SMQ) {
		req->reg[0] = NIX_AF_SMQX_CFG(schq);
		req->regval[0] = ((u64)pfvf->tx_max_pktlen << 8) | OTX2_MIN_MTU;
		req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
				  (0x2ULL << 36);
		/* Set link type for DWRR MTU selection on CN10K silicons */
		if (!is_dev_otx2(pfvf->pdev))
			req->regval[0] |= FIELD_PREP(GENMASK_ULL(58, 57),
						(u64)hw->smq_link_type);
		req->num_regs++;
		/* MDQ config */
		parent = schq_list[NIX_TXSCH_LVL_TL4][prio];
		req->reg[1] = NIX_AF_MDQX_PARENT(schq);
		req->regval[1] = parent << 16;
		req->num_regs++;
		/* Set DWRR quantum */
		req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq);
		req->regval[2] = dwrr_val;
	} else if (lvl == NIX_TXSCH_LVL_TL4) {
		parent = schq_list[NIX_TXSCH_LVL_TL3][prio];
		req->reg[0] = NIX_AF_TL4X_PARENT(schq);
		req->regval[0] = parent << 16;
		req->num_regs++;
		req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
		req->regval[1] = dwrr_val;
	} else if (lvl == NIX_TXSCH_LVL_TL3) {
		parent = schq_list[NIX_TXSCH_LVL_TL2][prio];
		req->reg[0] = NIX_AF_TL3X_PARENT(schq);
		req->regval[0] = parent << 16;
		req->num_regs++;
		req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
		req->regval[1] = dwrr_val;
		if (lvl == hw->txschq_link_cfg_lvl) {
			req->num_regs++;
			req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
			/* Enable this queue, enable backpressure
			 * and set the relative channel
			 */
			req->regval[2] = BIT_ULL(13) | BIT_ULL(12) | prio;
		}
	} else if (lvl == NIX_TXSCH_LVL_TL2) {
		parent = schq_list[NIX_TXSCH_LVL_TL1][prio];
		req->reg[0] = NIX_AF_TL2X_PARENT(schq);
		req->regval[0] = parent << 16;

		req->num_regs++;
		req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
		req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val;

		if (lvl == hw->txschq_link_cfg_lvl) {
			req->num_regs++;
			req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
			/* Enable this queue, enable backpressure
			 * and set the relative channel
			 */
			req->regval[2] = BIT_ULL(13) | BIT_ULL(12) | prio;
		}
	} else if (lvl == NIX_TXSCH_LVL_TL1) {
		/* Default config for TL1.
		 * For VF this is always ignored.
		 */

		/* On CN10K, if RR_WEIGHT is greater than 16384, HW will
		 * clip it to 16384, so configuring a 24bit max value
		 * will work on both OTx2 and CN10K.
		 */
		req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
		req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;

		req->num_regs++;
		req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
		req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);

		req->num_regs++;
		req->reg[2] = NIX_AF_TL1X_CIR(schq);
		req->regval[2] = 0;
	}

	return otx2_sync_mbox_msg(&pfvf->mbox);
}
EXPORT_SYMBOL(otx2_txschq_config);

int otx2_smq_flush(struct otx2_nic *pfvf, int smq)
{
	struct nix_txschq_config *req;
	int rc;

	mutex_lock(&pfvf->mbox.lock);

	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->lvl = NIX_TXSCH_LVL_SMQ;
	req->reg[0] = NIX_AF_SMQX_CFG(smq);
	req->regval[0] |= BIT_ULL(49);
	req->num_regs++;

	rc = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return rc;
}
EXPORT_SYMBOL(otx2_smq_flush);

int otx2_txsch_alloc(struct otx2_nic *pfvf)
{
	struct nix_txsch_alloc_req *req;
	struct nix_txsch_alloc_rsp *rsp;
	int lvl, schq, rc;

	/* Get memory to put this msg */
	req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
	if (!req)
		return -ENOMEM;

	/* Request one schq per level */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
		req->schq[lvl] = 1;
	rc = otx2_sync_mbox_msg(&pfvf->mbox);
	if (rc)
		return rc;

	rsp = (struct nix_txsch_alloc_rsp *)
	      otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp))
		return PTR_ERR(rsp);

	/* Setup transmit scheduler list */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
		for (schq = 0; schq < rsp->schq[lvl]; schq++)
			pfvf->hw.txschq_list[lvl][schq] =
				rsp->schq_list[lvl][schq];

	pfvf->hw.txschq_link_cfg_lvl = rsp->link_cfg_lvl;

	return 0;
}

void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq)
{
	struct nix_txsch_free_req *free_req;
	int err;

	mutex_lock(&pfvf->mbox.lock);

	free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
	if (!free_req) {
		mutex_unlock(&pfvf->mbox.lock);
		netdev_err(pfvf->netdev,
			   "Failed alloc txschq free req\n");
		return;
	}

	free_req->schq_lvl = lvl;
	free_req->schq = schq;

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		netdev_err(pfvf->netdev,
			   "Failed stop txschq %d at level %d\n", schq, lvl);
	}

	mutex_unlock(&pfvf->mbox.lock);
}

void otx2_txschq_stop(struct otx2_nic *pfvf)
{
	int lvl, schq;

	/* Free non-QoS TLx nodes */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
		otx2_txschq_free_one(pfvf, lvl,
				     pfvf->hw.txschq_list[lvl][0]);

	/* Clear the txschq list */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++)
			pfvf->hw.txschq_list[lvl][schq] = 0;
	}
}

void otx2_sqb_flush(struct otx2_nic *pfvf)
{
	int qidx, sqe_tail, sqe_head;
	struct otx2_snd_queue *sq;
	u64 incr, *ptr, val;
	int timeout = 1000;

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
	for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
		sq = &pfvf->qset.sq[qidx];
		if (!sq->sqb_ptrs)
			continue;

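		/* SQ_OP_STATUS returns the SQE head pointer in bits <25:20>
		 * and the tail pointer in bits <33:28>, as masked below; the
		 * SQ has fully drained once head equals tail.
		 */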
		incr = (u64)qidx << 32;
		while (timeout) {
			val = otx2_atomic64_add(incr, ptr);
			sqe_head = (val >> 20) & 0x3F;
			sqe_tail = (val >> 28) & 0x3F;
			if (sqe_head == sqe_tail)
				break;
			usleep_range(1, 3);
			timeout--;
		}
	}
}

/* RED and drop levels of CQ on packet reception.
 * For CQ, the level is a measure of emptiness (0x0 = full, 255 = empty).
 */
#define RQ_PASS_LVL_CQ(skid, qsize)	((((skid) + 16) * 256) / (qsize))
#define RQ_DROP_LVL_CQ(skid, qsize)	(((skid) * 256) / (qsize))
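/* Eg: for rq_skid 600 and qsize 1K, pass level = ((600 + 16) * 256) / 1024
 * = 154 and drop level = (600 * 256) / 1024 = 150 on the 0-255 scale.
 */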

/* RED and drop levels of AURA for packet reception.
 * For AURA, the level is a measure of fullness (0x0 = empty, 255 = full).
 * Eg: for an RQ length of 1K and pass/drop levels of 204/230,
 * RED accepts pkts if free pointers > 102 and <= 205, and
 * drops pkts if free pointers < 102.
 */
#define RQ_BP_LVL_AURA   (255 - ((85 * 256) / 100)) /* BP when 85% is full */
#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */

static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct nix_aq_enq_req *aq;

	/* Get memory to put this msg */
	aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
	if (!aq)
		return -ENOMEM;

	aq->rq.cq = qidx;
	aq->rq.ena = 1;
	aq->rq.pb_caching = 1;
	aq->rq.lpb_aura = lpb_aura; /* Use large packet buffer aura */
	aq->rq.lpb_sizem1 = (DMA_BUFFER_LEN(pfvf->rbsize) / 8) - 1;
	aq->rq.xqe_imm_size = 0; /* Copying of packet to CQE not needed */
	aq->rq.flow_tagw = 32; /* Copy full 32bit flow_tag to CQE header */
	aq->rq.qint_idx = 0;
	aq->rq.lpb_drop_ena = 1; /* Enable RED dropping for AURA */
	aq->rq.xqe_drop_ena = 1; /* Enable RED dropping for CQ/SSO */
	aq->rq.xqe_pass = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
	aq->rq.xqe_drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
	aq->rq.lpb_aura_pass = RQ_PASS_LVL_AURA;
	aq->rq.lpb_aura_drop = RQ_DROP_LVL_AURA;

	/* Fill AQ info */
	aq->qidx = qidx;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
{
	struct otx2_nic *pfvf = dev;
	struct otx2_snd_queue *sq;
	struct nix_aq_enq_req *aq;

	sq = &pfvf->qset.sq[qidx];
	sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx));
	/* Get memory to put this msg */
	aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
	if (!aq)
		return -ENOMEM;

	aq->sq.cq = pfvf->hw.rx_queues + qidx;
	aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
	aq->sq.cq_ena = 1;
	aq->sq.ena = 1;
	aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
	aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
	aq->sq.default_chan = pfvf->hw.tx_chan_base;
	aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
	aq->sq.sqb_aura = sqb_aura;
	aq->sq.sq_int_ena = NIX_SQINT_BITS;
	aq->sq.qint_idx = 0;
	/* Due to pipelining, a minimum of 2000 unused SQ CQEs needs to
	 * be maintained to avoid CQ overflow.
	 */
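	/* E.g. with SEND_CQ_SKID of 2000 and 4K SQEs, this programs
	 * (2000 * 256) / 4096 = 125 on the CQ's 0-255 occupancy scale.
	 */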
	aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt));

	/* Fill AQ info */
	aq->qidx = qidx;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_snd_queue *sq;
	struct otx2_pool *pool;
	int err;

	pool = &pfvf->qset.pool[sqb_aura];
	sq = &qset->sq[qidx];
	sq->sqe_size = NIX_SQESZ_W16 ? 64 : 128;
	sq->sqe_cnt = qset->sqe_cnt;

	err = qmem_alloc(pfvf->dev, &sq->sqe, 1, sq->sqe_size);
	if (err)
		return err;

	if (qidx < pfvf->hw.tx_queues) {
		err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
				 TSO_HEADER_SIZE);
		if (err)
			return err;
	}

	sq->sqe_base = sq->sqe->base;
	sq->sg = kcalloc(qset->sqe_cnt, sizeof(struct sg_list), GFP_KERNEL);
	if (!sq->sg)
		return -ENOMEM;

	if (pfvf->ptp && qidx < pfvf->hw.tx_queues) {
		err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
				 sizeof(*sq->timestamps));
		if (err)
			return err;
	}

	sq->head = 0;
	sq->cons_head = 0;
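	/* E.g. a 4KB SQB with 64 byte SQEs holds 4096/64 - 1 = 63 SQEs;
	 * one slot per SQB is reserved for the next-SQB pointer.
	 */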
	sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1;
	sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb;
	/* Set SQE threshold to 10% of total SQEs */
	sq->sqe_thresh = ((sq->num_sqbs * sq->sqe_per_sqb) * 10) / 100;
	sq->aura_id = sqb_aura;
	sq->aura_fc_addr = pool->fc_addr->base;
	sq->io_addr = (__force u64)otx2_get_regaddr(pfvf, NIX_LF_OP_SENDX(0));

	sq->stats.bytes = 0;
	sq->stats.pkts = 0;

	return pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura);
}

static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
{
	struct otx2_qset *qset = &pfvf->qset;
	int err, pool_id, non_xdp_queues;
	struct nix_aq_enq_req *aq;
	struct otx2_cq_queue *cq;

	cq = &qset->cq[qidx];
	cq->cq_idx = qidx;
	non_xdp_queues = pfvf->hw.rx_queues + pfvf->hw.tx_queues;
	if (qidx < pfvf->hw.rx_queues) {
		cq->cq_type = CQ_RX;
		cq->cint_idx = qidx;
		cq->cqe_cnt = qset->rqe_cnt;
		if (pfvf->xdp_prog)
			xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0);
	} else if (qidx < non_xdp_queues) {
		cq->cq_type = CQ_TX;
		cq->cint_idx = qidx - pfvf->hw.rx_queues;
		cq->cqe_cnt = qset->sqe_cnt;
	} else {
		if (pfvf->hw.xdp_queues &&
		    qidx < non_xdp_queues + pfvf->hw.xdp_queues) {
			cq->cq_type = CQ_XDP;
			cq->cint_idx = qidx - non_xdp_queues;
			cq->cqe_cnt = qset->sqe_cnt;
		} else {
			cq->cq_type = CQ_QOS;
			cq->cint_idx = qidx - non_xdp_queues -
				       pfvf->hw.xdp_queues;
			cq->cqe_cnt = qset->sqe_cnt;
		}
	}
	cq->cqe_size = pfvf->qset.xqe_size;

	/* Allocate memory for CQEs */
	err = qmem_alloc(pfvf->dev, &cq->cqe, cq->cqe_cnt, cq->cqe_size);
	if (err)
		return err;

	/* Save CQE CPU base for faster reference */
	cq->cqe_base = cq->cqe->base;
	/* In the case where all RQs' auras point to a single pool,
	 * all CQs' receive buffer pools also point to that same pool.
	 */
	pool_id = ((cq->cq_type == CQ_RX) &&
		   (pfvf->hw.rqpool_cnt != pfvf->hw.rx_queues)) ? 0 : qidx;
	cq->rbpool = &qset->pool[pool_id];
	cq->refill_task_sched = false;

	/* Get memory to put this msg */
	aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
	if (!aq)
		return -ENOMEM;

	aq->cq.ena = 1;
	aq->cq.qsize = Q_SIZE(cq->cqe_cnt, 4);
	aq->cq.caching = 1;
	aq->cq.base = cq->cqe->iova;
	aq->cq.cint_idx = cq->cint_idx;
	aq->cq.cq_err_int_ena = NIX_CQERRINT_BITS;
	aq->cq.qint_idx = 0;
	aq->cq.avg_level = 255;

	if (qidx < pfvf->hw.rx_queues) {
		aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt);
		aq->cq.drop_ena = 1;

		if (!is_otx2_lbkvf(pfvf->pdev)) {
			/* Enable receive CQ backpressure */
			aq->cq.bp_ena = 1;
#ifdef CONFIG_DCB
			aq->cq.bpid = pfvf->bpid[pfvf->queue_to_pfc_map[qidx]];
#else
			aq->cq.bpid = pfvf->bpid[0];
#endif

			/* Set backpressure level same as cq pass level */
			aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
		}
	}

	/* Fill AQ info */
	aq->qidx = qidx;
	aq->ctype = NIX_AQ_CTYPE_CQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

static void otx2_pool_refill_task(struct work_struct *work)
{
	struct otx2_cq_queue *cq;
	struct otx2_pool *rbpool;
	struct refill_work *wrk;
	int qidx, free_ptrs = 0;
	struct otx2_nic *pfvf;
	dma_addr_t bufptr;

	wrk = container_of(work, struct refill_work, pool_refill_work.work);
	pfvf = wrk->pf;
	qidx = wrk - pfvf->refill_wrk;
	cq = &pfvf->qset.cq[qidx];
	rbpool = cq->rbpool;
	free_ptrs = cq->pool_ptrs;

	while (cq->pool_ptrs) {
		if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) {
			/* Schedule a WQ if we fail to free at least half
			 * of the pointers, else enable NAPI for this RQ.
			 */
			if (!((free_ptrs - cq->pool_ptrs) > free_ptrs / 2)) {
				struct delayed_work *dwork;

				dwork = &wrk->pool_refill_work;
				schedule_delayed_work(dwork,
						      msecs_to_jiffies(100));
			} else {
				cq->refill_task_sched = false;
			}
			return;
		}
		pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
		cq->pool_ptrs--;
	}
	cq->refill_task_sched = false;
}

int otx2_config_nix_queues(struct otx2_nic *pfvf)
{
	int qidx, err;

	/* Initialize RX queues */
	for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
		u16 lpb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);

		err = otx2_rq_init(pfvf, qidx, lpb_aura);
		if (err)
			return err;
	}

	/* Initialize TX queues */
	for (qidx = 0; qidx < pfvf->hw.non_qos_queues; qidx++) {
		u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);

		err = otx2_sq_init(pfvf, qidx, sqb_aura);
		if (err)
			return err;
	}

	/* Initialize completion queues */
	for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) {
		err = otx2_cq_init(pfvf, qidx);
		if (err)
			return err;
	}

	pfvf->cq_op_addr = (__force u64 *)otx2_get_regaddr(pfvf,
							   NIX_LF_CQ_OP_STATUS);

	/* Initialize work queue for receive buffer refill */
	pfvf->refill_wrk = devm_kcalloc(pfvf->dev, pfvf->qset.cq_cnt,
					sizeof(struct refill_work), GFP_KERNEL);
	if (!pfvf->refill_wrk)
		return -ENOMEM;

	for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) {
		pfvf->refill_wrk[qidx].pf = pfvf;
		INIT_DELAYED_WORK(&pfvf->refill_wrk[qidx].pool_refill_work,
				  otx2_pool_refill_task);
	}
	return 0;
}

int otx2_config_nix(struct otx2_nic *pfvf)
{
	struct nix_lf_alloc_req  *nixlf;
	struct nix_lf_alloc_rsp *rsp;
	int err;

	pfvf->qset.xqe_size = pfvf->hw.xqe_size;

	/* Get memory to put this msg */
	nixlf = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox);
	if (!nixlf)
		return -ENOMEM;

	/* Set RQ/SQ/CQ counts */
	nixlf->rq_cnt = pfvf->hw.rx_queues;
	nixlf->sq_cnt = otx2_get_total_tx_queues(pfvf);
	nixlf->cq_cnt = pfvf->qset.cq_cnt;
	nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
	nixlf->rss_grps = MAX_RSS_GROUPS;
	nixlf->xqe_sz = pfvf->hw.xqe_size == 128 ? NIX_XQESZ_W16 : NIX_XQESZ_W64;
	/* We don't know absolute NPA LF idx attached.
	 * AF will replace 'RVU_DEFAULT_PF_FUNC' with
	 * NPA LF attached to this RVU PF/VF.
	 */
	nixlf->npa_func = RVU_DEFAULT_PF_FUNC;
	/* Disable alignment pad, enable L2 length check,
	 * enable L4 TCP/UDP checksum verification.
	 */
	nixlf->rx_cfg = BIT_ULL(33) | BIT_ULL(35) | BIT_ULL(37);

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		return err;

	rsp = (struct nix_lf_alloc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0,
							   &nixlf->hdr);
	if (IS_ERR(rsp))
		return PTR_ERR(rsp);

	if (rsp->qints < 1)
		return -ENXIO;

	return rsp->hdr.rc;
}

void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_hw *hw = &pfvf->hw;
	struct otx2_snd_queue *sq;
	int sqb, qidx;
	u64 iova, pa;

	for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
		sq = &qset->sq[qidx];
		if (!sq->sqb_ptrs)
			continue;
		for (sqb = 0; sqb < sq->sqb_count; sqb++) {
			if (!sq->sqb_ptrs[sqb])
				continue;
			iova = sq->sqb_ptrs[sqb];
			pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
			dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
			put_page(virt_to_page(phys_to_virt(pa)));
		}
		sq->sqb_count = 0;
	}
}

void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
		    u64 iova, int size)
{
	struct page *page;
	u64 pa;

	pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
	page = virt_to_head_page(phys_to_virt(pa));

	if (pool->page_pool) {
		page_pool_put_full_page(pool->page_pool, page, true);
	} else {
		dma_unmap_page_attrs(pfvf->dev, iova, size,
				     DMA_FROM_DEVICE,
				     DMA_ATTR_SKIP_CPU_SYNC);

		put_page(page);
	}
}

void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
{
	int pool_id, pool_start = 0, pool_end = 0, size = 0;
	struct otx2_pool *pool;
	u64 iova;

	if (type == AURA_NIX_SQ) {
		pool_start = otx2_get_pool_idx(pfvf, type, 0);
		pool_end =  pool_start + pfvf->hw.sqpool_cnt;
		size = pfvf->hw.sqb_size;
	}
	if (type == AURA_NIX_RQ) {
		pool_start = otx2_get_pool_idx(pfvf, type, 0);
		pool_end = pfvf->hw.rqpool_cnt;
		size = pfvf->rbsize;
	}

	/* Free SQB and RQB pointers from the aura pool */
	for (pool_id = pool_start; pool_id < pool_end; pool_id++) {
		iova = otx2_aura_allocptr(pfvf, pool_id);
		pool = &pfvf->qset.pool[pool_id];
		while (iova) {
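			/* RQ buffers were freed to the aura with
			 * OTX2_HEAD_ROOM added, so strip it to recover
			 * the originally mapped DMA address.
			 */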
			if (type == AURA_NIX_RQ)
				iova -= OTX2_HEAD_ROOM;

			otx2_free_bufs(pfvf, pool, iova, size);

			iova = otx2_aura_allocptr(pfvf, pool_id);
		}
	}
}

void otx2_aura_pool_free(struct otx2_nic *pfvf)
{
	struct otx2_pool *pool;
	int pool_id;

	if (!pfvf->qset.pool)
		return;

	for (pool_id = 0; pool_id < pfvf->hw.pool_cnt; pool_id++) {
		pool = &pfvf->qset.pool[pool_id];
		qmem_free(pfvf->dev, pool->stack);
		qmem_free(pfvf->dev, pool->fc_addr);
		page_pool_destroy(pool->page_pool);
		pool->page_pool = NULL;
	}
	devm_kfree(pfvf->dev, pfvf->qset.pool);
	pfvf->qset.pool = NULL;
}

int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
		   int pool_id, int numptrs)
{
	struct npa_aq_enq_req *aq;
	struct otx2_pool *pool;
	int err;

	pool = &pfvf->qset.pool[pool_id];

	/* Allocate memory for HW to update Aura count.
	 * Alloc one cache line, so that it fits all FC_STYPE modes.
	 */
	if (!pool->fc_addr) {
		err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN);
		if (err)
			return err;
	}

	/* Initialize this aura's context via AF */
	aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
	if (!aq) {
		/* Shared mbox memory buffer is full, flush it and retry */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err)
			return err;
		aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
		if (!aq)
			return -ENOMEM;
	}

	aq->aura_id = aura_id;
	/* Will be filled by AF with correct pool context address */
	aq->aura.pool_addr = pool_id;
	aq->aura.pool_caching = 1;
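	/* Aura thresholds (bp, avg_level etc.) are expressed on a 0-255
	 * scale; shift maps the aura's pointer count into that range.
	 */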
	aq->aura.shift = ilog2(numptrs) - 8;
	aq->aura.count = numptrs;
	aq->aura.limit = numptrs;
	aq->aura.avg_level = 255;
	aq->aura.ena = 1;
	aq->aura.fc_ena = 1;
	aq->aura.fc_addr = pool->fc_addr->iova;
	aq->aura.fc_hyst_bits = 0; /* Store count on all updates */

	/* Enable backpressure for RQ aura */
	if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) {
		aq->aura.bp_ena = 0;
		/* If NIX1 LF is attached then specify NIX1_RX.
		 *
		 * Below NPA_AURA_S[BP_ENA] is set according to the
		 * NPA_BPINTF_E enumeration given as:
		 * 0x0 + a*0x1 where 'a' is 0 for NIX0_RX and 1 for NIX1_RX so
		 * NIX0_RX is 0x0 + 0*0x1 = 0
		 * NIX1_RX is 0x0 + 1*0x1 = 1
		 * But in HRM it is given that
		 * "NPA_AURA_S[BP_ENA](w1[33:32]) - Enable aura backpressure to
		 * NIX-RX based on [BP] level. One bit per NIX-RX; index
		 * enumerated by NPA_BPINTF_E."
		 */
		if (pfvf->nix_blkaddr == BLKADDR_NIX1)
			aq->aura.bp_ena = 1;
#ifdef CONFIG_DCB
		aq->aura.nix0_bpid = pfvf->bpid[pfvf->queue_to_pfc_map[aura_id]];
#else
		aq->aura.nix0_bpid = pfvf->bpid[0];
#endif

		/* Set backpressure level for RQ's Aura */
		aq->aura.bp = RQ_BP_LVL_AURA;
	}

	/* Fill AQ info */
	aq->ctype = NPA_AQ_CTYPE_AURA;
	aq->op = NPA_AQ_INSTOP_INIT;

	return 0;
}

int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
		   int stack_pages, int numptrs, int buf_size, int type)
{
	struct page_pool_params pp_params = { 0 };
	struct npa_aq_enq_req *aq;
	struct otx2_pool *pool;
	int err;

	pool = &pfvf->qset.pool[pool_id];
	/* Alloc memory for stack which is used to store buffer pointers */
	err = qmem_alloc(pfvf->dev, &pool->stack,
			 stack_pages, pfvf->hw.stack_pg_bytes);
	if (err)
		return err;

	pool->rbsize = buf_size;

	/* Initialize this pool's context via AF */
	aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
	if (!aq) {
		/* Shared mbox memory buffer is full, flush it and retry */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err) {
			qmem_free(pfvf->dev, pool->stack);
			return err;
		}
		aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
		if (!aq) {
			qmem_free(pfvf->dev, pool->stack);
			return -ENOMEM;
		}
	}

	aq->aura_id = pool_id;
	aq->pool.stack_base = pool->stack->iova;
	aq->pool.stack_caching = 1;
	aq->pool.ena = 1;
	aq->pool.buf_size = buf_size / 128;
	aq->pool.stack_max_pages = stack_pages;
	aq->pool.shift = ilog2(numptrs) - 8;
	aq->pool.ptr_start = 0;
	aq->pool.ptr_end = ~0ULL;

	/* Fill AQ info */
	aq->ctype = NPA_AQ_CTYPE_POOL;
	aq->op = NPA_AQ_INSTOP_INIT;

	if (type != AURA_NIX_RQ) {
		pool->page_pool = NULL;
		return 0;
	}

	pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP;
	pp_params.pool_size = numptrs;
	pp_params.nid = NUMA_NO_NODE;
	pp_params.dev = pfvf->dev;
	pp_params.dma_dir = DMA_FROM_DEVICE;
	pool->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(pool->page_pool)) {
		netdev_err(pfvf->netdev, "Creation of page pool failed\n");
		return PTR_ERR(pool->page_pool);
	}

	return 0;
}

int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
{
	int qidx, pool_id, stack_pages, num_sqbs;
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_hw *hw = &pfvf->hw;
	struct otx2_snd_queue *sq;
	struct otx2_pool *pool;
	dma_addr_t bufptr;
	int err, ptr;

	/* Calculate number of SQBs needed.
	 *
	 * For a 128 byte SQE and a 4K size SQB, 31 SQEs will fit in one SQB.
	 * The last SQE slot is used for pointing to the next SQB.
	 */
	num_sqbs = (hw->sqb_size / 128) - 1;
	num_sqbs = (qset->sqe_cnt + num_sqbs) / num_sqbs;

	/* Get no of stack pages needed */
	stack_pages =
		(num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;

	for (qidx = 0; qidx < hw->non_qos_queues; qidx++) {
		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
		/* Initialize aura context */
		err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
		if (err)
			goto fail;

		/* Initialize pool context */
		err = otx2_pool_init(pfvf, pool_id, stack_pages,
				     num_sqbs, hw->sqb_size, AURA_NIX_SQ);
		if (err)
			goto fail;
	}

	/* Flush accumulated messages */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto fail;

	/* Allocate pointers and free them to aura/pool */
	for (qidx = 0; qidx < hw->non_qos_queues; qidx++) {
		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
		pool = &pfvf->qset.pool[pool_id];

		sq = &qset->sq[qidx];
		sq->sqb_count = 0;
		sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL);
		if (!sq->sqb_ptrs) {
			err = -ENOMEM;
			goto err_mem;
		}

		for (ptr = 0; ptr < num_sqbs; ptr++) {
			err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
			if (err)
				goto err_mem;
			pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
			sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
		}
	}

err_mem:
	return err ? -ENOMEM : 0;

fail:
	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
	otx2_aura_pool_free(pfvf);
	return err;
}

int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;
	int stack_pages, pool_id, rq;
	struct otx2_pool *pool;
	int err, ptr, num_ptrs;
	dma_addr_t bufptr;

	num_ptrs = pfvf->qset.rqe_cnt;

	stack_pages =
		(num_ptrs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;

	for (rq = 0; rq < hw->rx_queues; rq++) {
		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, rq);
		/* Initialize aura context */
		err = otx2_aura_init(pfvf, pool_id, pool_id, num_ptrs);
		if (err)
			goto fail;
	}
	for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
		err = otx2_pool_init(pfvf, pool_id, stack_pages,
				     num_ptrs, pfvf->rbsize, AURA_NIX_RQ);
		if (err)
			goto fail;
	}

	/* Flush accumulated messages */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto fail;

	/* Allocate pointers and free them to aura/pool */
	for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
		pool = &pfvf->qset.pool[pool_id];
		for (ptr = 0; ptr < num_ptrs; ptr++) {
			err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
			if (err)
				return -ENOMEM;
			pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
						   bufptr + OTX2_HEAD_ROOM);
		}
	}
	return 0;
fail:
	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
	otx2_aura_pool_free(pfvf);
	return err;
}

int otx2_config_npa(struct otx2_nic *pfvf)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct npa_lf_alloc_req  *npalf;
	struct otx2_hw *hw = &pfvf->hw;
	int aura_cnt;

	/* Pool - Stack of free buffer pointers
	 * Aura - Alloc/frees pointers from/to pool for NIX DMA.
	 */

	if (!hw->pool_cnt)
		return -EINVAL;

	qset->pool = devm_kcalloc(pfvf->dev, hw->pool_cnt,
				  sizeof(struct otx2_pool), GFP_KERNEL);
	if (!qset->pool)
		return -ENOMEM;

	/* Get memory to put this msg */
	npalf = otx2_mbox_alloc_msg_npa_lf_alloc(&pfvf->mbox);
	if (!npalf)
		return -ENOMEM;

	/* Set aura and pool counts */
	npalf->nr_pools = hw->pool_cnt;
	aura_cnt = ilog2(roundup_pow_of_two(hw->pool_cnt));
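	/* aura_sz is an encoded size where 1 corresponds to 128 auras and
	 * each increment doubles the count, hence 'aura_cnt - 6' below
	 * for pool counts of 128 or more.
	 */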
	npalf->aura_sz = (aura_cnt >= ilog2(128)) ? (aura_cnt - 6) : 1;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

int otx2_detach_resources(struct mbox *mbox)
{
	struct rsrc_detach *detach;

	mutex_lock(&mbox->lock);
	detach = otx2_mbox_alloc_msg_detach_resources(mbox);
	if (!detach) {
		mutex_unlock(&mbox->lock);
		return -ENOMEM;
	}

	/* detach all */
	detach->partial = false;

	/* Send detach request to AF */
	otx2_mbox_msg_send(&mbox->mbox, 0);
	mutex_unlock(&mbox->lock);
	return 0;
}
EXPORT_SYMBOL(otx2_detach_resources);

int otx2_attach_npa_nix(struct otx2_nic *pfvf)
{
	struct rsrc_attach *attach;
	struct msg_req *msix;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	/* Get memory to put this msg */
	attach = otx2_mbox_alloc_msg_attach_resources(&pfvf->mbox);
	if (!attach) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	attach->npalf = true;
	attach->nixlf = true;

	/* Send attach request to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	pfvf->nix_blkaddr = BLKADDR_NIX0;

	/* If the platform has two NIX blocks then LF may be
	 * allocated from NIX1.
	 */
	if (otx2_read64(pfvf, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_NIX1)) & 0x1FFULL)
		pfvf->nix_blkaddr = BLKADDR_NIX1;

	/* Get NPA and NIX MSIX vector offsets */
	msix = otx2_mbox_alloc_msg_msix_offset(&pfvf->mbox);
	if (!msix) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}
	mutex_unlock(&pfvf->mbox.lock);

	if (pfvf->hw.npa_msixoff == MSIX_VECTOR_INVALID ||
	    pfvf->hw.nix_msixoff == MSIX_VECTOR_INVALID) {
		dev_err(pfvf->dev,
			"RVUPF: Invalid MSIX vector offset for NPA/NIX\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(otx2_attach_npa_nix);

void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
{
	struct hwctx_disable_req *req;

	mutex_lock(&mbox->lock);
	/* Request AQ to disable this context */
	if (npa)
		req = otx2_mbox_alloc_msg_npa_hwctx_disable(mbox);
	else
		req = otx2_mbox_alloc_msg_nix_hwctx_disable(mbox);

	if (!req) {
		mutex_unlock(&mbox->lock);
		return;
	}

	req->ctype = type;

	if (otx2_sync_mbox_msg(mbox))
		dev_err(mbox->pfvf->dev, "%s failed to disable context\n",
			__func__);

	mutex_unlock(&mbox->lock);
}

int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
{
	struct nix_bp_cfg_req *req;

	if (enable)
		req = otx2_mbox_alloc_msg_nix_bp_enable(&pfvf->mbox);
	else
		req = otx2_mbox_alloc_msg_nix_bp_disable(&pfvf->mbox);

	if (!req)
		return -ENOMEM;

	req->chan_base = 0;
#ifdef CONFIG_DCB
	req->chan_cnt = pfvf->pfc_en ? IEEE_8021QAZ_MAX_TCS : 1;
	req->bpid_per_chan = pfvf->pfc_en ? 1 : 0;
#else
	req->chan_cnt =  1;
	req->bpid_per_chan = 0;
#endif

	return otx2_sync_mbox_msg(&pfvf->mbox);
}
EXPORT_SYMBOL(otx2_nix_config_bp);

/* Mbox message handlers */
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
			    struct cgx_stats_rsp *rsp)
{
	int id;

	for (id = 0; id < CGX_RX_STATS_COUNT; id++)
		pfvf->hw.cgx_rx_stats[id] = rsp->rx_stats[id];
	for (id = 0; id < CGX_TX_STATS_COUNT; id++)
		pfvf->hw.cgx_tx_stats[id] = rsp->tx_stats[id];
}

void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
				struct cgx_fec_stats_rsp *rsp)
{
	pfvf->hw.cgx_fec_corr_blks += rsp->fec_corr_blks;
	pfvf->hw.cgx_fec_uncorr_blks += rsp->fec_uncorr_blks;
}

void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
			       struct npa_lf_alloc_rsp *rsp)
{
	pfvf->hw.stack_pg_ptrs = rsp->stack_pg_ptrs;
	pfvf->hw.stack_pg_bytes = rsp->stack_pg_bytes;
}
EXPORT_SYMBOL(mbox_handler_npa_lf_alloc);

void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
			       struct nix_lf_alloc_rsp *rsp)
{
	pfvf->hw.sqb_size = rsp->sqb_size;
	pfvf->hw.rx_chan_base = rsp->rx_chan_base;
	pfvf->hw.tx_chan_base = rsp->tx_chan_base;
	pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx;
	pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx;
	pfvf->hw.cgx_links = rsp->cgx_links;
	pfvf->hw.lbk_links = rsp->lbk_links;
	pfvf->hw.tx_link = rsp->tx_link;
}
EXPORT_SYMBOL(mbox_handler_nix_lf_alloc);

void mbox_handler_msix_offset(struct otx2_nic *pfvf,
			      struct msix_offset_rsp *rsp)
{
	pfvf->hw.npa_msixoff = rsp->npa_msixoff;
	pfvf->hw.nix_msixoff = rsp->nix_msixoff;
}
EXPORT_SYMBOL(mbox_handler_msix_offset);

void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
				struct nix_bp_cfg_rsp *rsp)
{
	int chan, chan_id;

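	/* Each chan_bpid[] entry carries the channel id in bits <16:10>
	 * and the backpressure id in bits <9:0>, as unpacked below.
	 */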
	for (chan = 0; chan < rsp->chan_cnt; chan++) {
		chan_id = ((rsp->chan_bpid[chan] >> 10) & 0x7F);
		pfvf->bpid[chan_id] = rsp->chan_bpid[chan] & 0x3FF;
	}
}
EXPORT_SYMBOL(mbox_handler_nix_bp_enable);

void otx2_free_cints(struct otx2_nic *pfvf, int n)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_hw *hw = &pfvf->hw;
	int irq, qidx;

	for (qidx = 0, irq = hw->nix_msixoff + NIX_LF_CINT_VEC_START;
	     qidx < n;
	     qidx++, irq++) {
		int vector = pci_irq_vector(pfvf->pdev, irq);

		irq_set_affinity_hint(vector, NULL);
		free_cpumask_var(hw->affinity_mask[irq]);
		free_irq(vector, &qset->napi[qidx]);
	}
}

void otx2_set_cints_affinity(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;
	int vec, cpu, irq, cint;

	vec = hw->nix_msixoff + NIX_LF_CINT_VEC_START;
	cpu = cpumask_first(cpu_online_mask);

	/* CQ interrupts */
	for (cint = 0; cint < pfvf->hw.cint_cnt; cint++, vec++) {
		if (!alloc_cpumask_var(&hw->affinity_mask[vec], GFP_KERNEL))
			return;

		cpumask_set_cpu(cpu, hw->affinity_mask[vec]);

		irq = pci_irq_vector(pfvf->pdev, vec);
		irq_set_affinity_hint(irq, hw->affinity_mask[vec]);

		cpu = cpumask_next(cpu, cpu_online_mask);
		if (unlikely(cpu >= nr_cpu_ids))
			cpu = 0;
	}
}

static u32 get_dwrr_mtu(struct otx2_nic *pfvf, struct nix_hw_info *hw)
{
	if (is_otx2_lbkvf(pfvf->pdev)) {
		pfvf->hw.smq_link_type = SMQ_LINK_TYPE_LBK;
		return hw->lbk_dwrr_mtu;
	}

	pfvf->hw.smq_link_type = SMQ_LINK_TYPE_RPM;
	return hw->rpm_dwrr_mtu;
}

u16 otx2_get_max_mtu(struct otx2_nic *pfvf)
{
	struct nix_hw_info *rsp;
	struct msg_req *req;
	u16 max_mtu;
	int rc;

	mutex_lock(&pfvf->mbox.lock);

	req = otx2_mbox_alloc_msg_nix_get_hw_info(&pfvf->mbox);
	if (!req) {
		rc =  -ENOMEM;
		goto out;
	}

	rc = otx2_sync_mbox_msg(&pfvf->mbox);
	if (!rc) {
		rsp = (struct nix_hw_info *)
		       otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);

		/* HW counts VLAN insertion bytes (8 for double tag)
		 * irrespective of whether the SQE requests VLAN insertion
		 * in the packet or not. Hence these 8 bytes have to be
		 * discounted from the max packet size, otherwise HW will
		 * throw SMQ errors.
		 */
		max_mtu = rsp->max_mtu - 8 - OTX2_ETH_HLEN;

		/* Also save DWRR MTU, needed for DWRR weight calculation */
		pfvf->hw.dwrr_mtu = get_dwrr_mtu(pfvf, rsp);
		if (!pfvf->hw.dwrr_mtu)
			pfvf->hw.dwrr_mtu = 1;
	}

out:
	mutex_unlock(&pfvf->mbox.lock);
	if (rc) {
		dev_warn(pfvf->dev,
			 "Failed to get MTU from hardware, setting default value (1500)\n");
		max_mtu = 1500;
	}
	return max_mtu;
}
EXPORT_SYMBOL(otx2_get_max_mtu);

int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t features)
{
	netdev_features_t changed = features ^ netdev->features;
	struct otx2_nic *pfvf = netdev_priv(netdev);
	bool ntuple = !!(features & NETIF_F_NTUPLE);
	bool tc = !!(features & NETIF_F_HW_TC);

	if ((changed & NETIF_F_NTUPLE) && !ntuple)
		otx2_destroy_ntuple_flows(pfvf);

	if ((changed & NETIF_F_NTUPLE) && ntuple) {
		if (!pfvf->flow_cfg->max_flows) {
			netdev_err(netdev,
				   "Can't enable NTUPLE, MCAM entries not allocated\n");
			return -EINVAL;
		}
	}

	if ((changed & NETIF_F_HW_TC) && tc) {
		if (!pfvf->flow_cfg->max_flows) {
			netdev_err(netdev,
				   "Can't enable TC, MCAM entries not allocated\n");
			return -EINVAL;
		}
	}

	if ((changed & NETIF_F_HW_TC) && !tc &&
	    pfvf->flow_cfg && pfvf->flow_cfg->nr_flows) {
		netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n");
		return -EBUSY;
	}

	if ((changed & NETIF_F_NTUPLE) && ntuple &&
	    (netdev->features & NETIF_F_HW_TC) && !(changed & NETIF_F_HW_TC)) {
		netdev_err(netdev,
			   "Can't enable NTUPLE when TC is active, disable TC and retry\n");
		return -EINVAL;
	}

	if ((changed & NETIF_F_HW_TC) && tc &&
	    (netdev->features & NETIF_F_NTUPLE) && !(changed & NETIF_F_NTUPLE)) {
		netdev_err(netdev,
			   "Can't enable TC when NTUPLE is active, disable NTUPLE and retry\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(otx2_handle_ntuple_tc_features);

#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
int __weak								\
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf,		\
				struct _req_type *req,			\
				struct _rsp_type *rsp)			\
{									\
	/* Nothing to do here */					\
	return 0;							\
}									\
EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name);
MBOX_UP_CGX_MESSAGES
MBOX_UP_MCS_MESSAGES
#undef M