// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Physical Function ethernet driver
 *
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/of.h>
#include <linux/if_vlan.h>
#include <linux/iommu.h>
#include <net/ip.h>

#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_txrx.h"
#include "otx2_struct.h"
#include "otx2_ptp.h"
#include "cn10k.h"
#include <rvu_trace.h>

#define DRV_NAME	"rvu_nicpf"
#define DRV_STRING	"Marvell RVU NIC Physical Function Driver"

/* Supported devices */
static const struct pci_device_id otx2_pf_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF) },
	{ 0, }  /* end of table */
};

MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_pf_id_table);

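/* Mailbox owner types: TYPE_PFAF marks the PF <-> AF mailbox, whose messages
 * go through a bounce buffer (see otx2_sync_mbox_bbuf() usage in
 * otx2_queue_work()), while TYPE_PFVF marks the PF <-> VF mailboxes, which
 * are accessed in place.
 */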
enum {
	TYPE_PFAF,
	TYPE_PFVF,
};

static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable);
static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable);

static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
{
	bool if_up = netif_running(netdev);
	int err = 0;

	if (if_up)
		otx2_stop(netdev);

	netdev_info(netdev, "Changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (if_up)
		err = otx2_open(netdev);

	return err;
}

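/* Note: INTR_MASK(n), as defined elsewhere in this driver, expands to a mask
 * with the low n bits set (all ones for n >= 64), so the writes below act on
 * VFs 0..n-1 of the given 64-VF-wide interrupt register.
 */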
static void otx2_disable_flr_me_intr(struct otx2_nic *pf)
{
	int irq, vfs = pf->total_vfs;

	/* Disable VFs ME interrupts */
	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0);
	free_irq(irq, pf);

	/* Disable VFs FLR interrupts */
	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0);
	free_irq(irq, pf);

	if (vfs <= 64)
		return;

	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME1);
	free_irq(irq, pf);

	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR1);
	free_irq(irq, pf);
}

static void otx2_flr_wq_destroy(struct otx2_nic *pf)
{
	if (!pf->flr_wq)
		return;
	destroy_workqueue(pf->flr_wq);
	pf->flr_wq = NULL;
	devm_kfree(pf->dev, pf->flr_wrk);
}

static void otx2_flr_handler(struct work_struct *work)
{
	struct flr_work *flrwork = container_of(work, struct flr_work, work);
	struct otx2_nic *pf = flrwork->pf;
	struct mbox *mbox = &pf->mbox;
	struct msg_req *req;
	int vf, reg = 0;

	vf = flrwork - pf->flr_wrk;

	mutex_lock(&mbox->lock);
	req = otx2_mbox_alloc_msg_vf_flr(mbox);
	if (!req) {
		mutex_unlock(&mbox->lock);
		return;
	}
	req->hdr.pcifunc &= RVU_PFVF_FUNC_MASK;
	req->hdr.pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;

	if (!otx2_sync_mbox_msg(&pf->mbox)) {
		if (vf >= 64) {
			reg = 1;
			vf = vf - 64;
		}
		/* clear transaction pending bit */
		otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
		otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
	}

	mutex_unlock(&mbox->lock);
}

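/* FLR flow: when a VF triggers a Function Level Reset, the handler below
 * queues that VF's flr_work, then clears and masks the VF's interrupt bit.
 * otx2_flr_handler() asks the AF (via the VF_FLR mailbox message) to clean
 * up the VF's resources, and only then clears the transaction-pending bit
 * and re-enables the FLR interrupt for that VF.
 */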
static irqreturn_t otx2_pf_flr_intr_handler(int irq, void *pf_irq)
{
	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
	int reg, dev, vf, start_vf, num_reg = 1;
	u64 intr;

	if (pf->total_vfs > 64)
		num_reg = 2;

	for (reg = 0; reg < num_reg; reg++) {
		intr = otx2_read64(pf, RVU_PF_VFFLR_INTX(reg));
		if (!intr)
			continue;
		start_vf = 64 * reg;
		for (vf = 0; vf < 64; vf++) {
			if (!(intr & BIT_ULL(vf)))
				continue;
			dev = vf + start_vf;
			queue_work(pf->flr_wq, &pf->flr_wrk[dev].work);
			/* Clear interrupt */
			otx2_write64(pf, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
			/* Disable the interrupt */
			otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(reg),
				     BIT_ULL(vf));
		}
	}
	return IRQ_HANDLED;
}

static irqreturn_t otx2_pf_me_intr_handler(int irq, void *pf_irq)
{
	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
	int vf, reg, num_reg = 1;
	u64 intr;

	if (pf->total_vfs > 64)
		num_reg = 2;

	for (reg = 0; reg < num_reg; reg++) {
		intr = otx2_read64(pf, RVU_PF_VFME_INTX(reg));
		if (!intr)
			continue;
		for (vf = 0; vf < 64; vf++) {
			if (!(intr & BIT_ULL(vf)))
				continue;
			/* clear trpend bit */
			otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
			/* clear interrupt */
			otx2_write64(pf, RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
		}
	}
	return IRQ_HANDLED;
}

static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
{
	struct otx2_hw *hw = &pf->hw;
	char *irq_name;
	int ret;

	/* Register ME interrupt handler */
	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE];
	snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc));
	ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0),
			  otx2_pf_me_intr_handler, 0, irq_name, pf);
	if (ret) {
		dev_err(pf->dev,
			"RVUPF: IRQ registration failed for ME0\n");
	}

	/* Register FLR interrupt handler */
	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE];
	snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc));
	ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0),
			  otx2_pf_flr_intr_handler, 0, irq_name, pf);
	if (ret) {
		dev_err(pf->dev,
			"RVUPF: IRQ registration failed for FLR0\n");
		return ret;
	}

	if (numvfs > 64) {
		irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE];
		snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1",
			 rvu_get_pf(pf->pcifunc));
		ret = request_irq(pci_irq_vector
				  (pf->pdev, RVU_PF_INT_VEC_VFME1),
				  otx2_pf_me_intr_handler, 0, irq_name, pf);
		if (ret) {
			dev_err(pf->dev,
				"RVUPF: IRQ registration failed for ME1\n");
		}
		irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE];
		snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1",
			 rvu_get_pf(pf->pcifunc));
		ret = request_irq(pci_irq_vector
				  (pf->pdev, RVU_PF_INT_VEC_VFFLR1),
				  otx2_pf_flr_intr_handler, 0, irq_name, pf);
		if (ret) {
			dev_err(pf->dev,
				"RVUPF: IRQ registration failed for FLR1\n");
			return ret;
		}
	}

	/* Enable ME interrupt for all VFs */
	otx2_write64(pf, RVU_PF_VFME_INTX(0), INTR_MASK(numvfs));
	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(numvfs));

	/* Enable FLR interrupt for all VFs */
	otx2_write64(pf, RVU_PF_VFFLR_INTX(0), INTR_MASK(numvfs));
	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(numvfs));

	if (numvfs > 64) {
		numvfs -= 64;

		otx2_write64(pf, RVU_PF_VFME_INTX(1), INTR_MASK(numvfs));
		otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(1),
			     INTR_MASK(numvfs));

		otx2_write64(pf, RVU_PF_VFFLR_INTX(1), INTR_MASK(numvfs));
		otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(1),
			     INTR_MASK(numvfs));
	}
	return 0;
}

static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
{
	int vf;

	pf->flr_wq = alloc_workqueue("otx2_pf_flr_wq",
				     WQ_UNBOUND | WQ_HIGHPRI, 1);
	if (!pf->flr_wq)
		return -ENOMEM;

	pf->flr_wrk = devm_kcalloc(pf->dev, num_vfs,
				   sizeof(struct flr_work), GFP_KERNEL);
	if (!pf->flr_wrk) {
		destroy_workqueue(pf->flr_wq);
		return -ENOMEM;
	}

	for (vf = 0; vf < num_vfs; vf++) {
		pf->flr_wrk[vf].pf = pf;
		INIT_WORK(&pf->flr_wrk[vf].work, otx2_flr_handler);
	}

	return 0;
}

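/* 'first' and 'mdevs' bound the mbox device indices scanned; 'intr' is the
 * raw interrupt word for that range, so device i maps to bit (i - first).
 * For VFs 64..127 the caller passes first = 64 together with the second
 * interrupt register's contents (see otx2_pfvf_mbox_intr_handler() below).
 */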
static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
			    int first, int mdevs, u64 intr, int type)
{
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;
	int i;

	for (i = first; i < mdevs; i++) {
		/* Interrupt bit positions start at 0 relative to 'first' */
		if (!(intr & BIT_ULL(i - first)))
			continue;

		mbox = &mw->mbox;
		mdev = &mbox->dev[i];
		if (type == TYPE_PFAF)
			otx2_sync_mbox_bbuf(mbox, i);
		hdr = mdev->mbase + mbox->rx_start;
		/* hdr->num_msgs is set to zero immediately here to ensure
		 * that it holds a correct value the next time this work
		 * is scheduled from the interrupt handler.
		 * pf->mbox.num_msgs holds the data for use in
		 * pfaf_mbox_handler and pf->mbox.up_num_msgs holds the data
		 * for use in pfaf_mbox_up_handler.
		 */
		if (hdr->num_msgs) {
			mw[i].num_msgs = hdr->num_msgs;
			hdr->num_msgs = 0;
			if (type == TYPE_PFAF)
				memset(mbox->hwbase + mbox->rx_start, 0,
				       ALIGN(sizeof(struct mbox_hdr),
					     sizeof(u64)));

			queue_work(mbox_wq, &mw[i].mbox_wrk);
		}

		mbox = &mw->mbox_up;
		mdev = &mbox->dev[i];
		if (type == TYPE_PFAF)
			otx2_sync_mbox_bbuf(mbox, i);
		hdr = mdev->mbase + mbox->rx_start;
		if (hdr->num_msgs) {
			mw[i].up_num_msgs = hdr->num_msgs;
			hdr->num_msgs = 0;
			if (type == TYPE_PFAF)
				memset(mbox->hwbase + mbox->rx_start, 0,
				       ALIGN(sizeof(struct mbox_hdr),
					     sizeof(u64)));

			queue_work(mbox_wq, &mw[i].mbox_up_wrk);
		}
	}
}

static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev,
				  struct otx2_mbox *pfvf_mbox, void *bbuf_base,
				  int devid)
{
	struct otx2_mbox_dev *src_mdev = mdev;
	int offset;

	/* Msgs are already copied, trigger VF's mbox irq */
	smp_wmb();

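	/* Each mbox device has its own trigger register; its offset is the
	 * base trigger offset plus the device id shifted by tr_shift.
	 */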
	offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift);
	writeq(1, (void __iomem *)pfvf_mbox->reg_base + offset);

	/* Restore VF's mbox bounce buffer region address */
	src_mdev->mbase = bbuf_base;
}

static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
				     struct otx2_mbox *src_mbox,
				     int dir, int vf, int num_msgs)
{
	struct otx2_mbox_dev *src_mdev, *dst_mdev;
	struct mbox_hdr *mbox_hdr;
	struct mbox_hdr *req_hdr;
	struct mbox *dst_mbox;
	int dst_size, err;

	if (dir == MBOX_DIR_PFAF) {
		/* Set VF's mailbox memory as PF's bounce buffer memory, so
		 * that explicit copying of VF's msgs to PF=>AF mbox region
		 * and AF=>PF responses to VF's mbox region can be avoided.
		 */
		src_mdev = &src_mbox->dev[vf];
		mbox_hdr = src_mbox->hwbase +
				src_mbox->rx_start + (vf * MBOX_SIZE);

		dst_mbox = &pf->mbox;
		dst_size = dst_mbox->mbox.tx_size -
				ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
		/* Check if msgs fit into destination area and have a valid size */
		if (mbox_hdr->msg_size > dst_size || !mbox_hdr->msg_size)
			return -EINVAL;

		dst_mdev = &dst_mbox->mbox.dev[0];

		mutex_lock(&pf->mbox.lock);
		dst_mdev->mbase = src_mdev->mbase;
		dst_mdev->msg_size = mbox_hdr->msg_size;
		dst_mdev->num_msgs = num_msgs;
		err = otx2_sync_mbox_msg(dst_mbox);
		if (err) {
			dev_warn(pf->dev,
				 "AF not responding to VF%d messages\n", vf);
			/* restore PF mbase and exit */
			dst_mdev->mbase = pf->mbox.bbuf_base;
			mutex_unlock(&pf->mbox.lock);
			return err;
		}
		/* At this point, all the VF messages sent to AF are acked
		 * with proper responses and responses are copied to VF
		 * mailbox hence raise interrupt to VF.
		 */
		req_hdr = (struct mbox_hdr *)(dst_mdev->mbase +
					      dst_mbox->mbox.rx_start);
		req_hdr->num_msgs = num_msgs;

		otx2_forward_msg_pfvf(dst_mdev, &pf->mbox_pfvf[0].mbox,
				      pf->mbox.bbuf_base, vf);
		mutex_unlock(&pf->mbox.lock);
	} else if (dir == MBOX_DIR_PFVF_UP) {
		src_mdev = &src_mbox->dev[0];
		mbox_hdr = src_mbox->hwbase + src_mbox->rx_start;
		req_hdr = (struct mbox_hdr *)(src_mdev->mbase +
					      src_mbox->rx_start);
		req_hdr->num_msgs = num_msgs;

		dst_mbox = &pf->mbox_pfvf[0];
		dst_size = dst_mbox->mbox_up.tx_size -
				ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
		/* Check if msgs fit into destination area */
		if (mbox_hdr->msg_size > dst_size)
			return -EINVAL;

		dst_mdev = &dst_mbox->mbox_up.dev[vf];
		dst_mdev->mbase = src_mdev->mbase;
		dst_mdev->msg_size = mbox_hdr->msg_size;
		dst_mdev->num_msgs = mbox_hdr->num_msgs;
		err = otx2_sync_mbox_up_msg(dst_mbox, vf);
		if (err) {
			dev_warn(pf->dev,
				 "VF%d is not responding to mailbox\n", vf);
			return err;
		}
	} else if (dir == MBOX_DIR_VFPF_UP) {
		req_hdr = (struct mbox_hdr *)(src_mbox->dev[0].mbase +
					      src_mbox->rx_start);
		req_hdr->num_msgs = num_msgs;
		otx2_forward_msg_pfvf(&pf->mbox_pfvf->mbox_up.dev[vf],
				      &pf->mbox.mbox_up,
				      pf->mbox_pfvf[vf].bbuf_base,
				      0);
	}

	return 0;
}

static void otx2_pfvf_mbox_handler(struct work_struct *work)
{
	struct mbox_msghdr *msg = NULL;
	int offset, vf_idx, id, err;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *req_hdr;
	struct otx2_mbox *mbox;
	struct mbox *vf_mbox;
	struct otx2_nic *pf;

	vf_mbox = container_of(work, struct mbox, mbox_wrk);
	pf = vf_mbox->pfvf;
	vf_idx = vf_mbox - pf->mbox_pfvf;

	mbox = &pf->mbox_pfvf[0].mbox;
	mdev = &mbox->dev[vf_idx];
	req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);

	offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < vf_mbox->num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
					     offset);

		if (msg->sig != OTX2_MBOX_REQ_SIG)
			goto inval_msg;

		/* Set VF's number in each of the msgs */
		msg->pcifunc &= RVU_PFVF_FUNC_MASK;
		msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK;
		offset = msg->next_msgoff;
	}
	err = otx2_forward_vf_mbox_msgs(pf, mbox, MBOX_DIR_PFAF, vf_idx,
					vf_mbox->num_msgs);
	if (err)
		goto inval_msg;
	return;

inval_msg:
	otx2_reply_invalid_msg(mbox, vf_idx, 0, msg->id);
	otx2_mbox_msg_send(mbox, vf_idx);
}

static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
{
	struct mbox *vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
	struct otx2_nic *pf = vf_mbox->pfvf;
	struct otx2_mbox_dev *mdev;
	int offset, id, vf_idx = 0;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;

	vf_idx = vf_mbox - pf->mbox_pfvf;
	mbox = &pf->mbox_pfvf[0].mbox_up;
	mdev = &mbox->dev[vf_idx];

	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < vf_mbox->up_num_msgs; id++) {
		msg = mdev->mbase + offset;

		if (msg->id >= MBOX_MSG_MAX) {
			dev_err(pf->dev,
				"Mbox msg with unknown ID 0x%x\n", msg->id);
			goto end;
		}

		if (msg->sig != OTX2_MBOX_RSP_SIG) {
			dev_err(pf->dev,
				"Mbox msg with wrong signature %x, ID 0x%x\n",
				msg->sig, msg->id);
			goto end;
		}

		switch (msg->id) {
		case MBOX_MSG_CGX_LINK_EVENT:
			break;
		default:
			if (msg->rc)
				dev_err(pf->dev,
					"Mbox msg response has err %d, ID 0x%x\n",
					msg->rc, msg->id);
			break;
		}

end:
		offset = mbox->rx_start + msg->next_msgoff;
		if (mdev->msgs_acked == (vf_mbox->up_num_msgs - 1))
			__otx2_mbox_reset(mbox, 0);
		mdev->msgs_acked++;
	}
}

540 
541 static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
542 {
543 	struct otx2_nic *pf = (struct otx2_nic *)(pf_irq);
544 	int vfs = pf->total_vfs;
545 	struct mbox *mbox;
546 	u64 intr;
547 
548 	mbox = pf->mbox_pfvf;
549 	/* Handle VF interrupts */
550 	if (vfs > 64) {
551 		intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1));
552 		otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
553 		otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
554 				TYPE_PFVF);
555 		vfs -= 64;
556 	}
557 
558 	intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
559 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr);
560 
561 	otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
562 
563 	trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
564 
565 	return IRQ_HANDLED;
566 }
567 
568 static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
569 {
570 	void __iomem *hwbase;
571 	struct mbox *mbox;
572 	int err, vf;
573 	u64 base;
574 
575 	if (!numvfs)
576 		return -EINVAL;
577 
578 	pf->mbox_pfvf = devm_kcalloc(&pf->pdev->dev, numvfs,
579 				     sizeof(struct mbox), GFP_KERNEL);
580 	if (!pf->mbox_pfvf)
581 		return -ENOMEM;
582 
583 	pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox",
584 					   WQ_UNBOUND | WQ_HIGHPRI |
585 					   WQ_MEM_RECLAIM, 1);
586 	if (!pf->mbox_pfvf_wq)
587 		return -ENOMEM;
588 
589 	/* On CN10K platform, PF <-> VF mailbox region follows after
590 	 * PF <-> AF mailbox region.
591 	 */
592 	if (test_bit(CN10K_MBOX, &pf->hw.cap_flag))
593 		base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
594 		       MBOX_SIZE;
595 	else
596 		base = readq((void __iomem *)((u64)pf->reg_base +
597 					      RVU_PF_VF_BAR4_ADDR));
598 
599 	hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
600 	if (!hwbase) {
601 		err = -ENOMEM;
602 		goto free_wq;
603 	}
604 
605 	mbox = &pf->mbox_pfvf[0];
606 	err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
607 			     MBOX_DIR_PFVF, numvfs);
608 	if (err)
609 		goto free_iomem;
610 
611 	err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
612 			     MBOX_DIR_PFVF_UP, numvfs);
613 	if (err)
614 		goto free_iomem;
615 
616 	for (vf = 0; vf < numvfs; vf++) {
617 		mbox->pfvf = pf;
618 		INIT_WORK(&mbox->mbox_wrk, otx2_pfvf_mbox_handler);
619 		INIT_WORK(&mbox->mbox_up_wrk, otx2_pfvf_mbox_up_handler);
620 		mbox++;
621 	}
622 
623 	return 0;
624 
625 free_iomem:
626 	if (hwbase)
627 		iounmap(hwbase);
628 free_wq:
629 	destroy_workqueue(pf->mbox_pfvf_wq);
630 	return err;
631 }
632 
633 static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf)
634 {
635 	struct mbox *mbox = &pf->mbox_pfvf[0];
636 
637 	if (!mbox)
638 		return;
639 
640 	if (pf->mbox_pfvf_wq) {
641 		destroy_workqueue(pf->mbox_pfvf_wq);
642 		pf->mbox_pfvf_wq = NULL;
643 	}
644 
645 	if (mbox->mbox.hwbase)
646 		iounmap(mbox->mbox.hwbase);
647 
648 	otx2_mbox_destroy(&mbox->mbox);
649 }
650 
651 static void otx2_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
652 {
653 	/* Clear PF <=> VF mailbox IRQ */
654 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
655 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
656 
657 	/* Enable PF <=> VF mailbox IRQ */
658 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(numvfs));
659 	if (numvfs > 64) {
660 		numvfs -= 64;
661 		otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
662 			     INTR_MASK(numvfs));
663 	}
664 }
665 
666 static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
667 {
668 	int vector;
669 
670 	/* Disable PF <=> VF mailbox IRQ */
671 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull);
672 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull);
673 
674 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
675 	vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
676 	free_irq(vector, pf);
677 
678 	if (numvfs > 64) {
679 		otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
680 		vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
681 		free_irq(vector, pf);
682 	}
683 }
684 
685 static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
686 {
687 	struct otx2_hw *hw = &pf->hw;
688 	char *irq_name;
689 	int err;
690 
691 	/* Register MBOX0 interrupt handler */
692 	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE];
693 	if (pf->pcifunc)
694 		snprintf(irq_name, NAME_SIZE,
695 			 "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc));
696 	else
697 		snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0");
698 	err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0),
699 			  otx2_pfvf_mbox_intr_handler, 0, irq_name, pf);
700 	if (err) {
701 		dev_err(pf->dev,
702 			"RVUPF: IRQ registration failed for PFVF mbox0 irq\n");
703 		return err;
704 	}
705 
706 	if (numvfs > 64) {
707 		/* Register MBOX1 interrupt handler */
708 		irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE];
709 		if (pf->pcifunc)
710 			snprintf(irq_name, NAME_SIZE,
711 				 "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc));
712 		else
713 			snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1");
714 		err = request_irq(pci_irq_vector(pf->pdev,
715 						 RVU_PF_INT_VEC_VFPF_MBOX1),
716 						 otx2_pfvf_mbox_intr_handler,
717 						 0, irq_name, pf);
718 		if (err) {
719 			dev_err(pf->dev,
720 				"RVUPF: IRQ registration failed for PFVF mbox1 irq\n");
721 			return err;
722 		}
723 	}
724 
725 	otx2_enable_pfvf_mbox_intr(pf, numvfs);
726 
727 	return 0;
728 }
729 
730 static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
731 				       struct mbox_msghdr *msg)
732 {
733 	int devid;
734 
735 	if (msg->id >= MBOX_MSG_MAX) {
736 		dev_err(pf->dev,
737 			"Mbox msg with unknown ID 0x%x\n", msg->id);
738 		return;
739 	}
740 
741 	if (msg->sig != OTX2_MBOX_RSP_SIG) {
742 		dev_err(pf->dev,
743 			"Mbox msg with wrong signature %x, ID 0x%x\n",
744 			 msg->sig, msg->id);
745 		return;
746 	}
747 
	/* Response to a message that was headed to a VF */
	devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
	if (devid) {
		struct otx2_vf_config *config = &pf->vf_configs[devid - 1];
		struct delayed_work *dwork;

		switch (msg->id) {
		case MBOX_MSG_NIX_LF_START_RX:
			config->intf_down = false;
			dwork = &config->link_event_work;
			schedule_delayed_work(dwork, msecs_to_jiffies(100));
			break;
		case MBOX_MSG_NIX_LF_STOP_RX:
			config->intf_down = true;
			break;
		}

		return;
	}

	switch (msg->id) {
	case MBOX_MSG_READY:
		pf->pcifunc = msg->pcifunc;
		break;
	case MBOX_MSG_MSIX_OFFSET:
		mbox_handler_msix_offset(pf, (struct msix_offset_rsp *)msg);
		break;
	case MBOX_MSG_NPA_LF_ALLOC:
		mbox_handler_npa_lf_alloc(pf, (struct npa_lf_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_LF_ALLOC:
		mbox_handler_nix_lf_alloc(pf, (struct nix_lf_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_TXSCH_ALLOC:
		mbox_handler_nix_txsch_alloc(pf,
					     (struct nix_txsch_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_BP_ENABLE:
		mbox_handler_nix_bp_enable(pf, (struct nix_bp_cfg_rsp *)msg);
		break;
	case MBOX_MSG_CGX_STATS:
		mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
		break;
	case MBOX_MSG_CGX_FEC_STATS:
		mbox_handler_cgx_fec_stats(pf, (struct cgx_fec_stats_rsp *)msg);
		break;
	default:
		if (msg->rc)
			dev_err(pf->dev,
				"Mbox msg response has err %d, ID 0x%x\n",
				msg->rc, msg->id);
		break;
	}
}

static void otx2_pfaf_mbox_handler(struct work_struct *work)
{
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	struct mbox *af_mbox;
	struct otx2_nic *pf;
	int offset, id;

	af_mbox = container_of(work, struct mbox, mbox_wrk);
	mbox = &af_mbox->mbox;
	mdev = &mbox->dev[0];
	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);

	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
	pf = af_mbox->pfvf;

	for (id = 0; id < af_mbox->num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
		otx2_process_pfaf_mbox_msg(pf, msg);
		offset = mbox->rx_start + msg->next_msgoff;
		if (mdev->msgs_acked == (af_mbox->num_msgs - 1))
			__otx2_mbox_reset(mbox, 0);
		mdev->msgs_acked++;
	}
}

static void otx2_handle_link_event(struct otx2_nic *pf)
{
	struct cgx_link_user_info *linfo = &pf->linfo;
	struct net_device *netdev = pf->netdev;

	pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name,
		linfo->link_up ? "UP" : "DOWN", linfo->speed,
		linfo->full_duplex ? "Full" : "Half");
	if (linfo->link_up) {
		netif_carrier_on(netdev);
		netif_tx_start_all_queues(netdev);
	} else {
		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
	}
}

int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
					struct cgx_link_info_msg *msg,
					struct msg_rsp *rsp)
{
	int i;

	/* Copy the link info sent by AF */
	pf->linfo = msg->link_info;

	/* notify VFs about link event */
	for (i = 0; i < pci_num_vf(pf->pdev); i++) {
		struct otx2_vf_config *config = &pf->vf_configs[i];
		struct delayed_work *dwork = &config->link_event_work;

		if (config->intf_down)
			continue;

		schedule_delayed_work(dwork, msecs_to_jiffies(100));
	}

	/* interface has not been fully configured yet */
	if (pf->flags & OTX2_FLAG_INTF_DOWN)
		return 0;

	otx2_handle_link_event(pf);
	return 0;
}

static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
				    struct mbox_msghdr *req)
{
	/* Check if valid, if not reply with an invalid msg */
	if (req->sig != OTX2_MBOX_REQ_SIG) {
		otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
		return -ENODEV;
	}

	switch (req->id) {
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
	case _id: {							\
		struct _rsp_type *rsp;					\
		int err;						\
									\
		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
			&pf->mbox.mbox_up, 0,				\
			sizeof(struct _rsp_type));			\
		if (!rsp)						\
			return -ENOMEM;					\
									\
		rsp->hdr.id = _id;					\
		rsp->hdr.sig = OTX2_MBOX_RSP_SIG;			\
		rsp->hdr.pcifunc = 0;					\
		rsp->hdr.rc = 0;					\
									\
		err = otx2_mbox_up_handler_ ## _fn_name(		\
			pf, (struct _req_type *)req, rsp);		\
		return err;						\
	}
MBOX_UP_CGX_MESSAGES
#undef M
		break;
	default:
		otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
		return -ENODEV;
	}
	return 0;
}

static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
{
	struct mbox *af_mbox = container_of(work, struct mbox, mbox_up_wrk);
	struct otx2_mbox *mbox = &af_mbox->mbox_up;
	struct otx2_mbox_dev *mdev = &mbox->dev[0];
	struct otx2_nic *pf = af_mbox->pfvf;
	int offset, id, devid = 0;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;

	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);

	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < af_mbox->up_num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);

		devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
		/* Skip processing VF's messages */
		if (!devid)
			otx2_process_mbox_msg_up(pf, msg);
		offset = mbox->rx_start + msg->next_msgoff;
	}
	if (devid) {
		otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up,
					  MBOX_DIR_PFVF_UP, devid - 1,
					  af_mbox->up_num_msgs);
		return;
	}

	otx2_mbox_msg_send(mbox, 0);
}

static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
{
	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
	struct mbox *mbox;

	/* Clear the IRQ */
	otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));

	mbox = &pf->mbox;

	trace_otx2_msg_interrupt(mbox->mbox.pdev, "AF to PF", BIT_ULL(0));

	otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF);

	return IRQ_HANDLED;
}

static void otx2_disable_mbox_intr(struct otx2_nic *pf)
{
	int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX);

	/* Disable AF => PF mailbox IRQ */
	otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0));
	free_irq(vector, pf);
}

static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
{
	struct otx2_hw *hw = &pf->hw;
	struct msg_req *req;
	char *irq_name;
	int err;

	/* Register mailbox interrupt handler */
	irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE];
	snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox");
	err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX),
			  otx2_pfaf_mbox_intr_handler, 0, irq_name, pf);
	if (err) {
		dev_err(pf->dev,
			"RVUPF: IRQ registration failed for PFAF mbox irq\n");
		return err;
	}

	/* Enable mailbox interrupt for msgs coming from AF.
	 * First clear to avoid spurious interrupts, if any.
	 */
	otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
	otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));

	if (!probe_af)
		return 0;

	/* Check mailbox communication with AF */
	req = otx2_mbox_alloc_msg_ready(&pf->mbox);
	if (!req) {
		otx2_disable_mbox_intr(pf);
		return -ENOMEM;
	}
	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err) {
		dev_warn(pf->dev,
			 "AF not responding to mailbox, deferring probe\n");
		otx2_disable_mbox_intr(pf);
		return -EPROBE_DEFER;
	}

	return 0;
}

static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
{
	struct mbox *mbox = &pf->mbox;

	if (pf->mbox_wq) {
		destroy_workqueue(pf->mbox_wq);
		pf->mbox_wq = NULL;
	}

	if (mbox->mbox.hwbase)
		iounmap((void __iomem *)mbox->mbox.hwbase);

	otx2_mbox_destroy(&mbox->mbox);
	otx2_mbox_destroy(&mbox->mbox_up);
}

static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
{
	struct mbox *mbox = &pf->mbox;
	void __iomem *hwbase;
	int err;

	mbox->pfvf = pf;
	pf->mbox_wq = alloc_workqueue("otx2_pfaf_mailbox",
				      WQ_UNBOUND | WQ_HIGHPRI |
				      WQ_MEM_RECLAIM, 1);
	if (!pf->mbox_wq)
		return -ENOMEM;

	/* Mailbox is a reserved memory (in RAM) region shared between
	 * admin function (i.e. AF) and this PF, and shouldn't be mapped as
	 * device memory, to allow unaligned accesses.
	 */
	hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM),
			    MBOX_SIZE);
	if (!hwbase) {
		dev_err(pf->dev, "Unable to map PFAF mailbox region\n");
		err = -ENOMEM;
		goto exit;
	}

	err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
			     MBOX_DIR_PFAF, 1);
	if (err)
		goto exit;

	err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
			     MBOX_DIR_PFAF_UP, 1);
	if (err)
		goto exit;

	err = otx2_mbox_bbuf_init(mbox, pf->pdev);
	if (err)
		goto exit;

	INIT_WORK(&mbox->mbox_wrk, otx2_pfaf_mbox_handler);
	INIT_WORK(&mbox->mbox_up_wrk, otx2_pfaf_mbox_up_handler);
	mutex_init(&mbox->lock);

	return 0;
exit:
	otx2_pfaf_mbox_destroy(pf);
	return err;
}

static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable)
{
	struct msg_req *msg;
	int err;

	mutex_lock(&pf->mbox.lock);
	if (enable)
		msg = otx2_mbox_alloc_msg_cgx_start_linkevents(&pf->mbox);
	else
		msg = otx2_mbox_alloc_msg_cgx_stop_linkevents(&pf->mbox);

	if (!msg) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);
	return err;
}

static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
{
	struct msg_req *msg;
	int err;

	mutex_lock(&pf->mbox.lock);
	if (enable)
		msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
	else
		msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox);

	if (!msg) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);
	return err;
}

int otx2_set_real_num_queues(struct net_device *netdev,
			     int tx_queues, int rx_queues)
{
	int err;

	err = netif_set_real_num_tx_queues(netdev, tx_queues);
	if (err) {
		netdev_err(netdev,
			   "Failed to set no of Tx queues: %d\n", tx_queues);
		return err;
	}

	err = netif_set_real_num_rx_queues(netdev, rx_queues);
	if (err)
		netdev_err(netdev,
			   "Failed to set no of Rx queues: %d\n", rx_queues);
	return err;
}
EXPORT_SYMBOL(otx2_set_real_num_queues);

static irqreturn_t otx2_q_intr_handler(int irq, void *data)
{
	struct otx2_nic *pf = data;
	u64 val, *ptr;
	u64 qidx = 0;
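
	/* NIX_LF_CQ_OP_INT/NIX_LF_SQ_OP_INT are read via an atomic op with
	 * the queue index encoded in bits 63:44 of the operand; the returned
	 * value carries that queue's interrupt status bits, with bit 42 set
	 * if the read itself failed.
	 */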
	/* CQ */
	for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {
		ptr = otx2_get_regaddr(pf, NIX_LF_CQ_OP_INT);
		val = otx2_atomic64_add((qidx << 44), ptr);

		otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) |
			     (val & NIX_CQERRINT_BITS));
		if (!(val & (NIX_CQERRINT_BITS | BIT_ULL(42))))
			continue;

		if (val & BIT_ULL(42)) {
			netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
				   qidx, otx2_read64(pf, NIX_LF_ERR_INT));
		} else {
			if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
				netdev_err(pf->netdev, "CQ%lld: Doorbell error",
					   qidx);
			if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
				netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM",
					   qidx);
		}

		schedule_work(&pf->reset_task);
	}

	/* SQ */
	for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
		ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
		val = otx2_atomic64_add((qidx << 44), ptr);
		otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
			     (val & NIX_SQINT_BITS));

		if (!(val & (NIX_SQINT_BITS | BIT_ULL(42))))
			continue;

		if (val & BIT_ULL(42)) {
			netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
				   qidx, otx2_read64(pf, NIX_LF_ERR_INT));
		} else {
			if (val & BIT_ULL(NIX_SQINT_LMT_ERR)) {
				netdev_err(pf->netdev, "SQ%lld: LMT store error NIX_LF_SQ_OP_ERR_DBG:0x%llx",
					   qidx,
					   otx2_read64(pf,
						       NIX_LF_SQ_OP_ERR_DBG));
				otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG,
					     BIT_ULL(44));
			}
			if (val & BIT_ULL(NIX_SQINT_MNQ_ERR)) {
				netdev_err(pf->netdev, "SQ%lld: Meta-descriptor enqueue error NIX_LF_MNQ_ERR_DBG:0x%llx\n",
					   qidx,
					   otx2_read64(pf, NIX_LF_MNQ_ERR_DBG));
				otx2_write64(pf, NIX_LF_MNQ_ERR_DBG,
					     BIT_ULL(44));
			}
			if (val & BIT_ULL(NIX_SQINT_SEND_ERR)) {
				netdev_err(pf->netdev, "SQ%lld: Send error, NIX_LF_SEND_ERR_DBG 0x%llx",
					   qidx,
					   otx2_read64(pf,
						       NIX_LF_SEND_ERR_DBG));
				otx2_write64(pf, NIX_LF_SEND_ERR_DBG,
					     BIT_ULL(44));
			}
			if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
				netdev_err(pf->netdev, "SQ%lld: SQB allocation failed",
					   qidx);
		}

		schedule_work(&pf->reset_task);
	}

	return IRQ_HANDLED;
}

static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
{
	struct otx2_cq_poll *cq_poll = (struct otx2_cq_poll *)cq_irq;
	struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev;
	int qidx = cq_poll->cint_idx;

	/* Disable interrupts.
	 *
	 * Completion interrupts are level triggered, so they have to be
	 * cleared only after they are serviced.
	 */
	otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));

	/* Schedule NAPI */
	napi_schedule_irqoff(&cq_poll->napi);

	return IRQ_HANDLED;
}

static void otx2_disable_napi(struct otx2_nic *pf)
{
	struct otx2_qset *qset = &pf->qset;
	struct otx2_cq_poll *cq_poll;
	int qidx;

	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
		cq_poll = &qset->napi[qidx];
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
	}
}

static void otx2_free_cq_res(struct otx2_nic *pf)
{
	struct otx2_qset *qset = &pf->qset;
	struct otx2_cq_queue *cq;
	int qidx;

	/* Disable CQs */
	otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_CQ, false);
	for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
		cq = &qset->cq[qidx];
		qmem_free(pf->dev, cq->cqe);
	}
}

static void otx2_free_sq_res(struct otx2_nic *pf)
{
	struct otx2_qset *qset = &pf->qset;
	struct otx2_snd_queue *sq;
	int qidx;

	/* Disable SQs */
	otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
	/* Free SQB pointers */
	otx2_sq_free_sqbs(pf);
	for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
		sq = &qset->sq[qidx];
		qmem_free(pf->dev, sq->sqe);
		qmem_free(pf->dev, sq->tso_hdrs);
		kfree(sq->sg);
		kfree(sq->sqb_ptrs);
	}
}

static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
{
	int frame_size;
	int total_size;
	int rbuf_size;

	/* The data transferred by NIX to memory consists of the actual packet
	 * plus additional data which has timestamp and/or EDSA/HIGIG2
	 * headers if the interface is configured in the corresponding modes.
	 * NIX transfers the entire data using 6 segments/buffers and writes
	 * a CQE_RX descriptor with those segment addresses. The first segment
	 * has the additional data prepended to the packet. Also, software
	 * reserves a headroom of 128 bytes and sizeof(struct skb_shared_info)
	 * in each segment. Hence the total size of memory needed
	 * to receive a packet with 'mtu' is:
	 * frame size = mtu + additional data;
	 * memory = frame_size + (headroom + struct skb_shared_info size) * 6;
	 * each receive buffer size = memory / 6;
	 */
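	/* Worked example (values approximate; OTX2_ETH_HLEN covers L2/VLAN
	 * headers and skb_shared_info is roughly 320 bytes on 64-bit builds):
	 * for mtu = 1500, frame_size is about 1530 bytes, total_size is
	 * about 1530 + (128 + 320) * 6 = 4218, so rbuf_size = 703, rounded
	 * up to 2048 bytes per receive buffer.
	 */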
	frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
	total_size = frame_size + (OTX2_HEAD_ROOM +
		     OTX2_DATA_ALIGN(sizeof(struct skb_shared_info))) * 6;
	rbuf_size = total_size / 6;

	return ALIGN(rbuf_size, 2048);
}

static int otx2_init_hw_resources(struct otx2_nic *pf)
{
	struct nix_lf_free_req *free_req;
	struct mbox *mbox = &pf->mbox;
	struct otx2_hw *hw = &pf->hw;
	struct msg_req *req;
	int err = 0, lvl;

	/* Set required NPA LF's pool counts.
	 * Auras and Pools are used in a 1:1 mapping,
	 * so, aura count = pool count.
	 */
	hw->rqpool_cnt = hw->rx_queues;
	hw->sqpool_cnt = hw->tx_queues;
	hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;

	pf->max_frs = pf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;

	pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);

	mutex_lock(&mbox->lock);
	/* NPA init */
	err = otx2_config_npa(pf);
	if (err)
		goto exit;

	/* NIX init */
	err = otx2_config_nix(pf);
	if (err)
		goto err_free_npa_lf;

	/* Enable backpressure */
	otx2_nix_config_bp(pf, true);

	/* Init Auras and pools used by NIX RQ, for free buffer ptrs */
	err = otx2_rq_aura_pool_init(pf);
	if (err) {
		mutex_unlock(&mbox->lock);
		goto err_free_nix_lf;
	}
	/* Init Auras and pools used by NIX SQ, for queueing SQEs */
	err = otx2_sq_aura_pool_init(pf);
	if (err) {
		mutex_unlock(&mbox->lock);
		goto err_free_rq_ptrs;
	}

	err = otx2_txsch_alloc(pf);
	if (err) {
		mutex_unlock(&mbox->lock);
		goto err_free_sq_ptrs;
	}

	err = otx2_config_nix_queues(pf);
	if (err) {
		mutex_unlock(&mbox->lock);
		goto err_free_txsch;
	}
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		err = otx2_txschq_config(pf, lvl);
		if (err) {
			mutex_unlock(&mbox->lock);
			goto err_free_nix_queues;
		}
	}
	mutex_unlock(&mbox->lock);
	return err;

err_free_nix_queues:
	otx2_free_sq_res(pf);
	otx2_free_cq_res(pf);
	otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
err_free_txsch:
	if (otx2_txschq_stop(pf))
		dev_err(pf->dev, "%s failed to stop TX schedulers\n", __func__);
err_free_sq_ptrs:
	otx2_sq_free_sqbs(pf);
err_free_rq_ptrs:
	otx2_free_aura_ptr(pf, AURA_NIX_RQ);
	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
	otx2_aura_pool_free(pf);
err_free_nix_lf:
	mutex_lock(&mbox->lock);
	free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
	if (free_req) {
		free_req->flags = NIX_LF_DISABLE_FLOWS;
		if (otx2_sync_mbox_msg(mbox))
			dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
	}
err_free_npa_lf:
	/* Reset NPA LF */
	req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
	if (req) {
		if (otx2_sync_mbox_msg(mbox))
			dev_err(pf->dev, "%s failed to free npalf\n", __func__);
	}
exit:
	mutex_unlock(&mbox->lock);
	return err;
}

static void otx2_free_hw_resources(struct otx2_nic *pf)
{
	struct otx2_qset *qset = &pf->qset;
	struct nix_lf_free_req *free_req;
	struct mbox *mbox = &pf->mbox;
	struct otx2_cq_queue *cq;
	struct msg_req *req;
	int qidx, err;

	/* Ensure all SQEs are processed */
	otx2_sqb_flush(pf);

	/* Stop transmission */
	err = otx2_txschq_stop(pf);
	if (err)
		dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n");

	mutex_lock(&mbox->lock);
	/* Disable backpressure */
	if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK))
		otx2_nix_config_bp(pf, false);
	mutex_unlock(&mbox->lock);

	/* Disable RQs */
	otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);

	/* Dequeue all CQEs */
	for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
		cq = &qset->cq[qidx];
		if (cq->cq_type == CQ_RX)
			otx2_cleanup_rx_cqes(pf, cq);
		else
			otx2_cleanup_tx_cqes(pf, cq);
	}

	otx2_free_sq_res(pf);

	/* Free RQ buffer pointers */
	otx2_free_aura_ptr(pf, AURA_NIX_RQ);

	otx2_free_cq_res(pf);

	mutex_lock(&mbox->lock);
	/* Reset NIX LF */
	free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
	if (free_req) {
		free_req->flags = NIX_LF_DISABLE_FLOWS;
		if (!(pf->flags & OTX2_FLAG_PF_SHUTDOWN))
			free_req->flags |= NIX_LF_DONT_FREE_TX_VTAG;
		if (otx2_sync_mbox_msg(mbox))
			dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
	}
	mutex_unlock(&mbox->lock);

	/* Disable NPA Pool and Aura hw context */
	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
	otx2_aura_pool_free(pf);

	mutex_lock(&mbox->lock);
	/* Reset NPA LF */
	req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
	if (req) {
		if (otx2_sync_mbox_msg(mbox))
			dev_err(pf->dev, "%s failed to free npalf\n", __func__);
	}
	mutex_unlock(&mbox->lock);
}

int otx2_open(struct net_device *netdev)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct otx2_cq_poll *cq_poll = NULL;
	struct otx2_qset *qset = &pf->qset;
	int err = 0, qidx, vec;
	char *irq_name;

	netif_carrier_off(netdev);

	pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tx_queues;
	/* RQs and SQs are mapped to different CQs,
	 * so find out the max CQ IRQs (i.e. CINTs) needed.
	 */
	pf->hw.cint_cnt = max(pf->hw.rx_queues, pf->hw.tx_queues);
	qset->napi = kcalloc(pf->hw.cint_cnt, sizeof(*cq_poll), GFP_KERNEL);
	if (!qset->napi)
		return -ENOMEM;

	/* CQ size of RQ */
	qset->rqe_cnt = qset->rqe_cnt ? qset->rqe_cnt : Q_COUNT(Q_SIZE_256);
	/* CQ size of SQ */
	qset->sqe_cnt = qset->sqe_cnt ? qset->sqe_cnt : Q_COUNT(Q_SIZE_4K);

	err = -ENOMEM;
	qset->cq = kcalloc(pf->qset.cq_cnt,
			   sizeof(struct otx2_cq_queue), GFP_KERNEL);
	if (!qset->cq)
		goto err_free_mem;

	qset->sq = kcalloc(pf->hw.tx_queues,
			   sizeof(struct otx2_snd_queue), GFP_KERNEL);
	if (!qset->sq)
		goto err_free_mem;

	qset->rq = kcalloc(pf->hw.rx_queues,
			   sizeof(struct otx2_rcv_queue), GFP_KERNEL);
	if (!qset->rq)
		goto err_free_mem;

	if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) {
		/* Reserve LMT lines for NPA AURA batch free */
		pf->hw.npa_lmt_base = (__force u64 *)pf->hw.lmt_base;
		/* Reserve LMT lines for NIX TX */
		pf->hw.nix_lmt_base = (__force u64 *)((u64)pf->hw.npa_lmt_base +
				      (NIX_LMTID_BASE * LMT_LINE_SIZE));
	}

	err = otx2_init_hw_resources(pf);
	if (err)
		goto err_free_mem;

	/* Register NAPI handler */
	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
		cq_poll = &qset->napi[qidx];
		cq_poll->cint_idx = qidx;
		/* RQ0 & SQ0 are mapped to CINT0 and so on..
		 * 'cq_ids[0]' points to RQ's CQ and
		 * 'cq_ids[1]' points to SQ's CQ.
		 */
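		/* E.g. with 2 RX and 4 TX queues, cint_cnt is 4; CINT2 and
		 * CINT3 then service only a Tx CQ, and their cq_ids[CQ_RX]
		 * is set to CINT_INVALID_CQ below.
		 */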
		cq_poll->cq_ids[CQ_RX] =
			(qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
		cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
				      qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
		cq_poll->dev = (void *)pf;
		netif_napi_add(netdev, &cq_poll->napi,
			       otx2_napi_handler, NAPI_POLL_WEIGHT);
		napi_enable(&cq_poll->napi);
	}

	/* Set maximum frame size allowed in HW */
	err = otx2_hw_set_mtu(pf, netdev->mtu);
	if (err)
		goto err_disable_napi;

	/* Set up segmentation algorithms; on failure offload capabilities are cleared */
	otx2_setup_segmentation(pf);

	/* Initialize RSS */
	err = otx2_rss_init(pf);
	if (err)
		goto err_disable_napi;

	/* Register Queue IRQ handlers */
	vec = pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START;
	irq_name = &pf->hw.irq_name[vec * NAME_SIZE];

	snprintf(irq_name, NAME_SIZE, "%s-qerr", pf->netdev->name);

	err = request_irq(pci_irq_vector(pf->pdev, vec),
			  otx2_q_intr_handler, 0, irq_name, pf);
	if (err) {
		dev_err(pf->dev,
			"RVUPF%d: IRQ registration failed for QERR\n",
			rvu_get_pf(pf->pcifunc));
		goto err_disable_napi;
	}

	/* Enable QINT IRQ */
	otx2_write64(pf, NIX_LF_QINTX_ENA_W1S(0), BIT_ULL(0));

	/* Register CQ IRQ handlers */
	vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
		irq_name = &pf->hw.irq_name[vec * NAME_SIZE];

		snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", pf->netdev->name,
			 qidx);

		err = request_irq(pci_irq_vector(pf->pdev, vec),
				  otx2_cq_intr_handler, 0, irq_name,
				  &qset->napi[qidx]);
		if (err) {
			dev_err(pf->dev,
				"RVUPF%d: IRQ registration failed for CQ%d\n",
				rvu_get_pf(pf->pcifunc), qidx);
			goto err_free_cints;
		}
		vec++;

		otx2_config_irq_coalescing(pf, qidx);

		/* Enable CQ IRQ */
		otx2_write64(pf, NIX_LF_CINTX_INT(qidx), BIT_ULL(0));
		otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));
	}

	otx2_set_cints_affinity(pf);

	if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
		otx2_enable_rxvlan(pf, true);

	/* When reinitializing, re-enable timestamping if it was enabled earlier */
	if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) {
		pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
		otx2_config_hw_tx_tstamp(pf, true);
	}
	if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) {
		pf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
		otx2_config_hw_rx_tstamp(pf, true);
	}

	pf->flags &= ~OTX2_FLAG_INTF_DOWN;
	/* 'intf_down' may be checked on any cpu */
	smp_wmb();

	/* we have already received link status notification */
	if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
		otx2_handle_link_event(pf);

	/* Restore pause frame settings */
	otx2_config_pause_frm(pf);

	err = otx2_rxtx_enable(pf, true);
	if (err)
		goto err_tx_stop_queues;

	return 0;

err_tx_stop_queues:
	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);
err_free_cints:
	otx2_free_cints(pf, qidx);
	vec = pci_irq_vector(pf->pdev,
			     pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
	otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
	synchronize_irq(vec);
	free_irq(vec, pf);
err_disable_napi:
	otx2_disable_napi(pf);
	otx2_free_hw_resources(pf);
err_free_mem:
	kfree(qset->sq);
	kfree(qset->cq);
	kfree(qset->rq);
	kfree(qset->napi);
	return err;
}
EXPORT_SYMBOL(otx2_open);

int otx2_stop(struct net_device *netdev)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct otx2_cq_poll *cq_poll = NULL;
	struct otx2_qset *qset = &pf->qset;
	struct otx2_rss_info *rss;
	int qidx, vec, wrk;

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	pf->flags |= OTX2_FLAG_INTF_DOWN;
	/* 'intf_down' may be checked on any cpu */
	smp_wmb();

	/* First stop packet Rx/Tx */
	otx2_rxtx_enable(pf, false);

	/* Clear RSS enable flag */
	rss = &pf->hw.rss_info;
	rss->enable = false;

	/* Cleanup Queue IRQ */
	vec = pci_irq_vector(pf->pdev,
			     pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
	otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
	synchronize_irq(vec);
	free_irq(vec, pf);

	/* Cleanup CQ NAPI and IRQ */
	vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
		/* Disable interrupt */
		otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));

		synchronize_irq(pci_irq_vector(pf->pdev, vec));

		cq_poll = &qset->napi[qidx];
		napi_synchronize(&cq_poll->napi);
		vec++;
	}

	netif_tx_disable(netdev);

	otx2_free_hw_resources(pf);
	otx2_free_cints(pf, pf->hw.cint_cnt);
	otx2_disable_napi(pf);

	for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));

	for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
		cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
	devm_kfree(pf->dev, pf->refill_wrk);

	kfree(qset->sq);
	kfree(qset->cq);
	kfree(qset->rq);
	kfree(qset->napi);
	/* Do not clear RQ/SQ ringsize settings */
	memset((void *)qset + offsetof(struct otx2_qset, sqe_cnt), 0,
	       sizeof(*qset) - offsetof(struct otx2_qset, sqe_cnt));
	return 0;
}
EXPORT_SYMBOL(otx2_stop);

static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	int qidx = skb_get_queue_mapping(skb);
	struct otx2_snd_queue *sq;
	struct netdev_queue *txq;

	/* Check for minimum and maximum packet length */
	if (skb->len <= ETH_HLEN ||
	    (!skb_shinfo(skb)->gso_size && skb->len > pf->max_frs)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	sq = &pf->qset.sq[qidx];
	txq = netdev_get_tx_queue(netdev, qidx);

	if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
		netif_tx_stop_queue(txq);

		/* Check again, in case SQBs got freed up */
		smp_mb();
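		/* sq->aura_fc_addr points to memory the NPA hardware keeps
		 * updated with the aura's consumed-buffer count, so this
		 * re-check can observe SQBs freed after the queue was
		 * stopped above (intent inferred from the arithmetic below).
		 */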
1758 		if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
1759 							> sq->sqe_thresh)
1760 			netif_tx_wake_queue(txq);
1761 
1762 		return NETDEV_TX_BUSY;
1763 	}
1764 
1765 	return NETDEV_TX_OK;
1766 }
1767 
1768 static void otx2_set_rx_mode(struct net_device *netdev)
1769 {
1770 	struct otx2_nic *pf = netdev_priv(netdev);
1771 
1772 	queue_work(pf->otx2_wq, &pf->rx_mode_work);
1773 }
1774 
1775 static void otx2_do_set_rx_mode(struct work_struct *work)
1776 {
1777 	struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);
1778 	struct net_device *netdev = pf->netdev;
1779 	struct nix_rx_mode *req;
1780 	bool promisc = false;
1781 
1782 	if (!(netdev->flags & IFF_UP))
1783 		return;
1784 
1785 	if ((netdev->flags & IFF_PROMISC) ||
1786 	    (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) {
1787 		promisc = true;
1788 	}
1789 
1790 	/* Write unicast address to mcam entries or del from mcam */
1791 	if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT)
1792 		__dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter);
1793 
1794 	mutex_lock(&pf->mbox.lock);
1795 	req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
1796 	if (!req) {
1797 		mutex_unlock(&pf->mbox.lock);
1798 		return;
1799 	}
1800 
1801 	req->mode = NIX_RX_MODE_UCAST;
1802 
1803 	if (promisc)
1804 		req->mode |= NIX_RX_MODE_PROMISC;
1805 	else if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
1806 		req->mode |= NIX_RX_MODE_ALLMULTI;
1807 
1808 	otx2_sync_mbox_msg(&pf->mbox);
1809 	mutex_unlock(&pf->mbox.lock);
1810 }
1811 
1812 static int otx2_set_features(struct net_device *netdev,
1813 			     netdev_features_t features)
1814 {
1815 	netdev_features_t changed = features ^ netdev->features;
1816 	bool ntuple = !!(features & NETIF_F_NTUPLE);
1817 	struct otx2_nic *pf = netdev_priv(netdev);
1818 
1819 	if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
1820 		return otx2_cgx_config_loopback(pf,
1821 						features & NETIF_F_LOOPBACK);
1822 
1823 	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(netdev))
1824 		return otx2_enable_rxvlan(pf,
1825 					  features & NETIF_F_HW_VLAN_CTAG_RX);
1826 
1827 	if ((changed & NETIF_F_NTUPLE) && !ntuple)
1828 		otx2_destroy_ntuple_flows(pf);
1829 
1830 	return 0;
1831 }
1832 
1833 static void otx2_reset_task(struct work_struct *work)
1834 {
1835 	struct otx2_nic *pf = container_of(work, struct otx2_nic, reset_task);
1836 
1837 	if (!netif_running(pf->netdev))
1838 		return;
1839 
1840 	rtnl_lock();
1841 	otx2_stop(pf->netdev);
1842 	pf->reset_count++;
1843 	otx2_open(pf->netdev);
1844 	netif_trans_update(pf->netdev);
1845 	rtnl_unlock();
1846 }
1847 
static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable)
{
	struct msg_req *req;
	int err;

	if (pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED && enable)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	if (enable)
		req = otx2_mbox_alloc_msg_cgx_ptp_rx_enable(&pfvf->mbox);
	else
		req = otx2_mbox_alloc_msg_cgx_ptp_rx_disable(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	mutex_unlock(&pfvf->mbox.lock);
	if (enable)
		pfvf->flags |= OTX2_FLAG_RX_TSTAMP_ENABLED;
	else
		pfvf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
	return 0;
}

static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable)
{
	struct msg_req *req;
	int err;

	if (pfvf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED && enable)
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	if (enable)
		req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_enable(&pfvf->mbox);
	else
		req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_disable(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	mutex_unlock(&pfvf->mbox.lock);
	if (enable)
		pfvf->flags |= OTX2_FLAG_TX_TSTAMP_ENABLED;
	else
		pfvf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
	return 0;
}

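/* SIOCSHWTSTAMP handler. A minimal userspace sketch (interface name and
 * socket fd are placeholders) to turn on TX and RX timestamping:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * Any RX filter we cannot match exactly is widened to
 * HWTSTAMP_FILTER_ALL, since the hardware timestamps all packets.
 */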
static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct hwtstamp_config config;

	if (!pfvf->ptp)
		return -ENODEV;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		otx2_config_hw_tx_tstamp(pfvf, false);
		break;
	case HWTSTAMP_TX_ON:
		otx2_config_hw_tx_tstamp(pfvf, true);
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		otx2_config_hw_rx_tstamp(pfvf, false);
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		otx2_config_hw_rx_tstamp(pfvf, true);
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	memcpy(&pfvf->tstamp, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct hwtstamp_config *cfg = &pfvf->tstamp;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return otx2_config_hwtstamp(netdev, req);
	case SIOCGHWTSTAMP:
		return copy_to_user(req->ifr_data, cfg,
				    sizeof(*cfg)) ? -EFAULT : 0;
	default:
		return -EOPNOTSUPP;
	}
}

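/* Steer frames destined to the VF's MAC to that VF: install an NPC MCAM
 * flow entry matching the full DMAC, with the VF's default RX action.
 */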
static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac)
{
	struct npc_install_flow_req *req;
	int err;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	ether_addr_copy(req->packet.dmac, mac);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->default_rule = 1;
	req->append = 1;
	req->vf = vf + 1;
	req->op = NIX_RX_ACTION_DEFAULT;

	err = otx2_sync_mbox_msg(&pf->mbox);
out:
	mutex_unlock(&pf->mbox.lock);
	return err;
}

static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct pci_dev *pdev = pf->pdev;
	struct otx2_vf_config *config;
	int ret;

	if (!netif_running(netdev))
		return -EAGAIN;

	if (vf >= pci_num_vf(pdev))
		return -EINVAL;

	if (!is_valid_ether_addr(mac))
		return -EINVAL;

	config = &pf->vf_configs[vf];
	ether_addr_copy(config->mac, mac);

	ret = otx2_do_set_vf_mac(pf, vf, mac);
	if (ret == 0)
		dev_info(&pdev->dev, "Reload VF driver to apply the changes\n");

	return ret;
}

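/* Per-VF VLAN offload uses a pair of reserved MCAM entries (indexed off
 * flow_cfg->vf_vlan_offset): an RX entry matching the outer VID that
 * steers traffic to the VF with VTAG type-7 processing, and a TX entry
 * that inserts the tag on egress via a vtag index allocated by AF.
 * vlan == 0 deletes both entries and frees the TX vtag.
 */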
static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
			       __be16 proto)
{
	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
	struct nix_vtag_config_rsp *vtag_rsp;
	struct npc_delete_flow_req *del_req;
	struct nix_vtag_config *vtag_req;
	struct npc_install_flow_req *req;
	struct otx2_vf_config *config;
	int err = 0;
	u32 idx;

	config = &pf->vf_configs[vf];

	/* Nothing to do; return directly instead of jumping to 'out',
	 * which would unlock a mutex that is not held.
	 */
	if (!vlan && !config->vlan)
		return 0;

	mutex_lock(&pf->mbox.lock);

	/* free old tx vtag entry */
	if (config->vlan) {
		vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
		if (!vtag_req) {
			err = -ENOMEM;
			goto out;
		}
		vtag_req->cfg_type = 0;
		vtag_req->tx.free_vtag0 = 1;
		vtag_req->tx.vtag0_idx = config->tx_vtag_idx;

		err = otx2_sync_mbox_msg(&pf->mbox);
		if (err)
			goto out;
	}

	if (!vlan && config->vlan) {
		/* rx */
		del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
		if (!del_req) {
			err = -ENOMEM;
			goto out;
		}
		idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
		del_req->entry =
			flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
		err = otx2_sync_mbox_msg(&pf->mbox);
		if (err)
			goto out;

		/* tx */
		del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
		if (!del_req) {
			err = -ENOMEM;
			goto out;
		}
		idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
		del_req->entry =
			flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
		err = otx2_sync_mbox_msg(&pf->mbox);

		goto out;
	}

	/* rx */
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
	req->entry = flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
	req->packet.vlan_tci = htons(vlan);
	req->mask.vlan_tci = htons(VLAN_VID_MASK);
	/* af fills the destination mac addr */
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->vf = vf + 1;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->vtag0_valid = true;
	req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
	req->set_cntr = 1;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err)
		goto out;

	/* tx */
	vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
	if (!vtag_req) {
		err = -ENOMEM;
		goto out;
	}

	/* configure tx vtag params */
	vtag_req->vtag_size = VTAGSIZE_T4;
	vtag_req->cfg_type = 0; /* tx vlan cfg */
	vtag_req->tx.cfg_vtag0 = 1;
	vtag_req->tx.vtag0 = ((u64)ntohs(proto) << 16) | vlan;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err)
		goto out;

	vtag_rsp = (struct nix_vtag_config_rsp *)
		   otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &vtag_req->hdr);
	if (IS_ERR(vtag_rsp)) {
		err = PTR_ERR(vtag_rsp);
		goto out;
	}
	config->tx_vtag_idx = vtag_rsp->vtag0_idx;

	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	eth_zero_addr((u8 *)&req->mask.dmac);
	idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
	req->entry = flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.tx_chan_base;
	req->intf = NIX_INTF_TX;
	req->vf = vf + 1;
	req->op = NIX_TX_ACTIONOP_UCAST_DEFAULT;
	req->vtag0_def = vtag_rsp->vtag0_idx;
	req->vtag0_op = VTAG_INSERT;
	req->set_cntr = 1;

	err = otx2_sync_mbox_msg(&pf->mbox);
out:
	config->vlan = vlan;
	mutex_unlock(&pf->mbox.lock);
	return err;
}

static int otx2_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
			    __be16 proto)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct pci_dev *pdev = pf->pdev;

	if (!netif_running(netdev))
		return -EAGAIN;

	if (vf >= pci_num_vf(pdev))
		return -EINVAL;

	/* qos is currently unsupported */
	if (vlan >= VLAN_N_VID || qos)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	if (!(pf->flags & OTX2_FLAG_VF_VLAN_SUPPORT))
		return -EOPNOTSUPP;

	return otx2_do_set_vf_vlan(pf, vf, vlan, qos, proto);
}

static int otx2_get_vf_config(struct net_device *netdev, int vf,
			      struct ifla_vf_info *ivi)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct pci_dev *pdev = pf->pdev;
	struct otx2_vf_config *config;

	if (!netif_running(netdev))
		return -EAGAIN;

	if (vf >= pci_num_vf(pdev))
		return -EINVAL;

	config = &pf->vf_configs[vf];
	ivi->vf = vf;
	ether_addr_copy(ivi->mac, config->mac);
	ivi->vlan = config->vlan;

	return 0;
}

static const struct net_device_ops otx2_netdev_ops = {
	.ndo_open		= otx2_open,
	.ndo_stop		= otx2_stop,
	.ndo_start_xmit		= otx2_xmit,
	.ndo_set_mac_address    = otx2_set_mac_address,
	.ndo_change_mtu		= otx2_change_mtu,
	.ndo_set_rx_mode	= otx2_set_rx_mode,
	.ndo_set_features	= otx2_set_features,
	.ndo_tx_timeout		= otx2_tx_timeout,
	.ndo_get_stats64	= otx2_get_stats64,
	.ndo_do_ioctl		= otx2_ioctl,
	.ndo_set_vf_mac		= otx2_set_vf_mac,
	.ndo_set_vf_vlan	= otx2_set_vf_vlan,
	.ndo_get_vf_config	= otx2_get_vf_config,
};

static int otx2_wq_init(struct otx2_nic *pf)
{
	pf->otx2_wq = create_singlethread_workqueue("otx2_wq");
	if (!pf->otx2_wq)
		return -ENOMEM;

	INIT_WORK(&pf->rx_mode_work, otx2_do_set_rx_mode);
	INIT_WORK(&pf->reset_task, otx2_reset_task);
	return 0;
}

static int otx2_check_pf_usable(struct otx2_nic *nic)
{
	u64 rev;

	rev = otx2_read64(nic, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
	rev = (rev >> 12) & 0xFF;
	/* Check if AF has setup revision for RVUM block,
	 * otherwise this driver probe should be deferred
	 * until AF driver comes up.
	 */
	if (!rev) {
		dev_warn(nic->dev,
			 "AF is not initialized, deferring probe\n");
		return -EPROBE_DEFER;
	}
	return 0;
}

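/* The initial pci_alloc_irq_vectors() in probe only covered the
 * mailbox/FLR vectors. Once AF has attached the NIX LF its MSI-X vector
 * offset is known, so reallocate enough vectors to also cover the NIX
 * completion interrupts, then re-register the mailbox interrupt.
 */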
static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
{
	struct otx2_hw *hw = &pf->hw;
	int num_vec, err;

	/* NPA interrupts are not registered, so alloc only
	 * up to the NIX vector offset.
	 */
	num_vec = hw->nix_msixoff;
	num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;

	otx2_disable_mbox_intr(pf);
	pci_free_irq_vectors(hw->pdev);
	err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n",
			__func__, num_vec);
		return err;
	}

	return otx2_register_mbox_intr(pf, false);
}

static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct otx2_nic *pf;
	struct otx2_hw *hw;
	int err, qcount;
	int num_vec;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		return err;
	}

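	/* The hardware DMAs with 48-bit addresses, hence the 48-bit
	 * IOVA mask requested below.
	 */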
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "DMA mask config failed, abort\n");
		goto err_release_regions;
	}

	pci_set_master(pdev);

	/* Set number of queues */
	qcount = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT);

	netdev = alloc_etherdev_mqs(sizeof(*pf), qcount, qcount);
	if (!netdev) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	pci_set_drvdata(pdev, netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);
	pf = netdev_priv(netdev);
	pf->netdev = netdev;
	pf->pdev = pdev;
	pf->dev = dev;
	pf->total_vfs = pci_sriov_get_totalvfs(pdev);
	pf->flags |= OTX2_FLAG_INTF_DOWN;

	hw = &pf->hw;
	hw->pdev = pdev;
	hw->rx_queues = qcount;
	hw->tx_queues = qcount;
	hw->max_queues = qcount;

	num_vec = pci_msix_vec_count(pdev);
	hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
					  GFP_KERNEL);
	if (!hw->irq_name) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
					 sizeof(cpumask_var_t), GFP_KERNEL);
	if (!hw->affinity_mask) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	/* Map CSRs */
	pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!pf->reg_base) {
		dev_err(dev, "Unable to map physical function CSRs, aborting\n");
		err = -ENOMEM;
		goto err_free_netdev;
	}

	err = otx2_check_pf_usable(pf);
	if (err)
		goto err_free_netdev;

	err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
				    RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
			__func__, RVU_PF_INT_VEC_CNT);
		goto err_free_netdev;
	}

	otx2_setup_dev_hw_settings(pf);

	/* Init PF <=> AF mailbox stuff */
	err = otx2_pfaf_mbox_init(pf);
	if (err)
		goto err_free_irq_vectors;

	/* Register mailbox interrupt */
	err = otx2_register_mbox_intr(pf, true);
	if (err)
		goto err_mbox_destroy;

	/* Request AF to attach NPA and NIX LFs to this PF.
	 * NIX and NPA LFs are needed for this PF to function as a NIC.
	 */
	err = otx2_attach_npa_nix(pf);
	if (err)
		goto err_disable_mbox_intr;

	err = otx2_realloc_msix_vectors(pf);
	if (err)
		goto err_detach_rsrc;

	err = otx2_set_real_num_queues(netdev, hw->tx_queues, hw->rx_queues);
	if (err)
		goto err_detach_rsrc;

	err = cn10k_pf_lmtst_init(pf);
	if (err)
		goto err_detach_rsrc;

	/* Assign default mac address */
	otx2_get_mac_from_af(netdev);

	/* Don't check for error; proceed without PTP support if init fails */
	otx2_ptp_init(pf);

	/* NPA's pool is a stack to which SW frees buffer pointers via Aura.
	 * HW allocates buffer pointers from the stack and uses them for
	 * DMA'ing ingress packets. In some scenarios HW can free allocated
	 * buffer pointers back to the pool. This makes it impossible for SW
	 * to maintain a parallel list where physical addresses of buffer
	 * pointers (IOVAs) given to HW can be saved for later reference.
	 *
	 * So the only way to convert an Rx packet's buffer address is to use
	 * the IOMMU's iova_to_phys() handler, which translates the address
	 * by walking through the translation tables.
	 */
	pf->iommu_domain = iommu_get_domain_for_dev(dev);
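	/* E.g. on the RX path the physical address of a buffer can be
	 * recovered roughly like (illustrative sketch only):
	 *
	 *	pa = pf->iommu_domain ?
	 *	     iommu_iova_to_phys(pf->iommu_domain, iova) : iova;
	 */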

	netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
			       NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
			       NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
			       NETIF_F_GSO_UDP_L4);
	netdev->features |= netdev->hw_features;

	netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;

	err = otx2_mcam_flow_init(pf);
	if (err)
		goto err_ptp_destroy;

	if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT)
		netdev->hw_features |= NETIF_F_NTUPLE;

	if (pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT)
		netdev->priv_flags |= IFF_UNICAST_FLT;

	/* Support TSO on tag interface */
	netdev->vlan_features |= netdev->features;
	netdev->hw_features  |= NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_HW_VLAN_STAG_TX;
	if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
				       NETIF_F_HW_VLAN_STAG_RX;
	netdev->features |= netdev->hw_features;

	netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
	netdev->watchdog_timeo = OTX2_TX_TIMEOUT;

	netdev->netdev_ops = &otx2_netdev_ops;

	/* MTU range: 64 - 9190 */
	netdev->min_mtu = OTX2_MIN_MTU;
	netdev->max_mtu = otx2_get_max_mtu(pf);

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Failed to register netdevice\n");
		goto err_del_mcam_entries;
	}

	err = otx2_wq_init(pf);
	if (err)
		goto err_unreg_netdev;

	otx2_set_ethtool_ops(netdev);

	/* Enable link notifications */
	otx2_cgx_config_linkevents(pf, true);

	/* Enable pause frames by default */
	pf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
	pf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;

	return 0;

err_unreg_netdev:
	unregister_netdev(netdev);
err_del_mcam_entries:
	otx2_mcam_flow_del(pf);
err_ptp_destroy:
	otx2_ptp_destroy(pf);
err_detach_rsrc:
	if (hw->lmt_base)
		iounmap(hw->lmt_base);
	otx2_detach_resources(&pf->mbox);
err_disable_mbox_intr:
	otx2_disable_mbox_intr(pf);
err_mbox_destroy:
	otx2_pfaf_mbox_destroy(pf);
err_free_irq_vectors:
	pci_free_irq_vectors(hw->pdev);
err_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
err_release_regions:
	pci_release_regions(pdev);
	return err;
}

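/* Forward the PF's current CGX link state to one VF as a mailbox "up"
 * notification; queued per VF on link changes so that VFs see carrier
 * updates they cannot observe directly.
 */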
static void otx2_vf_link_event_task(struct work_struct *work)
{
	struct otx2_vf_config *config;
	struct cgx_link_info_msg *req;
	struct mbox_msghdr *msghdr;
	struct otx2_nic *pf;
	int vf_idx;

	config = container_of(work, struct otx2_vf_config,
			      link_event_work.work);
	vf_idx = config - config->pf->vf_configs;
	pf = config->pf;

	msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
					 sizeof(*req), sizeof(struct msg_rsp));
	if (!msghdr) {
		dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx);
		return;
	}

	req = (struct cgx_link_info_msg *)msghdr;
	req->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
	req->hdr.sig = OTX2_MBOX_REQ_SIG;
	memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));

	otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
}

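/* SR-IOV enable/disable entry points, driven by the standard sysfs knob,
 * e.g. (the BDF is a placeholder):
 *
 *	echo 4 > /sys/bus/pci/devices/0000:02:00.0/sriov_numvfs
 *	echo 0 > /sys/bus/pci/devices/0000:02:00.0/sriov_numvfs
 *
 * Enabling sets up PF<->VF mailboxes, FLR handling and per-VF state
 * before the VFs are actually created with pci_enable_sriov().
 */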
static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *pf = netdev_priv(netdev);
	int ret, i;

	/* Init PF <=> VF mailbox stuff */
	ret = otx2_pfvf_mbox_init(pf, numvfs);
	if (ret)
		return ret;

	ret = otx2_register_pfvf_mbox_intr(pf, numvfs);
	if (ret)
		goto free_mbox;

	pf->vf_configs = kcalloc(numvfs, sizeof(struct otx2_vf_config),
				 GFP_KERNEL);
	if (!pf->vf_configs) {
		ret = -ENOMEM;
		goto free_intr;
	}

	for (i = 0; i < numvfs; i++) {
		pf->vf_configs[i].pf = pf;
		pf->vf_configs[i].intf_down = true;
		INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
				  otx2_vf_link_event_task);
	}

	ret = otx2_pf_flr_init(pf, numvfs);
	if (ret)
		goto free_configs;

	ret = otx2_register_flr_me_intr(pf, numvfs);
	if (ret)
		goto free_flr;

	ret = pci_enable_sriov(pdev, numvfs);
	if (ret)
		goto free_flr_intr;

	return numvfs;
free_flr_intr:
	otx2_disable_flr_me_intr(pf);
free_flr:
	otx2_flr_wq_destroy(pf);
free_configs:
	kfree(pf->vf_configs);
free_intr:
	otx2_disable_pfvf_mbox_intr(pf, numvfs);
free_mbox:
	otx2_pfvf_mbox_destroy(pf);
	return ret;
}

static int otx2_sriov_disable(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *pf = netdev_priv(netdev);
	int numvfs = pci_num_vf(pdev);
	int i;

	if (!numvfs)
		return 0;

	pci_disable_sriov(pdev);

	/* Use the VF count snapshotted above: pci_num_vf() reads zero
	 * once SR-IOV has been disabled.
	 */
	for (i = 0; i < numvfs; i++)
		cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
	kfree(pf->vf_configs);

	otx2_disable_flr_me_intr(pf);
	otx2_flr_wq_destroy(pf);
	otx2_disable_pfvf_mbox_intr(pf, numvfs);
	otx2_pfvf_mbox_destroy(pf);

	return 0;
}

static int otx2_sriov_configure(struct pci_dev *pdev, int numvfs)
{
	if (numvfs == 0)
		return otx2_sriov_disable(pdev);
	else
		return otx2_sriov_enable(pdev, numvfs);
}

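/* Device teardown: quiesce timestamping and link events, then unwind in
 * roughly the reverse order of probe (netdev, SR-IOV, PTP, MCAM flows,
 * LFs, mailbox, IRQ vectors, PCI resources).
 */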
static void otx2_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *pf;

	if (!netdev)
		return;

	pf = netdev_priv(netdev);

	pf->flags |= OTX2_FLAG_PF_SHUTDOWN;

	if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED)
		otx2_config_hw_tx_tstamp(pf, false);
	if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
		otx2_config_hw_rx_tstamp(pf, false);

	cancel_work_sync(&pf->reset_task);
	/* Disable link notifications */
	otx2_cgx_config_linkevents(pf, false);

	unregister_netdev(netdev);
	otx2_sriov_disable(pf->pdev);
	if (pf->otx2_wq)
		destroy_workqueue(pf->otx2_wq);

	otx2_ptp_destroy(pf);
	otx2_mcam_flow_del(pf);
	otx2_detach_resources(&pf->mbox);
	if (pf->hw.lmt_base)
		iounmap(pf->hw.lmt_base);

	otx2_disable_mbox_intr(pf);
	otx2_pfaf_mbox_destroy(pf);
	pci_free_irq_vectors(pf->pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	pci_release_regions(pdev);
}

static struct pci_driver otx2_pf_driver = {
	.name = DRV_NAME,
	.id_table = otx2_pf_id_table,
	.probe = otx2_probe,
	.shutdown = otx2_remove,
	.remove = otx2_remove,
	.sriov_configure = otx2_sriov_configure
};

static int __init otx2_rvupf_init_module(void)
{
	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);

	return pci_register_driver(&otx2_pf_driver);
}

static void __exit otx2_rvupf_cleanup_module(void)
{
	pci_unregister_driver(&otx2_pf_driver);
}

module_init(otx2_rvupf_init_module);
module_exit(otx2_rvupf_cleanup_module);