1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Physical Function ethernet driver
3  *
4  * Copyright (C) 2020 Marvell International Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 
11 #include <linux/module.h>
12 #include <linux/interrupt.h>
13 #include <linux/pci.h>
14 #include <linux/etherdevice.h>
15 #include <linux/of.h>
16 #include <linux/if_vlan.h>
17 #include <linux/iommu.h>
18 #include <net/ip.h>
19 
20 #include "otx2_reg.h"
21 #include "otx2_common.h"
22 #include "otx2_txrx.h"
23 #include "otx2_struct.h"
24 
25 #define DRV_NAME	"octeontx2-nicpf"
26 #define DRV_STRING	"Marvell OcteonTX2 NIC Physical Function Driver"
27 
28 /* Supported devices */
29 static const struct pci_device_id otx2_pf_id_table[] = {
30 	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF) },
31 	{ 0, }  /* end of table */
32 };
33 
34 MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
35 MODULE_DESCRIPTION(DRV_STRING);
36 MODULE_LICENSE("GPL v2");
37 MODULE_DEVICE_TABLE(pci, otx2_pf_id_table);
38 
39 enum {
40 	TYPE_PFAF,
41 	TYPE_PFVF,
42 };
43 
44 static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
45 {
46 	bool if_up = netif_running(netdev);
47 	int err = 0;
48 
49 	if (if_up)
50 		otx2_stop(netdev);
51 
52 	netdev_info(netdev, "Changing MTU from %d to %d\n",
53 		    netdev->mtu, new_mtu);
54 	netdev->mtu = new_mtu;
55 
56 	if (if_up)
57 		err = otx2_open(netdev);
58 
59 	return err;
60 }
61 
62 static void otx2_disable_flr_me_intr(struct otx2_nic *pf)
63 {
64 	int irq, vfs = pf->total_vfs;
65 
66 	/* Disable VFs ME interrupts */
67 	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
68 	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0);
69 	free_irq(irq, pf);
70 
71 	/* Disable VFs FLR interrupts */
72 	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
73 	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0);
74 	free_irq(irq, pf);
75 
76 	if (vfs <= 64)
77 		return;
78 
79 	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
80 	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME1);
81 	free_irq(irq, pf);
82 
83 	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
84 	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR1);
85 	free_irq(irq, pf);
86 }
87 
88 static void otx2_flr_wq_destroy(struct otx2_nic *pf)
89 {
90 	if (!pf->flr_wq)
91 		return;
92 	destroy_workqueue(pf->flr_wq);
93 	pf->flr_wq = NULL;
94 	devm_kfree(pf->dev, pf->flr_wrk);
95 }
96 
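/* Work handler for a VF FLR (Function Level Reset): send a VF_FLR mailbox
 * message to the AF on behalf of the reset VF and, once the AF has acked it,
 * clear the VF's transaction pending bit and re-enable its FLR interrupt.
 */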
97 static void otx2_flr_handler(struct work_struct *work)
98 {
99 	struct flr_work *flrwork = container_of(work, struct flr_work, work);
100 	struct otx2_nic *pf = flrwork->pf;
101 	struct mbox *mbox = &pf->mbox;
102 	struct msg_req *req;
103 	int vf, reg = 0;
104 
105 	vf = flrwork - pf->flr_wrk;
106 
107 	mutex_lock(&mbox->lock);
108 	req = otx2_mbox_alloc_msg_vf_flr(mbox);
109 	if (!req) {
110 		mutex_unlock(&mbox->lock);
111 		return;
112 	}
113 	req->hdr.pcifunc &= RVU_PFVF_FUNC_MASK;
114 	req->hdr.pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;
115 
116 	if (!otx2_sync_mbox_msg(&pf->mbox)) {
117 		if (vf >= 64) {
118 			reg = 1;
119 			vf = vf - 64;
120 		}
121 		/* clear transaction pending bit */
122 		otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
123 		otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
124 	}
125 
126 	mutex_unlock(&mbox->lock);
127 }
128 
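/* FLR interrupt handler: for every VF with a pending FLR, queue its flr_work,
 * clear the interrupt and keep it masked until the work handler has notified
 * the AF.
 */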
129 static irqreturn_t otx2_pf_flr_intr_handler(int irq, void *pf_irq)
130 {
131 	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
132 	int reg, dev, vf, start_vf, num_reg = 1;
133 	u64 intr;
134 
135 	if (pf->total_vfs > 64)
136 		num_reg = 2;
137 
138 	for (reg = 0; reg < num_reg; reg++) {
139 		intr = otx2_read64(pf, RVU_PF_VFFLR_INTX(reg));
140 		if (!intr)
141 			continue;
142 		start_vf = 64 * reg;
143 		for (vf = 0; vf < 64; vf++) {
144 			if (!(intr & BIT_ULL(vf)))
145 				continue;
146 			dev = vf + start_vf;
147 			queue_work(pf->flr_wq, &pf->flr_wrk[dev].work);
148 			/* Clear interrupt */
149 			otx2_write64(pf, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
150 			/* Disable the interrupt */
151 			otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(reg),
152 				     BIT_ULL(vf));
153 		}
154 	}
155 	return IRQ_HANDLED;
156 }
157 
158 static irqreturn_t otx2_pf_me_intr_handler(int irq, void *pf_irq)
159 {
160 	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
161 	int vf, reg, num_reg = 1;
162 	u64 intr;
163 
164 	if (pf->total_vfs > 64)
165 		num_reg = 2;
166 
167 	for (reg = 0; reg < num_reg; reg++) {
168 		intr = otx2_read64(pf, RVU_PF_VFME_INTX(reg));
169 		if (!intr)
170 			continue;
171 		for (vf = 0; vf < 64; vf++) {
172 			if (!(intr & BIT_ULL(vf)))
173 				continue;
174 			/* clear trpend bit */
175 			otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
176 			/* clear interrupt */
177 			otx2_write64(pf, RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
178 		}
179 	}
180 	return IRQ_HANDLED;
181 }
182 
183 static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
184 {
185 	struct otx2_hw *hw = &pf->hw;
186 	char *irq_name;
187 	int ret;
188 
189 	/* Register ME interrupt handler */
190 	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE];
191 	snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc));
192 	ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0),
193 			  otx2_pf_me_intr_handler, 0, irq_name, pf);
194 	if (ret) {
195 		dev_err(pf->dev,
196 			"RVUPF: IRQ registration failed for ME0\n");
197 	}
198 
199 	/* Register FLR interrupt handler */
200 	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE];
201 	snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc));
202 	ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0),
203 			  otx2_pf_flr_intr_handler, 0, irq_name, pf);
204 	if (ret) {
205 		dev_err(pf->dev,
206 			"RVUPF: IRQ registration failed for FLR0\n");
207 		return ret;
208 	}
209 
210 	if (numvfs > 64) {
211 		irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE];
212 		snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1",
213 			 rvu_get_pf(pf->pcifunc));
214 		ret = request_irq(pci_irq_vector
215 				  (pf->pdev, RVU_PF_INT_VEC_VFME1),
216 				  otx2_pf_me_intr_handler, 0, irq_name, pf);
217 		if (ret) {
218 			dev_err(pf->dev,
219 				"RVUPF: IRQ registration failed for ME1\n");
220 		}
221 		irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE];
222 		snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1",
223 			 rvu_get_pf(pf->pcifunc));
224 		ret = request_irq(pci_irq_vector
225 				  (pf->pdev, RVU_PF_INT_VEC_VFFLR1),
226 				  otx2_pf_flr_intr_handler, 0, irq_name, pf);
227 		if (ret) {
228 			dev_err(pf->dev,
229 				"RVUPF: IRQ registration failed for FLR1\n");
230 			return ret;
231 		}
232 	}
233 
234 	/* Enable ME interrupt for all VFs */
235 	otx2_write64(pf, RVU_PF_VFME_INTX(0), INTR_MASK(numvfs));
236 	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(numvfs));
237 
238 	/* Enable FLR interrupt for all VFs */
239 	otx2_write64(pf, RVU_PF_VFFLR_INTX(0), INTR_MASK(numvfs));
240 	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(numvfs));
241 
242 	if (numvfs > 64) {
243 		numvfs -= 64;
244 
245 		otx2_write64(pf, RVU_PF_VFME_INTX(1), INTR_MASK(numvfs));
246 		otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(1),
247 			     INTR_MASK(numvfs));
248 
249 		otx2_write64(pf, RVU_PF_VFFLR_INTX(1), INTR_MASK(numvfs));
250 		otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(1),
251 			     INTR_MASK(numvfs));
252 	}
253 	return 0;
254 }
255 
256 static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
257 {
258 	int vf;
259 
260 	pf->flr_wq = alloc_workqueue("otx2_pf_flr_wq",
261 				     WQ_UNBOUND | WQ_HIGHPRI, 1);
262 	if (!pf->flr_wq)
263 		return -ENOMEM;
264 
265 	pf->flr_wrk = devm_kcalloc(pf->dev, num_vfs,
266 				   sizeof(struct flr_work), GFP_KERNEL);
267 	if (!pf->flr_wrk) {
268 		destroy_workqueue(pf->flr_wq);
269 		return -ENOMEM;
270 	}
271 
272 	for (vf = 0; vf < num_vfs; vf++) {
273 		pf->flr_wrk[vf].pf = pf;
274 		INIT_WORK(&pf->flr_wrk[vf].work, otx2_flr_handler);
275 	}
276 
277 	return 0;
278 }
279 
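/* Scan the interrupt bitmap and, for each device with pending mailbox
 * messages, latch the message count from the mailbox header and queue the
 * corresponding down/up message work on the given workqueue.
 */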
280 static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
281 			    int first, int mdevs, u64 intr, int type)
282 {
283 	struct otx2_mbox_dev *mdev;
284 	struct otx2_mbox *mbox;
285 	struct mbox_hdr *hdr;
286 	int i;
287 
288 	for (i = first; i < mdevs; i++) {
289 		/* The intr bitmap starts from bit 0 for device 'first' */
290 		if (!(intr & BIT_ULL(i - first)))
291 			continue;
292 
293 		mbox = &mw->mbox;
294 		mdev = &mbox->dev[i];
295 		if (type == TYPE_PFAF)
296 			otx2_sync_mbox_bbuf(mbox, i);
297 		hdr = mdev->mbase + mbox->rx_start;
298 		/* The hdr->num_msgs is set to zero immediately in the interrupt
299 		 * handler to ensure that it holds a correct value the next time
300 		 * the interrupt handler is called.
301 		 * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler.
302 		 * pf->mbox.up_num_msgs holds the data for use in
303 		 * pfaf_mbox_up_handler.
304 		 */
305 		if (hdr->num_msgs) {
306 			mw[i].num_msgs = hdr->num_msgs;
307 			hdr->num_msgs = 0;
308 			if (type == TYPE_PFAF)
309 				memset(mbox->hwbase + mbox->rx_start, 0,
310 				       ALIGN(sizeof(struct mbox_hdr),
311 					     sizeof(u64)));
312 
313 			queue_work(mbox_wq, &mw[i].mbox_wrk);
314 		}
315 
316 		mbox = &mw->mbox_up;
317 		mdev = &mbox->dev[i];
318 		if (type == TYPE_PFAF)
319 			otx2_sync_mbox_bbuf(mbox, i);
320 		hdr = mdev->mbase + mbox->rx_start;
321 		if (hdr->num_msgs) {
322 			mw[i].up_num_msgs = hdr->num_msgs;
323 			hdr->num_msgs = 0;
324 			if (type == TYPE_PFAF)
325 				memset(mbox->hwbase + mbox->rx_start, 0,
326 				       ALIGN(sizeof(struct mbox_hdr),
327 					     sizeof(u64)));
328 
329 			queue_work(mbox_wq, &mw[i].mbox_up_wrk);
330 		}
331 	}
332 }
333 
334 static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev,
335 				  struct otx2_mbox *pfvf_mbox, void *bbuf_base,
336 				  int devid)
337 {
338 	struct otx2_mbox_dev *src_mdev = mdev;
339 	int offset;
340 
341 	/* Msgs are already copied, trigger VF's mbox irq */
342 	smp_wmb();
343 
344 	offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift);
345 	writeq(1, (void __iomem *)pfvf_mbox->reg_base + offset);
346 
347 	/* Restore VF's mbox bounce buffer region address */
348 	src_mdev->mbase = bbuf_base;
349 }
350 
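/* Forward mailbox messages between VF, PF and AF without explicit copies:
 * MBOX_DIR_PFAF    - forward VF requests to AF and raise the VF's mailbox
 *                    interrupt once AF has responded.
 * MBOX_DIR_PFVF_UP - forward AF notifications (up messages) to a VF.
 * MBOX_DIR_VFPF_UP - forward a VF's up-message responses back to AF.
 */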
351 static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
352 				     struct otx2_mbox *src_mbox,
353 				     int dir, int vf, int num_msgs)
354 {
355 	struct otx2_mbox_dev *src_mdev, *dst_mdev;
356 	struct mbox_hdr *mbox_hdr;
357 	struct mbox_hdr *req_hdr;
358 	struct mbox *dst_mbox;
359 	int dst_size, err;
360 
361 	if (dir == MBOX_DIR_PFAF) {
362 		/* Set VF's mailbox memory as PF's bounce buffer memory, so
363 		 * that explicit copying of VF's msgs to PF=>AF mbox region
364 		 * and AF=>PF responses to VF's mbox region can be avoided.
365 		 */
366 		src_mdev = &src_mbox->dev[vf];
367 		mbox_hdr = src_mbox->hwbase +
368 				src_mbox->rx_start + (vf * MBOX_SIZE);
369 
370 		dst_mbox = &pf->mbox;
371 		dst_size = dst_mbox->mbox.tx_size -
372 				ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
373 		/* Check if msgs fit into destination area and have a valid size */
374 		if (mbox_hdr->msg_size > dst_size || !mbox_hdr->msg_size)
375 			return -EINVAL;
376 
377 		dst_mdev = &dst_mbox->mbox.dev[0];
378 
379 		mutex_lock(&pf->mbox.lock);
380 		dst_mdev->mbase = src_mdev->mbase;
381 		dst_mdev->msg_size = mbox_hdr->msg_size;
382 		dst_mdev->num_msgs = num_msgs;
383 		err = otx2_sync_mbox_msg(dst_mbox);
384 		if (err) {
385 			dev_warn(pf->dev,
386 				 "AF not responding to VF%d messages\n", vf);
387 			/* restore PF mbase and exit */
388 			dst_mdev->mbase = pf->mbox.bbuf_base;
389 			mutex_unlock(&pf->mbox.lock);
390 			return err;
391 		}
392 		/* At this point, all the VF messages sent to AF are acked
393 		 * with proper responses and the responses are copied to the
394 		 * VF mailbox, hence raise an interrupt to the VF.
395 		 */
396 		req_hdr = (struct mbox_hdr *)(dst_mdev->mbase +
397 					      dst_mbox->mbox.rx_start);
398 		req_hdr->num_msgs = num_msgs;
399 
400 		otx2_forward_msg_pfvf(dst_mdev, &pf->mbox_pfvf[0].mbox,
401 				      pf->mbox.bbuf_base, vf);
402 		mutex_unlock(&pf->mbox.lock);
403 	} else if (dir == MBOX_DIR_PFVF_UP) {
404 		src_mdev = &src_mbox->dev[0];
405 		mbox_hdr = src_mbox->hwbase + src_mbox->rx_start;
406 		req_hdr = (struct mbox_hdr *)(src_mdev->mbase +
407 					      src_mbox->rx_start);
408 		req_hdr->num_msgs = num_msgs;
409 
410 		dst_mbox = &pf->mbox_pfvf[0];
411 		dst_size = dst_mbox->mbox_up.tx_size -
412 				ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
413 		/* Check if msgs fit into destination area */
414 		if (mbox_hdr->msg_size > dst_size)
415 			return -EINVAL;
416 
417 		dst_mdev = &dst_mbox->mbox_up.dev[vf];
418 		dst_mdev->mbase = src_mdev->mbase;
419 		dst_mdev->msg_size = mbox_hdr->msg_size;
420 		dst_mdev->num_msgs = mbox_hdr->num_msgs;
421 		err = otx2_sync_mbox_up_msg(dst_mbox, vf);
422 		if (err) {
423 			dev_warn(pf->dev,
424 				 "VF%d is not responding to mailbox\n", vf);
425 			return err;
426 		}
427 	} else if (dir == MBOX_DIR_VFPF_UP) {
428 		req_hdr = (struct mbox_hdr *)(src_mbox->dev[0].mbase +
429 					      src_mbox->rx_start);
430 		req_hdr->num_msgs = num_msgs;
431 		otx2_forward_msg_pfvf(&pf->mbox_pfvf->mbox_up.dev[vf],
432 				      &pf->mbox.mbox_up,
433 				      pf->mbox_pfvf[vf].bbuf_base,
434 				      0);
435 	}
436 
437 	return 0;
438 }
439 
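/* Work handler for requests received from a VF: validate each message, stamp
 * the VF's number into its pcifunc and forward the whole batch to the AF via
 * the PF->AF mailbox.
 */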
440 static void otx2_pfvf_mbox_handler(struct work_struct *work)
441 {
442 	struct mbox_msghdr *msg = NULL;
443 	int offset, vf_idx, id, err;
444 	struct otx2_mbox_dev *mdev;
445 	struct mbox_hdr *req_hdr;
446 	struct otx2_mbox *mbox;
447 	struct mbox *vf_mbox;
448 	struct otx2_nic *pf;
449 
450 	vf_mbox = container_of(work, struct mbox, mbox_wrk);
451 	pf = vf_mbox->pfvf;
452 	vf_idx = vf_mbox - pf->mbox_pfvf;
453 
454 	mbox = &pf->mbox_pfvf[0].mbox;
455 	mdev = &mbox->dev[vf_idx];
456 	req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
457 
458 	offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
459 
460 	for (id = 0; id < vf_mbox->num_msgs; id++) {
461 		msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
462 					     offset);
463 
464 		if (msg->sig != OTX2_MBOX_REQ_SIG)
465 			goto inval_msg;
466 
467 		/* Set VF's number in each of the msgs */
468 		msg->pcifunc &= RVU_PFVF_FUNC_MASK;
469 		msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK;
470 		offset = msg->next_msgoff;
471 	}
472 	err = otx2_forward_vf_mbox_msgs(pf, mbox, MBOX_DIR_PFAF, vf_idx,
473 					vf_mbox->num_msgs);
474 	if (err)
475 		goto inval_msg;
476 	return;
477 
478 inval_msg:
479 	otx2_reply_invalid_msg(mbox, vf_idx, 0, msg->id);
480 	otx2_mbox_msg_send(mbox, vf_idx);
481 }
482 
483 static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
484 {
485 	struct mbox *vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
486 	struct otx2_nic *pf = vf_mbox->pfvf;
487 	struct otx2_mbox_dev *mdev;
488 	int offset, id, vf_idx = 0;
489 	struct mbox_hdr *rsp_hdr;
490 	struct mbox_msghdr *msg;
491 	struct otx2_mbox *mbox;
492 
493 	vf_idx = vf_mbox - pf->mbox_pfvf;
494 	mbox = &pf->mbox_pfvf[0].mbox_up;
495 	mdev = &mbox->dev[vf_idx];
496 
497 	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
498 	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
499 
500 	for (id = 0; id < vf_mbox->up_num_msgs; id++) {
501 		msg = mdev->mbase + offset;
502 
503 		if (msg->id >= MBOX_MSG_MAX) {
504 			dev_err(pf->dev,
505 				"Mbox msg with unknown ID 0x%x\n", msg->id);
506 			goto end;
507 		}
508 
509 		if (msg->sig != OTX2_MBOX_RSP_SIG) {
510 			dev_err(pf->dev,
511 				"Mbox msg with wrong signature %x, ID 0x%x\n",
512 				msg->sig, msg->id);
513 			goto end;
514 		}
515 
516 		switch (msg->id) {
517 		case MBOX_MSG_CGX_LINK_EVENT:
518 			break;
519 		default:
520 			if (msg->rc)
521 				dev_err(pf->dev,
522 					"Mbox msg response has err %d, ID 0x%x\n",
523 					msg->rc, msg->id);
524 			break;
525 		}
526 
527 end:
528 		offset = mbox->rx_start + msg->next_msgoff;
529 		if (mdev->msgs_acked == (vf_mbox->up_num_msgs - 1))
530 			__otx2_mbox_reset(mbox, 0);
531 		mdev->msgs_acked++;
532 	}
533 }
534 
535 static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
536 {
537 	struct otx2_nic *pf = (struct otx2_nic *)(pf_irq);
538 	int vfs = pf->total_vfs;
539 	struct mbox *mbox;
540 	u64 intr;
541 
542 	mbox = pf->mbox_pfvf;
543 	/* Handle VF interrupts */
544 	if (vfs > 64) {
545 		intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1));
546 		otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
547 		otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
548 				TYPE_PFVF);
549 		vfs -= 64;
550 	}
551 
552 	intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
553 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr);
554 
555 	otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
556 
557 	return IRQ_HANDLED;
558 }
559 
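/* Set up the PF <=> VF mailbox: map the VF mailbox region pointed to by
 * RVU_PF_VF_BAR4_ADDR and initialize the down (VF requests) and up
 * (PF notifications) mailbox directions for all VFs.
 */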
560 static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
561 {
562 	void __iomem *hwbase;
563 	struct mbox *mbox;
564 	int err, vf;
565 	u64 base;
566 
567 	if (!numvfs)
568 		return -EINVAL;
569 
570 	pf->mbox_pfvf = devm_kcalloc(&pf->pdev->dev, numvfs,
571 				     sizeof(struct mbox), GFP_KERNEL);
572 	if (!pf->mbox_pfvf)
573 		return -ENOMEM;
574 
575 	pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox",
576 					   WQ_UNBOUND | WQ_HIGHPRI |
577 					   WQ_MEM_RECLAIM, 1);
578 	if (!pf->mbox_pfvf_wq)
579 		return -ENOMEM;
580 
581 	base = readq((void __iomem *)((u64)pf->reg_base + RVU_PF_VF_BAR4_ADDR));
582 	hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
583 
584 	if (!hwbase) {
585 		err = -ENOMEM;
586 		goto free_wq;
587 	}
588 
589 	mbox = &pf->mbox_pfvf[0];
590 	err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
591 			     MBOX_DIR_PFVF, numvfs);
592 	if (err)
593 		goto free_iomem;
594 
595 	err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
596 			     MBOX_DIR_PFVF_UP, numvfs);
597 	if (err)
598 		goto free_iomem;
599 
600 	for (vf = 0; vf < numvfs; vf++) {
601 		mbox->pfvf = pf;
602 		INIT_WORK(&mbox->mbox_wrk, otx2_pfvf_mbox_handler);
603 		INIT_WORK(&mbox->mbox_up_wrk, otx2_pfvf_mbox_up_handler);
604 		mbox++;
605 	}
606 
607 	return 0;
608 
609 free_iomem:
610 	if (hwbase)
611 		iounmap(hwbase);
612 free_wq:
613 	destroy_workqueue(pf->mbox_pfvf_wq);
614 	return err;
615 }
616 
617 static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf)
618 {
619 	struct mbox *mbox = &pf->mbox_pfvf[0];
620 
621 	if (!mbox)
622 		return;
623 
624 	if (pf->mbox_pfvf_wq) {
625 		destroy_workqueue(pf->mbox_pfvf_wq);
626 		pf->mbox_pfvf_wq = NULL;
627 	}
628 
629 	if (mbox->mbox.hwbase)
630 		iounmap(mbox->mbox.hwbase);
631 
632 	otx2_mbox_destroy(&mbox->mbox);
633 }
634 
635 static void otx2_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
636 {
637 	/* Clear PF <=> VF mailbox IRQ */
638 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
639 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
640 
641 	/* Enable PF <=> VF mailbox IRQ */
642 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(numvfs));
643 	if (numvfs > 64) {
644 		numvfs -= 64;
645 		otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
646 			     INTR_MASK(numvfs));
647 	}
648 }
649 
650 static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
651 {
652 	int vector;
653 
654 	/* Disable PF <=> VF mailbox IRQ */
655 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull);
656 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull);
657 
658 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
659 	vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
660 	free_irq(vector, pf);
661 
662 	if (numvfs > 64) {
663 		otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
664 		vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
665 		free_irq(vector, pf);
666 	}
667 }
668 
669 static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
670 {
671 	struct otx2_hw *hw = &pf->hw;
672 	char *irq_name;
673 	int err;
674 
675 	/* Register MBOX0 interrupt handler */
676 	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE];
677 	if (pf->pcifunc)
678 		snprintf(irq_name, NAME_SIZE,
679 			 "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc));
680 	else
681 		snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0");
682 	err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0),
683 			  otx2_pfvf_mbox_intr_handler, 0, irq_name, pf);
684 	if (err) {
685 		dev_err(pf->dev,
686 			"RVUPF: IRQ registration failed for PFVF mbox0 irq\n");
687 		return err;
688 	}
689 
690 	if (numvfs > 64) {
691 		/* Register MBOX1 interrupt handler */
692 		irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE];
693 		if (pf->pcifunc)
694 			snprintf(irq_name, NAME_SIZE,
695 				 "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc));
696 		else
697 			snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1");
698 		err = request_irq(pci_irq_vector(pf->pdev,
699 						 RVU_PF_INT_VEC_VFPF_MBOX1),
700 						 otx2_pfvf_mbox_intr_handler,
701 						 0, irq_name, pf);
702 		if (err) {
703 			dev_err(pf->dev,
704 				"RVUPF: IRQ registration failed for PFVF mbox1 irq\n");
705 			return err;
706 		}
707 	}
708 
709 	otx2_enable_pfvf_mbox_intr(pf, numvfs);
710 
711 	return 0;
712 }
713 
714 static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
715 				       struct mbox_msghdr *msg)
716 {
717 	int devid;
718 
719 	if (msg->id >= MBOX_MSG_MAX) {
720 		dev_err(pf->dev,
721 			"Mbox msg with unknown ID 0x%x\n", msg->id);
722 		return;
723 	}
724 
725 	if (msg->sig != OTX2_MBOX_RSP_SIG) {
726 		dev_err(pf->dev,
727 			"Mbox msg with wrong signature %x, ID 0x%x\n",
728 			 msg->sig, msg->id);
729 		return;
730 	}
731 
732 	/* Message response is heading to a VF */
733 	devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
734 	if (devid) {
735 		struct otx2_vf_config *config = &pf->vf_configs[devid - 1];
736 		struct delayed_work *dwork;
737 
738 		switch (msg->id) {
739 		case MBOX_MSG_NIX_LF_START_RX:
740 			config->intf_down = false;
741 			dwork = &config->link_event_work;
742 			schedule_delayed_work(dwork, msecs_to_jiffies(100));
743 			break;
744 		case MBOX_MSG_NIX_LF_STOP_RX:
745 			config->intf_down = true;
746 			break;
747 		}
748 
749 		return;
750 	}
751 
752 	switch (msg->id) {
753 	case MBOX_MSG_READY:
754 		pf->pcifunc = msg->pcifunc;
755 		break;
756 	case MBOX_MSG_MSIX_OFFSET:
757 		mbox_handler_msix_offset(pf, (struct msix_offset_rsp *)msg);
758 		break;
759 	case MBOX_MSG_NPA_LF_ALLOC:
760 		mbox_handler_npa_lf_alloc(pf, (struct npa_lf_alloc_rsp *)msg);
761 		break;
762 	case MBOX_MSG_NIX_LF_ALLOC:
763 		mbox_handler_nix_lf_alloc(pf, (struct nix_lf_alloc_rsp *)msg);
764 		break;
765 	case MBOX_MSG_NIX_TXSCH_ALLOC:
766 		mbox_handler_nix_txsch_alloc(pf,
767 					     (struct nix_txsch_alloc_rsp *)msg);
768 		break;
769 	case MBOX_MSG_NIX_BP_ENABLE:
770 		mbox_handler_nix_bp_enable(pf, (struct nix_bp_cfg_rsp *)msg);
771 		break;
772 	case MBOX_MSG_CGX_STATS:
773 		mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
774 		break;
775 	default:
776 		if (msg->rc)
777 			dev_err(pf->dev,
778 				"Mbox msg response has err %d, ID 0x%x\n",
779 				msg->rc, msg->id);
780 		break;
781 	}
782 }
783 
784 static void otx2_pfaf_mbox_handler(struct work_struct *work)
785 {
786 	struct otx2_mbox_dev *mdev;
787 	struct mbox_hdr *rsp_hdr;
788 	struct mbox_msghdr *msg;
789 	struct otx2_mbox *mbox;
790 	struct mbox *af_mbox;
791 	struct otx2_nic *pf;
792 	int offset, id;
793 
794 	af_mbox = container_of(work, struct mbox, mbox_wrk);
795 	mbox = &af_mbox->mbox;
796 	mdev = &mbox->dev[0];
797 	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
798 
799 	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
800 	pf = af_mbox->pfvf;
801 
802 	for (id = 0; id < af_mbox->num_msgs; id++) {
803 		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
804 		otx2_process_pfaf_mbox_msg(pf, msg);
805 		offset = mbox->rx_start + msg->next_msgoff;
806 		if (mdev->msgs_acked == (af_mbox->num_msgs - 1))
807 			__otx2_mbox_reset(mbox, 0);
808 		mdev->msgs_acked++;
809 	}
810 
811 }
812 
813 static void otx2_handle_link_event(struct otx2_nic *pf)
814 {
815 	struct cgx_link_user_info *linfo = &pf->linfo;
816 	struct net_device *netdev = pf->netdev;
817 
818 	pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name,
819 		linfo->link_up ? "UP" : "DOWN", linfo->speed,
820 		linfo->full_duplex ? "Full" : "Half");
821 	if (linfo->link_up) {
822 		netif_carrier_on(netdev);
823 		netif_tx_start_all_queues(netdev);
824 	} else {
825 		netif_tx_stop_all_queues(netdev);
826 		netif_carrier_off(netdev);
827 	}
828 }
829 
830 int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
831 					struct cgx_link_info_msg *msg,
832 					struct msg_rsp *rsp)
833 {
834 	int i;
835 
836 	/* Copy the link info sent by AF */
837 	pf->linfo = msg->link_info;
838 
839 	/* notify VFs about link event */
840 	for (i = 0; i < pci_num_vf(pf->pdev); i++) {
841 		struct otx2_vf_config *config = &pf->vf_configs[i];
842 		struct delayed_work *dwork = &config->link_event_work;
843 
844 		if (config->intf_down)
845 			continue;
846 
847 		schedule_delayed_work(dwork, msecs_to_jiffies(100));
848 	}
849 
850 	/* interface has not been fully configured yet */
851 	if (pf->flags & OTX2_FLAG_INTF_DOWN)
852 		return 0;
853 
854 	otx2_handle_link_event(pf);
855 	return 0;
856 }
857 
858 static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
859 				    struct mbox_msghdr *req)
860 {
861 	/* Check if valid, if not reply with an invalid msg */
862 	if (req->sig != OTX2_MBOX_REQ_SIG) {
863 		otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
864 		return -ENODEV;
865 	}
866 
867 	switch (req->id) {
868 #define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
869 	case _id: {							\
870 		struct _rsp_type *rsp;					\
871 		int err;						\
872 									\
873 		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
874 			&pf->mbox.mbox_up, 0,				\
875 			sizeof(struct _rsp_type));			\
876 		if (!rsp)						\
877 			return -ENOMEM;					\
878 									\
879 		rsp->hdr.id = _id;					\
880 		rsp->hdr.sig = OTX2_MBOX_RSP_SIG;			\
881 		rsp->hdr.pcifunc = 0;					\
882 		rsp->hdr.rc = 0;					\
883 									\
884 		err = otx2_mbox_up_handler_ ## _fn_name(		\
885 			pf, (struct _req_type *)req, rsp);		\
886 		return err;						\
887 	}
888 MBOX_UP_CGX_MESSAGES
889 #undef M
890 		break;
891 	default:
892 		otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
893 		return -ENODEV;
894 	}
895 	return 0;
896 }
897 
898 static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
899 {
900 	struct mbox *af_mbox = container_of(work, struct mbox, mbox_up_wrk);
901 	struct otx2_mbox *mbox = &af_mbox->mbox_up;
902 	struct otx2_mbox_dev *mdev = &mbox->dev[0];
903 	struct otx2_nic *pf = af_mbox->pfvf;
904 	int offset, id, devid = 0;
905 	struct mbox_hdr *rsp_hdr;
906 	struct mbox_msghdr *msg;
907 
908 	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
909 
910 	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
911 
912 	for (id = 0; id < af_mbox->up_num_msgs; id++) {
913 		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
914 
915 		devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
916 		/* Skip processing VF's messages here; they are forwarded below */
917 		if (!devid)
918 			otx2_process_mbox_msg_up(pf, msg);
919 		offset = mbox->rx_start + msg->next_msgoff;
920 	}
921 	if (devid) {
922 		otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up,
923 					  MBOX_DIR_PFVF_UP, devid - 1,
924 					  af_mbox->up_num_msgs);
925 		return;
926 	}
927 
928 	otx2_mbox_msg_send(mbox, 0);
929 }
930 
931 static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
932 {
933 	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
934 	struct mbox *mbox;
935 
936 	/* Clear the IRQ */
937 	otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
938 
939 	mbox = &pf->mbox;
940 	otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF);
941 
942 	return IRQ_HANDLED;
943 }
944 
945 static void otx2_disable_mbox_intr(struct otx2_nic *pf)
946 {
947 	int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX);
948 
949 	/* Disable AF => PF mailbox IRQ */
950 	otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0));
951 	free_irq(vector, pf);
952 }
953 
954 static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
955 {
956 	struct otx2_hw *hw = &pf->hw;
957 	struct msg_req *req;
958 	char *irq_name;
959 	int err;
960 
961 	/* Register mailbox interrupt handler */
962 	irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE];
963 	snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox");
964 	err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX),
965 			  otx2_pfaf_mbox_intr_handler, 0, irq_name, pf);
966 	if (err) {
967 		dev_err(pf->dev,
968 			"RVUPF: IRQ registration failed for PFAF mbox irq\n");
969 		return err;
970 	}
971 
972 	/* Enable mailbox interrupt for msgs coming from AF.
973 	 * First clear to avoid spurious interrupts, if any.
974 	 */
975 	otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
976 	otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));
977 
978 	if (!probe_af)
979 		return 0;
980 
981 	/* Check mailbox communication with AF */
982 	req = otx2_mbox_alloc_msg_ready(&pf->mbox);
983 	if (!req) {
984 		otx2_disable_mbox_intr(pf);
985 		return -ENOMEM;
986 	}
987 	err = otx2_sync_mbox_msg(&pf->mbox);
988 	if (err) {
989 		dev_warn(pf->dev,
990 			 "AF not responding to mailbox, deferring probe\n");
991 		otx2_disable_mbox_intr(pf);
992 		return -EPROBE_DEFER;
993 	}
994 
995 	return 0;
996 }
997 
998 static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
999 {
1000 	struct mbox *mbox = &pf->mbox;
1001 
1002 	if (pf->mbox_wq) {
1003 		destroy_workqueue(pf->mbox_wq);
1004 		pf->mbox_wq = NULL;
1005 	}
1006 
1007 	if (mbox->mbox.hwbase)
1008 		iounmap((void __iomem *)mbox->mbox.hwbase);
1009 
1010 	otx2_mbox_destroy(&mbox->mbox);
1011 	otx2_mbox_destroy(&mbox->mbox_up);
1012 }
1013 
1014 static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
1015 {
1016 	struct mbox *mbox = &pf->mbox;
1017 	void __iomem *hwbase;
1018 	int err;
1019 
1020 	mbox->pfvf = pf;
1021 	pf->mbox_wq = alloc_workqueue("otx2_pfaf_mailbox",
1022 				      WQ_UNBOUND | WQ_HIGHPRI |
1023 				      WQ_MEM_RECLAIM, 1);
1024 	if (!pf->mbox_wq)
1025 		return -ENOMEM;
1026 
1027 	/* Mailbox is a reserved memory (in RAM) region shared between
1028 	 * the admin function (i.e. AF) and this PF; it shouldn't be mapped
1029 	 * as device memory, so that unaligned accesses are allowed.
1030 	 */
1031 	hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM),
1032 			    pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM));
1033 	if (!hwbase) {
1034 		dev_err(pf->dev, "Unable to map PFAF mailbox region\n");
1035 		err = -ENOMEM;
1036 		goto exit;
1037 	}
1038 
1039 	err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
1040 			     MBOX_DIR_PFAF, 1);
1041 	if (err)
1042 		goto exit;
1043 
1044 	err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
1045 			     MBOX_DIR_PFAF_UP, 1);
1046 	if (err)
1047 		goto exit;
1048 
1049 	err = otx2_mbox_bbuf_init(mbox, pf->pdev);
1050 	if (err)
1051 		goto exit;
1052 
1053 	INIT_WORK(&mbox->mbox_wrk, otx2_pfaf_mbox_handler);
1054 	INIT_WORK(&mbox->mbox_up_wrk, otx2_pfaf_mbox_up_handler);
1055 	mutex_init(&mbox->lock);
1056 
1057 	return 0;
1058 exit:
1059 	otx2_pfaf_mbox_destroy(pf);
1060 	return err;
1061 }
1062 
1063 static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable)
1064 {
1065 	struct msg_req *msg;
1066 	int err;
1067 
1068 	mutex_lock(&pf->mbox.lock);
1069 	if (enable)
1070 		msg = otx2_mbox_alloc_msg_cgx_start_linkevents(&pf->mbox);
1071 	else
1072 		msg = otx2_mbox_alloc_msg_cgx_stop_linkevents(&pf->mbox);
1073 
1074 	if (!msg) {
1075 		mutex_unlock(&pf->mbox.lock);
1076 		return -ENOMEM;
1077 	}
1078 
1079 	err = otx2_sync_mbox_msg(&pf->mbox);
1080 	mutex_unlock(&pf->mbox.lock);
1081 	return err;
1082 }
1083 
1084 static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
1085 {
1086 	struct msg_req *msg;
1087 	int err;
1088 
1089 	mutex_lock(&pf->mbox.lock);
1090 	if (enable)
1091 		msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
1092 	else
1093 		msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox);
1094 
1095 	if (!msg) {
1096 		mutex_unlock(&pf->mbox.lock);
1097 		return -ENOMEM;
1098 	}
1099 
1100 	err = otx2_sync_mbox_msg(&pf->mbox);
1101 	mutex_unlock(&pf->mbox.lock);
1102 	return err;
1103 }
1104 
1105 int otx2_set_real_num_queues(struct net_device *netdev,
1106 			     int tx_queues, int rx_queues)
1107 {
1108 	int err;
1109 
1110 	err = netif_set_real_num_tx_queues(netdev, tx_queues);
1111 	if (err) {
1112 		netdev_err(netdev,
1113 			   "Failed to set no of Tx queues: %d\n", tx_queues);
1114 		return err;
1115 	}
1116 
1117 	err = netif_set_real_num_rx_queues(netdev, rx_queues);
1118 	if (err)
1119 		netdev_err(netdev,
1120 			   "Failed to set no of Rx queues: %d\n", rx_queues);
1121 	return err;
1122 }
1123 EXPORT_SYMBOL(otx2_set_real_num_queues);
1124 
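/* Queue error interrupt handler: read CQ and SQ error status via
 * NIX_LF_CQ_OP_INT/NIX_LF_SQ_OP_INT, log any reported errors and schedule
 * the reset task to recover the interface.
 */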
1125 static irqreturn_t otx2_q_intr_handler(int irq, void *data)
1126 {
1127 	struct otx2_nic *pf = data;
1128 	u64 val, *ptr;
1129 	u64 qidx = 0;
1130 
1131 	/* CQ */
1132 	for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {
1133 		ptr = otx2_get_regaddr(pf, NIX_LF_CQ_OP_INT);
1134 		val = otx2_atomic64_add((qidx << 44), ptr);
1135 
1136 		otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) |
1137 			     (val & NIX_CQERRINT_BITS));
1138 		if (!(val & (NIX_CQERRINT_BITS | BIT_ULL(42))))
1139 			continue;
1140 
1141 		if (val & BIT_ULL(42)) {
1142 			netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
1143 				   qidx, otx2_read64(pf, NIX_LF_ERR_INT));
1144 		} else {
1145 			if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
1146 				netdev_err(pf->netdev, "CQ%lld: Doorbell error",
1147 					   qidx);
1148 			if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
1149 				netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM",
1150 					   qidx);
1151 		}
1152 
1153 		schedule_work(&pf->reset_task);
1154 	}
1155 
1156 	/* SQ */
1157 	for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
1158 		ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
1159 		val = otx2_atomic64_add((qidx << 44), ptr);
1160 		otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
1161 			     (val & NIX_SQINT_BITS));
1162 
1163 		if (!(val & (NIX_SQINT_BITS | BIT_ULL(42))))
1164 			continue;
1165 
1166 		if (val & BIT_ULL(42)) {
1167 			netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
1168 				   qidx, otx2_read64(pf, NIX_LF_ERR_INT));
1169 		} else {
1170 			if (val & BIT_ULL(NIX_SQINT_LMT_ERR)) {
1171 				netdev_err(pf->netdev, "SQ%lld: LMT store error NIX_LF_SQ_OP_ERR_DBG:0x%llx",
1172 					   qidx,
1173 					   otx2_read64(pf,
1174 						       NIX_LF_SQ_OP_ERR_DBG));
1175 				otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG,
1176 					     BIT_ULL(44));
1177 			}
1178 			if (val & BIT_ULL(NIX_SQINT_MNQ_ERR)) {
1179 				netdev_err(pf->netdev, "SQ%lld: Meta-descriptor enqueue error NIX_LF_MNQ_ERR_DGB:0x%llx\n",
1180 					   qidx,
1181 					   otx2_read64(pf, NIX_LF_MNQ_ERR_DBG));
1182 				otx2_write64(pf, NIX_LF_MNQ_ERR_DBG,
1183 					     BIT_ULL(44));
1184 			}
1185 			if (val & BIT_ULL(NIX_SQINT_SEND_ERR)) {
1186 				netdev_err(pf->netdev, "SQ%lld: Send error, NIX_LF_SEND_ERR_DBG 0x%llx",
1187 					   qidx,
1188 					   otx2_read64(pf,
1189 						       NIX_LF_SEND_ERR_DBG));
1190 				otx2_write64(pf, NIX_LF_SEND_ERR_DBG,
1191 					     BIT_ULL(44));
1192 			}
1193 			if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
1194 				netdev_err(pf->netdev, "SQ%lld: SQB allocation failed",
1195 					   qidx);
1196 		}
1197 
1198 		schedule_work(&pf->reset_task);
1199 	}
1200 
1201 	return IRQ_HANDLED;
1202 }
1203 
1204 static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
1205 {
1206 	struct otx2_cq_poll *cq_poll = (struct otx2_cq_poll *)cq_irq;
1207 	struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev;
1208 	int qidx = cq_poll->cint_idx;
1209 
1210 	/* Disable interrupts.
1211 	 *
1212 	 * Completion interrupts behave like level-triggered interrupts,
1213 	 * and hence have to be cleared only after they are serviced.
1214 	 */
1215 	otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
1216 
1217 	/* Schedule NAPI */
1218 	napi_schedule_irqoff(&cq_poll->napi);
1219 
1220 	return IRQ_HANDLED;
1221 }
1222 
1223 static void otx2_disable_napi(struct otx2_nic *pf)
1224 {
1225 	struct otx2_qset *qset = &pf->qset;
1226 	struct otx2_cq_poll *cq_poll;
1227 	int qidx;
1228 
1229 	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
1230 		cq_poll = &qset->napi[qidx];
1231 		napi_disable(&cq_poll->napi);
1232 		netif_napi_del(&cq_poll->napi);
1233 	}
1234 }
1235 
1236 static void otx2_free_cq_res(struct otx2_nic *pf)
1237 {
1238 	struct otx2_qset *qset = &pf->qset;
1239 	struct otx2_cq_queue *cq;
1240 	int qidx;
1241 
1242 	/* Disable CQs */
1243 	otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_CQ, false);
1244 	for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
1245 		cq = &qset->cq[qidx];
1246 		qmem_free(pf->dev, cq->cqe);
1247 	}
1248 }
1249 
1250 static void otx2_free_sq_res(struct otx2_nic *pf)
1251 {
1252 	struct otx2_qset *qset = &pf->qset;
1253 	struct otx2_snd_queue *sq;
1254 	int qidx;
1255 
1256 	/* Disable SQs */
1257 	otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
1258 	/* Free SQB pointers */
1259 	otx2_sq_free_sqbs(pf);
1260 	for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
1261 		sq = &qset->sq[qidx];
1262 		qmem_free(pf->dev, sq->sqe);
1263 		qmem_free(pf->dev, sq->tso_hdrs);
1264 		kfree(sq->sg);
1265 		kfree(sq->sqb_ptrs);
1266 	}
1267 }
1268 
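/* Allocate and configure the NPA/NIX LF resources needed for packet I/O:
 * aura/pool contexts for RQ and SQ buffers, transmit schedulers and the
 * NIX RQ/SQ/CQ queue contexts. Everything is rolled back on failure.
 */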
1269 static int otx2_init_hw_resources(struct otx2_nic *pf)
1270 {
1271 	struct mbox *mbox = &pf->mbox;
1272 	struct otx2_hw *hw = &pf->hw;
1273 	struct msg_req *req;
1274 	int err = 0, lvl;
1275 
1276 	/* Set required NPA LF's pool counts.
1277 	 * Auras and Pools are used in a 1:1 mapping,
1278 	 * so aura count = pool count.
1279 	 */
1280 	hw->rqpool_cnt = hw->rx_queues;
1281 	hw->sqpool_cnt = hw->tx_queues;
1282 	hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
1283 
1284 	/* Get the size of receive buffers to allocate */
1285 	pf->rbsize = RCV_FRAG_LEN(pf->netdev->mtu + OTX2_ETH_HLEN);
1286 
1287 	mutex_lock(&mbox->lock);
1288 	/* NPA init */
1289 	err = otx2_config_npa(pf);
1290 	if (err)
1291 		goto exit;
1292 
1293 	/* NIX init */
1294 	err = otx2_config_nix(pf);
1295 	if (err)
1296 		goto err_free_npa_lf;
1297 
1298 	/* Enable backpressure */
1299 	otx2_nix_config_bp(pf, true);
1300 
1301 	/* Init Auras and pools used by NIX RQ, for free buffer ptrs */
1302 	err = otx2_rq_aura_pool_init(pf);
1303 	if (err) {
1304 		mutex_unlock(&mbox->lock);
1305 		goto err_free_nix_lf;
1306 	}
1307 	/* Init Auras and pools used by NIX SQ, for queueing SQEs */
1308 	err = otx2_sq_aura_pool_init(pf);
1309 	if (err) {
1310 		mutex_unlock(&mbox->lock);
1311 		goto err_free_rq_ptrs;
1312 	}
1313 
1314 	err = otx2_txsch_alloc(pf);
1315 	if (err) {
1316 		mutex_unlock(&mbox->lock);
1317 		goto err_free_sq_ptrs;
1318 	}
1319 
1320 	err = otx2_config_nix_queues(pf);
1321 	if (err) {
1322 		mutex_unlock(&mbox->lock);
1323 		goto err_free_txsch;
1324 	}
1325 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1326 		err = otx2_txschq_config(pf, lvl);
1327 		if (err) {
1328 			mutex_unlock(&mbox->lock);
1329 			goto err_free_nix_queues;
1330 		}
1331 	}
1332 	mutex_unlock(&mbox->lock);
1333 	return err;
1334 
1335 err_free_nix_queues:
1336 	otx2_free_sq_res(pf);
1337 	otx2_free_cq_res(pf);
1338 	otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
1339 err_free_txsch:
1340 	if (otx2_txschq_stop(pf))
1341 		dev_err(pf->dev, "%s failed to stop TX schedulers\n", __func__);
1342 err_free_sq_ptrs:
1343 	otx2_sq_free_sqbs(pf);
1344 err_free_rq_ptrs:
1345 	otx2_free_aura_ptr(pf, AURA_NIX_RQ);
1346 	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
1347 	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
1348 	otx2_aura_pool_free(pf);
1349 err_free_nix_lf:
1350 	mutex_lock(&mbox->lock);
1351 	req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
1352 	if (req) {
1353 		if (otx2_sync_mbox_msg(mbox))
1354 			dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
1355 	}
1356 err_free_npa_lf:
1357 	/* Reset NPA LF */
1358 	req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
1359 	if (req) {
1360 		if (otx2_sync_mbox_msg(mbox))
1361 			dev_err(pf->dev, "%s failed to free npalf\n", __func__);
1362 	}
1363 exit:
1364 	mutex_unlock(&mbox->lock);
1365 	return err;
1366 }
1367 
1368 static void otx2_free_hw_resources(struct otx2_nic *pf)
1369 {
1370 	struct otx2_qset *qset = &pf->qset;
1371 	struct mbox *mbox = &pf->mbox;
1372 	struct otx2_cq_queue *cq;
1373 	struct msg_req *req;
1374 	int qidx, err;
1375 
1376 	/* Ensure all SQE are processed */
1377 	otx2_sqb_flush(pf);
1378 
1379 	/* Stop transmission */
1380 	err = otx2_txschq_stop(pf);
1381 	if (err)
1382 		dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n");
1383 
1384 	mutex_lock(&mbox->lock);
1385 	/* Disable backpressure */
1386 	if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK))
1387 		otx2_nix_config_bp(pf, false);
1388 	mutex_unlock(&mbox->lock);
1389 
1390 	/* Disable RQs */
1391 	otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
1392 
1393 	/* Dequeue all CQEs */
1394 	for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
1395 		cq = &qset->cq[qidx];
1396 		if (cq->cq_type == CQ_RX)
1397 			otx2_cleanup_rx_cqes(pf, cq);
1398 		else
1399 			otx2_cleanup_tx_cqes(pf, cq);
1400 	}
1401 
1402 	otx2_free_sq_res(pf);
1403 
1404 	/* Free RQ buffer pointers */
1405 	otx2_free_aura_ptr(pf, AURA_NIX_RQ);
1406 
1407 	otx2_free_cq_res(pf);
1408 
1409 	mutex_lock(&mbox->lock);
1410 	/* Reset NIX LF */
1411 	req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
1412 	if (req) {
1413 		if (otx2_sync_mbox_msg(mbox))
1414 			dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
1415 	}
1416 	mutex_unlock(&mbox->lock);
1417 
1418 	/* Disable NPA Pool and Aura hw context */
1419 	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
1420 	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
1421 	otx2_aura_pool_free(pf);
1422 
1423 	mutex_lock(&mbox->lock);
1424 	/* Reset NPA LF */
1425 	req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
1426 	if (req) {
1427 		if (otx2_sync_mbox_msg(mbox))
1428 			dev_err(pf->dev, "%s failed to free npalf\n", __func__);
1429 	}
1430 	mutex_unlock(&mbox->lock);
1431 }
1432 
1433 int otx2_open(struct net_device *netdev)
1434 {
1435 	struct otx2_nic *pf = netdev_priv(netdev);
1436 	struct otx2_cq_poll *cq_poll = NULL;
1437 	struct otx2_qset *qset = &pf->qset;
1438 	int err = 0, qidx, vec;
1439 	char *irq_name;
1440 
1441 	netif_carrier_off(netdev);
1442 
1443 	pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tx_queues;
1444 	/* RQs and SQs are mapped to different CQs,
1445 	 * so find out max CQ IRQs (i.e. CINTs) needed.
1446 	 */
1447 	pf->hw.cint_cnt = max(pf->hw.rx_queues, pf->hw.tx_queues);
1448 	qset->napi = kcalloc(pf->hw.cint_cnt, sizeof(*cq_poll), GFP_KERNEL);
1449 	if (!qset->napi)
1450 		return -ENOMEM;
1451 
1452 	/* CQ size of RQ */
1453 	qset->rqe_cnt = qset->rqe_cnt ? qset->rqe_cnt : Q_COUNT(Q_SIZE_256);
1454 	/* CQ size of SQ */
1455 	qset->sqe_cnt = qset->sqe_cnt ? qset->sqe_cnt : Q_COUNT(Q_SIZE_4K);
1456 
1457 	err = -ENOMEM;
1458 	qset->cq = kcalloc(pf->qset.cq_cnt,
1459 			   sizeof(struct otx2_cq_queue), GFP_KERNEL);
1460 	if (!qset->cq)
1461 		goto err_free_mem;
1462 
1463 	qset->sq = kcalloc(pf->hw.tx_queues,
1464 			   sizeof(struct otx2_snd_queue), GFP_KERNEL);
1465 	if (!qset->sq)
1466 		goto err_free_mem;
1467 
1468 	qset->rq = kcalloc(pf->hw.rx_queues,
1469 			   sizeof(struct otx2_rcv_queue), GFP_KERNEL);
1470 	if (!qset->rq)
1471 		goto err_free_mem;
1472 
1473 	err = otx2_init_hw_resources(pf);
1474 	if (err)
1475 		goto err_free_mem;
1476 
1477 	/* Register NAPI handler */
1478 	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
1479 		cq_poll = &qset->napi[qidx];
1480 		cq_poll->cint_idx = qidx;
1481 		/* RQ0 & SQ0 are mapped to CINT0 and so on.
1482 		 * 'cq_ids[0]' points to the RQ's CQ and
1483 		 * 'cq_ids[1]' points to the SQ's CQ.
1484 		 */
1485 		cq_poll->cq_ids[CQ_RX] =
1486 			(qidx <  pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
1487 		cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
1488 				      qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
1489 		cq_poll->dev = (void *)pf;
1490 		netif_napi_add(netdev, &cq_poll->napi,
1491 			       otx2_napi_handler, NAPI_POLL_WEIGHT);
1492 		napi_enable(&cq_poll->napi);
1493 	}
1494 
1495 	/* Set maximum frame size allowed in HW */
1496 	err = otx2_hw_set_mtu(pf, netdev->mtu);
1497 	if (err)
1498 		goto err_disable_napi;
1499 
1500 	/* Initialize RSS */
1501 	err = otx2_rss_init(pf);
1502 	if (err)
1503 		goto err_disable_napi;
1504 
1505 	/* Register Queue IRQ handlers */
1506 	vec = pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START;
1507 	irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
1508 
1509 	snprintf(irq_name, NAME_SIZE, "%s-qerr", pf->netdev->name);
1510 
1511 	err = request_irq(pci_irq_vector(pf->pdev, vec),
1512 			  otx2_q_intr_handler, 0, irq_name, pf);
1513 	if (err) {
1514 		dev_err(pf->dev,
1515 			"RVUPF%d: IRQ registration failed for QERR\n",
1516 			rvu_get_pf(pf->pcifunc));
1517 		goto err_disable_napi;
1518 	}
1519 
1520 	/* Enable QINT IRQ */
1521 	otx2_write64(pf, NIX_LF_QINTX_ENA_W1S(0), BIT_ULL(0));
1522 
1523 	/* Register CQ IRQ handlers */
1524 	vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
1525 	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
1526 		irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
1527 
1528 		snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", pf->netdev->name,
1529 			 qidx);
1530 
1531 		err = request_irq(pci_irq_vector(pf->pdev, vec),
1532 				  otx2_cq_intr_handler, 0, irq_name,
1533 				  &qset->napi[qidx]);
1534 		if (err) {
1535 			dev_err(pf->dev,
1536 				"RVUPF%d: IRQ registration failed for CQ%d\n",
1537 				rvu_get_pf(pf->pcifunc), qidx);
1538 			goto err_free_cints;
1539 		}
1540 		vec++;
1541 
1542 		otx2_config_irq_coalescing(pf, qidx);
1543 
1544 		/* Enable CQ IRQ */
1545 		otx2_write64(pf, NIX_LF_CINTX_INT(qidx), BIT_ULL(0));
1546 		otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));
1547 	}
1548 
1549 	otx2_set_cints_affinity(pf);
1550 
1551 	pf->flags &= ~OTX2_FLAG_INTF_DOWN;
1552 	/* 'intf_down' may be checked on any cpu */
1553 	smp_wmb();
1554 
1555 	/* If we have already received a link status notification, handle it */
1556 	if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
1557 		otx2_handle_link_event(pf);
1558 
1559 	/* Restore pause frame settings */
1560 	otx2_config_pause_frm(pf);
1561 
1562 	err = otx2_rxtx_enable(pf, true);
1563 	if (err)
1564 		goto err_tx_stop_queues;
1565 
1566 	return 0;
1567 
1568 err_tx_stop_queues:
1569 	netif_tx_stop_all_queues(netdev);
1570 	netif_carrier_off(netdev);
1571 err_free_cints:
1572 	otx2_free_cints(pf, qidx);
1573 	vec = pci_irq_vector(pf->pdev,
1574 			     pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
1575 	otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
1576 	synchronize_irq(vec);
1577 	free_irq(vec, pf);
1578 err_disable_napi:
1579 	otx2_disable_napi(pf);
1580 	otx2_free_hw_resources(pf);
1581 err_free_mem:
1582 	kfree(qset->sq);
1583 	kfree(qset->cq);
1584 	kfree(qset->rq);
1585 	kfree(qset->napi);
1586 	return err;
1587 }
1588 EXPORT_SYMBOL(otx2_open);
1589 
1590 int otx2_stop(struct net_device *netdev)
1591 {
1592 	struct otx2_nic *pf = netdev_priv(netdev);
1593 	struct otx2_cq_poll *cq_poll = NULL;
1594 	struct otx2_qset *qset = &pf->qset;
1595 	int qidx, vec, wrk;
1596 
1597 	netif_carrier_off(netdev);
1598 	netif_tx_stop_all_queues(netdev);
1599 
1600 	pf->flags |= OTX2_FLAG_INTF_DOWN;
1601 	/* 'intf_down' may be checked on any cpu */
1602 	smp_wmb();
1603 
1604 	/* First stop packet Rx/Tx */
1605 	otx2_rxtx_enable(pf, false);
1606 
1607 	/* Cleanup Queue IRQ */
1608 	vec = pci_irq_vector(pf->pdev,
1609 			     pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
1610 	otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
1611 	synchronize_irq(vec);
1612 	free_irq(vec, pf);
1613 
1614 	/* Cleanup CQ NAPI and IRQ */
1615 	vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
1616 	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
1617 		/* Disable interrupt */
1618 		otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
1619 
1620 		synchronize_irq(pci_irq_vector(pf->pdev, vec));
1621 
1622 		cq_poll = &qset->napi[qidx];
1623 		napi_synchronize(&cq_poll->napi);
1624 		vec++;
1625 	}
1626 
1627 	netif_tx_disable(netdev);
1628 
1629 	otx2_free_hw_resources(pf);
1630 	otx2_free_cints(pf, pf->hw.cint_cnt);
1631 	otx2_disable_napi(pf);
1632 
1633 	for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
1634 		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
1635 
1636 	for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
1637 		cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
1638 	devm_kfree(pf->dev, pf->refill_wrk);
1639 
1640 	kfree(qset->sq);
1641 	kfree(qset->cq);
1642 	kfree(qset->rq);
1643 	kfree(qset->napi);
1644 	/* Do not clear RQ/SQ ringsize settings */
1645 	memset((void *)qset + offsetof(struct otx2_qset, sqe_cnt), 0,
1646 	       sizeof(*qset) - offsetof(struct otx2_qset, sqe_cnt));
1647 	return 0;
1648 }
1649 EXPORT_SYMBOL(otx2_stop);
1650 
1651 static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
1652 {
1653 	struct otx2_nic *pf = netdev_priv(netdev);
1654 	int qidx = skb_get_queue_mapping(skb);
1655 	struct otx2_snd_queue *sq;
1656 	struct netdev_queue *txq;
1657 
1658 	/* Check for minimum and maximum packet length */
1659 	if (skb->len <= ETH_HLEN ||
1660 	    (!skb_shinfo(skb)->gso_size && skb->len > pf->max_frs)) {
1661 		dev_kfree_skb(skb);
1662 		return NETDEV_TX_OK;
1663 	}
1664 
1665 	sq = &pf->qset.sq[qidx];
1666 	txq = netdev_get_tx_queue(netdev, qidx);
1667 
1668 	if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
1669 		netif_tx_stop_queue(txq);
1670 
1671 		/* Check again, in case SQBs got freed up */
1672 		smp_mb();
1673 		if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
1674 							> sq->sqe_thresh)
1675 			netif_tx_wake_queue(txq);
1676 
1677 		return NETDEV_TX_BUSY;
1678 	}
1679 
1680 	return NETDEV_TX_OK;
1681 }
1682 
1683 static void otx2_set_rx_mode(struct net_device *netdev)
1684 {
1685 	struct otx2_nic *pf = netdev_priv(netdev);
1686 
1687 	queue_work(pf->otx2_wq, &pf->rx_mode_work);
1688 }
1689 
1690 static void otx2_do_set_rx_mode(struct work_struct *work)
1691 {
1692 	struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);
1693 	struct net_device *netdev = pf->netdev;
1694 	struct nix_rx_mode *req;
1695 
1696 	if (!(netdev->flags & IFF_UP))
1697 		return;
1698 
1699 	mutex_lock(&pf->mbox.lock);
1700 	req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
1701 	if (!req) {
1702 		mutex_unlock(&pf->mbox.lock);
1703 		return;
1704 	}
1705 
1706 	req->mode = NIX_RX_MODE_UCAST;
1707 
1708 	/* We don't support MAC address filtering yet */
1709 	if (netdev->flags & IFF_PROMISC)
1710 		req->mode |= NIX_RX_MODE_PROMISC;
1711 	else if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
1712 		req->mode |= NIX_RX_MODE_ALLMULTI;
1713 
1714 	otx2_sync_mbox_msg(&pf->mbox);
1715 	mutex_unlock(&pf->mbox.lock);
1716 }
1717 
1718 static int otx2_set_features(struct net_device *netdev,
1719 			     netdev_features_t features)
1720 {
1721 	netdev_features_t changed = features ^ netdev->features;
1722 	struct otx2_nic *pf = netdev_priv(netdev);
1723 
1724 	if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
1725 		return otx2_cgx_config_loopback(pf,
1726 						features & NETIF_F_LOOPBACK);
1727 	return 0;
1728 }
1729 
1730 static void otx2_reset_task(struct work_struct *work)
1731 {
1732 	struct otx2_nic *pf = container_of(work, struct otx2_nic, reset_task);
1733 
1734 	if (!netif_running(pf->netdev))
1735 		return;
1736 
1737 	rtnl_lock();
1738 	otx2_stop(pf->netdev);
1739 	pf->reset_count++;
1740 	otx2_open(pf->netdev);
1741 	netif_trans_update(pf->netdev);
1742 	rtnl_unlock();
1743 }
1744 
1745 static const struct net_device_ops otx2_netdev_ops = {
1746 	.ndo_open		= otx2_open,
1747 	.ndo_stop		= otx2_stop,
1748 	.ndo_start_xmit		= otx2_xmit,
1749 	.ndo_set_mac_address    = otx2_set_mac_address,
1750 	.ndo_change_mtu		= otx2_change_mtu,
1751 	.ndo_set_rx_mode	= otx2_set_rx_mode,
1752 	.ndo_set_features	= otx2_set_features,
1753 	.ndo_tx_timeout		= otx2_tx_timeout,
1754 	.ndo_get_stats64	= otx2_get_stats64,
1755 };
1756 
1757 static int otx2_wq_init(struct otx2_nic *pf)
1758 {
1759 	pf->otx2_wq = create_singlethread_workqueue("otx2_wq");
1760 	if (!pf->otx2_wq)
1761 		return -ENOMEM;
1762 
1763 	INIT_WORK(&pf->rx_mode_work, otx2_do_set_rx_mode);
1764 	INIT_WORK(&pf->reset_task, otx2_reset_task);
1765 	return 0;
1766 }
1767 
1768 static int otx2_check_pf_usable(struct otx2_nic *nic)
1769 {
1770 	u64 rev;
1771 
1772 	rev = otx2_read64(nic, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
1773 	rev = (rev >> 12) & 0xFF;
1774 	/* Check if AF has set up the revision for the RVUM block,
1775 	 * otherwise this driver probe should be deferred
1776 	 * until the AF driver comes up.
1777 	 */
1778 	if (!rev) {
1779 		dev_warn(nic->dev,
1780 			 "AF is not initialized, deferring probe\n");
1781 		return -EPROBE_DEFER;
1782 	}
1783 	return 0;
1784 }
1785 
1786 static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
1787 {
1788 	struct otx2_hw *hw = &pf->hw;
1789 	int num_vec, err;
1790 
1791 	/* NPA interrupts are not registered, so alloc only
1792 	 * up to the NIX vector offset.
1793 	 */
1794 	num_vec = hw->nix_msixoff;
1795 	num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;
1796 
1797 	otx2_disable_mbox_intr(pf);
1798 	pci_free_irq_vectors(hw->pdev);
1799 	err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
1800 	if (err < 0) {
1801 		dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n",
1802 			__func__, num_vec);
1803 		return err;
1804 	}
1805 
1806 	return otx2_register_mbox_intr(pf, false);
1807 }
1808 
1809 static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1810 {
1811 	struct device *dev = &pdev->dev;
1812 	struct net_device *netdev;
1813 	struct otx2_nic *pf;
1814 	struct otx2_hw *hw;
1815 	int err, qcount;
1816 	int num_vec;
1817 
1818 	err = pcim_enable_device(pdev);
1819 	if (err) {
1820 		dev_err(dev, "Failed to enable PCI device\n");
1821 		return err;
1822 	}
1823 
1824 	err = pci_request_regions(pdev, DRV_NAME);
1825 	if (err) {
1826 		dev_err(dev, "PCI request regions failed 0x%x\n", err);
1827 		return err;
1828 	}
1829 
1830 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
1831 	if (err) {
1832 		dev_err(dev, "DMA mask config failed, abort\n");
1833 		goto err_release_regions;
1834 	}
1835 
1836 	pci_set_master(pdev);
1837 
1838 	/* Set number of queues */
1839 	qcount = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT);
1840 
1841 	netdev = alloc_etherdev_mqs(sizeof(*pf), qcount, qcount);
1842 	if (!netdev) {
1843 		err = -ENOMEM;
1844 		goto err_release_regions;
1845 	}
1846 
1847 	pci_set_drvdata(pdev, netdev);
1848 	SET_NETDEV_DEV(netdev, &pdev->dev);
1849 	pf = netdev_priv(netdev);
1850 	pf->netdev = netdev;
1851 	pf->pdev = pdev;
1852 	pf->dev = dev;
1853 	pf->total_vfs = pci_sriov_get_totalvfs(pdev);
1854 	pf->flags |= OTX2_FLAG_INTF_DOWN;
1855 
1856 	hw = &pf->hw;
1857 	hw->pdev = pdev;
1858 	hw->rx_queues = qcount;
1859 	hw->tx_queues = qcount;
1860 	hw->max_queues = qcount;
1861 
1862 	num_vec = pci_msix_vec_count(pdev);
1863 	hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
1864 					  GFP_KERNEL);
1865 	if (!hw->irq_name) {
1866 		err = -ENOMEM;
1867 		goto err_free_netdev;
1868 	}
1869 
1870 	hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
1871 					 sizeof(cpumask_var_t), GFP_KERNEL);
1872 	if (!hw->affinity_mask) {
1873 		err = -ENOMEM;
1874 		goto err_free_netdev;
1875 	}
1876 
1877 	/* Map CSRs */
1878 	pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
1879 	if (!pf->reg_base) {
1880 		dev_err(dev, "Unable to map physical function CSRs, aborting\n");
1881 		err = -ENOMEM;
1882 		goto err_free_netdev;
1883 	}
1884 
1885 	err = otx2_check_pf_usable(pf);
1886 	if (err)
1887 		goto err_free_netdev;
1888 
1889 	err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
1890 				    RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
1891 	if (err < 0) {
1892 		dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
1893 			__func__, num_vec);
1894 		goto err_free_netdev;
1895 	}
1896 
1897 	/* Init PF <=> AF mailbox stuff */
1898 	err = otx2_pfaf_mbox_init(pf);
1899 	if (err)
1900 		goto err_free_irq_vectors;
1901 
1902 	/* Register mailbox interrupt */
1903 	err = otx2_register_mbox_intr(pf, true);
1904 	if (err)
1905 		goto err_mbox_destroy;
1906 
1907 	/* Request AF to attach NPA and NIX LFs to this PF.
1908 	 * NIX and NPA LFs are needed for this PF to function as a NIC.
1909 	 */
1910 	err = otx2_attach_npa_nix(pf);
1911 	if (err)
1912 		goto err_disable_mbox_intr;
1913 
1914 	err = otx2_realloc_msix_vectors(pf);
1915 	if (err)
1916 		goto err_detach_rsrc;
1917 
1918 	err = otx2_set_real_num_queues(netdev, hw->tx_queues, hw->rx_queues);
1919 	if (err)
1920 		goto err_detach_rsrc;
1921 
1922 	otx2_setup_dev_hw_settings(pf);
1923 
	/* Assign the default MAC address provided by AF */
1925 	otx2_get_mac_from_af(netdev);
1926 
	/* NPA's pool is a stack to which SW frees buffer pointers via Aura.
	 * HW allocates a buffer pointer from the stack and uses it for
	 * DMA'ing an ingress packet. In some scenarios HW can free allocated
	 * buffer pointers back to the pool. This makes it impossible for SW
	 * to maintain a parallel list where the physical addresses of the
	 * buffer pointers (IOVAs) given to HW could be saved for later
	 * reference.
	 *
	 * So the only way to translate an Rx packet's buffer address is to
	 * use the IOMMU's iova_to_phys() handler, which resolves the address
	 * by walking the translation tables.
	 */
1938 	pf->iommu_domain = iommu_get_domain_for_dev(dev);
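	/* This can be NULL when no IOMMU translation is in use, in which
	 * case the IOVA handed to HW is expected to already be the physical
	 * address.
	 */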
1939 
1940 	netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
1941 			       NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
1942 			       NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6);
1943 	netdev->features |= netdev->hw_features;
1944 
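	/* Loopback and RX-all are only advertised as changeable features:
	 * they stay disabled by default and can be toggled via ethtool.
	 */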
1945 	netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
1946 
1947 	netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
1948 	netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
1949 
1950 	netdev->netdev_ops = &otx2_netdev_ops;
1951 
1952 	/* MTU range: 64 - 9190 */
1953 	netdev->min_mtu = OTX2_MIN_MTU;
1954 	netdev->max_mtu = OTX2_MAX_MTU;
1955 
1956 	err = register_netdev(netdev);
1957 	if (err) {
1958 		dev_err(dev, "Failed to register netdevice\n");
1959 		goto err_detach_rsrc;
1960 	}
1961 
1962 	err = otx2_wq_init(pf);
1963 	if (err)
1964 		goto err_unreg_netdev;
1965 
1966 	otx2_set_ethtool_ops(netdev);
1967 
1968 	/* Enable link notifications */
1969 	otx2_cgx_config_linkevents(pf, true);
1970 
1971 	/* Enable pause frames by default */
1972 	pf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED;
1973 	pf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED;
1974 
1975 	return 0;
1976 
1977 err_unreg_netdev:
1978 	unregister_netdev(netdev);
1979 err_detach_rsrc:
1980 	otx2_detach_resources(&pf->mbox);
1981 err_disable_mbox_intr:
1982 	otx2_disable_mbox_intr(pf);
1983 err_mbox_destroy:
1984 	otx2_pfaf_mbox_destroy(pf);
1985 err_free_irq_vectors:
1986 	pci_free_irq_vectors(hw->pdev);
1987 err_free_netdev:
1988 	pci_set_drvdata(pdev, NULL);
1989 	free_netdev(netdev);
1990 err_release_regions:
1991 	pci_release_regions(pdev);
1992 	return err;
1993 }
1994 
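/* Deferred work that forwards the PF's cached link state (pf->linfo) to a
 * VF as a CGX_LINK_EVENT message on the PF->VF mailbox up-channel.
 */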
1995 static void otx2_vf_link_event_task(struct work_struct *work)
1996 {
1997 	struct otx2_vf_config *config;
1998 	struct cgx_link_info_msg *req;
1999 	struct mbox_msghdr *msghdr;
2000 	struct otx2_nic *pf;
2001 	int vf_idx;
2002 
2003 	config = container_of(work, struct otx2_vf_config,
2004 			      link_event_work.work);
2005 	vf_idx = config - config->pf->vf_configs;
2006 	pf = config->pf;
2007 
2008 	msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
2009 					 sizeof(*req), sizeof(struct msg_rsp));
2010 	if (!msghdr) {
2011 		dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx);
2012 		return;
2013 	}
2014 
2015 	req = (struct cgx_link_info_msg *)msghdr;
2016 	req->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
2017 	req->hdr.sig = OTX2_MBOX_REQ_SIG;
2018 	memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));
2019 
2020 	otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
2021 }
2022 
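/* Bring up everything a VF depends on before enabling SR-IOV: the PF <=> VF
 * mailbox and its interrupts, per-VF config state, and the FLR workqueue
 * with its FLR/ME interrupts.
 */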
2023 static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
2024 {
2025 	struct net_device *netdev = pci_get_drvdata(pdev);
2026 	struct otx2_nic *pf = netdev_priv(netdev);
2027 	int ret, i;
2028 
	/* Initialize the PF <=> VF mailbox */
2030 	ret = otx2_pfvf_mbox_init(pf, numvfs);
2031 	if (ret)
2032 		return ret;
2033 
2034 	ret = otx2_register_pfvf_mbox_intr(pf, numvfs);
2035 	if (ret)
2036 		goto free_mbox;
2037 
2038 	pf->vf_configs = kcalloc(numvfs, sizeof(struct otx2_vf_config),
2039 				 GFP_KERNEL);
2040 	if (!pf->vf_configs) {
2041 		ret = -ENOMEM;
2042 		goto free_intr;
2043 	}
2044 
2045 	for (i = 0; i < numvfs; i++) {
2046 		pf->vf_configs[i].pf = pf;
2047 		pf->vf_configs[i].intf_down = true;
2048 		INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
2049 				  otx2_vf_link_event_task);
2050 	}
2051 
2052 	ret = otx2_pf_flr_init(pf, numvfs);
2053 	if (ret)
2054 		goto free_configs;
2055 
2056 	ret = otx2_register_flr_me_intr(pf, numvfs);
2057 	if (ret)
2058 		goto free_flr;
2059 
2060 	ret = pci_enable_sriov(pdev, numvfs);
2061 	if (ret)
2062 		goto free_flr_intr;
2063 
2064 	return numvfs;
2065 free_flr_intr:
2066 	otx2_disable_flr_me_intr(pf);
2067 free_flr:
2068 	otx2_flr_wq_destroy(pf);
2069 free_configs:
2070 	kfree(pf->vf_configs);
2071 free_intr:
2072 	otx2_disable_pfvf_mbox_intr(pf, numvfs);
2073 free_mbox:
2074 	otx2_pfvf_mbox_destroy(pf);
2075 	return ret;
2076 }
2077 
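/* Disable SR-IOV and tear down the per-VF state, FLR handling and the
 * PF <=> VF mailbox set up in otx2_sriov_enable().
 */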
2078 static int otx2_sriov_disable(struct pci_dev *pdev)
2079 {
2080 	struct net_device *netdev = pci_get_drvdata(pdev);
2081 	struct otx2_nic *pf = netdev_priv(netdev);
2082 	int numvfs = pci_num_vf(pdev);
2083 	int i;
2084 
2085 	if (!numvfs)
2086 		return 0;
2087 
2088 	pci_disable_sriov(pdev);
2089 
	for (i = 0; i < numvfs; i++)
2091 		cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
2092 	kfree(pf->vf_configs);
2093 
2094 	otx2_disable_flr_me_intr(pf);
2095 	otx2_flr_wq_destroy(pf);
2096 	otx2_disable_pfvf_mbox_intr(pf, numvfs);
2097 	otx2_pfvf_mbox_destroy(pf);
2098 
2099 	return 0;
2100 }
2101 
2102 static int otx2_sriov_configure(struct pci_dev *pdev, int numvfs)
2103 {
2104 	if (numvfs == 0)
2105 		return otx2_sriov_disable(pdev);
2106 	else
2107 		return otx2_sriov_enable(pdev, numvfs);
2108 }
2109 
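/* Also used as the PCI shutdown handler: quiesces pending reset work and
 * link notifications, then unwinds the probe-time setup.
 */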
2110 static void otx2_remove(struct pci_dev *pdev)
2111 {
2112 	struct net_device *netdev = pci_get_drvdata(pdev);
2113 	struct otx2_nic *pf;
2114 
2115 	if (!netdev)
2116 		return;
2117 
2118 	pf = netdev_priv(netdev);
2119 
2120 	cancel_work_sync(&pf->reset_task);
2121 	/* Disable link notifications */
2122 	otx2_cgx_config_linkevents(pf, false);
2123 
2124 	unregister_netdev(netdev);
2125 	otx2_sriov_disable(pf->pdev);
2126 	if (pf->otx2_wq)
2127 		destroy_workqueue(pf->otx2_wq);
2128 
2129 	otx2_detach_resources(&pf->mbox);
2130 	otx2_disable_mbox_intr(pf);
2131 	otx2_pfaf_mbox_destroy(pf);
2132 	pci_free_irq_vectors(pf->pdev);
2133 	pci_set_drvdata(pdev, NULL);
2134 	free_netdev(netdev);
2135 
2136 	pci_release_regions(pdev);
2137 }
2138 
2139 static struct pci_driver otx2_pf_driver = {
2140 	.name = DRV_NAME,
2141 	.id_table = otx2_pf_id_table,
2142 	.probe = otx2_probe,
2143 	.shutdown = otx2_remove,
2144 	.remove = otx2_remove,
2145 	.sriov_configure = otx2_sriov_configure
2146 };
2147 
2148 static int __init otx2_rvupf_init_module(void)
2149 {
2150 	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
2151 
2152 	return pci_register_driver(&otx2_pf_driver);
2153 }
2154 
2155 static void __exit otx2_rvupf_cleanup_module(void)
2156 {
2157 	pci_unregister_driver(&otx2_pf_driver);
2158 }
2159 
2160 module_init(otx2_rvupf_init_module);
2161 module_exit(otx2_rvupf_cleanup_module);
2162