116547577SSunil Goutham // SPDX-License-Identifier: GPL-2.0
2cb0e3ec4SSunil Goutham /* Marvell RVU Physical Function ethernet driver
316547577SSunil Goutham  *
4cb0e3ec4SSunil Goutham  * Copyright (C) 2020 Marvell.
516547577SSunil Goutham  *
616547577SSunil Goutham  */
716547577SSunil Goutham 
816547577SSunil Goutham #include <linux/module.h>
916547577SSunil Goutham #include <linux/interrupt.h>
1016547577SSunil Goutham #include <linux/pci.h>
1116547577SSunil Goutham #include <linux/etherdevice.h>
1216547577SSunil Goutham #include <linux/of.h>
1316547577SSunil Goutham #include <linux/if_vlan.h>
1416547577SSunil Goutham #include <linux/iommu.h>
1516547577SSunil Goutham #include <net/ip.h>
1606059a1aSGeetha sowjanya #include <linux/bpf.h>
1706059a1aSGeetha sowjanya #include <linux/bpf_trace.h>
1851afe902SRatheesh Kannoth #include <linux/bitfield.h>
19a9ca9f9cSYunsheng Lin #include <net/page_pool/types.h>
2016547577SSunil Goutham 
21caa2da34SSunil Goutham #include "otx2_reg.h"
2216547577SSunil Goutham #include "otx2_common.h"
23caa2da34SSunil Goutham #include "otx2_txrx.h"
24caa2da34SSunil Goutham #include "otx2_struct.h"
25c9c12d33SAleksey Makarov #include "otx2_ptp.h"
264c236d5dSGeetha sowjanya #include "cn10k.h"
27ab6dddd2SSubbaraya Sundeep #include "qos.h"
2831a97460SSubbaraya Sundeep #include <rvu_trace.h>
2916547577SSunil Goutham 
30facede82SSubbaraya Sundeep #define DRV_NAME	"rvu_nicpf"
31facede82SSubbaraya Sundeep #define DRV_STRING	"Marvell RVU NIC Physical Function Driver"
3216547577SSunil Goutham 
3316547577SSunil Goutham /* Supported devices */
3416547577SSunil Goutham static const struct pci_device_id otx2_pf_id_table[] = {
3516547577SSunil Goutham 	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF) },
3616547577SSunil Goutham 	{ 0, }  /* end of table */
3716547577SSunil Goutham };
3816547577SSunil Goutham 
39fc992e33SSunil Goutham MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
4016547577SSunil Goutham MODULE_DESCRIPTION(DRV_STRING);
4116547577SSunil Goutham MODULE_LICENSE("GPL v2");
4216547577SSunil Goutham MODULE_DEVICE_TABLE(pci, otx2_pf_id_table);
4316547577SSunil Goutham 
44b1dc2040SHariprasad Kelam static void otx2_vf_link_event_task(struct work_struct *work);
45b1dc2040SHariprasad Kelam 
465a6d7c9dSSunil Goutham enum {
475a6d7c9dSSunil Goutham 	TYPE_PFAF,
485a6d7c9dSSunil Goutham 	TYPE_PFVF,
495a6d7c9dSSunil Goutham };
505a6d7c9dSSunil Goutham 
51c9c12d33SAleksey Makarov static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable);
52c9c12d33SAleksey Makarov static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable);
53c9c12d33SAleksey Makarov 
otx2_change_mtu(struct net_device * netdev,int new_mtu)5434bfe0ebSSunil Goutham static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
5534bfe0ebSSunil Goutham {
5606059a1aSGeetha sowjanya 	struct otx2_nic *pf = netdev_priv(netdev);
5734bfe0ebSSunil Goutham 	bool if_up = netif_running(netdev);
5834bfe0ebSSunil Goutham 	int err = 0;
5934bfe0ebSSunil Goutham 
6006059a1aSGeetha sowjanya 	if (pf->xdp_prog && new_mtu > MAX_XDP_MTU) {
6106059a1aSGeetha sowjanya 		netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
6206059a1aSGeetha sowjanya 			    netdev->mtu);
6306059a1aSGeetha sowjanya 		return -EINVAL;
6406059a1aSGeetha sowjanya 	}
6534bfe0ebSSunil Goutham 	if (if_up)
6634bfe0ebSSunil Goutham 		otx2_stop(netdev);
6734bfe0ebSSunil Goutham 
6834bfe0ebSSunil Goutham 	netdev_info(netdev, "Changing MTU from %d to %d\n",
6934bfe0ebSSunil Goutham 		    netdev->mtu, new_mtu);
7034bfe0ebSSunil Goutham 	netdev->mtu = new_mtu;
7134bfe0ebSSunil Goutham 
7234bfe0ebSSunil Goutham 	if (if_up)
7334bfe0ebSSunil Goutham 		err = otx2_open(netdev);
7434bfe0ebSSunil Goutham 
7534bfe0ebSSunil Goutham 	return err;
7634bfe0ebSSunil Goutham }
7734bfe0ebSSunil Goutham 
otx2_disable_flr_me_intr(struct otx2_nic * pf)78547d20f1SGeetha sowjanya static void otx2_disable_flr_me_intr(struct otx2_nic *pf)
79547d20f1SGeetha sowjanya {
80547d20f1SGeetha sowjanya 	int irq, vfs = pf->total_vfs;
81547d20f1SGeetha sowjanya 
82547d20f1SGeetha sowjanya 	/* Disable VFs ME interrupts */
83547d20f1SGeetha sowjanya 	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
84547d20f1SGeetha sowjanya 	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0);
85547d20f1SGeetha sowjanya 	free_irq(irq, pf);
86547d20f1SGeetha sowjanya 
87547d20f1SGeetha sowjanya 	/* Disable VFs FLR interrupts */
88547d20f1SGeetha sowjanya 	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
89547d20f1SGeetha sowjanya 	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0);
90547d20f1SGeetha sowjanya 	free_irq(irq, pf);
91547d20f1SGeetha sowjanya 
92547d20f1SGeetha sowjanya 	if (vfs <= 64)
93547d20f1SGeetha sowjanya 		return;
94547d20f1SGeetha sowjanya 
95547d20f1SGeetha sowjanya 	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
96547d20f1SGeetha sowjanya 	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME1);
97547d20f1SGeetha sowjanya 	free_irq(irq, pf);
98547d20f1SGeetha sowjanya 
99547d20f1SGeetha sowjanya 	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
100547d20f1SGeetha sowjanya 	irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR1);
101547d20f1SGeetha sowjanya 	free_irq(irq, pf);
102547d20f1SGeetha sowjanya }
103547d20f1SGeetha sowjanya 
otx2_flr_wq_destroy(struct otx2_nic * pf)104547d20f1SGeetha sowjanya static void otx2_flr_wq_destroy(struct otx2_nic *pf)
105547d20f1SGeetha sowjanya {
106547d20f1SGeetha sowjanya 	if (!pf->flr_wq)
107547d20f1SGeetha sowjanya 		return;
108547d20f1SGeetha sowjanya 	destroy_workqueue(pf->flr_wq);
109547d20f1SGeetha sowjanya 	pf->flr_wq = NULL;
110547d20f1SGeetha sowjanya 	devm_kfree(pf->dev, pf->flr_wrk);
111547d20f1SGeetha sowjanya }
112547d20f1SGeetha sowjanya 
otx2_flr_handler(struct work_struct * work)113547d20f1SGeetha sowjanya static void otx2_flr_handler(struct work_struct *work)
114547d20f1SGeetha sowjanya {
115547d20f1SGeetha sowjanya 	struct flr_work *flrwork = container_of(work, struct flr_work, work);
116547d20f1SGeetha sowjanya 	struct otx2_nic *pf = flrwork->pf;
1174c3212f5SSunil Goutham 	struct mbox *mbox = &pf->mbox;
118547d20f1SGeetha sowjanya 	struct msg_req *req;
119547d20f1SGeetha sowjanya 	int vf, reg = 0;
120547d20f1SGeetha sowjanya 
121547d20f1SGeetha sowjanya 	vf = flrwork - pf->flr_wrk;
122547d20f1SGeetha sowjanya 
1234c3212f5SSunil Goutham 	mutex_lock(&mbox->lock);
1244c3212f5SSunil Goutham 	req = otx2_mbox_alloc_msg_vf_flr(mbox);
125547d20f1SGeetha sowjanya 	if (!req) {
1264c3212f5SSunil Goutham 		mutex_unlock(&mbox->lock);
127547d20f1SGeetha sowjanya 		return;
128547d20f1SGeetha sowjanya 	}
129547d20f1SGeetha sowjanya 	req->hdr.pcifunc &= RVU_PFVF_FUNC_MASK;
130547d20f1SGeetha sowjanya 	req->hdr.pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;
131547d20f1SGeetha sowjanya 
132547d20f1SGeetha sowjanya 	if (!otx2_sync_mbox_msg(&pf->mbox)) {
133547d20f1SGeetha sowjanya 		if (vf >= 64) {
134547d20f1SGeetha sowjanya 			reg = 1;
135547d20f1SGeetha sowjanya 			vf = vf - 64;
136547d20f1SGeetha sowjanya 		}
137547d20f1SGeetha sowjanya 		/* clear transcation pending bit */
138547d20f1SGeetha sowjanya 		otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
139547d20f1SGeetha sowjanya 		otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
140547d20f1SGeetha sowjanya 	}
141547d20f1SGeetha sowjanya 
1424c3212f5SSunil Goutham 	mutex_unlock(&mbox->lock);
143547d20f1SGeetha sowjanya }
144547d20f1SGeetha sowjanya 
otx2_pf_flr_intr_handler(int irq,void * pf_irq)145547d20f1SGeetha sowjanya static irqreturn_t otx2_pf_flr_intr_handler(int irq, void *pf_irq)
146547d20f1SGeetha sowjanya {
147547d20f1SGeetha sowjanya 	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
148547d20f1SGeetha sowjanya 	int reg, dev, vf, start_vf, num_reg = 1;
149547d20f1SGeetha sowjanya 	u64 intr;
150547d20f1SGeetha sowjanya 
151547d20f1SGeetha sowjanya 	if (pf->total_vfs > 64)
152547d20f1SGeetha sowjanya 		num_reg = 2;
153547d20f1SGeetha sowjanya 
154547d20f1SGeetha sowjanya 	for (reg = 0; reg < num_reg; reg++) {
155547d20f1SGeetha sowjanya 		intr = otx2_read64(pf, RVU_PF_VFFLR_INTX(reg));
156547d20f1SGeetha sowjanya 		if (!intr)
157547d20f1SGeetha sowjanya 			continue;
158547d20f1SGeetha sowjanya 		start_vf = 64 * reg;
159547d20f1SGeetha sowjanya 		for (vf = 0; vf < 64; vf++) {
160547d20f1SGeetha sowjanya 			if (!(intr & BIT_ULL(vf)))
161547d20f1SGeetha sowjanya 				continue;
162547d20f1SGeetha sowjanya 			dev = vf + start_vf;
163547d20f1SGeetha sowjanya 			queue_work(pf->flr_wq, &pf->flr_wrk[dev].work);
164547d20f1SGeetha sowjanya 			/* Clear interrupt */
165547d20f1SGeetha sowjanya 			otx2_write64(pf, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
166547d20f1SGeetha sowjanya 			/* Disable the interrupt */
167547d20f1SGeetha sowjanya 			otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(reg),
168547d20f1SGeetha sowjanya 				     BIT_ULL(vf));
169547d20f1SGeetha sowjanya 		}
170547d20f1SGeetha sowjanya 	}
171547d20f1SGeetha sowjanya 	return IRQ_HANDLED;
172547d20f1SGeetha sowjanya }
173547d20f1SGeetha sowjanya 
otx2_pf_me_intr_handler(int irq,void * pf_irq)174547d20f1SGeetha sowjanya static irqreturn_t otx2_pf_me_intr_handler(int irq, void *pf_irq)
175547d20f1SGeetha sowjanya {
176547d20f1SGeetha sowjanya 	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
177547d20f1SGeetha sowjanya 	int vf, reg, num_reg = 1;
178547d20f1SGeetha sowjanya 	u64 intr;
179547d20f1SGeetha sowjanya 
180547d20f1SGeetha sowjanya 	if (pf->total_vfs > 64)
181547d20f1SGeetha sowjanya 		num_reg = 2;
182547d20f1SGeetha sowjanya 
183547d20f1SGeetha sowjanya 	for (reg = 0; reg < num_reg; reg++) {
184547d20f1SGeetha sowjanya 		intr = otx2_read64(pf, RVU_PF_VFME_INTX(reg));
185547d20f1SGeetha sowjanya 		if (!intr)
186547d20f1SGeetha sowjanya 			continue;
187547d20f1SGeetha sowjanya 		for (vf = 0; vf < 64; vf++) {
188547d20f1SGeetha sowjanya 			if (!(intr & BIT_ULL(vf)))
189547d20f1SGeetha sowjanya 				continue;
190547d20f1SGeetha sowjanya 			/* clear trpend bit */
191547d20f1SGeetha sowjanya 			otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
192547d20f1SGeetha sowjanya 			/* clear interrupt */
193547d20f1SGeetha sowjanya 			otx2_write64(pf, RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
194547d20f1SGeetha sowjanya 		}
195547d20f1SGeetha sowjanya 	}
196547d20f1SGeetha sowjanya 	return IRQ_HANDLED;
197547d20f1SGeetha sowjanya }
198547d20f1SGeetha sowjanya 
/* Register the per-VF FLR (function level reset) and ME (master enable)
 * interrupt handlers for this PF and unmask those interrupts for all
 * 'numvfs' VFs.  Each 64-VF group has its own interrupt vector and
 * register set (index 0 for VFs 0..63, index 1 for VFs 64..127).
 *
 * Returns 0 on success or the request_irq() error for the FLR vectors.
 * NOTE(review): ME0/ME1 registration failures are only logged, never
 * returned — presumably ME handling is best-effort; confirm before
 * relying on the return value to mean "all IRQs registered".
 */
static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
{
	struct otx2_hw *hw = &pf->hw;
	char *irq_name;
	int ret;

	/* Register ME interrupt handler*/
	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE];
	snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc));
	ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0),
			  otx2_pf_me_intr_handler, 0, irq_name, pf);
	if (ret) {
		dev_err(pf->dev,
			"RVUPF: IRQ registration failed for ME0\n");
	}

	/* Register FLR interrupt handler */
	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE];
	snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc));
	ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0),
			  otx2_pf_flr_intr_handler, 0, irq_name, pf);
	if (ret) {
		dev_err(pf->dev,
			"RVUPF: IRQ registration failed for FLR0\n");
		return ret;
	}

	/* Second vector pair is only present when more than 64 VFs exist */
	if (numvfs > 64) {
		irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE];
		snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1",
			 rvu_get_pf(pf->pcifunc));
		ret = request_irq(pci_irq_vector
				  (pf->pdev, RVU_PF_INT_VEC_VFME1),
				  otx2_pf_me_intr_handler, 0, irq_name, pf);
		if (ret) {
			dev_err(pf->dev,
				"RVUPF: IRQ registration failed for ME1\n");
		}
		irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE];
		snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1",
			 rvu_get_pf(pf->pcifunc));
		ret = request_irq(pci_irq_vector
				  (pf->pdev, RVU_PF_INT_VEC_VFFLR1),
				  otx2_pf_flr_intr_handler, 0, irq_name, pf);
		if (ret) {
			dev_err(pf->dev,
				"RVUPF: IRQ registration failed for FLR1\n");
			return ret;
		}
	}

	/* Enable ME interrupt for all VFs*/
	otx2_write64(pf, RVU_PF_VFME_INTX(0), INTR_MASK(numvfs));
	otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(numvfs));

	/* Enable FLR interrupt for all VFs*/
	otx2_write64(pf, RVU_PF_VFFLR_INTX(0), INTR_MASK(numvfs));
	otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(numvfs));

	/* Clear and unmask the second register set for VFs 64..127 */
	if (numvfs > 64) {
		numvfs -= 64;

		otx2_write64(pf, RVU_PF_VFME_INTX(1), INTR_MASK(numvfs));
		otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(1),
			     INTR_MASK(numvfs));

		otx2_write64(pf, RVU_PF_VFFLR_INTX(1), INTR_MASK(numvfs));
		otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(1),
			     INTR_MASK(numvfs));
	}
	return 0;
}
271547d20f1SGeetha sowjanya 
otx2_pf_flr_init(struct otx2_nic * pf,int num_vfs)272547d20f1SGeetha sowjanya static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
273547d20f1SGeetha sowjanya {
274547d20f1SGeetha sowjanya 	int vf;
275547d20f1SGeetha sowjanya 
276289f9746STejun Heo 	pf->flr_wq = alloc_ordered_workqueue("otx2_pf_flr_wq", WQ_HIGHPRI);
277547d20f1SGeetha sowjanya 	if (!pf->flr_wq)
278547d20f1SGeetha sowjanya 		return -ENOMEM;
279547d20f1SGeetha sowjanya 
280547d20f1SGeetha sowjanya 	pf->flr_wrk = devm_kcalloc(pf->dev, num_vfs,
281547d20f1SGeetha sowjanya 				   sizeof(struct flr_work), GFP_KERNEL);
282547d20f1SGeetha sowjanya 	if (!pf->flr_wrk) {
283547d20f1SGeetha sowjanya 		destroy_workqueue(pf->flr_wq);
284547d20f1SGeetha sowjanya 		return -ENOMEM;
285547d20f1SGeetha sowjanya 	}
286547d20f1SGeetha sowjanya 
287547d20f1SGeetha sowjanya 	for (vf = 0; vf < num_vfs; vf++) {
288547d20f1SGeetha sowjanya 		pf->flr_wrk[vf].pf = pf;
289547d20f1SGeetha sowjanya 		INIT_WORK(&pf->flr_wrk[vf].work, otx2_flr_handler);
290547d20f1SGeetha sowjanya 	}
291547d20f1SGeetha sowjanya 
292547d20f1SGeetha sowjanya 	return 0;
293547d20f1SGeetha sowjanya }
294547d20f1SGeetha sowjanya 
static void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
			       int first, int mdevs, u64 intr)
{
	struct otx2_mbox *down_mbox = &mw->mbox;
	struct otx2_mbox *up_mbox = &mw->mbox_up;
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *hdr;
	int i;

	for (i = first; i < mdevs; i++) {
		/* Interrupt bit positions are relative to 'first' */
		if (!(intr & BIT_ULL(i - first)))
			continue;

		/* Down direction: requests from the VF.
		 * hdr->num_msgs is zeroed right here, in interrupt context,
		 * so it holds a correct value the next time the handler
		 * runs; the snapshot in mw[i].num_msgs is what
		 * otx2_pfvf_mbox_handler consumes and mw[i].up_num_msgs is
		 * what otx2_pfvf_mbox_up_handler consumes.
		 */
		mdev = &down_mbox->dev[i];
		hdr = mdev->mbase + down_mbox->rx_start;
		if (hdr->num_msgs) {
			mw[i].num_msgs = hdr->num_msgs;
			hdr->num_msgs = 0;
			queue_work(mbox_wq, &mw[i].mbox_wrk);
		}

		/* Up direction: responses/notifications from the VF */
		mdev = &up_mbox->dev[i];
		hdr = mdev->mbase + up_mbox->rx_start;
		if (hdr->num_msgs) {
			mw[i].up_num_msgs = hdr->num_msgs;
			hdr->num_msgs = 0;
			queue_work(mbox_wq, &mw[i].mbox_up_wrk);
		}
	}
}
3345a6d7c9dSSunil Goutham 
otx2_forward_msg_pfvf(struct otx2_mbox_dev * mdev,struct otx2_mbox * pfvf_mbox,void * bbuf_base,int devid)335d424b6c0SSunil Goutham static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev,
336d424b6c0SSunil Goutham 				  struct otx2_mbox *pfvf_mbox, void *bbuf_base,
337d424b6c0SSunil Goutham 				  int devid)
338d424b6c0SSunil Goutham {
339d424b6c0SSunil Goutham 	struct otx2_mbox_dev *src_mdev = mdev;
340d424b6c0SSunil Goutham 	int offset;
341d424b6c0SSunil Goutham 
342d424b6c0SSunil Goutham 	/* Msgs are already copied, trigger VF's mbox irq */
343d424b6c0SSunil Goutham 	smp_wmb();
344d424b6c0SSunil Goutham 
345c6354b85SSubbaraya Sundeep 	otx2_mbox_wait_for_zero(pfvf_mbox, devid);
346c6354b85SSubbaraya Sundeep 
347d424b6c0SSunil Goutham 	offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift);
348c6354b85SSubbaraya Sundeep 	writeq(MBOX_DOWN_MSG, (void __iomem *)pfvf_mbox->reg_base + offset);
349d424b6c0SSunil Goutham 
350d424b6c0SSunil Goutham 	/* Restore VF's mbox bounce buffer region address */
351d424b6c0SSunil Goutham 	src_mdev->mbase = bbuf_base;
352d424b6c0SSunil Goutham }
353d424b6c0SSunil Goutham 
/* Forward 'num_msgs' mailbox messages between a VF and the AF through
 * this PF, acting as the mailbox proxy.  'dir' selects the path:
 *   MBOX_DIR_PFAF    - VF requests forwarded down to the AF
 *   MBOX_DIR_PFVF_UP - AF notifications forwarded up to a VF
 *   MBOX_DIR_VFPF_UP - VF responses to up-messages forwarded to the AF
 *
 * Returns 0 on success, -EINVAL on a malformed/oversized message set,
 * or the mailbox sync error when the peer does not respond.
 */
static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
				     struct otx2_mbox *src_mbox,
				     int dir, int vf, int num_msgs)
{
	struct otx2_mbox_dev *src_mdev, *dst_mdev;
	struct mbox_hdr *mbox_hdr;
	struct mbox_hdr *req_hdr;
	struct mbox *dst_mbox;
	int dst_size, err;

	if (dir == MBOX_DIR_PFAF) {
		/* Set VF's mailbox memory as PF's bounce buffer memory, so
		 * that explicit copying of VF's msgs to PF=>AF mbox region
		 * and AF=>PF responses to VF's mbox region can be avoided.
		 */
		src_mdev = &src_mbox->dev[vf];
		mbox_hdr = src_mbox->hwbase +
				src_mbox->rx_start + (vf * MBOX_SIZE);

		dst_mbox = &pf->mbox;
		dst_size = dst_mbox->mbox.tx_size -
				ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
		/* Check if msgs fit into destination area and has valid size */
		if (mbox_hdr->msg_size > dst_size || !mbox_hdr->msg_size)
			return -EINVAL;

		dst_mdev = &dst_mbox->mbox.dev[0];

		/* Serialize against other users of the PF=>AF mailbox;
		 * dst_mdev->mbase is temporarily repointed below.
		 */
		mutex_lock(&pf->mbox.lock);
		dst_mdev->mbase = src_mdev->mbase;
		dst_mdev->msg_size = mbox_hdr->msg_size;
		dst_mdev->num_msgs = num_msgs;
		err = otx2_sync_mbox_msg(dst_mbox);
		/* Error code -EIO indicate there is a communication failure
		 * to the AF. Rest of the error codes indicate that AF processed
		 * VF messages and set the error codes in response messages
		 * (if any) so simply forward responses to VF.
		 */
		if (err == -EIO) {
			dev_warn(pf->dev,
				 "AF not responding to VF%d messages\n", vf);
			/* restore PF mbase and exit */
			dst_mdev->mbase = pf->mbox.bbuf_base;
			mutex_unlock(&pf->mbox.lock);
			return err;
		}
		/* At this point, all the VF messages sent to AF are acked
		 * with proper responses and responses are copied to VF
		 * mailbox hence raise interrupt to VF.
		 */
		req_hdr = (struct mbox_hdr *)(dst_mdev->mbase +
					      dst_mbox->mbox.rx_start);
		req_hdr->num_msgs = num_msgs;

		/* otx2_forward_msg_pfvf() also restores dst_mdev->mbase
		 * to the bounce buffer before returning.
		 */
		otx2_forward_msg_pfvf(dst_mdev, &pf->mbox_pfvf[0].mbox,
				      pf->mbox.bbuf_base, vf);
		mutex_unlock(&pf->mbox.lock);
	} else if (dir == MBOX_DIR_PFVF_UP) {
		src_mdev = &src_mbox->dev[0];
		mbox_hdr = src_mbox->hwbase + src_mbox->rx_start;
		req_hdr = (struct mbox_hdr *)(src_mdev->mbase +
					      src_mbox->rx_start);
		req_hdr->num_msgs = num_msgs;

		dst_mbox = &pf->mbox_pfvf[0];
		dst_size = dst_mbox->mbox_up.tx_size -
				ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
		/* Check if msgs fit into destination area */
		if (mbox_hdr->msg_size > dst_size)
			return -EINVAL;

		dst_mdev = &dst_mbox->mbox_up.dev[vf];
		dst_mdev->mbase = src_mdev->mbase;
		dst_mdev->msg_size = mbox_hdr->msg_size;
		dst_mdev->num_msgs = mbox_hdr->num_msgs;
		err = otx2_sync_mbox_up_msg(dst_mbox, vf);
		if (err) {
			dev_warn(pf->dev,
				 "VF%d is not responding to mailbox\n", vf);
			return err;
		}
	} else if (dir == MBOX_DIR_VFPF_UP) {
		req_hdr = (struct mbox_hdr *)(src_mbox->dev[0].mbase +
					      src_mbox->rx_start);
		req_hdr->num_msgs = num_msgs;
		/* Ring the AF doorbell (devid 0) with the VF's responses */
		otx2_forward_msg_pfvf(&pf->mbox_pfvf->mbox_up.dev[vf],
				      &pf->mbox.mbox_up,
				      pf->mbox_pfvf[vf].bbuf_base,
				      0);
	}

	return 0;
}
447d424b6c0SSunil Goutham 
otx2_pfvf_mbox_handler(struct work_struct * work)448d424b6c0SSunil Goutham static void otx2_pfvf_mbox_handler(struct work_struct *work)
449d424b6c0SSunil Goutham {
450d424b6c0SSunil Goutham 	struct mbox_msghdr *msg = NULL;
451d424b6c0SSunil Goutham 	int offset, vf_idx, id, err;
452d424b6c0SSunil Goutham 	struct otx2_mbox_dev *mdev;
453d424b6c0SSunil Goutham 	struct mbox_hdr *req_hdr;
454d424b6c0SSunil Goutham 	struct otx2_mbox *mbox;
455d424b6c0SSunil Goutham 	struct mbox *vf_mbox;
456d424b6c0SSunil Goutham 	struct otx2_nic *pf;
457d424b6c0SSunil Goutham 
458d424b6c0SSunil Goutham 	vf_mbox = container_of(work, struct mbox, mbox_wrk);
459d424b6c0SSunil Goutham 	pf = vf_mbox->pfvf;
460d424b6c0SSunil Goutham 	vf_idx = vf_mbox - pf->mbox_pfvf;
461d424b6c0SSunil Goutham 
462d424b6c0SSunil Goutham 	mbox = &pf->mbox_pfvf[0].mbox;
463d424b6c0SSunil Goutham 	mdev = &mbox->dev[vf_idx];
464d424b6c0SSunil Goutham 	req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
465d424b6c0SSunil Goutham 
466d424b6c0SSunil Goutham 	offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
467d424b6c0SSunil Goutham 
468d424b6c0SSunil Goutham 	for (id = 0; id < vf_mbox->num_msgs; id++) {
469d424b6c0SSunil Goutham 		msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
470d424b6c0SSunil Goutham 					     offset);
471d424b6c0SSunil Goutham 
472d424b6c0SSunil Goutham 		if (msg->sig != OTX2_MBOX_REQ_SIG)
473d424b6c0SSunil Goutham 			goto inval_msg;
474d424b6c0SSunil Goutham 
475d424b6c0SSunil Goutham 		/* Set VF's number in each of the msg */
476d424b6c0SSunil Goutham 		msg->pcifunc &= RVU_PFVF_FUNC_MASK;
477d424b6c0SSunil Goutham 		msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK;
478d424b6c0SSunil Goutham 		offset = msg->next_msgoff;
479d424b6c0SSunil Goutham 	}
480d424b6c0SSunil Goutham 	err = otx2_forward_vf_mbox_msgs(pf, mbox, MBOX_DIR_PFAF, vf_idx,
481d424b6c0SSunil Goutham 					vf_mbox->num_msgs);
482d424b6c0SSunil Goutham 	if (err)
483d424b6c0SSunil Goutham 		goto inval_msg;
484d424b6c0SSunil Goutham 	return;
485d424b6c0SSunil Goutham 
486d424b6c0SSunil Goutham inval_msg:
487d424b6c0SSunil Goutham 	otx2_reply_invalid_msg(mbox, vf_idx, 0, msg->id);
488d424b6c0SSunil Goutham 	otx2_mbox_msg_send(mbox, vf_idx);
489d424b6c0SSunil Goutham }
490d424b6c0SSunil Goutham 
/* Work handler for up-direction mailbox responses coming back from a
 * VF.  Walks every response in the batch, validates its ID and
 * signature, logs any error code, and resets the mailbox once the last
 * response has been accounted for.
 */
static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
{
	struct mbox *vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
	struct otx2_nic *pf = vf_mbox->pfvf;
	struct otx2_mbox_dev *mdev;
	int offset, id, vf_idx = 0;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;

	/* Index of this mbox in the per-VF array identifies the VF */
	vf_idx = vf_mbox - pf->mbox_pfvf;
	mbox = &pf->mbox_pfvf[0].mbox_up;
	mdev = &mbox->dev[vf_idx];

	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	/* First message follows the aligned response header */
	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < vf_mbox->up_num_msgs; id++) {
		msg = mdev->mbase + offset;

		if (msg->id >= MBOX_MSG_MAX) {
			dev_err(pf->dev,
				"Mbox msg with unknown ID 0x%x\n", msg->id);
			goto end;
		}

		if (msg->sig != OTX2_MBOX_RSP_SIG) {
			dev_err(pf->dev,
				"Mbox msg with wrong signature %x, ID 0x%x\n",
				msg->sig, msg->id);
			goto end;
		}

		switch (msg->id) {
		case MBOX_MSG_CGX_LINK_EVENT:
			/* Link events need no further action here */
			break;
		default:
			if (msg->rc)
				dev_err(pf->dev,
					"Mbox msg response has err %d, ID 0x%x\n",
					msg->rc, msg->id);
			break;
		}

end:
		offset = mbox->rx_start + msg->next_msgoff;
		/* Reset the mailbox once the final response is processed;
		 * msgs_acked is incremented after the check, so compare
		 * against up_num_msgs - 1.
		 */
		if (mdev->msgs_acked == (vf_mbox->up_num_msgs - 1))
			__otx2_mbox_reset(mbox, vf_idx);
		mdev->msgs_acked++;
	}
}
542d424b6c0SSunil Goutham 
otx2_pfvf_mbox_intr_handler(int irq,void * pf_irq)543d424b6c0SSunil Goutham static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
544d424b6c0SSunil Goutham {
545d424b6c0SSunil Goutham 	struct otx2_nic *pf = (struct otx2_nic *)(pf_irq);
546d424b6c0SSunil Goutham 	int vfs = pf->total_vfs;
547d424b6c0SSunil Goutham 	struct mbox *mbox;
548d424b6c0SSunil Goutham 	u64 intr;
549d424b6c0SSunil Goutham 
550d424b6c0SSunil Goutham 	mbox = pf->mbox_pfvf;
551d424b6c0SSunil Goutham 	/* Handle VF interrupts */
552d424b6c0SSunil Goutham 	if (vfs > 64) {
553d424b6c0SSunil Goutham 		intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1));
554d424b6c0SSunil Goutham 		otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
555c6354b85SSubbaraya Sundeep 		otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr);
556dc75da22SGeetha sowjanya 		if (intr)
557dc75da22SGeetha sowjanya 			trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
558dc75da22SGeetha sowjanya 		vfs = 64;
559d424b6c0SSunil Goutham 	}
560d424b6c0SSunil Goutham 
561d424b6c0SSunil Goutham 	intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
562d424b6c0SSunil Goutham 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr);
563d424b6c0SSunil Goutham 
564c6354b85SSubbaraya Sundeep 	otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr);
565d424b6c0SSunil Goutham 
566dc75da22SGeetha sowjanya 	if (intr)
56731a97460SSubbaraya Sundeep 		trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
56831a97460SSubbaraya Sundeep 
569d424b6c0SSunil Goutham 	return IRQ_HANDLED;
570d424b6c0SSunil Goutham }
571d424b6c0SSunil Goutham 
/* Set up the PF <-> VF mailbox: allocate one struct mbox per VF, a
 * dedicated workqueue, and map the shared mailbox memory region.
 *
 * Returns 0 on success or a negative errno; on failure all resources
 * acquired here are released (the devm allocation via devres).
 */
static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
{
	void __iomem *hwbase;
	struct mbox *mbox;
	int err, vf;
	u64 base;

	if (!numvfs)
		return -EINVAL;

	/* One mbox state struct per VF; freed automatically via devres */
	pf->mbox_pfvf = devm_kcalloc(&pf->pdev->dev, numvfs,
				     sizeof(struct mbox), GFP_KERNEL);
	if (!pf->mbox_pfvf)
		return -ENOMEM;

	pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox",
					   WQ_UNBOUND | WQ_HIGHPRI |
					   WQ_MEM_RECLAIM, 0);
	if (!pf->mbox_pfvf_wq)
		return -ENOMEM;

	/* On CN10K platform, PF <-> VF mailbox region follows after
	 * PF <-> AF mailbox region.
	 */
	if (test_bit(CN10K_MBOX, &pf->hw.cap_flag))
		base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
		       MBOX_SIZE;
	else
		base = readq((void __iomem *)((u64)pf->reg_base +
					      RVU_PF_VF_BAR4_ADDR));

	/* Map room for every possible VF (total_vfs), not just numvfs */
	hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
	if (!hwbase) {
		err = -ENOMEM;
		goto free_wq;
	}

	mbox = &pf->mbox_pfvf[0];
	err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
			     MBOX_DIR_PFVF, numvfs);
	if (err)
		goto free_iomem;

	err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
			     MBOX_DIR_PFVF_UP, numvfs);
	if (err)
		goto free_iomem;

	/* Every per-VF mbox shares the PF back-pointer and work handlers */
	for (vf = 0; vf < numvfs; vf++) {
		mbox->pfvf = pf;
		INIT_WORK(&mbox->mbox_wrk, otx2_pfvf_mbox_handler);
		INIT_WORK(&mbox->mbox_up_wrk, otx2_pfvf_mbox_up_handler);
		mbox++;
	}

	return 0;

free_iomem:
	if (hwbase)
		iounmap(hwbase);
free_wq:
	destroy_workqueue(pf->mbox_pfvf_wq);
	return err;
}
636d424b6c0SSunil Goutham 
otx2_pfvf_mbox_destroy(struct otx2_nic * pf)637d424b6c0SSunil Goutham static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf)
638d424b6c0SSunil Goutham {
639d424b6c0SSunil Goutham 	struct mbox *mbox = &pf->mbox_pfvf[0];
640d424b6c0SSunil Goutham 
641d424b6c0SSunil Goutham 	if (!mbox)
642d424b6c0SSunil Goutham 		return;
643d424b6c0SSunil Goutham 
644d424b6c0SSunil Goutham 	if (pf->mbox_pfvf_wq) {
645d424b6c0SSunil Goutham 		destroy_workqueue(pf->mbox_pfvf_wq);
646d424b6c0SSunil Goutham 		pf->mbox_pfvf_wq = NULL;
647d424b6c0SSunil Goutham 	}
648d424b6c0SSunil Goutham 
649d424b6c0SSunil Goutham 	if (mbox->mbox.hwbase)
650d424b6c0SSunil Goutham 		iounmap(mbox->mbox.hwbase);
651d424b6c0SSunil Goutham 
652d424b6c0SSunil Goutham 	otx2_mbox_destroy(&mbox->mbox);
653d424b6c0SSunil Goutham }
654d424b6c0SSunil Goutham 
/* Ack any stale PF <=> VF mailbox interrupts, then unmask the
 * interrupt lines for all configured VFs (second line only when more
 * than 64 VFs exist).
 */
static void otx2_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
{
	int rem_vfs = numvfs - 64;

	/* Write-1-to-clear any pending bits first */
	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);

	/* Unmask VF => PF mailbox interrupts */
	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(numvfs));
	if (rem_vfs > 0)
		otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
			     INTR_MASK(rem_vfs));
}
669d424b6c0SSunil Goutham 
otx2_disable_pfvf_mbox_intr(struct otx2_nic * pf,int numvfs)670d424b6c0SSunil Goutham static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
671d424b6c0SSunil Goutham {
672d424b6c0SSunil Goutham 	int vector;
673d424b6c0SSunil Goutham 
674d424b6c0SSunil Goutham 	/* Disable PF <=> VF mailbox IRQ */
675d424b6c0SSunil Goutham 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull);
676d424b6c0SSunil Goutham 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull);
677d424b6c0SSunil Goutham 
678d424b6c0SSunil Goutham 	otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
679d424b6c0SSunil Goutham 	vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
680d424b6c0SSunil Goutham 	free_irq(vector, pf);
681d424b6c0SSunil Goutham 
682d424b6c0SSunil Goutham 	if (numvfs > 64) {
683d424b6c0SSunil Goutham 		otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
684d424b6c0SSunil Goutham 		vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
685d424b6c0SSunil Goutham 		free_irq(vector, pf);
686d424b6c0SSunil Goutham 	}
687d424b6c0SSunil Goutham }
688d424b6c0SSunil Goutham 
/* Register IRQ handlers for the VF => PF mailbox vectors and unmask
 * them. The second vector (MBOX1) is only needed when more than 64
 * VFs are enabled. Returns 0 on success or a negative errno.
 */
static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
{
	struct otx2_hw *hw = &pf->hw;
	char *irq_name;
	int err;

	/* Register MBOX0 interrupt handler */
	irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE];
	if (pf->pcifunc)
		snprintf(irq_name, NAME_SIZE,
			 "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc));
	else
		snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0");
	err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0),
			  otx2_pfvf_mbox_intr_handler, 0, irq_name, pf);
	if (err) {
		dev_err(pf->dev,
			"RVUPF: IRQ registration failed for PFVF mbox0 irq\n");
		return err;
	}

	if (numvfs > 64) {
		/* Register MBOX1 interrupt handler */
		irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE];
		if (pf->pcifunc)
			snprintf(irq_name, NAME_SIZE,
				 "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc));
		else
			snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1");
		err = request_irq(pci_irq_vector(pf->pdev,
						 RVU_PF_INT_VEC_VFPF_MBOX1),
						 otx2_pfvf_mbox_intr_handler,
						 0, irq_name, pf);
		if (err) {
			/* NOTE(review): the MBOX0 IRQ registered above is not
			 * freed on this path; presumably the caller's error
			 * handling invokes otx2_disable_pfvf_mbox_intr() -
			 * confirm, otherwise the vector leaks.
			 */
			dev_err(pf->dev,
				"RVUPF: IRQ registration failed for PFVF mbox1 irq\n");
			return err;
		}
	}

	otx2_enable_pfvf_mbox_intr(pf, numvfs);

	return 0;
}
733d424b6c0SSunil Goutham 
/* Dispatch a single AF => PF mailbox response.
 *
 * Responses destined for a VF (non-zero FUNC field in pcifunc) only
 * update the cached per-VF interface state; PF-owned responses are
 * routed to the matching mbox_handler_*() helper.
 */
static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
				       struct mbox_msghdr *msg)
{
	int devid;

	if (msg->id >= MBOX_MSG_MAX) {
		dev_err(pf->dev,
			"Mbox msg with unknown ID 0x%x\n", msg->id);
		return;
	}

	if (msg->sig != OTX2_MBOX_RSP_SIG) {
		dev_err(pf->dev,
			"Mbox msg with wrong signature %x, ID 0x%x\n",
			 msg->sig, msg->id);
		return;
	}

	/* message response heading VF */
	devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
	if (devid) {
		/* FUNC is 1-based for VFs, hence the devid - 1 index */
		struct otx2_vf_config *config = &pf->vf_configs[devid - 1];
		struct delayed_work *dwork;

		switch (msg->id) {
		case MBOX_MSG_NIX_LF_START_RX:
			config->intf_down = false;
			/* Replay current link state to the VF shortly */
			dwork = &config->link_event_work;
			schedule_delayed_work(dwork, msecs_to_jiffies(100));
			break;
		case MBOX_MSG_NIX_LF_STOP_RX:
			config->intf_down = true;
			break;
		}

		return;
	}

	switch (msg->id) {
	case MBOX_MSG_READY:
		/* AF handed this PF its pcifunc identity */
		pf->pcifunc = msg->pcifunc;
		break;
	case MBOX_MSG_MSIX_OFFSET:
		mbox_handler_msix_offset(pf, (struct msix_offset_rsp *)msg);
		break;
	case MBOX_MSG_NPA_LF_ALLOC:
		mbox_handler_npa_lf_alloc(pf, (struct npa_lf_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_LF_ALLOC:
		mbox_handler_nix_lf_alloc(pf, (struct nix_lf_alloc_rsp *)msg);
		break;
	case MBOX_MSG_NIX_BP_ENABLE:
		mbox_handler_nix_bp_enable(pf, (struct nix_bp_cfg_rsp *)msg);
		break;
	case MBOX_MSG_CGX_STATS:
		mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
		break;
	case MBOX_MSG_CGX_FEC_STATS:
		mbox_handler_cgx_fec_stats(pf, (struct cgx_fec_stats_rsp *)msg);
		break;
	default:
		/* No dedicated handler: just report a non-zero error code */
		if (msg->rc)
			dev_err(pf->dev,
				"Mbox msg response has err %d, ID 0x%x\n",
				msg->rc, msg->id);
		break;
	}
}
8025a6d7c9dSSunil Goutham 
/* Work handler: walk and process all queued AF => PF mailbox
 * responses. Scheduled from the AF mbox IRQ handler onto pf->mbox_wq.
 */
static void otx2_pfaf_mbox_handler(struct work_struct *work)
{
	struct otx2_mbox_dev *mdev;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	struct otx2_mbox *mbox;
	struct mbox *af_mbox;
	struct otx2_nic *pf;
	int offset, id;
	u16 num_msgs;

	af_mbox = container_of(work, struct mbox, mbox_wrk);
	mbox = &af_mbox->mbox;
	mdev = &mbox->dev[0];
	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	num_msgs = rsp_hdr->num_msgs;

	/* First message starts right after the aligned batch header */
	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
	pf = af_mbox->pfvf;

	for (id = 0; id < num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
		otx2_process_pfaf_mbox_msg(pf, msg);
		offset = mbox->rx_start + msg->next_msgoff;
		/* Reset the mbox region once the final message of the batch
		 * is being acked, so the AF can reuse it for the next batch.
		 */
		if (mdev->msgs_acked == (num_msgs - 1))
			__otx2_mbox_reset(mbox, 0);
		mdev->msgs_acked++;
	}

}
8335a6d7c9dSSunil Goutham 
otx2_handle_link_event(struct otx2_nic * pf)83450fe6c02SLinu Cherian static void otx2_handle_link_event(struct otx2_nic *pf)
83550fe6c02SLinu Cherian {
83650fe6c02SLinu Cherian 	struct cgx_link_user_info *linfo = &pf->linfo;
83750fe6c02SLinu Cherian 	struct net_device *netdev = pf->netdev;
83850fe6c02SLinu Cherian 
83950fe6c02SLinu Cherian 	pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name,
84050fe6c02SLinu Cherian 		linfo->link_up ? "UP" : "DOWN", linfo->speed,
84150fe6c02SLinu Cherian 		linfo->full_duplex ? "Full" : "Half");
84250fe6c02SLinu Cherian 	if (linfo->link_up) {
84350fe6c02SLinu Cherian 		netif_carrier_on(netdev);
84450fe6c02SLinu Cherian 		netif_tx_start_all_queues(netdev);
84550fe6c02SLinu Cherian 	} else {
84650fe6c02SLinu Cherian 		netif_tx_stop_all_queues(netdev);
84750fe6c02SLinu Cherian 		netif_carrier_off(netdev);
84850fe6c02SLinu Cherian 	}
84950fe6c02SLinu Cherian }
85050fe6c02SLinu Cherian 
/* Up-message handler: the AF reports an MCS (MACsec block) interrupt
 * event; hand it to the CN10K MCS event processing. Always succeeds.
 */
int otx2_mbox_up_handler_mcs_intr_notify(struct otx2_nic *pf,
					 struct mcs_intr_info *event,
					 struct msg_rsp *rsp)
{
	cn10k_handle_mcs_event(pf, event);
	return 0;
}
859c54ffc73SSubbaraya Sundeep 
otx2_mbox_up_handler_cgx_link_event(struct otx2_nic * pf,struct cgx_link_info_msg * msg,struct msg_rsp * rsp)86050fe6c02SLinu Cherian int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
86150fe6c02SLinu Cherian 					struct cgx_link_info_msg *msg,
86250fe6c02SLinu Cherian 					struct msg_rsp *rsp)
86350fe6c02SLinu Cherian {
864ad513ed9STomasz Duszynski 	int i;
865ad513ed9STomasz Duszynski 
86650fe6c02SLinu Cherian 	/* Copy the link info sent by AF */
86750fe6c02SLinu Cherian 	pf->linfo = msg->link_info;
86850fe6c02SLinu Cherian 
869ad513ed9STomasz Duszynski 	/* notify VFs about link event */
870ad513ed9STomasz Duszynski 	for (i = 0; i < pci_num_vf(pf->pdev); i++) {
871ad513ed9STomasz Duszynski 		struct otx2_vf_config *config = &pf->vf_configs[i];
872ad513ed9STomasz Duszynski 		struct delayed_work *dwork = &config->link_event_work;
873ad513ed9STomasz Duszynski 
874ad513ed9STomasz Duszynski 		if (config->intf_down)
875ad513ed9STomasz Duszynski 			continue;
876ad513ed9STomasz Duszynski 
877ad513ed9STomasz Duszynski 		schedule_delayed_work(dwork, msecs_to_jiffies(100));
878ad513ed9STomasz Duszynski 	}
879ad513ed9STomasz Duszynski 
88050fe6c02SLinu Cherian 	/* interface has not been fully configured yet */
88150fe6c02SLinu Cherian 	if (pf->flags & OTX2_FLAG_INTF_DOWN)
88250fe6c02SLinu Cherian 		return 0;
88350fe6c02SLinu Cherian 
88450fe6c02SLinu Cherian 	otx2_handle_link_event(pf);
88550fe6c02SLinu Cherian 	return 0;
88650fe6c02SLinu Cherian }
88750fe6c02SLinu Cherian 
/* Validate and dispatch one AF => PF "up" message (a request the AF
 * initiated, e.g. a link event). The M() X-macro expands the
 * MBOX_UP_*_MESSAGES tables into one case per message: allocate the
 * response, fill the common header and call the matching
 * otx2_mbox_up_handler_*() function.
 */
static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
				    struct mbox_msghdr *req)
{
	/* Check if valid, if not reply with a invalid msg */
	if (req->sig != OTX2_MBOX_REQ_SIG) {
		otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
		return -ENODEV;
	}

	switch (req->id) {
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
	case _id: {							\
		struct _rsp_type *rsp;					\
		int err;						\
									\
		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
			&pf->mbox.mbox_up, 0,				\
			sizeof(struct _rsp_type));			\
		if (!rsp)						\
			return -ENOMEM;					\
									\
		rsp->hdr.id = _id;					\
		rsp->hdr.sig = OTX2_MBOX_RSP_SIG;			\
		rsp->hdr.pcifunc = 0;					\
		rsp->hdr.rc = 0;					\
									\
		err = otx2_mbox_up_handler_ ## _fn_name(		\
			pf, (struct _req_type *)req, rsp);		\
		return err;						\
	}
MBOX_UP_CGX_MESSAGES
MBOX_UP_MCS_MESSAGES
#undef M
		break;
	default:
		otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
		return -ENODEV;
	}
	return 0;
}
9285a6d7c9dSSunil Goutham 
/* Work handler: process queued AF => PF "up" messages. Messages
 * addressed to the PF (FUNC == 0) are handled here; otherwise the
 * whole batch is forwarded to the VF mailbox.
 */
static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
{
	struct mbox *af_mbox = container_of(work, struct mbox, mbox_up_wrk);
	struct otx2_mbox *mbox = &af_mbox->mbox_up;
	struct otx2_mbox_dev *mdev = &mbox->dev[0];
	struct otx2_nic *pf = af_mbox->pfvf;
	int offset, id, devid = 0;
	struct mbox_hdr *rsp_hdr;
	struct mbox_msghdr *msg;
	u16 num_msgs;

	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
	num_msgs = rsp_hdr->num_msgs;

	/* First message starts right after the aligned batch header */
	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);

	for (id = 0; id < num_msgs; id++) {
		msg = (struct mbox_msghdr *)(mdev->mbase + offset);

		devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
		/* Skip processing VF's messages */
		if (!devid)
			otx2_process_mbox_msg_up(pf, msg);
		offset = mbox->rx_start + msg->next_msgoff;
	}
	/* Forward to VF iff VFs are really present */
	if (devid && pci_num_vf(pf->pdev)) {
		/* devid carries the FUNC of the last message scanned above */
		otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up,
					  MBOX_DIR_PFVF_UP, devid - 1,
					  num_msgs);
		return;
	}

	otx2_mbox_msg_send(mbox, 0);
}
9645a6d7c9dSSunil Goutham 
/* IRQ handler for the AF => PF mailbox vector.
 *
 * The RVU_PF_PFAF_MBOX0 scratch register carries flags indicating
 * whether an UP message (AF-initiated request) and/or a DOWN reply
 * (response to a PF request) is pending. Each pending flag is cleared
 * and the matching work item queued for deferred processing.
 */
static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
{
	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
	struct mbox *mw = &pf->mbox;
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;
	u64 mbox_data;

	/* Clear the IRQ */
	otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));


	mbox_data = otx2_read64(pf, RVU_PF_PFAF_MBOX0);

	if (mbox_data & MBOX_UP_MSG) {
		/* Ack the UP flag by writing back the cleared value */
		mbox_data &= ~MBOX_UP_MSG;
		otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data);

		mbox = &mw->mbox_up;
		mdev = &mbox->dev[0];
		/* Copy hw mbox contents into the bounce buffer for parsing */
		otx2_sync_mbox_bbuf(mbox, 0);

		hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
		if (hdr->num_msgs)
			queue_work(pf->mbox_wq, &mw->mbox_up_wrk);

		trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF",
					 BIT_ULL(0));
	}

	if (mbox_data & MBOX_DOWN_MSG) {
		/* Ack the DOWN flag likewise */
		mbox_data &= ~MBOX_DOWN_MSG;
		otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data);

		mbox = &mw->mbox;
		mdev = &mbox->dev[0];
		otx2_sync_mbox_bbuf(mbox, 0);

		hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
		if (hdr->num_msgs)
			queue_work(pf->mbox_wq, &mw->mbox_wrk);

		trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF",
					 BIT_ULL(0));
	}

	return IRQ_HANDLED;
}
10145a6d7c9dSSunil Goutham 
otx2_disable_mbox_intr(struct otx2_nic * pf)10155a6d7c9dSSunil Goutham static void otx2_disable_mbox_intr(struct otx2_nic *pf)
10165a6d7c9dSSunil Goutham {
10175a6d7c9dSSunil Goutham 	int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX);
10185a6d7c9dSSunil Goutham 
10195a6d7c9dSSunil Goutham 	/* Disable AF => PF mailbox IRQ */
10205a6d7c9dSSunil Goutham 	otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0));
10215a6d7c9dSSunil Goutham 	free_irq(vector, pf);
10225a6d7c9dSSunil Goutham }
10235a6d7c9dSSunil Goutham 
/* Register the AF <=> PF mailbox IRQ handler and unmask the interrupt.
 * When @probe_af is set (device probe), additionally verify the AF
 * answers a READY message; an unresponsive AF defers the probe.
 */
static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
{
	struct otx2_hw *hw = &pf->hw;
	struct msg_req *req;
	char *irq_name;
	int err;

	/* Register mailbox interrupt handler */
	irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE];
	snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox");
	err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX),
			  otx2_pfaf_mbox_intr_handler, 0, irq_name, pf);
	if (err) {
		dev_err(pf->dev,
			"RVUPF: IRQ registration failed for PFAF mbox irq\n");
		return err;
	}

	/* Enable mailbox interrupt for msgs coming from AF.
	 * First clear to avoid spurious interrupts, if any.
	 */
	otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
	otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));

	if (!probe_af)
		return 0;

	/* Check mailbox communication with AF */
	err = -ENOMEM;
	req = otx2_mbox_alloc_msg_ready(&pf->mbox);
	if (!req)
		goto err_disable_intr;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err) {
		dev_warn(pf->dev,
			 "AF not responding to mailbox, deferring probe\n");
		err = -EPROBE_DEFER;
		goto err_disable_intr;
	}

	return 0;

err_disable_intr:
	otx2_disable_mbox_intr(pf);
	return err;
}
10675a6d7c9dSSunil Goutham 
otx2_pfaf_mbox_destroy(struct otx2_nic * pf)10685a6d7c9dSSunil Goutham static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
10695a6d7c9dSSunil Goutham {
10705a6d7c9dSSunil Goutham 	struct mbox *mbox = &pf->mbox;
10715a6d7c9dSSunil Goutham 
10725a6d7c9dSSunil Goutham 	if (pf->mbox_wq) {
10735a6d7c9dSSunil Goutham 		destroy_workqueue(pf->mbox_wq);
10745a6d7c9dSSunil Goutham 		pf->mbox_wq = NULL;
10755a6d7c9dSSunil Goutham 	}
10765a6d7c9dSSunil Goutham 
10775a6d7c9dSSunil Goutham 	if (mbox->mbox.hwbase)
10785a6d7c9dSSunil Goutham 		iounmap((void __iomem *)mbox->mbox.hwbase);
10795a6d7c9dSSunil Goutham 
10805a6d7c9dSSunil Goutham 	otx2_mbox_destroy(&mbox->mbox);
10815a6d7c9dSSunil Goutham 	otx2_mbox_destroy(&mbox->mbox_up);
10825a6d7c9dSSunil Goutham }
10835a6d7c9dSSunil Goutham 
otx2_pfaf_mbox_init(struct otx2_nic * pf)10845a6d7c9dSSunil Goutham static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
10855a6d7c9dSSunil Goutham {
10865a6d7c9dSSunil Goutham 	struct mbox *mbox = &pf->mbox;
10875a6d7c9dSSunil Goutham 	void __iomem *hwbase;
10885a6d7c9dSSunil Goutham 	int err;
10895a6d7c9dSSunil Goutham 
10905a6d7c9dSSunil Goutham 	mbox->pfvf = pf;
1091289f9746STejun Heo 	pf->mbox_wq = alloc_ordered_workqueue("otx2_pfaf_mailbox",
1092289f9746STejun Heo 					      WQ_HIGHPRI | WQ_MEM_RECLAIM);
10935a6d7c9dSSunil Goutham 	if (!pf->mbox_wq)
10945a6d7c9dSSunil Goutham 		return -ENOMEM;
10955a6d7c9dSSunil Goutham 
10965a6d7c9dSSunil Goutham 	/* Mailbox is a reserved memory (in RAM) region shared between
10975a6d7c9dSSunil Goutham 	 * admin function (i.e AF) and this PF, shouldn't be mapped as
10985a6d7c9dSSunil Goutham 	 * device memory to allow unaligned accesses.
10995a6d7c9dSSunil Goutham 	 */
11005a6d7c9dSSunil Goutham 	hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM),
1101facede82SSubbaraya Sundeep 			    MBOX_SIZE);
11025a6d7c9dSSunil Goutham 	if (!hwbase) {
11035a6d7c9dSSunil Goutham 		dev_err(pf->dev, "Unable to map PFAF mailbox region\n");
11045a6d7c9dSSunil Goutham 		err = -ENOMEM;
11055a6d7c9dSSunil Goutham 		goto exit;
11065a6d7c9dSSunil Goutham 	}
11075a6d7c9dSSunil Goutham 
11085a6d7c9dSSunil Goutham 	err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
11095a6d7c9dSSunil Goutham 			     MBOX_DIR_PFAF, 1);
11105a6d7c9dSSunil Goutham 	if (err)
11115a6d7c9dSSunil Goutham 		goto exit;
11125a6d7c9dSSunil Goutham 
11135a6d7c9dSSunil Goutham 	err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
11145a6d7c9dSSunil Goutham 			     MBOX_DIR_PFAF_UP, 1);
11155a6d7c9dSSunil Goutham 	if (err)
11165a6d7c9dSSunil Goutham 		goto exit;
11175a6d7c9dSSunil Goutham 
11185a6d7c9dSSunil Goutham 	err = otx2_mbox_bbuf_init(mbox, pf->pdev);
11195a6d7c9dSSunil Goutham 	if (err)
11205a6d7c9dSSunil Goutham 		goto exit;
11215a6d7c9dSSunil Goutham 
11225a6d7c9dSSunil Goutham 	INIT_WORK(&mbox->mbox_wrk, otx2_pfaf_mbox_handler);
11235a6d7c9dSSunil Goutham 	INIT_WORK(&mbox->mbox_up_wrk, otx2_pfaf_mbox_up_handler);
11244c3212f5SSunil Goutham 	mutex_init(&mbox->lock);
11255a6d7c9dSSunil Goutham 
11265a6d7c9dSSunil Goutham 	return 0;
11275a6d7c9dSSunil Goutham exit:
11285a6d7c9dSSunil Goutham 	otx2_pfaf_mbox_destroy(pf);
11295a6d7c9dSSunil Goutham 	return err;
11305a6d7c9dSSunil Goutham }
11315a6d7c9dSSunil Goutham 
/* Ask the AF to start or stop delivering CGX link-change events to
 * this PF. Returns 0 on success, -ENOMEM if the mbox message cannot
 * be allocated, or the mbox error code.
 */
static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable)
{
	struct mbox *mbox = &pf->mbox;
	struct msg_req *msg;
	int rc = -ENOMEM;

	mutex_lock(&mbox->lock);

	msg = enable ? otx2_mbox_alloc_msg_cgx_start_linkevents(mbox) :
		       otx2_mbox_alloc_msg_cgx_stop_linkevents(mbox);
	if (msg)
		rc = otx2_sync_mbox_msg(mbox);

	mutex_unlock(&mbox->lock);
	return rc;
}
115250fe6c02SLinu Cherian 
/* Ask the AF to enable or disable CGX/RPM internal loopback. Warns
 * when loopback is enabled while DMAC filters are installed, since
 * filtering may interfere with looped-back traffic. Returns 0 on
 * success, -ENOMEM if the mbox message cannot be allocated, or the
 * mbox error code.
 */
static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
{
	struct mbox *mbox = &pf->mbox;
	struct msg_req *msg;
	int rc = -ENOMEM;

	if (enable && !bitmap_empty(pf->flow_cfg->dmacflt_bmap,
				    pf->flow_cfg->dmacflt_max_flows))
		netdev_warn(pf->netdev,
			    "CGX/RPM internal loopback might not work as DMAC filters are active\n");

	mutex_lock(&mbox->lock);

	msg = enable ? otx2_mbox_alloc_msg_cgx_intlbk_enable(mbox) :
		       otx2_mbox_alloc_msg_cgx_intlbk_disable(mbox);
	if (msg)
		rc = otx2_sync_mbox_msg(mbox);

	mutex_unlock(&mbox->lock);
	return rc;
}
117834bfe0ebSSunil Goutham 
/* Publish the actual number of Tx and Rx queues to the net stack.
 * Returns 0 on success or the first netif_set_real_num_*_queues()
 * error (logged).
 */
int otx2_set_real_num_queues(struct net_device *netdev,
			     int tx_queues, int rx_queues)
{
	int rc;

	rc = netif_set_real_num_tx_queues(netdev, tx_queues);
	if (rc) {
		netdev_err(netdev,
			   "Failed to set no of Tx queues: %d\n", tx_queues);
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, rx_queues);
	if (rc)
		netdev_err(netdev,
			   "Failed to set no of Rx queues: %d\n", rx_queues);
	return rc;
}
EXPORT_SYMBOL(otx2_set_real_num_queues);
119816547577SSunil Goutham 
119951afe902SRatheesh Kannoth static char *nix_sqoperr_e_str[NIX_SQOPERR_MAX] = {
120051afe902SRatheesh Kannoth 	"NIX_SQOPERR_OOR",
120151afe902SRatheesh Kannoth 	"NIX_SQOPERR_CTX_FAULT",
120251afe902SRatheesh Kannoth 	"NIX_SQOPERR_CTX_POISON",
120351afe902SRatheesh Kannoth 	"NIX_SQOPERR_DISABLED",
120451afe902SRatheesh Kannoth 	"NIX_SQOPERR_SIZE_ERR",
120551afe902SRatheesh Kannoth 	"NIX_SQOPERR_OFLOW",
120651afe902SRatheesh Kannoth 	"NIX_SQOPERR_SQB_NULL",
120751afe902SRatheesh Kannoth 	"NIX_SQOPERR_SQB_FAULT",
120851afe902SRatheesh Kannoth 	"NIX_SQOPERR_SQE_SZ_ZERO",
120951afe902SRatheesh Kannoth };
121051afe902SRatheesh Kannoth 
121151afe902SRatheesh Kannoth static char *nix_mnqerr_e_str[NIX_MNQERR_MAX] = {
121251afe902SRatheesh Kannoth 	"NIX_MNQERR_SQ_CTX_FAULT",
121351afe902SRatheesh Kannoth 	"NIX_MNQERR_SQ_CTX_POISON",
121451afe902SRatheesh Kannoth 	"NIX_MNQERR_SQB_FAULT",
121551afe902SRatheesh Kannoth 	"NIX_MNQERR_SQB_POISON",
121651afe902SRatheesh Kannoth 	"NIX_MNQERR_TOTAL_ERR",
121751afe902SRatheesh Kannoth 	"NIX_MNQERR_LSO_ERR",
121851afe902SRatheesh Kannoth 	"NIX_MNQERR_CQ_QUERY_ERR",
121951afe902SRatheesh Kannoth 	"NIX_MNQERR_MAX_SQE_SIZE_ERR",
122051afe902SRatheesh Kannoth 	"NIX_MNQERR_MAXLEN_ERR",
122151afe902SRatheesh Kannoth 	"NIX_MNQERR_SQE_SIZEM1_ZERO",
122251afe902SRatheesh Kannoth };
122351afe902SRatheesh Kannoth 
122451afe902SRatheesh Kannoth static char *nix_snd_status_e_str[NIX_SND_STATUS_MAX] =  {
1225d1835d26SRatheesh Kannoth 	[NIX_SND_STATUS_GOOD] = "NIX_SND_STATUS_GOOD",
1226d1835d26SRatheesh Kannoth 	[NIX_SND_STATUS_SQ_CTX_FAULT] = "NIX_SND_STATUS_SQ_CTX_FAULT",
1227d1835d26SRatheesh Kannoth 	[NIX_SND_STATUS_SQ_CTX_POISON] = "NIX_SND_STATUS_SQ_CTX_POISON",
1228d1835d26SRatheesh Kannoth 	[NIX_SND_STATUS_SQB_FAULT] = "NIX_SND_STATUS_SQB_FAULT",
1229d1835d26SRatheesh Kannoth 	[NIX_SND_STATUS_SQB_POISON] = "NIX_SND_STATUS_SQB_POISON",
1230d1835d26SRatheesh Kannoth 	[NIX_SND_STATUS_HDR_ERR] = "NIX_SND_STATUS_HDR_ERR",
1231d1835d26SRatheesh Kannoth 	[NIX_SND_STATUS_EXT_ERR] = "NIX_SND_STATUS_EXT_ERR",
1232d1835d26SRatheesh Kannoth 	[NIX_SND_STATUS_JUMP_FAULT] = "NIX_SND_STATUS_JUMP_FAULT",
1233d1835d26SRatheesh Kannoth 	[NIX_SND_STATUS_JUMP_POISON] = "NIX_SND_STATUS_JUMP_POISON",
1234d1835d26SRatheesh Kannoth 	[NIX_SND_STATUS_CRC_ERR] = "NIX_SND_STATUS_CRC_ERR",
1235d1835d26SRatheesh Kannoth 	[NIX_SND_STATUS_IMM_ERR] = "NIX_SND_STATUS_IMM_ERR",
1236d1835d26SRatheesh Kannoth 	[NIX_SND_STATUS_SG_ERR] = "NIX_SND_STATUS_SG_ERR",
1237d1835d26SRatheesh Kannoth 	[NIX_SND_STATUS_MEM_ERR] = "NIX_SND_STATUS_MEM_ERR",
1238d1835d26SRatheesh Kannoth 	[NIX_SND_STATUS_INVALID_SUBDC] = "NIX_SND_STATUS_INVALID_SUBDC",
1239d1835d26SRatheesh Kannoth 	[NIX_SND_STATUS_SUBDC_ORDER_ERR] = "NIX_SND_STATUS_SUBDC_ORDER_ERR",
1240d1835d26SRatheesh Kannoth 	[NIX_SND_STATUS_DATA_FAULT] = "NIX_SND_STATUS_DATA_FAULT",
1241d1835d26SRatheesh Kannoth 	[NIX_SND_STATUS_DATA_POISON] = "NIX_SND_STATUS_DATA_POISON",
1242d1835d26SRatheesh Kannoth 	[NIX_SND_STATUS_NPC_DROP_ACTION] = "NIX_SND_STATUS_NPC_DROP_ACTION",
1243d1835d26SRatheesh Kannoth 	[NIX_SND_STATUS_LOCK_VIOL] = "NIX_SND_STATUS_LOCK_VIOL",
1244d1835d26SRatheesh Kannoth 	[NIX_SND_STATUS_NPC_UCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_UCAST_CHAN_ERR",
1245d1835d26SRatheesh Kannoth 	[NIX_SND_STATUS_NPC_MCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_MCAST_CHAN_ERR",
1246d1835d26SRatheesh Kannoth 	[NIX_SND_STATUS_NPC_MCAST_ABORT] = "NIX_SND_STATUS_NPC_MCAST_ABORT",
1247d1835d26SRatheesh Kannoth 	[NIX_SND_STATUS_NPC_VTAG_PTR_ERR] = "NIX_SND_STATUS_NPC_VTAG_PTR_ERR",
1248d1835d26SRatheesh Kannoth 	[NIX_SND_STATUS_NPC_VTAG_SIZE_ERR] = "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR",
1249d1835d26SRatheesh Kannoth 	[NIX_SND_STATUS_SEND_MEM_FAULT] = "NIX_SND_STATUS_SEND_MEM_FAULT",
1250d1835d26SRatheesh Kannoth 	[NIX_SND_STATUS_SEND_STATS_ERR] = "NIX_SND_STATUS_SEND_STATS_ERR",
125151afe902SRatheesh Kannoth };
125251afe902SRatheesh Kannoth 
otx2_q_intr_handler(int irq,void * data)12534ff7d148SGeetha sowjanya static irqreturn_t otx2_q_intr_handler(int irq, void *data)
12544ff7d148SGeetha sowjanya {
12554ff7d148SGeetha sowjanya 	struct otx2_nic *pf = data;
1256ab6dddd2SSubbaraya Sundeep 	struct otx2_snd_queue *sq;
12574ff7d148SGeetha sowjanya 	u64 val, *ptr;
12584ff7d148SGeetha sowjanya 	u64 qidx = 0;
12594ff7d148SGeetha sowjanya 
12604ff7d148SGeetha sowjanya 	/* CQ */
12614ff7d148SGeetha sowjanya 	for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {
12624ff7d148SGeetha sowjanya 		ptr = otx2_get_regaddr(pf, NIX_LF_CQ_OP_INT);
12634ff7d148SGeetha sowjanya 		val = otx2_atomic64_add((qidx << 44), ptr);
12644ff7d148SGeetha sowjanya 
12654ff7d148SGeetha sowjanya 		otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) |
12664ff7d148SGeetha sowjanya 			     (val & NIX_CQERRINT_BITS));
12674ff7d148SGeetha sowjanya 		if (!(val & (NIX_CQERRINT_BITS | BIT_ULL(42))))
12684ff7d148SGeetha sowjanya 			continue;
12694ff7d148SGeetha sowjanya 
12704ff7d148SGeetha sowjanya 		if (val & BIT_ULL(42)) {
1271d1835d26SRatheesh Kannoth 			netdev_err(pf->netdev,
1272d1835d26SRatheesh Kannoth 				   "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
12734ff7d148SGeetha sowjanya 				   qidx, otx2_read64(pf, NIX_LF_ERR_INT));
12744ff7d148SGeetha sowjanya 		} else {
12754ff7d148SGeetha sowjanya 			if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
12764ff7d148SGeetha sowjanya 				netdev_err(pf->netdev, "CQ%lld: Doorbell error",
12774ff7d148SGeetha sowjanya 					   qidx);
12784ff7d148SGeetha sowjanya 			if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
1279d1835d26SRatheesh Kannoth 				netdev_err(pf->netdev,
1280d1835d26SRatheesh Kannoth 					   "CQ%lld: Memory fault on CQE write to LLC/DRAM",
12814ff7d148SGeetha sowjanya 					   qidx);
12824ff7d148SGeetha sowjanya 		}
12834ff7d148SGeetha sowjanya 
12844ff7d148SGeetha sowjanya 		schedule_work(&pf->reset_task);
12854ff7d148SGeetha sowjanya 	}
12864ff7d148SGeetha sowjanya 
12874ff7d148SGeetha sowjanya 	/* SQ */
1288ab6dddd2SSubbaraya Sundeep 	for (qidx = 0; qidx < otx2_get_total_tx_queues(pf); qidx++) {
128951afe902SRatheesh Kannoth 		u64 sq_op_err_dbg, mnq_err_dbg, snd_err_dbg;
129051afe902SRatheesh Kannoth 		u8 sq_op_err_code, mnq_err_code, snd_err_code;
129151afe902SRatheesh Kannoth 
1292ab6dddd2SSubbaraya Sundeep 		sq = &pf->qset.sq[qidx];
1293ab6dddd2SSubbaraya Sundeep 		if (!sq->sqb_ptrs)
1294ab6dddd2SSubbaraya Sundeep 			continue;
1295ab6dddd2SSubbaraya Sundeep 
129651afe902SRatheesh Kannoth 		/* Below debug registers captures first errors corresponding to
129751afe902SRatheesh Kannoth 		 * those registers. We don't have to check against SQ qid as
129851afe902SRatheesh Kannoth 		 * these are fatal errors.
129951afe902SRatheesh Kannoth 		 */
130051afe902SRatheesh Kannoth 
13014ff7d148SGeetha sowjanya 		ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
13024ff7d148SGeetha sowjanya 		val = otx2_atomic64_add((qidx << 44), ptr);
13034ff7d148SGeetha sowjanya 		otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
13044ff7d148SGeetha sowjanya 			     (val & NIX_SQINT_BITS));
13054ff7d148SGeetha sowjanya 
13064ff7d148SGeetha sowjanya 		if (val & BIT_ULL(42)) {
1307d1835d26SRatheesh Kannoth 			netdev_err(pf->netdev,
1308d1835d26SRatheesh Kannoth 				   "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
13094ff7d148SGeetha sowjanya 				   qidx, otx2_read64(pf, NIX_LF_ERR_INT));
131051afe902SRatheesh Kannoth 			goto done;
13114ff7d148SGeetha sowjanya 		}
131251afe902SRatheesh Kannoth 
131351afe902SRatheesh Kannoth 		sq_op_err_dbg = otx2_read64(pf, NIX_LF_SQ_OP_ERR_DBG);
131451afe902SRatheesh Kannoth 		if (!(sq_op_err_dbg & BIT(44)))
131551afe902SRatheesh Kannoth 			goto chk_mnq_err_dbg;
131651afe902SRatheesh Kannoth 
131751afe902SRatheesh Kannoth 		sq_op_err_code = FIELD_GET(GENMASK(7, 0), sq_op_err_dbg);
1318d1835d26SRatheesh Kannoth 		netdev_err(pf->netdev,
1319d1835d26SRatheesh Kannoth 			   "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(0x%llx)  err=%s(%#x)\n",
1320d1835d26SRatheesh Kannoth 			   qidx, sq_op_err_dbg,
1321d1835d26SRatheesh Kannoth 			   nix_sqoperr_e_str[sq_op_err_code],
1322d1835d26SRatheesh Kannoth 			   sq_op_err_code);
132351afe902SRatheesh Kannoth 
132451afe902SRatheesh Kannoth 		otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, BIT_ULL(44));
132551afe902SRatheesh Kannoth 
132651afe902SRatheesh Kannoth 		if (sq_op_err_code == NIX_SQOPERR_SQB_NULL)
132751afe902SRatheesh Kannoth 			goto chk_mnq_err_dbg;
132851afe902SRatheesh Kannoth 
132951afe902SRatheesh Kannoth 		/* Err is not NIX_SQOPERR_SQB_NULL, call aq function to read SQ structure.
133051afe902SRatheesh Kannoth 		 * TODO: But we are in irq context. How to call mbox functions which does sleep
133151afe902SRatheesh Kannoth 		 */
133251afe902SRatheesh Kannoth 
133351afe902SRatheesh Kannoth chk_mnq_err_dbg:
133451afe902SRatheesh Kannoth 		mnq_err_dbg = otx2_read64(pf, NIX_LF_MNQ_ERR_DBG);
133551afe902SRatheesh Kannoth 		if (!(mnq_err_dbg & BIT(44)))
133651afe902SRatheesh Kannoth 			goto chk_snd_err_dbg;
133751afe902SRatheesh Kannoth 
133851afe902SRatheesh Kannoth 		mnq_err_code = FIELD_GET(GENMASK(7, 0), mnq_err_dbg);
1339d1835d26SRatheesh Kannoth 		netdev_err(pf->netdev,
1340d1835d26SRatheesh Kannoth 			   "SQ%lld: NIX_LF_MNQ_ERR_DBG(0x%llx)  err=%s(%#x)\n",
1341d1835d26SRatheesh Kannoth 			   qidx, mnq_err_dbg,  nix_mnqerr_e_str[mnq_err_code],
1342d1835d26SRatheesh Kannoth 			   mnq_err_code);
134351afe902SRatheesh Kannoth 		otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, BIT_ULL(44));
134451afe902SRatheesh Kannoth 
134551afe902SRatheesh Kannoth chk_snd_err_dbg:
134651afe902SRatheesh Kannoth 		snd_err_dbg = otx2_read64(pf, NIX_LF_SEND_ERR_DBG);
134751afe902SRatheesh Kannoth 		if (snd_err_dbg & BIT(44)) {
134851afe902SRatheesh Kannoth 			snd_err_code = FIELD_GET(GENMASK(7, 0), snd_err_dbg);
1349d1835d26SRatheesh Kannoth 			netdev_err(pf->netdev,
1350d1835d26SRatheesh Kannoth 				   "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s(%#x)\n",
1351d1835d26SRatheesh Kannoth 				   qidx, snd_err_dbg,
1352d1835d26SRatheesh Kannoth 				   nix_snd_status_e_str[snd_err_code],
1353d1835d26SRatheesh Kannoth 				   snd_err_code);
135451afe902SRatheesh Kannoth 			otx2_write64(pf, NIX_LF_SEND_ERR_DBG, BIT_ULL(44));
13554ff7d148SGeetha sowjanya 		}
135651afe902SRatheesh Kannoth 
135751afe902SRatheesh Kannoth done:
135851afe902SRatheesh Kannoth 		/* Print values and reset */
13594ff7d148SGeetha sowjanya 		if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
13604ff7d148SGeetha sowjanya 			netdev_err(pf->netdev, "SQ%lld: SQB allocation failed",
13614ff7d148SGeetha sowjanya 				   qidx);
13624ff7d148SGeetha sowjanya 
13634ff7d148SGeetha sowjanya 		schedule_work(&pf->reset_task);
13644ff7d148SGeetha sowjanya 	}
13654ff7d148SGeetha sowjanya 
13664ff7d148SGeetha sowjanya 	return IRQ_HANDLED;
13674ff7d148SGeetha sowjanya }
13684ff7d148SGeetha sowjanya 
otx2_cq_intr_handler(int irq,void * cq_irq)136904a21ef3SSunil Goutham static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
137004a21ef3SSunil Goutham {
137104a21ef3SSunil Goutham 	struct otx2_cq_poll *cq_poll = (struct otx2_cq_poll *)cq_irq;
137204a21ef3SSunil Goutham 	struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev;
137304a21ef3SSunil Goutham 	int qidx = cq_poll->cint_idx;
137404a21ef3SSunil Goutham 
137504a21ef3SSunil Goutham 	/* Disable interrupts.
137604a21ef3SSunil Goutham 	 *
137704a21ef3SSunil Goutham 	 * Completion interrupts behave in a level-triggered interrupt
137804a21ef3SSunil Goutham 	 * fashion, and hence have to be cleared only after it is serviced.
137904a21ef3SSunil Goutham 	 */
138004a21ef3SSunil Goutham 	otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
138104a21ef3SSunil Goutham 
138204a21ef3SSunil Goutham 	/* Schedule NAPI */
13836e144b47SSuman Ghosh 	pf->napi_events++;
138404a21ef3SSunil Goutham 	napi_schedule_irqoff(&cq_poll->napi);
138504a21ef3SSunil Goutham 
138604a21ef3SSunil Goutham 	return IRQ_HANDLED;
138704a21ef3SSunil Goutham }
138804a21ef3SSunil Goutham 
otx2_disable_napi(struct otx2_nic * pf)138904a21ef3SSunil Goutham static void otx2_disable_napi(struct otx2_nic *pf)
139004a21ef3SSunil Goutham {
139104a21ef3SSunil Goutham 	struct otx2_qset *qset = &pf->qset;
139204a21ef3SSunil Goutham 	struct otx2_cq_poll *cq_poll;
139304a21ef3SSunil Goutham 	int qidx;
139404a21ef3SSunil Goutham 
139504a21ef3SSunil Goutham 	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
139604a21ef3SSunil Goutham 		cq_poll = &qset->napi[qidx];
13976e144b47SSuman Ghosh 		cancel_work_sync(&cq_poll->dim.work);
139804a21ef3SSunil Goutham 		napi_disable(&cq_poll->napi);
139904a21ef3SSunil Goutham 		netif_napi_del(&cq_poll->napi);
140004a21ef3SSunil Goutham 	}
140104a21ef3SSunil Goutham }
140204a21ef3SSunil Goutham 
otx2_free_cq_res(struct otx2_nic * pf)1403caa2da34SSunil Goutham static void otx2_free_cq_res(struct otx2_nic *pf)
1404caa2da34SSunil Goutham {
1405caa2da34SSunil Goutham 	struct otx2_qset *qset = &pf->qset;
1406caa2da34SSunil Goutham 	struct otx2_cq_queue *cq;
1407caa2da34SSunil Goutham 	int qidx;
1408caa2da34SSunil Goutham 
1409caa2da34SSunil Goutham 	/* Disable CQs */
1410caa2da34SSunil Goutham 	otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_CQ, false);
1411caa2da34SSunil Goutham 	for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
1412caa2da34SSunil Goutham 		cq = &qset->cq[qidx];
1413caa2da34SSunil Goutham 		qmem_free(pf->dev, cq->cqe);
1414caa2da34SSunil Goutham 	}
1415caa2da34SSunil Goutham }
1416caa2da34SSunil Goutham 
otx2_free_sq_res(struct otx2_nic * pf)1417caa2da34SSunil Goutham static void otx2_free_sq_res(struct otx2_nic *pf)
1418caa2da34SSunil Goutham {
1419caa2da34SSunil Goutham 	struct otx2_qset *qset = &pf->qset;
1420caa2da34SSunil Goutham 	struct otx2_snd_queue *sq;
1421caa2da34SSunil Goutham 	int qidx;
1422caa2da34SSunil Goutham 
1423caa2da34SSunil Goutham 	/* Disable SQs */
1424caa2da34SSunil Goutham 	otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
1425caa2da34SSunil Goutham 	/* Free SQB pointers */
1426caa2da34SSunil Goutham 	otx2_sq_free_sqbs(pf);
1427ab6dddd2SSubbaraya Sundeep 	for (qidx = 0; qidx < otx2_get_total_tx_queues(pf); qidx++) {
1428caa2da34SSunil Goutham 		sq = &qset->sq[qidx];
14295e6808b4SNaveen Mamindlapalli 		/* Skip freeing Qos queues if they are not initialized */
14305e6808b4SNaveen Mamindlapalli 		if (!sq->sqe)
14315e6808b4SNaveen Mamindlapalli 			continue;
1432caa2da34SSunil Goutham 		qmem_free(pf->dev, sq->sqe);
143386d74760SSunil Goutham 		qmem_free(pf->dev, sq->tso_hdrs);
14343ca6c4c8SSunil Goutham 		kfree(sq->sg);
1435caa2da34SSunil Goutham 		kfree(sq->sqb_ptrs);
1436caa2da34SSunil Goutham 	}
1437caa2da34SSunil Goutham }
1438caa2da34SSunil Goutham 
otx2_get_rbuf_size(struct otx2_nic * pf,int mtu)1439ab58a416SHariprasad Kelam static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
1440ab58a416SHariprasad Kelam {
1441ab58a416SHariprasad Kelam 	int frame_size;
1442ab58a416SHariprasad Kelam 	int total_size;
1443ab58a416SHariprasad Kelam 	int rbuf_size;
1444ab58a416SHariprasad Kelam 
1445a989eb66SSubbaraya Sundeep 	if (pf->hw.rbuf_len)
1446a989eb66SSubbaraya Sundeep 		return ALIGN(pf->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM;
1447a989eb66SSubbaraya Sundeep 
1448ab58a416SHariprasad Kelam 	/* The data transferred by NIX to memory consists of actual packet
1449ab58a416SHariprasad Kelam 	 * plus additional data which has timestamp and/or EDSA/HIGIG2
1450ab58a416SHariprasad Kelam 	 * headers if interface is configured in corresponding modes.
1451ab58a416SHariprasad Kelam 	 * NIX transfers entire data using 6 segments/buffers and writes
1452ab58a416SHariprasad Kelam 	 * a CQE_RX descriptor with those segment addresses. First segment
1453ab58a416SHariprasad Kelam 	 * has additional data prepended to packet. Also software omits a
14540182d078SSubbaraya Sundeep 	 * headroom of 128 bytes in each segment. Hence the total size of
14550182d078SSubbaraya Sundeep 	 * memory needed to receive a packet with 'mtu' is:
1456ab58a416SHariprasad Kelam 	 * frame size =  mtu + additional data;
14570182d078SSubbaraya Sundeep 	 * memory = frame_size + headroom * 6;
1458ab58a416SHariprasad Kelam 	 * each receive buffer size = memory / 6;
1459ab58a416SHariprasad Kelam 	 */
1460ab58a416SHariprasad Kelam 	frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
14610182d078SSubbaraya Sundeep 	total_size = frame_size + OTX2_HEAD_ROOM * 6;
1462ab58a416SHariprasad Kelam 	rbuf_size = total_size / 6;
1463ab58a416SHariprasad Kelam 
1464ab58a416SHariprasad Kelam 	return ALIGN(rbuf_size, 2048);
1465ab58a416SHariprasad Kelam }
1466ab58a416SHariprasad Kelam 
/* Allocate and configure all NPA/NIX hardware resources needed to
 * bring the interface up: NPA auras/pools, the NIX LF, RQ/SQ buffer
 * pools, the transmit scheduler hierarchy and the NIX queues.
 *
 * On any failure, everything allocated so far is torn down in reverse
 * order through the goto chain below.  Returns 0 on success or a
 * negative errno.
 */
static int otx2_init_hw_resources(struct otx2_nic *pf)
{
	struct nix_lf_free_req *free_req;
	struct mbox *mbox = &pf->mbox;
	struct otx2_hw *hw = &pf->hw;
	struct msg_req *req;
	int err = 0, lvl;

	/* Set required NPA LF's pool counts
	 * Auras and Pools are used in a 1:1 mapping,
	 * so, aura count = pool count.
	 */
	hw->rqpool_cnt = hw->rx_queues;
	hw->sqpool_cnt = otx2_get_total_tx_queues(pf);
	hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;

	/* Maximum hardware supported transmit length */
	pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN;

	pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);

	mutex_lock(&mbox->lock);
	/* NPA init */
	err = otx2_config_npa(pf);
	if (err)
		goto exit;

	/* NIX init */
	err = otx2_config_nix(pf);
	if (err)
		goto err_free_npa_lf;

	/* Enable backpressure for CGX mapped PF/VFs */
	if (!is_otx2_lbkvf(pf->pdev))
		otx2_nix_config_bp(pf, true);

	/* Init Auras and pools used by NIX RQ, for free buffer ptrs */
	err = otx2_rq_aura_pool_init(pf);
	if (err) {
		/* Error labels below re-take the mbox lock themselves,
		 * so drop it before jumping there.
		 */
		mutex_unlock(&mbox->lock);
		goto err_free_nix_lf;
	}
	/* Init Auras and pools used by NIX SQ, for queueing SQEs */
	err = otx2_sq_aura_pool_init(pf);
	if (err) {
		mutex_unlock(&mbox->lock);
		goto err_free_rq_ptrs;
	}

	err = otx2_txsch_alloc(pf);
	if (err) {
		mutex_unlock(&mbox->lock);
		goto err_free_sq_ptrs;
	}

#ifdef CONFIG_DCB
	/* Allocate per-priority scheduler queues when PFC is enabled */
	if (pf->pfc_en) {
		err = otx2_pfc_txschq_alloc(pf);
		if (err) {
			mutex_unlock(&mbox->lock);
			goto err_free_sq_ptrs;
		}
	}
#endif

	err = otx2_config_nix_queues(pf);
	if (err) {
		mutex_unlock(&mbox->lock);
		goto err_free_txsch;
	}

	/* Program every level of the Tx scheduler hierarchy */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		err = otx2_txschq_config(pf, lvl, 0, false);
		if (err) {
			mutex_unlock(&mbox->lock);
			goto err_free_nix_queues;
		}
	}

#ifdef CONFIG_DCB
	if (pf->pfc_en) {
		err = otx2_pfc_txschq_config(pf);
		if (err) {
			mutex_unlock(&mbox->lock);
			goto err_free_nix_queues;
		}
	}
#endif

	mutex_unlock(&mbox->lock);
	return err;

err_free_nix_queues:
	otx2_free_sq_res(pf);
	otx2_free_cq_res(pf);
	otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
err_free_txsch:
	otx2_txschq_stop(pf);
err_free_sq_ptrs:
	otx2_sq_free_sqbs(pf);
err_free_rq_ptrs:
	otx2_free_aura_ptr(pf, AURA_NIX_RQ);
	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
	otx2_aura_pool_free(pf);
err_free_nix_lf:
	mutex_lock(&mbox->lock);
	free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
	if (free_req) {
		free_req->flags = NIX_LF_DISABLE_FLOWS;
		if (otx2_sync_mbox_msg(mbox))
			dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
	}
err_free_npa_lf:
	/* Reset NPA LF */
	req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
	if (req) {
		if (otx2_sync_mbox_msg(mbox))
			dev_err(pf->dev, "%s failed to free npalf\n", __func__);
	}
exit:
	mutex_unlock(&mbox->lock);
	return err;
}
1591caa2da34SSunil Goutham 
/* Release all NPA/NIX hardware resources acquired by
 * otx2_init_hw_resources(): flush and stop transmission, drain all
 * CQs, free queue memory and buffer pools, and finally detach the NIX
 * and NPA LFs via mbox.  Teardown order mirrors init in reverse.
 */
static void otx2_free_hw_resources(struct otx2_nic *pf)
{
	struct otx2_qset *qset = &pf->qset;
	struct nix_lf_free_req *free_req;
	struct mbox *mbox = &pf->mbox;
	struct otx2_cq_queue *cq;
	struct otx2_pool *pool;
	struct msg_req *req;
	int pool_id;
	int qidx;

	/* Ensure all SQE are processed */
	otx2_sqb_flush(pf);

	/* Stop transmission */
	otx2_txschq_stop(pf);

#ifdef CONFIG_DCB
	if (pf->pfc_en)
		otx2_pfc_txschq_stop(pf);
#endif

	otx2_clean_qos_queues(pf);

	mutex_lock(&mbox->lock);
	/* Disable backpressure */
	if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK))
		otx2_nix_config_bp(pf, false);
	mutex_unlock(&mbox->lock);

	/* Disable RQs */
	otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);

	/* Dequeue all CQEs */
	for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
		cq = &qset->cq[qidx];
		if (cq->cq_type == CQ_RX)
			otx2_cleanup_rx_cqes(pf, cq, qidx);
		else
			otx2_cleanup_tx_cqes(pf, cq);
	}
	otx2_free_pending_sqe(pf);

	otx2_free_sq_res(pf);

	/* Free RQ buffer pointers*/
	otx2_free_aura_ptr(pf, AURA_NIX_RQ);

	/* Destroy the page pools backing the Rx queues */
	for (qidx = 0; qidx < pf->hw.rx_queues; qidx++) {
		pool_id = otx2_get_pool_idx(pf, AURA_NIX_RQ, qidx);
		pool = &pf->qset.pool[pool_id];
		page_pool_destroy(pool->page_pool);
		pool->page_pool = NULL;
	}

	otx2_free_cq_res(pf);

	/* Free all ingress bandwidth profiles allocated */
	cn10k_free_all_ipolicers(pf);

	mutex_lock(&mbox->lock);
	/* Reset NIX LF */
	free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
	if (free_req) {
		free_req->flags = NIX_LF_DISABLE_FLOWS;
		/* Keep Tx VTAG entries across interface down/up unless
		 * the whole PF is being shut down.
		 */
		if (!(pf->flags & OTX2_FLAG_PF_SHUTDOWN))
			free_req->flags |= NIX_LF_DONT_FREE_TX_VTAG;
		if (otx2_sync_mbox_msg(mbox))
			dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
	}
	mutex_unlock(&mbox->lock);

	/* Disable NPA Pool and Aura hw context */
	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
	otx2_aura_pool_free(pf);

	mutex_lock(&mbox->lock);
	/* Reset NPA LF */
	req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
	if (req) {
		if (otx2_sync_mbox_msg(mbox))
			dev_err(pf->dev, "%s failed to free npalf\n", __func__);
	}
	mutex_unlock(&mbox->lock);
}
1678caa2da34SSunil Goutham 
otx2_promisc_use_mce_list(struct otx2_nic * pfvf)16795295d2adSHariprasad Kelam static bool otx2_promisc_use_mce_list(struct otx2_nic *pfvf)
16805295d2adSHariprasad Kelam {
16815295d2adSHariprasad Kelam 	int vf;
16825295d2adSHariprasad Kelam 
16835295d2adSHariprasad Kelam 	/* The AF driver will determine whether to allow the VF netdev or not */
16845295d2adSHariprasad Kelam 	if (is_otx2_vf(pfvf->pcifunc))
16855295d2adSHariprasad Kelam 		return true;
16865295d2adSHariprasad Kelam 
16875295d2adSHariprasad Kelam 	/* check if there are any trusted VFs associated with the PF netdev */
16885295d2adSHariprasad Kelam 	for (vf = 0; vf < pci_num_vf(pfvf->pdev); vf++)
16895295d2adSHariprasad Kelam 		if (pfvf->vf_configs[vf].trusted)
16905295d2adSHariprasad Kelam 			return true;
16915295d2adSHariprasad Kelam 	return false;
16925295d2adSHariprasad Kelam }
16935295d2adSHariprasad Kelam 
otx2_do_set_rx_mode(struct otx2_nic * pf)1694ffd2f89aSRakesh Babu static void otx2_do_set_rx_mode(struct otx2_nic *pf)
1695ffd2f89aSRakesh Babu {
1696ffd2f89aSRakesh Babu 	struct net_device *netdev = pf->netdev;
1697ffd2f89aSRakesh Babu 	struct nix_rx_mode *req;
1698ffd2f89aSRakesh Babu 	bool promisc = false;
1699ffd2f89aSRakesh Babu 
1700ffd2f89aSRakesh Babu 	if (!(netdev->flags & IFF_UP))
1701ffd2f89aSRakesh Babu 		return;
1702ffd2f89aSRakesh Babu 
1703ffd2f89aSRakesh Babu 	if ((netdev->flags & IFF_PROMISC) ||
1704ffd2f89aSRakesh Babu 	    (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) {
1705ffd2f89aSRakesh Babu 		promisc = true;
1706ffd2f89aSRakesh Babu 	}
1707ffd2f89aSRakesh Babu 
1708ffd2f89aSRakesh Babu 	/* Write unicast address to mcam entries or del from mcam */
1709ffd2f89aSRakesh Babu 	if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT)
1710ffd2f89aSRakesh Babu 		__dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter);
1711ffd2f89aSRakesh Babu 
1712ffd2f89aSRakesh Babu 	mutex_lock(&pf->mbox.lock);
1713ffd2f89aSRakesh Babu 	req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
1714ffd2f89aSRakesh Babu 	if (!req) {
1715ffd2f89aSRakesh Babu 		mutex_unlock(&pf->mbox.lock);
1716ffd2f89aSRakesh Babu 		return;
1717ffd2f89aSRakesh Babu 	}
1718ffd2f89aSRakesh Babu 
1719ffd2f89aSRakesh Babu 	req->mode = NIX_RX_MODE_UCAST;
1720ffd2f89aSRakesh Babu 
1721ffd2f89aSRakesh Babu 	if (promisc)
1722ffd2f89aSRakesh Babu 		req->mode |= NIX_RX_MODE_PROMISC;
1723ffd2f89aSRakesh Babu 	if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
1724ffd2f89aSRakesh Babu 		req->mode |= NIX_RX_MODE_ALLMULTI;
1725ffd2f89aSRakesh Babu 
17265295d2adSHariprasad Kelam 	if (otx2_promisc_use_mce_list(pf))
1727ffd2f89aSRakesh Babu 		req->mode |= NIX_RX_MODE_USE_MCE;
1728ffd2f89aSRakesh Babu 
1729ffd2f89aSRakesh Babu 	otx2_sync_mbox_msg(&pf->mbox);
1730ffd2f89aSRakesh Babu 	mutex_unlock(&pf->mbox.lock);
1731ffd2f89aSRakesh Babu }
1732ffd2f89aSRakesh Babu 
otx2_set_irq_coalesce(struct otx2_nic * pfvf)17339cc9fbe5SNaveen Mamindlapalli static void otx2_set_irq_coalesce(struct otx2_nic *pfvf)
17349cc9fbe5SNaveen Mamindlapalli {
17359cc9fbe5SNaveen Mamindlapalli 	int cint;
17369cc9fbe5SNaveen Mamindlapalli 
17379cc9fbe5SNaveen Mamindlapalli 	for (cint = 0; cint < pfvf->hw.cint_cnt; cint++)
17389cc9fbe5SNaveen Mamindlapalli 		otx2_config_irq_coalescing(pfvf, cint);
17399cc9fbe5SNaveen Mamindlapalli }
17409cc9fbe5SNaveen Mamindlapalli 
otx2_dim_work(struct work_struct * w)17416e144b47SSuman Ghosh static void otx2_dim_work(struct work_struct *w)
17426e144b47SSuman Ghosh {
17436e144b47SSuman Ghosh 	struct dim_cq_moder cur_moder;
17446e144b47SSuman Ghosh 	struct otx2_cq_poll *cq_poll;
17456e144b47SSuman Ghosh 	struct otx2_nic *pfvf;
17466e144b47SSuman Ghosh 	struct dim *dim;
17476e144b47SSuman Ghosh 
17486e144b47SSuman Ghosh 	dim = container_of(w, struct dim, work);
17496e144b47SSuman Ghosh 	cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
17506e144b47SSuman Ghosh 	cq_poll = container_of(dim, struct otx2_cq_poll, dim);
17516e144b47SSuman Ghosh 	pfvf = (struct otx2_nic *)cq_poll->dev;
17526e144b47SSuman Ghosh 	pfvf->hw.cq_time_wait = (cur_moder.usec > CQ_TIMER_THRESH_MAX) ?
17536e144b47SSuman Ghosh 		CQ_TIMER_THRESH_MAX : cur_moder.usec;
17546e144b47SSuman Ghosh 	pfvf->hw.cq_ecount_wait = (cur_moder.pkts > NAPI_POLL_WEIGHT) ?
17556e144b47SSuman Ghosh 		NAPI_POLL_WEIGHT : cur_moder.pkts;
17569cc9fbe5SNaveen Mamindlapalli 	otx2_set_irq_coalesce(pfvf);
17576e144b47SSuman Ghosh 	dim->state = DIM_START_MEASURE;
17586e144b47SSuman Ghosh }
17596e144b47SSuman Ghosh 
otx2_open(struct net_device * netdev)176034bfe0ebSSunil Goutham int otx2_open(struct net_device *netdev)
176116547577SSunil Goutham {
176205fcc9e0SSunil Goutham 	struct otx2_nic *pf = netdev_priv(netdev);
176304a21ef3SSunil Goutham 	struct otx2_cq_poll *cq_poll = NULL;
1764caa2da34SSunil Goutham 	struct otx2_qset *qset = &pf->qset;
176504a21ef3SSunil Goutham 	int err = 0, qidx, vec;
176604a21ef3SSunil Goutham 	char *irq_name;
176705fcc9e0SSunil Goutham 
176816547577SSunil Goutham 	netif_carrier_off(netdev);
176916547577SSunil Goutham 
177004a21ef3SSunil Goutham 	/* RQ and SQs are mapped to different CQs,
177104a21ef3SSunil Goutham 	 * so find out max CQ IRQs (i.e CINTs) needed.
177204a21ef3SSunil Goutham 	 */
1773e7a36b56SGeetha sowjanya 	pf->hw.non_qos_queues =  pf->hw.tx_queues + pf->hw.xdp_queues;
1774ab6dddd2SSubbaraya Sundeep 	pf->hw.cint_cnt = max3(pf->hw.rx_queues, pf->hw.tx_queues,
1775ab6dddd2SSubbaraya Sundeep 			       pf->hw.tc_tx_queues);
1776ab6dddd2SSubbaraya Sundeep 
1777ab6dddd2SSubbaraya Sundeep 	pf->qset.cq_cnt = pf->hw.rx_queues + otx2_get_total_tx_queues(pf);
1778ab6dddd2SSubbaraya Sundeep 
177904a21ef3SSunil Goutham 	qset->napi = kcalloc(pf->hw.cint_cnt, sizeof(*cq_poll), GFP_KERNEL);
178004a21ef3SSunil Goutham 	if (!qset->napi)
178104a21ef3SSunil Goutham 		return -ENOMEM;
178205fcc9e0SSunil Goutham 
1783caa2da34SSunil Goutham 	/* CQ size of RQ */
1784caa2da34SSunil Goutham 	qset->rqe_cnt = qset->rqe_cnt ? qset->rqe_cnt : Q_COUNT(Q_SIZE_256);
1785caa2da34SSunil Goutham 	/* CQ size of SQ */
1786caa2da34SSunil Goutham 	qset->sqe_cnt = qset->sqe_cnt ? qset->sqe_cnt : Q_COUNT(Q_SIZE_4K);
178705fcc9e0SSunil Goutham 
1788caa2da34SSunil Goutham 	err = -ENOMEM;
1789caa2da34SSunil Goutham 	qset->cq = kcalloc(pf->qset.cq_cnt,
1790caa2da34SSunil Goutham 			   sizeof(struct otx2_cq_queue), GFP_KERNEL);
1791caa2da34SSunil Goutham 	if (!qset->cq)
1792caa2da34SSunil Goutham 		goto err_free_mem;
1793caa2da34SSunil Goutham 
17945e6808b4SNaveen Mamindlapalli 	qset->sq = kcalloc(otx2_get_total_tx_queues(pf),
1795caa2da34SSunil Goutham 			   sizeof(struct otx2_snd_queue), GFP_KERNEL);
1796caa2da34SSunil Goutham 	if (!qset->sq)
1797caa2da34SSunil Goutham 		goto err_free_mem;
1798caa2da34SSunil Goutham 
1799d45d8979SChristina Jacob 	qset->rq = kcalloc(pf->hw.rx_queues,
1800d45d8979SChristina Jacob 			   sizeof(struct otx2_rcv_queue), GFP_KERNEL);
1801d45d8979SChristina Jacob 	if (!qset->rq)
1802d45d8979SChristina Jacob 		goto err_free_mem;
1803d45d8979SChristina Jacob 
1804caa2da34SSunil Goutham 	err = otx2_init_hw_resources(pf);
1805caa2da34SSunil Goutham 	if (err)
1806caa2da34SSunil Goutham 		goto err_free_mem;
1807caa2da34SSunil Goutham 
180804a21ef3SSunil Goutham 	/* Register NAPI handler */
180904a21ef3SSunil Goutham 	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
181004a21ef3SSunil Goutham 		cq_poll = &qset->napi[qidx];
181104a21ef3SSunil Goutham 		cq_poll->cint_idx = qidx;
181204a21ef3SSunil Goutham 		/* RQ0 & SQ0 are mapped to CINT0 and so on..
181304a21ef3SSunil Goutham 		 * 'cq_ids[0]' points to RQ's CQ and
181404a21ef3SSunil Goutham 		 * 'cq_ids[1]' points to SQ's CQ and
181506059a1aSGeetha sowjanya 		 * 'cq_ids[2]' points to XDP's CQ and
181604a21ef3SSunil Goutham 		 */
181704a21ef3SSunil Goutham 		cq_poll->cq_ids[CQ_RX] =
181804a21ef3SSunil Goutham 			(qidx <  pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
181904a21ef3SSunil Goutham 		cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
182004a21ef3SSunil Goutham 				      qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
182106059a1aSGeetha sowjanya 		if (pf->xdp_prog)
182206059a1aSGeetha sowjanya 			cq_poll->cq_ids[CQ_XDP] = (qidx < pf->hw.xdp_queues) ?
182306059a1aSGeetha sowjanya 						  (qidx + pf->hw.rx_queues +
182406059a1aSGeetha sowjanya 						  pf->hw.tx_queues) :
182506059a1aSGeetha sowjanya 						  CINT_INVALID_CQ;
182606059a1aSGeetha sowjanya 		else
182706059a1aSGeetha sowjanya 			cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;
182806059a1aSGeetha sowjanya 
1829ab6dddd2SSubbaraya Sundeep 		cq_poll->cq_ids[CQ_QOS] = (qidx < pf->hw.tc_tx_queues) ?
1830ab6dddd2SSubbaraya Sundeep 					  (qidx + pf->hw.rx_queues +
1831ab6dddd2SSubbaraya Sundeep 					   pf->hw.non_qos_queues) :
1832ab6dddd2SSubbaraya Sundeep 					  CINT_INVALID_CQ;
1833ab6dddd2SSubbaraya Sundeep 
183404a21ef3SSunil Goutham 		cq_poll->dev = (void *)pf;
18356e144b47SSuman Ghosh 		cq_poll->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
18366e144b47SSuman Ghosh 		INIT_WORK(&cq_poll->dim.work, otx2_dim_work);
1837b48b89f9SJakub Kicinski 		netif_napi_add(netdev, &cq_poll->napi, otx2_napi_handler);
183804a21ef3SSunil Goutham 		napi_enable(&cq_poll->napi);
183904a21ef3SSunil Goutham 	}
184004a21ef3SSunil Goutham 
184134bfe0ebSSunil Goutham 	/* Set maximum frame size allowed in HW */
184234bfe0ebSSunil Goutham 	err = otx2_hw_set_mtu(pf, netdev->mtu);
184334bfe0ebSSunil Goutham 	if (err)
184434bfe0ebSSunil Goutham 		goto err_disable_napi;
184534bfe0ebSSunil Goutham 
1846dc1a9bf2SSunil Goutham 	/* Setup segmentation algorithms, if failed, clear offload capability */
1847dc1a9bf2SSunil Goutham 	otx2_setup_segmentation(pf);
1848dc1a9bf2SSunil Goutham 
184985069e95SSunil Goutham 	/* Initialize RSS */
185085069e95SSunil Goutham 	err = otx2_rss_init(pf);
185185069e95SSunil Goutham 	if (err)
185285069e95SSunil Goutham 		goto err_disable_napi;
185385069e95SSunil Goutham 
18544ff7d148SGeetha sowjanya 	/* Register Queue IRQ handlers */
18554ff7d148SGeetha sowjanya 	vec = pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START;
18564ff7d148SGeetha sowjanya 	irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
18574ff7d148SGeetha sowjanya 
18584ff7d148SGeetha sowjanya 	snprintf(irq_name, NAME_SIZE, "%s-qerr", pf->netdev->name);
18594ff7d148SGeetha sowjanya 
18604ff7d148SGeetha sowjanya 	err = request_irq(pci_irq_vector(pf->pdev, vec),
18614ff7d148SGeetha sowjanya 			  otx2_q_intr_handler, 0, irq_name, pf);
18624ff7d148SGeetha sowjanya 	if (err) {
18634ff7d148SGeetha sowjanya 		dev_err(pf->dev,
18644ff7d148SGeetha sowjanya 			"RVUPF%d: IRQ registration failed for QERR\n",
18654ff7d148SGeetha sowjanya 			rvu_get_pf(pf->pcifunc));
18664ff7d148SGeetha sowjanya 		goto err_disable_napi;
18674ff7d148SGeetha sowjanya 	}
18684ff7d148SGeetha sowjanya 
18694ff7d148SGeetha sowjanya 	/* Enable QINT IRQ */
18704ff7d148SGeetha sowjanya 	otx2_write64(pf, NIX_LF_QINTX_ENA_W1S(0), BIT_ULL(0));
18714ff7d148SGeetha sowjanya 
187204a21ef3SSunil Goutham 	/* Register CQ IRQ handlers */
187304a21ef3SSunil Goutham 	vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
187404a21ef3SSunil Goutham 	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
187504a21ef3SSunil Goutham 		irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
187604a21ef3SSunil Goutham 
187704a21ef3SSunil Goutham 		snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", pf->netdev->name,
187804a21ef3SSunil Goutham 			 qidx);
187904a21ef3SSunil Goutham 
188004a21ef3SSunil Goutham 		err = request_irq(pci_irq_vector(pf->pdev, vec),
188104a21ef3SSunil Goutham 				  otx2_cq_intr_handler, 0, irq_name,
188204a21ef3SSunil Goutham 				  &qset->napi[qidx]);
188304a21ef3SSunil Goutham 		if (err) {
188404a21ef3SSunil Goutham 			dev_err(pf->dev,
188504a21ef3SSunil Goutham 				"RVUPF%d: IRQ registration failed for CQ%d\n",
188604a21ef3SSunil Goutham 				rvu_get_pf(pf->pcifunc), qidx);
188704a21ef3SSunil Goutham 			goto err_free_cints;
188804a21ef3SSunil Goutham 		}
188904a21ef3SSunil Goutham 		vec++;
189004a21ef3SSunil Goutham 
189104a21ef3SSunil Goutham 		otx2_config_irq_coalescing(pf, qidx);
189204a21ef3SSunil Goutham 
189304a21ef3SSunil Goutham 		/* Enable CQ IRQ */
189404a21ef3SSunil Goutham 		otx2_write64(pf, NIX_LF_CINTX_INT(qidx), BIT_ULL(0));
189504a21ef3SSunil Goutham 		otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));
189604a21ef3SSunil Goutham 	}
189704a21ef3SSunil Goutham 
189804a21ef3SSunil Goutham 	otx2_set_cints_affinity(pf);
189904a21ef3SSunil Goutham 
1900fd9d7859SHariprasad Kelam 	if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
1901fd9d7859SHariprasad Kelam 		otx2_enable_rxvlan(pf, true);
1902fd9d7859SHariprasad Kelam 
1903c9c12d33SAleksey Makarov 	/* When reinitializing enable time stamping if it is enabled before */
1904c9c12d33SAleksey Makarov 	if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) {
1905c9c12d33SAleksey Makarov 		pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
1906c9c12d33SAleksey Makarov 		otx2_config_hw_tx_tstamp(pf, true);
1907c9c12d33SAleksey Makarov 	}
1908c9c12d33SAleksey Makarov 	if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) {
1909c9c12d33SAleksey Makarov 		pf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
1910c9c12d33SAleksey Makarov 		otx2_config_hw_rx_tstamp(pf, true);
1911c9c12d33SAleksey Makarov 	}
1912c9c12d33SAleksey Makarov 
191350fe6c02SLinu Cherian 	pf->flags &= ~OTX2_FLAG_INTF_DOWN;
191450fe6c02SLinu Cherian 	/* 'intf_down' may be checked on any cpu */
191550fe6c02SLinu Cherian 	smp_wmb();
191650fe6c02SLinu Cherian 
19175e6808b4SNaveen Mamindlapalli 	/* Enable QoS configuration before starting tx queues */
19185e6808b4SNaveen Mamindlapalli 	otx2_qos_config_txschq(pf);
19195e6808b4SNaveen Mamindlapalli 
192050fe6c02SLinu Cherian 	/* we have already received link status notification */
192150fe6c02SLinu Cherian 	if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
192250fe6c02SLinu Cherian 		otx2_handle_link_event(pf);
192350fe6c02SLinu Cherian 
192479d2be38SHariprasad Kelam 	/* Install DMAC Filters */
192579d2be38SHariprasad Kelam 	if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
192679d2be38SHariprasad Kelam 		otx2_dmacflt_reinstall_flows(pf);
192779d2be38SHariprasad Kelam 
1928d3290f7eSSubbaraya Sundeep 	otx2_tc_apply_ingress_police_rules(pf);
1929d3290f7eSSubbaraya Sundeep 
193050fe6c02SLinu Cherian 	err = otx2_rxtx_enable(pf, true);
1931c9262522SSubbaraya Sundeep 	/* If a mbox communication error happens at this point then interface
1932c9262522SSubbaraya Sundeep 	 * will end up in a state such that it is in down state but hardware
1933c9262522SSubbaraya Sundeep 	 * mcam entries are enabled to receive the packets. Hence disable the
1934c9262522SSubbaraya Sundeep 	 * packet I/O.
1935c9262522SSubbaraya Sundeep 	 */
1936*43b69da2SSu Hui 	if (err == -EIO)
1937c9262522SSubbaraya Sundeep 		goto err_disable_rxtx;
1938c9262522SSubbaraya Sundeep 	else if (err)
19391ea0166dSHariprasad Kelam 		goto err_tx_stop_queues;
194050fe6c02SLinu Cherian 
1941ffd2f89aSRakesh Babu 	otx2_do_set_rx_mode(pf);
1942ffd2f89aSRakesh Babu 
1943caa2da34SSunil Goutham 	return 0;
194404a21ef3SSunil Goutham 
1945c9262522SSubbaraya Sundeep err_disable_rxtx:
1946c9262522SSubbaraya Sundeep 	otx2_rxtx_enable(pf, false);
19471ea0166dSHariprasad Kelam err_tx_stop_queues:
19481ea0166dSHariprasad Kelam 	netif_tx_stop_all_queues(netdev);
19491ea0166dSHariprasad Kelam 	netif_carrier_off(netdev);
195069f0aeb1SGeetha sowjanya 	pf->flags |= OTX2_FLAG_INTF_DOWN;
195104a21ef3SSunil Goutham err_free_cints:
195204a21ef3SSunil Goutham 	otx2_free_cints(pf, qidx);
19534ff7d148SGeetha sowjanya 	vec = pci_irq_vector(pf->pdev,
19544ff7d148SGeetha sowjanya 			     pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
19554ff7d148SGeetha sowjanya 	otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
19564ff7d148SGeetha sowjanya 	free_irq(vec, pf);
195734bfe0ebSSunil Goutham err_disable_napi:
195804a21ef3SSunil Goutham 	otx2_disable_napi(pf);
195904a21ef3SSunil Goutham 	otx2_free_hw_resources(pf);
1960caa2da34SSunil Goutham err_free_mem:
1961caa2da34SSunil Goutham 	kfree(qset->sq);
1962caa2da34SSunil Goutham 	kfree(qset->cq);
1963d45d8979SChristina Jacob 	kfree(qset->rq);
196404a21ef3SSunil Goutham 	kfree(qset->napi);
1965caa2da34SSunil Goutham 	return err;
196616547577SSunil Goutham }
19673184fb5bSTomasz Duszynski EXPORT_SYMBOL(otx2_open);
196816547577SSunil Goutham 
/* otx2_stop() - ndo_stop handler: bring the interface down.
 *
 * Tears down in the reverse order of otx2_open(): stop packet I/O
 * first, then free IRQs, quiesce NAPI, cancel refill workers, release
 * HW resources and finally the per-queue software state. Idempotent:
 * returns immediately if the interface is already marked down.
 */
int otx2_stop(struct net_device *netdev)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct otx2_cq_poll *cq_poll = NULL;
	struct otx2_qset *qset = &pf->qset;
	struct otx2_rss_info *rss;
	int qidx, vec, wrk;

	/* If the DOWN flag is set resources are already freed */
	if (pf->flags & OTX2_FLAG_INTF_DOWN)
		return 0;

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	pf->flags |= OTX2_FLAG_INTF_DOWN;
	/* 'intf_down' may be checked on any cpu */
	smp_wmb();

	/* First stop packet Rx/Tx */
	otx2_rxtx_enable(pf, false);

	/* Clear RSS enable flag */
	rss = &pf->hw.rss_info;
	rss->enable = false;
	/* Keep a user-configured RSS table across down/up cycles */
	if (!netif_is_rxfh_configured(netdev))
		kfree(rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);

	/* Cleanup Queue IRQ */
	vec = pci_irq_vector(pf->pdev,
			     pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
	otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
	free_irq(vec, pf);

	/* Cleanup CQ NAPI and IRQ */
	vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
		/* Disable interrupt */
		otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));

		/* Wait for any in-flight handler before touching NAPI */
		synchronize_irq(pci_irq_vector(pf->pdev, vec));

		cq_poll = &qset->napi[qidx];
		napi_synchronize(&cq_poll->napi);
		vec++;
	}

	netif_tx_disable(netdev);

	for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
		cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
	devm_kfree(pf->dev, pf->refill_wrk);

	otx2_free_hw_resources(pf);
	otx2_free_cints(pf, pf->hw.cint_cnt);
	otx2_disable_napi(pf);

	for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));


	kfree(qset->sq);
	kfree(qset->cq);
	kfree(qset->rq);
	kfree(qset->napi);
	/* Do not clear RQ/SQ ringsize settings */
	memset_startat(qset, 0, sqe_cnt);
	return 0;
}
20383184fb5bSTomasz Duszynski EXPORT_SYMBOL(otx2_stop);
203916547577SSunil Goutham 
/* otx2_xmit() - ndo_start_xmit handler.
 *
 * Maps the stack's TX queue to the proper send queue (XDP SQs occupy
 * the index range between regular TX and QoS SQs, so QoS traffic must
 * skip over them), validates the frame length and appends the skb to
 * the SQ. Returns NETDEV_TX_BUSY when the SQ is out of buffers so the
 * stack requeues the skb.
 */
static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	int qidx = skb_get_queue_mapping(skb);
	struct otx2_snd_queue *sq;
	struct netdev_queue *txq;
	int sq_idx;

	/* XDP SQs are not mapped with TXQs
	 * advance qid to derive correct sq mapped with QOS
	 */
	sq_idx = (qidx >= pf->hw.tx_queues) ? (qidx + pf->hw.xdp_queues) : qidx;

	/* Check for minimum and maximum packet length; out-of-range
	 * frames are silently dropped (NETDEV_TX_OK, skb freed).
	 */
	if (skb->len <= ETH_HLEN ||
	    (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	sq = &pf->qset.sq[sq_idx];
	txq = netdev_get_tx_queue(netdev, qidx);

	if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
		netif_tx_stop_queue(txq);

		/* Check again, incase SQBs got freed up */
		smp_mb();
		if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
							> sq->sqe_thresh)
			netif_tx_wake_queue(txq);

		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}
20773ca6c4c8SSunil Goutham 
/* Resolve the TX queue for a packet when an HTB offload root is active.
 *
 * The classid comes from skb->priority when its major handle matches
 * the offloaded HTB root, otherwise from the configured default class.
 * Returns the matching QoS txq index, or 0 when no class applies.
 */
static int otx2_qos_select_htb_queue(struct otx2_nic *pf, struct sk_buff *skb,
				     u16 htb_maj_id)
{
	u16 classid = READ_ONCE(pf->qos.defcls);

	if ((TC_H_MAJ(skb->priority) >> 16) == htb_maj_id)
		classid = TC_H_MIN(skb->priority);

	return classid ? otx2_get_txq_by_classid(pf, classid) : 0;
}
20935e6808b4SNaveen Mamindlapalli 
/* otx2_select_queue() - ndo_select_queue handler.
 *
 * Queue selection priority:
 *  1. If HTB offload (QoS) is active and the skb maps to an offloaded
 *     class, use that class's dedicated TX queue.
 *  2. (CONFIG_DCB) If the skb carries a VLAN tag whose priority has a
 *     PFC-allocated queue, use the priority as the queue index.
 *  3. Otherwise fall back to the stack's default pick, wrapped into
 *     the regular TX queue range when QoS queues are also present.
 */
u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
		      struct net_device *sb_dev)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	bool qos_enabled;
#ifdef CONFIG_DCB
	u8 vlan_prio;
#endif
	int txq;

	/* real_num_tx_queues exceeds tx_queues only when QoS queues exist */
	qos_enabled = netdev->real_num_tx_queues > pf->hw.tx_queues;
	if (unlikely(qos_enabled)) {
		/* This smp_load_acquire() pairs with smp_store_release() in
		 * otx2_qos_root_add() called from htb offload root creation
		 */
		u16 htb_maj_id = smp_load_acquire(&pf->qos.maj_id);

		if (unlikely(htb_maj_id)) {
			txq = otx2_qos_select_htb_queue(pf, skb, htb_maj_id);
			if (txq > 0)
				return txq;
			/* No HTB class matched; fall through to PFC/default
			 * selection (goto is a no-op, kept for clarity).
			 */
			goto process_pfc;
		}
	}

process_pfc:
#ifdef CONFIG_DCB
	if (!skb_vlan_tag_present(skb))
		goto pick_tx;

	/* VLAN PCP is the top 3 bits of the TCI */
	vlan_prio = skb->vlan_tci >> 13;
	if ((vlan_prio > pf->hw.tx_queues - 1) ||
	    !pf->pfc_alloc_status[vlan_prio])
		goto pick_tx;

	return vlan_prio;

pick_tx:
#endif
	txq = netdev_pick_tx(netdev, skb, NULL);
	/* Keep non-classified traffic off the QoS queue range */
	if (unlikely(qos_enabled))
		return txq % pf->hw.tx_queues;

	return txq;
}
2139ab6dddd2SSubbaraya Sundeep EXPORT_SYMBOL(otx2_select_queue);
214099c969a8SSuman Ghosh 
/* ndo_fix_features handler: STAG (QinQ) RX stripping follows CTAG RX
 * stripping — keep the two feature bits in lock-step.
 */
static netdev_features_t otx2_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	features &= ~NETIF_F_HW_VLAN_STAG_RX;
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_STAG_RX;

	return features;
}
21511d4d9e42SNaveen Mamindlapalli 
otx2_set_rx_mode(struct net_device * netdev)215234bfe0ebSSunil Goutham static void otx2_set_rx_mode(struct net_device *netdev)
215334bfe0ebSSunil Goutham {
215434bfe0ebSSunil Goutham 	struct otx2_nic *pf = netdev_priv(netdev);
2155e99b7c84SSunil Goutham 
2156e99b7c84SSunil Goutham 	queue_work(pf->otx2_wq, &pf->rx_mode_work);
2157e99b7c84SSunil Goutham }
2158e99b7c84SSunil Goutham 
otx2_rx_mode_wrk_handler(struct work_struct * work)2159ffd2f89aSRakesh Babu static void otx2_rx_mode_wrk_handler(struct work_struct *work)
2160e99b7c84SSunil Goutham {
2161e99b7c84SSunil Goutham 	struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);
216234bfe0ebSSunil Goutham 
2163ffd2f89aSRakesh Babu 	otx2_do_set_rx_mode(pf);
216434bfe0ebSSunil Goutham }
216534bfe0ebSSunil Goutham 
/* ndo_set_features handler.
 *
 * Loopback and CTAG-RX toggles need the interface running and are
 * handled via mailbox; everything else (ntuple/TC) is delegated to
 * the common helper.
 */
static int otx2_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;
	bool running = netif_running(netdev);

	if (running && (changed & NETIF_F_LOOPBACK))
		return otx2_cgx_config_loopback(pf,
						features & NETIF_F_LOOPBACK);

	if (running && (changed & NETIF_F_HW_VLAN_CTAG_RX))
		return otx2_enable_rxvlan(pf,
					  features & NETIF_F_HW_VLAN_CTAG_RX);

	return otx2_handle_ntuple_tc_features(netdev, features);
}
218234bfe0ebSSunil Goutham 
otx2_reset_task(struct work_struct * work)21834ff7d148SGeetha sowjanya static void otx2_reset_task(struct work_struct *work)
21844ff7d148SGeetha sowjanya {
21854ff7d148SGeetha sowjanya 	struct otx2_nic *pf = container_of(work, struct otx2_nic, reset_task);
21864ff7d148SGeetha sowjanya 
21874ff7d148SGeetha sowjanya 	if (!netif_running(pf->netdev))
21884ff7d148SGeetha sowjanya 		return;
21894ff7d148SGeetha sowjanya 
2190948a6633SSubbaraya Sundeep 	rtnl_lock();
21914ff7d148SGeetha sowjanya 	otx2_stop(pf->netdev);
21924ff7d148SGeetha sowjanya 	pf->reset_count++;
21934ff7d148SGeetha sowjanya 	otx2_open(pf->netdev);
21944ff7d148SGeetha sowjanya 	netif_trans_update(pf->netdev);
2195948a6633SSubbaraya Sundeep 	rtnl_unlock();
21964ff7d148SGeetha sowjanya }
21974ff7d148SGeetha sowjanya 
/* Enable or disable HW RX timestamping via a CGX mailbox request.
 *
 * A no-op when RX timestamping is already enabled and @enable is true.
 * OTX2_FLAG_RX_TSTAMP_ENABLED is updated only after the mailbox call
 * succeeds. Returns 0 or a negative errno.
 */
static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable)
{
	struct msg_req *req;
	int err = 0;

	if (enable && (pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED))
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = enable ? otx2_mbox_alloc_msg_cgx_ptp_rx_enable(&pfvf->mbox) :
		       otx2_mbox_alloc_msg_cgx_ptp_rx_disable(&pfvf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto unlock;
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
unlock:
	mutex_unlock(&pfvf->mbox.lock);
	if (err)
		return err;

	if (enable)
		pfvf->flags |= OTX2_FLAG_RX_TSTAMP_ENABLED;
	else
		pfvf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
	return 0;
}
2229c9c12d33SAleksey Makarov 
/* Enable or disable HW TX timestamping via a NIX LF mailbox request.
 *
 * A no-op when TX timestamping is already enabled and @enable is true.
 * OTX2_FLAG_TX_TSTAMP_ENABLED is updated only after the mailbox call
 * succeeds. Returns 0 or a negative errno.
 */
static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable)
{
	struct msg_req *req;
	int err = 0;

	if (enable && (pfvf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED))
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = enable ? otx2_mbox_alloc_msg_nix_lf_ptp_tx_enable(&pfvf->mbox) :
		       otx2_mbox_alloc_msg_nix_lf_ptp_tx_disable(&pfvf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto unlock;
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
unlock:
	mutex_unlock(&pfvf->mbox.lock);
	if (err)
		return err;

	if (enable)
		pfvf->flags |= OTX2_FLAG_TX_TSTAMP_ENABLED;
	else
		pfvf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
	return 0;
}
2261c9c12d33SAleksey Makarov 
/* otx2_config_hwtstamp() - SIOCSHWTSTAMP handler.
 *
 * Validates the user-supplied hwtstamp_config, programs HW TX/RX
 * timestamping accordingly, caches the applied config in pfvf->tstamp
 * and copies it back to userspace. Returns 0, -ENODEV when no PTP
 * device is present, -EFAULT on copy errors, or -ERANGE for
 * unsupported tx_type/rx_filter values.
 */
int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct hwtstamp_config config;

	if (!pfvf->ptp)
		return -ENODEV;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		/* Turning TX timestamping off also cancels one-step sync */
		if (pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC)
			pfvf->flags &= ~OTX2_FLAG_PTP_ONESTEP_SYNC;

		cancel_delayed_work(&pfvf->ptp->synctstamp_work);
		otx2_config_hw_tx_tstamp(pfvf, false);
		break;
	case HWTSTAMP_TX_ONESTEP_SYNC:
		/* One-step sync is a CN10K-only capability */
		if (!test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag))
			return -ERANGE;
		pfvf->flags |= OTX2_FLAG_PTP_ONESTEP_SYNC;
		schedule_delayed_work(&pfvf->ptp->synctstamp_work,
				      msecs_to_jiffies(500));
		/* One-step sync also needs TX timestamping enabled */
		fallthrough;
	case HWTSTAMP_TX_ON:
		otx2_config_hw_tx_tstamp(pfvf, true);
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		otx2_config_hw_rx_tstamp(pfvf, false);
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		/* HW timestamps every RX packet; report FILTER_ALL back */
		otx2_config_hw_rx_tstamp(pfvf, true);
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	memcpy(&pfvf->tstamp, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}
232543510ef4SNaveen Mamindlapalli EXPORT_SYMBOL(otx2_config_hwtstamp);
2326c9c12d33SAleksey Makarov 
otx2_ioctl(struct net_device * netdev,struct ifreq * req,int cmd)232743510ef4SNaveen Mamindlapalli int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
2328c9c12d33SAleksey Makarov {
2329c9c12d33SAleksey Makarov 	struct otx2_nic *pfvf = netdev_priv(netdev);
2330c9c12d33SAleksey Makarov 	struct hwtstamp_config *cfg = &pfvf->tstamp;
2331c9c12d33SAleksey Makarov 
2332c9c12d33SAleksey Makarov 	switch (cmd) {
2333c9c12d33SAleksey Makarov 	case SIOCSHWTSTAMP:
2334c9c12d33SAleksey Makarov 		return otx2_config_hwtstamp(netdev, req);
2335c9c12d33SAleksey Makarov 	case SIOCGHWTSTAMP:
2336c9c12d33SAleksey Makarov 		return copy_to_user(req->ifr_data, cfg,
2337c9c12d33SAleksey Makarov 				    sizeof(*cfg)) ? -EFAULT : 0;
2338c9c12d33SAleksey Makarov 	default:
2339c9c12d33SAleksey Makarov 		return -EOPNOTSUPP;
2340c9c12d33SAleksey Makarov 	}
2341c9c12d33SAleksey Makarov }
234243510ef4SNaveen Mamindlapalli EXPORT_SYMBOL(otx2_ioctl);
2343c9c12d33SAleksey Makarov 
otx2_do_set_vf_mac(struct otx2_nic * pf,int vf,const u8 * mac)2344f0c2982aSNaveen Mamindlapalli static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac)
2345f0c2982aSNaveen Mamindlapalli {
2346f0c2982aSNaveen Mamindlapalli 	struct npc_install_flow_req *req;
2347f0c2982aSNaveen Mamindlapalli 	int err;
2348f0c2982aSNaveen Mamindlapalli 
2349f0c2982aSNaveen Mamindlapalli 	mutex_lock(&pf->mbox.lock);
2350f0c2982aSNaveen Mamindlapalli 	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
2351f0c2982aSNaveen Mamindlapalli 	if (!req) {
2352f0c2982aSNaveen Mamindlapalli 		err = -ENOMEM;
2353f0c2982aSNaveen Mamindlapalli 		goto out;
2354f0c2982aSNaveen Mamindlapalli 	}
2355f0c2982aSNaveen Mamindlapalli 
2356f0c2982aSNaveen Mamindlapalli 	ether_addr_copy(req->packet.dmac, mac);
2357f0c2982aSNaveen Mamindlapalli 	eth_broadcast_addr((u8 *)&req->mask.dmac);
2358f0c2982aSNaveen Mamindlapalli 	req->features = BIT_ULL(NPC_DMAC);
2359f0c2982aSNaveen Mamindlapalli 	req->channel = pf->hw.rx_chan_base;
2360f0c2982aSNaveen Mamindlapalli 	req->intf = NIX_INTF_RX;
2361f0c2982aSNaveen Mamindlapalli 	req->default_rule = 1;
2362f0c2982aSNaveen Mamindlapalli 	req->append = 1;
2363f0c2982aSNaveen Mamindlapalli 	req->vf = vf + 1;
2364f0c2982aSNaveen Mamindlapalli 	req->op = NIX_RX_ACTION_DEFAULT;
2365f0c2982aSNaveen Mamindlapalli 
2366f0c2982aSNaveen Mamindlapalli 	err = otx2_sync_mbox_msg(&pf->mbox);
2367f0c2982aSNaveen Mamindlapalli out:
2368f0c2982aSNaveen Mamindlapalli 	mutex_unlock(&pf->mbox.lock);
2369f0c2982aSNaveen Mamindlapalli 	return err;
2370f0c2982aSNaveen Mamindlapalli }
2371f0c2982aSNaveen Mamindlapalli 
/* .ndo_set_vf_mac: validate the request, remember the MAC in the VF's
 * config and program the matching NPC rule through the AF.
 */
static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	int ret;

	if (!netif_running(netdev))
		return -EAGAIN;

	if (vf >= pf->total_vfs || !is_valid_ether_addr(mac))
		return -EINVAL;

	ether_addr_copy(pf->vf_configs[vf].mac, mac);

	ret = otx2_do_set_vf_mac(pf, vf, mac);
	if (!ret)
		dev_info(&pf->pdev->dev,
			 "Load/Reload VF driver\n");

	return ret;
}
2398f0c2982aSNaveen Mamindlapalli 
/* Configure VF VLAN offload rules via the AF mailbox.
 *
 * vlan == 0 removes any previously installed RX/TX entries for the VF.
 * A non-zero vlan installs an RX NPC match entry (with VLAN strip via
 * VTAG type 7), allocates a TX vtag for insertion, and binds it to the
 * VF's TX default rule. Returns 0 or a negative errno.
 */
static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
			       __be16 proto)
{
	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
	struct nix_vtag_config_rsp *vtag_rsp;
	struct npc_delete_flow_req *del_req;
	struct nix_vtag_config *vtag_req;
	struct npc_install_flow_req *req;
	struct otx2_vf_config *config;
	int err = 0;
	u32 idx;

	config = &pf->vf_configs[vf];

	/* Nothing to change: no VLAN requested and none installed.
	 * Return directly instead of 'goto out' -- the out label calls
	 * mutex_unlock() unconditionally, and the mbox mutex has not
	 * been taken yet at this point (unlocking an unheld mutex is
	 * not permitted). config->vlan is already 0, so skipping the
	 * 'config->vlan = vlan' assignment is a no-op here.
	 */
	if (!vlan && !config->vlan)
		return 0;

	mutex_lock(&pf->mbox.lock);

	/* free old tx vtag entry */
	if (config->vlan) {
		vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
		if (!vtag_req) {
			err = -ENOMEM;
			goto out;
		}
		vtag_req->cfg_type = 0;
		vtag_req->tx.free_vtag0 = 1;
		vtag_req->tx.vtag0_idx = config->tx_vtag_idx;

		err = otx2_sync_mbox_msg(&pf->mbox);
		if (err)
			goto out;
	}

	if (!vlan && config->vlan) {
		/* VLAN is being cleared: delete the RX then TX NPC entries */
		del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
		if (!del_req) {
			err = -ENOMEM;
			goto out;
		}
		idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
		del_req->entry =
			flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
		err = otx2_sync_mbox_msg(&pf->mbox);
		if (err)
			goto out;

		/* tx */
		del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
		if (!del_req) {
			err = -ENOMEM;
			goto out;
		}
		idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
		del_req->entry =
			flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
		err = otx2_sync_mbox_msg(&pf->mbox);

		goto out;
	}

	/* rx: match the VF's VLAN ID and strip the tag on receive */
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
	req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
	req->packet.vlan_tci = htons(vlan);
	req->mask.vlan_tci = htons(VLAN_VID_MASK);
	/* af fills the destination mac addr */
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->vf = vf + 1;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->vtag0_valid = true;
	req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
	req->set_cntr = 1;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err)
		goto out;

	/* tx: allocate a vtag entry for VLAN insertion on transmit */
	vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
	if (!vtag_req) {
		err = -ENOMEM;
		goto out;
	}

	/* configure tx vtag params */
	vtag_req->vtag_size = VTAGSIZE_T4;
	vtag_req->cfg_type = 0; /* tx vlan cfg */
	vtag_req->tx.cfg_vtag0 = 1;
	vtag_req->tx.vtag0 = ((u64)ntohs(proto) << 16) | vlan;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err)
		goto out;

	vtag_rsp = (struct nix_vtag_config_rsp *)otx2_mbox_get_rsp
			(&pf->mbox.mbox, 0, &vtag_req->hdr);
	if (IS_ERR(vtag_rsp)) {
		err = PTR_ERR(vtag_rsp);
		goto out;
	}
	/* Remember the allocated vtag index so it can be freed on change */
	config->tx_vtag_idx = vtag_rsp->vtag0_idx;

	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	eth_zero_addr((u8 *)&req->mask.dmac);
	idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
	req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.tx_chan_base;
	req->intf = NIX_INTF_TX;
	req->vf = vf + 1;
	req->op = NIX_TX_ACTIONOP_UCAST_DEFAULT;
	req->vtag0_def = vtag_rsp->vtag0_idx;
	req->vtag0_op = VTAG_INSERT;
	req->set_cntr = 1;

	err = otx2_sync_mbox_msg(&pf->mbox);
out:
	config->vlan = vlan;
	mutex_unlock(&pf->mbox.lock);
	return err;
}
2537f0c2982aSNaveen Mamindlapalli 
/* .ndo_set_vf_vlan: validate the request before handing it off to
 * otx2_do_set_vf_vlan(). The check order is deliberate -- each failure
 * mode maps to a distinct errno the caller may act on.
 */
static int otx2_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
			    __be16 proto)
{
	struct otx2_nic *pf = netdev_priv(netdev);

	if (!netif_running(netdev))
		return -EAGAIN;

	if (vf >= pci_num_vf(pf->pdev))
		return -EINVAL;

	/* qos is currently unsupported */
	if (vlan >= VLAN_N_VID || qos)
		return -EINVAL;

	/* Only 802.1Q tagging is handled */
	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	if (!(pf->flags & OTX2_FLAG_VF_VLAN_SUPPORT))
		return -EOPNOTSUPP;

	return otx2_do_set_vf_vlan(pf, vf, vlan, qos, proto);
}
2562f0c2982aSNaveen Mamindlapalli 
otx2_get_vf_config(struct net_device * netdev,int vf,struct ifla_vf_info * ivi)2563f0c2982aSNaveen Mamindlapalli static int otx2_get_vf_config(struct net_device *netdev, int vf,
2564f0c2982aSNaveen Mamindlapalli 			      struct ifla_vf_info *ivi)
2565f0c2982aSNaveen Mamindlapalli {
2566f0c2982aSNaveen Mamindlapalli 	struct otx2_nic *pf = netdev_priv(netdev);
2567f0c2982aSNaveen Mamindlapalli 	struct pci_dev *pdev = pf->pdev;
2568f0c2982aSNaveen Mamindlapalli 	struct otx2_vf_config *config;
2569f0c2982aSNaveen Mamindlapalli 
2570f0c2982aSNaveen Mamindlapalli 	if (!netif_running(netdev))
2571f0c2982aSNaveen Mamindlapalli 		return -EAGAIN;
2572f0c2982aSNaveen Mamindlapalli 
2573f0c2982aSNaveen Mamindlapalli 	if (vf >= pci_num_vf(pdev))
2574f0c2982aSNaveen Mamindlapalli 		return -EINVAL;
2575f0c2982aSNaveen Mamindlapalli 
2576f0c2982aSNaveen Mamindlapalli 	config = &pf->vf_configs[vf];
2577f0c2982aSNaveen Mamindlapalli 	ivi->vf = vf;
2578f0c2982aSNaveen Mamindlapalli 	ether_addr_copy(ivi->mac, config->mac);
2579f0c2982aSNaveen Mamindlapalli 	ivi->vlan = config->vlan;
2580b1dc2040SHariprasad Kelam 	ivi->trusted = config->trusted;
2581f0c2982aSNaveen Mamindlapalli 
2582f0c2982aSNaveen Mamindlapalli 	return 0;
2583f0c2982aSNaveen Mamindlapalli }
2584f0c2982aSNaveen Mamindlapalli 
otx2_xdp_xmit_tx(struct otx2_nic * pf,struct xdp_frame * xdpf,int qidx)258506059a1aSGeetha sowjanya static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
258606059a1aSGeetha sowjanya 			    int qidx)
258706059a1aSGeetha sowjanya {
258806059a1aSGeetha sowjanya 	struct page *page;
258906059a1aSGeetha sowjanya 	u64 dma_addr;
259006059a1aSGeetha sowjanya 	int err = 0;
259106059a1aSGeetha sowjanya 
259206059a1aSGeetha sowjanya 	dma_addr = otx2_dma_map_page(pf, virt_to_page(xdpf->data),
259306059a1aSGeetha sowjanya 				     offset_in_page(xdpf->data), xdpf->len,
259406059a1aSGeetha sowjanya 				     DMA_TO_DEVICE);
259506059a1aSGeetha sowjanya 	if (dma_mapping_error(pf->dev, dma_addr))
259606059a1aSGeetha sowjanya 		return -ENOMEM;
259706059a1aSGeetha sowjanya 
259806059a1aSGeetha sowjanya 	err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx);
259906059a1aSGeetha sowjanya 	if (!err) {
260006059a1aSGeetha sowjanya 		otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE);
260106059a1aSGeetha sowjanya 		page = virt_to_page(xdpf->data);
260206059a1aSGeetha sowjanya 		put_page(page);
260306059a1aSGeetha sowjanya 		return -ENOMEM;
260406059a1aSGeetha sowjanya 	}
260506059a1aSGeetha sowjanya 	return 0;
260606059a1aSGeetha sowjanya }
260706059a1aSGeetha sowjanya 
/* .ndo_xdp_xmit: transmit a batch of XDP frames on this CPU's XDP
 * queue. Returns the number of frames actually queued.
 */
static int otx2_xdp_xmit(struct net_device *netdev, int n,
			 struct xdp_frame **frames, u32 flags)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	int qidx = smp_processor_id();
	struct otx2_snd_queue *sq;
	int sent = 0, i;

	if (!netif_running(netdev))
		return -ENETDOWN;

	/* XDP TX queues sit after the regular TX queues */
	qidx += pf->hw.tx_queues;
	sq = pf->xdp_prog ? &pf->qset.sq[qidx] : NULL;

	/* Abort xmit if the XDP send queue is not set up */
	if (unlikely(!sq))
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	for (i = 0; i < n; i++) {
		if (!otx2_xdp_xmit_tx(pf, frames[i], qidx))
			sent++;
	}

	return sent;
}
263906059a1aSGeetha sowjanya 
/* Attach or detach an XDP program. When the interface is up it is
 * stopped around the swap so the queue configuration (XDP TX queues,
 * redirect-target feature flag) is rebuilt consistently on reopen.
 */
static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
{
	struct net_device *dev = pf->netdev;
	bool if_up = netif_running(pf->netdev);
	struct bpf_prog *old_prog;

	/* Reject attach when the MTU exceeds what XDP buffers can hold */
	if (prog && dev->mtu > MAX_XDP_MTU) {
		netdev_warn(dev, "Jumbo frames not yet supported with XDP\n");
		return -EOPNOTSUPP;
	}

	if (if_up)
		otx2_stop(pf->netdev);

	/* Atomically swap in the new program and drop the old one's ref */
	old_prog = xchg(&pf->xdp_prog, prog);

	if (old_prog)
		bpf_prog_put(old_prog);

	/* Take rx_queues - 1 extra references on the new program --
	 * presumably one per RX queue, with the caller's reference
	 * covering the first; verify against the RX path before changing.
	 */
	if (pf->xdp_prog)
		bpf_prog_add(pf->xdp_prog, pf->hw.rx_queues - 1);

	/* Network stack and XDP shared same rx queues.
	 * Use separate tx queues for XDP and network stack.
	 */
	if (pf->xdp_prog) {
		pf->hw.xdp_queues = pf->hw.rx_queues;
		xdp_features_set_redirect_target(dev, false);
	} else {
		pf->hw.xdp_queues = 0;
		xdp_features_clear_redirect_target(dev);
	}

	if (if_up)
		otx2_open(pf->netdev);

	return 0;
}
267806059a1aSGeetha sowjanya 
otx2_xdp(struct net_device * netdev,struct netdev_bpf * xdp)267906059a1aSGeetha sowjanya static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
268006059a1aSGeetha sowjanya {
268106059a1aSGeetha sowjanya 	struct otx2_nic *pf = netdev_priv(netdev);
268206059a1aSGeetha sowjanya 
268306059a1aSGeetha sowjanya 	switch (xdp->command) {
268406059a1aSGeetha sowjanya 	case XDP_SETUP_PROG:
268506059a1aSGeetha sowjanya 		return otx2_xdp_setup(pf, xdp->prog);
268606059a1aSGeetha sowjanya 	default:
268706059a1aSGeetha sowjanya 		return -EINVAL;
268806059a1aSGeetha sowjanya 	}
268906059a1aSGeetha sowjanya }
269006059a1aSGeetha sowjanya 
otx2_set_vf_permissions(struct otx2_nic * pf,int vf,int req_perm)2691b1dc2040SHariprasad Kelam static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf,
2692b1dc2040SHariprasad Kelam 				   int req_perm)
2693b1dc2040SHariprasad Kelam {
2694b1dc2040SHariprasad Kelam 	struct set_vf_perm *req;
2695b1dc2040SHariprasad Kelam 	int rc;
2696b1dc2040SHariprasad Kelam 
2697b1dc2040SHariprasad Kelam 	mutex_lock(&pf->mbox.lock);
2698b1dc2040SHariprasad Kelam 	req = otx2_mbox_alloc_msg_set_vf_perm(&pf->mbox);
2699b1dc2040SHariprasad Kelam 	if (!req) {
2700b1dc2040SHariprasad Kelam 		rc = -ENOMEM;
2701b1dc2040SHariprasad Kelam 		goto out;
2702b1dc2040SHariprasad Kelam 	}
2703b1dc2040SHariprasad Kelam 
2704b1dc2040SHariprasad Kelam 	/* Let AF reset VF permissions as sriov is disabled */
2705b1dc2040SHariprasad Kelam 	if (req_perm == OTX2_RESET_VF_PERM) {
2706b1dc2040SHariprasad Kelam 		req->flags |= RESET_VF_PERM;
2707b1dc2040SHariprasad Kelam 	} else if (req_perm == OTX2_TRUSTED_VF) {
2708b1dc2040SHariprasad Kelam 		if (pf->vf_configs[vf].trusted)
2709b1dc2040SHariprasad Kelam 			req->flags |= VF_TRUSTED;
2710b1dc2040SHariprasad Kelam 	}
2711b1dc2040SHariprasad Kelam 
2712b1dc2040SHariprasad Kelam 	req->vf = vf;
2713b1dc2040SHariprasad Kelam 	rc = otx2_sync_mbox_msg(&pf->mbox);
2714b1dc2040SHariprasad Kelam out:
2715b1dc2040SHariprasad Kelam 	mutex_unlock(&pf->mbox.lock);
2716b1dc2040SHariprasad Kelam 	return rc;
2717b1dc2040SHariprasad Kelam }
2718b1dc2040SHariprasad Kelam 
/* .ndo_set_vf_trust: toggle a VF's trusted flag. The cached value is
 * updated optimistically and rolled back if the AF rejects the change.
 */
static int otx2_ndo_set_vf_trust(struct net_device *netdev, int vf,
				 bool enable)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	int rc;

	if (vf >= pci_num_vf(pf->pdev))
		return -EINVAL;

	if (pf->vf_configs[vf].trusted == enable)
		return 0;

	pf->vf_configs[vf].trusted = enable;
	rc = otx2_set_vf_permissions(pf, vf, OTX2_TRUSTED_VF);

	if (rc) {
		/* AF refused: restore the previous trust state */
		pf->vf_configs[vf].trusted = !enable;
		return rc;
	}

	netdev_info(pf->netdev, "VF %d is %strusted\n",
		    vf, enable ? "" : "not ");
	otx2_set_rx_mode(netdev);

	return rc;
}
2745b1dc2040SHariprasad Kelam 
/* PF netdev callbacks registered with the core networking stack */
static const struct net_device_ops otx2_netdev_ops = {
	.ndo_open		= otx2_open,
	.ndo_stop		= otx2_stop,
	.ndo_start_xmit		= otx2_xmit,
	.ndo_select_queue	= otx2_select_queue,
	.ndo_fix_features	= otx2_fix_features,
	.ndo_set_mac_address    = otx2_set_mac_address,
	.ndo_change_mtu		= otx2_change_mtu,
	.ndo_set_rx_mode	= otx2_set_rx_mode,
	.ndo_set_features	= otx2_set_features,
	.ndo_tx_timeout		= otx2_tx_timeout,
	.ndo_get_stats64	= otx2_get_stats64,
	.ndo_eth_ioctl		= otx2_ioctl,
	.ndo_set_vf_mac		= otx2_set_vf_mac,
	.ndo_set_vf_vlan	= otx2_set_vf_vlan,
	.ndo_get_vf_config	= otx2_get_vf_config,
	.ndo_bpf		= otx2_xdp,
	.ndo_xdp_xmit           = otx2_xdp_xmit,
	.ndo_setup_tc		= otx2_setup_tc,
	.ndo_set_vf_trust	= otx2_ndo_set_vf_trust,
};
276716547577SSunil Goutham 
otx2_wq_init(struct otx2_nic * pf)2768e99b7c84SSunil Goutham static int otx2_wq_init(struct otx2_nic *pf)
2769e99b7c84SSunil Goutham {
2770e99b7c84SSunil Goutham 	pf->otx2_wq = create_singlethread_workqueue("otx2_wq");
2771e99b7c84SSunil Goutham 	if (!pf->otx2_wq)
2772e99b7c84SSunil Goutham 		return -ENOMEM;
2773e99b7c84SSunil Goutham 
2774ffd2f89aSRakesh Babu 	INIT_WORK(&pf->rx_mode_work, otx2_rx_mode_wrk_handler);
2775e99b7c84SSunil Goutham 	INIT_WORK(&pf->reset_task, otx2_reset_task);
2776e99b7c84SSunil Goutham 	return 0;
2777e99b7c84SSunil Goutham }
2778e99b7c84SSunil Goutham 
otx2_check_pf_usable(struct otx2_nic * nic)277916547577SSunil Goutham static int otx2_check_pf_usable(struct otx2_nic *nic)
278016547577SSunil Goutham {
278116547577SSunil Goutham 	u64 rev;
278216547577SSunil Goutham 
278316547577SSunil Goutham 	rev = otx2_read64(nic, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
278416547577SSunil Goutham 	rev = (rev >> 12) & 0xFF;
278516547577SSunil Goutham 	/* Check if AF has setup revision for RVUM block,
278616547577SSunil Goutham 	 * otherwise this driver probe should be deferred
278716547577SSunil Goutham 	 * until AF driver comes up.
278816547577SSunil Goutham 	 */
278916547577SSunil Goutham 	if (!rev) {
279016547577SSunil Goutham 		dev_warn(nic->dev,
279116547577SSunil Goutham 			 "AF is not initialized, deferring probe\n");
279216547577SSunil Goutham 		return -EPROBE_DEFER;
279316547577SSunil Goutham 	}
279416547577SSunil Goutham 	return 0;
279516547577SSunil Goutham }
279616547577SSunil Goutham 
/* Shrink the MSIX allocation to only the vectors actually used
 * (mailbox + NIX LF CQ interrupts), then re-register the mailbox
 * interrupt that was disabled for the re-allocation.
 */
static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
{
	struct otx2_hw *hw = &pf->hw;
	int num_vec, err;

	/* NPA interrupts are not registered, so alloc only
	 * upto NIX vector offset.
	 */
	num_vec = hw->nix_msixoff;
	num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;

	otx2_disable_mbox_intr(pf);
	pci_free_irq_vectors(hw->pdev);
	err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n",
			__func__, num_vec);
		return err;
	}

	return otx2_register_mbox_intr(pf, false);
}
281905fcc9e0SSunil Goutham 
otx2_sriov_vfcfg_init(struct otx2_nic * pf)2820b1dc2040SHariprasad Kelam static int otx2_sriov_vfcfg_init(struct otx2_nic *pf)
2821b1dc2040SHariprasad Kelam {
2822b1dc2040SHariprasad Kelam 	int i;
2823b1dc2040SHariprasad Kelam 
2824b1dc2040SHariprasad Kelam 	pf->vf_configs = devm_kcalloc(pf->dev, pf->total_vfs,
2825b1dc2040SHariprasad Kelam 				      sizeof(struct otx2_vf_config),
2826b1dc2040SHariprasad Kelam 				      GFP_KERNEL);
2827b1dc2040SHariprasad Kelam 	if (!pf->vf_configs)
2828b1dc2040SHariprasad Kelam 		return -ENOMEM;
2829b1dc2040SHariprasad Kelam 
2830b1dc2040SHariprasad Kelam 	for (i = 0; i < pf->total_vfs; i++) {
2831b1dc2040SHariprasad Kelam 		pf->vf_configs[i].pf = pf;
2832b1dc2040SHariprasad Kelam 		pf->vf_configs[i].intf_down = true;
2833b1dc2040SHariprasad Kelam 		pf->vf_configs[i].trusted = false;
2834b1dc2040SHariprasad Kelam 		INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
2835b1dc2040SHariprasad Kelam 				  otx2_vf_link_event_task);
2836b1dc2040SHariprasad Kelam 	}
2837b1dc2040SHariprasad Kelam 
2838b1dc2040SHariprasad Kelam 	return 0;
2839b1dc2040SHariprasad Kelam }
2840b1dc2040SHariprasad Kelam 
otx2_sriov_vfcfg_cleanup(struct otx2_nic * pf)2841b1dc2040SHariprasad Kelam static void otx2_sriov_vfcfg_cleanup(struct otx2_nic *pf)
2842b1dc2040SHariprasad Kelam {
2843b1dc2040SHariprasad Kelam 	int i;
2844b1dc2040SHariprasad Kelam 
2845b1dc2040SHariprasad Kelam 	if (!pf->vf_configs)
2846b1dc2040SHariprasad Kelam 		return;
2847b1dc2040SHariprasad Kelam 
2848b1dc2040SHariprasad Kelam 	for (i = 0; i < pf->total_vfs; i++) {
2849b1dc2040SHariprasad Kelam 		cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
2850b1dc2040SHariprasad Kelam 		otx2_set_vf_permissions(pf, i, OTX2_RESET_VF_PERM);
2851b1dc2040SHariprasad Kelam 	}
2852b1dc2040SHariprasad Kelam }
2853b1dc2040SHariprasad Kelam 
otx2_probe(struct pci_dev * pdev,const struct pci_device_id * id)285416547577SSunil Goutham static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
285516547577SSunil Goutham {
285616547577SSunil Goutham 	struct device *dev = &pdev->dev;
2857ab6dddd2SSubbaraya Sundeep 	int err, qcount, qos_txqs;
285816547577SSunil Goutham 	struct net_device *netdev;
285916547577SSunil Goutham 	struct otx2_nic *pf;
286016547577SSunil Goutham 	struct otx2_hw *hw;
28615a6d7c9dSSunil Goutham 	int num_vec;
286216547577SSunil Goutham 
286316547577SSunil Goutham 	err = pcim_enable_device(pdev);
286416547577SSunil Goutham 	if (err) {
286516547577SSunil Goutham 		dev_err(dev, "Failed to enable PCI device\n");
286616547577SSunil Goutham 		return err;
286716547577SSunil Goutham 	}
286816547577SSunil Goutham 
286916547577SSunil Goutham 	err = pci_request_regions(pdev, DRV_NAME);
287016547577SSunil Goutham 	if (err) {
287116547577SSunil Goutham 		dev_err(dev, "PCI request regions failed 0x%x\n", err);
287216547577SSunil Goutham 		return err;
287316547577SSunil Goutham 	}
287416547577SSunil Goutham 
287516547577SSunil Goutham 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
287616547577SSunil Goutham 	if (err) {
287716547577SSunil Goutham 		dev_err(dev, "DMA mask config failed, abort\n");
287816547577SSunil Goutham 		goto err_release_regions;
287916547577SSunil Goutham 	}
288016547577SSunil Goutham 
288116547577SSunil Goutham 	pci_set_master(pdev);
288216547577SSunil Goutham 
288316547577SSunil Goutham 	/* Set number of queues */
288405fcc9e0SSunil Goutham 	qcount = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT);
2885ab6dddd2SSubbaraya Sundeep 	qos_txqs = min_t(int, qcount, OTX2_QOS_MAX_LEAF_NODES);
288616547577SSunil Goutham 
2887ab6dddd2SSubbaraya Sundeep 	netdev = alloc_etherdev_mqs(sizeof(*pf), qcount + qos_txqs, qcount);
288816547577SSunil Goutham 	if (!netdev) {
288916547577SSunil Goutham 		err = -ENOMEM;
289016547577SSunil Goutham 		goto err_release_regions;
289116547577SSunil Goutham 	}
289216547577SSunil Goutham 
289316547577SSunil Goutham 	pci_set_drvdata(pdev, netdev);
289416547577SSunil Goutham 	SET_NETDEV_DEV(netdev, &pdev->dev);
289516547577SSunil Goutham 	pf = netdev_priv(netdev);
289616547577SSunil Goutham 	pf->netdev = netdev;
289716547577SSunil Goutham 	pf->pdev = pdev;
289816547577SSunil Goutham 	pf->dev = dev;
2899d424b6c0SSunil Goutham 	pf->total_vfs = pci_sriov_get_totalvfs(pdev);
290050fe6c02SLinu Cherian 	pf->flags |= OTX2_FLAG_INTF_DOWN;
290116547577SSunil Goutham 
290216547577SSunil Goutham 	hw = &pf->hw;
290316547577SSunil Goutham 	hw->pdev = pdev;
290416547577SSunil Goutham 	hw->rx_queues = qcount;
290516547577SSunil Goutham 	hw->tx_queues = qcount;
2906508c58f7SHariprasad Kelam 	hw->non_qos_queues = qcount;
290716547577SSunil Goutham 	hw->max_queues = qcount;
2908a989eb66SSubbaraya Sundeep 	hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
290968258596SSubbaraya Sundeep 	/* Use CQE of 128 byte descriptor size by default */
291068258596SSubbaraya Sundeep 	hw->xqe_size = 128;
291116547577SSunil Goutham 
29125a6d7c9dSSunil Goutham 	num_vec = pci_msix_vec_count(pdev);
29135a6d7c9dSSunil Goutham 	hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
29145a6d7c9dSSunil Goutham 					  GFP_KERNEL);
2915654cad8bSWei Yongjun 	if (!hw->irq_name) {
2916654cad8bSWei Yongjun 		err = -ENOMEM;
29175a6d7c9dSSunil Goutham 		goto err_free_netdev;
2918654cad8bSWei Yongjun 	}
29195a6d7c9dSSunil Goutham 
29205a6d7c9dSSunil Goutham 	hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
29215a6d7c9dSSunil Goutham 					 sizeof(cpumask_var_t), GFP_KERNEL);
2922654cad8bSWei Yongjun 	if (!hw->affinity_mask) {
2923654cad8bSWei Yongjun 		err = -ENOMEM;
29245a6d7c9dSSunil Goutham 		goto err_free_netdev;
2925654cad8bSWei Yongjun 	}
29265a6d7c9dSSunil Goutham 
292716547577SSunil Goutham 	/* Map CSRs */
292816547577SSunil Goutham 	pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
292916547577SSunil Goutham 	if (!pf->reg_base) {
293016547577SSunil Goutham 		dev_err(dev, "Unable to map physical function CSRs, aborting\n");
293116547577SSunil Goutham 		err = -ENOMEM;
293216547577SSunil Goutham 		goto err_free_netdev;
293316547577SSunil Goutham 	}
293416547577SSunil Goutham 
293516547577SSunil Goutham 	err = otx2_check_pf_usable(pf);
293616547577SSunil Goutham 	if (err)
293716547577SSunil Goutham 		goto err_free_netdev;
293816547577SSunil Goutham 
29395a6d7c9dSSunil Goutham 	err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
29405a6d7c9dSSunil Goutham 				    RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
29415a6d7c9dSSunil Goutham 	if (err < 0) {
29425a6d7c9dSSunil Goutham 		dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
29435a6d7c9dSSunil Goutham 			__func__, num_vec);
29445a6d7c9dSSunil Goutham 		goto err_free_netdev;
29455a6d7c9dSSunil Goutham 	}
29465a6d7c9dSSunil Goutham 
29476e8ad438SGeetha sowjanya 	otx2_setup_dev_hw_settings(pf);
29486e8ad438SGeetha sowjanya 
29495a6d7c9dSSunil Goutham 	/* Init PF <=> AF mailbox stuff */
29505a6d7c9dSSunil Goutham 	err = otx2_pfaf_mbox_init(pf);
29515a6d7c9dSSunil Goutham 	if (err)
29525a6d7c9dSSunil Goutham 		goto err_free_irq_vectors;
29535a6d7c9dSSunil Goutham 
29545a6d7c9dSSunil Goutham 	/* Register mailbox interrupt */
29555a6d7c9dSSunil Goutham 	err = otx2_register_mbox_intr(pf, true);
29565a6d7c9dSSunil Goutham 	if (err)
29575a6d7c9dSSunil Goutham 		goto err_mbox_destroy;
29585a6d7c9dSSunil Goutham 
295905fcc9e0SSunil Goutham 	/* Request AF to attach NPA and NIX LFs to this PF.
296005fcc9e0SSunil Goutham 	 * NIX and NPA LFs are needed for this PF to function as a NIC.
296105fcc9e0SSunil Goutham 	 */
296205fcc9e0SSunil Goutham 	err = otx2_attach_npa_nix(pf);
296316547577SSunil Goutham 	if (err)
29645a6d7c9dSSunil Goutham 		goto err_disable_mbox_intr;
296516547577SSunil Goutham 
296605fcc9e0SSunil Goutham 	err = otx2_realloc_msix_vectors(pf);
296705fcc9e0SSunil Goutham 	if (err)
296805fcc9e0SSunil Goutham 		goto err_detach_rsrc;
296905fcc9e0SSunil Goutham 
297005fcc9e0SSunil Goutham 	err = otx2_set_real_num_queues(netdev, hw->tx_queues, hw->rx_queues);
297105fcc9e0SSunil Goutham 	if (err)
297205fcc9e0SSunil Goutham 		goto err_detach_rsrc;
297305fcc9e0SSunil Goutham 
29745c051207SGeetha sowjanya 	err = cn10k_lmtst_init(pf);
29756e8ad438SGeetha sowjanya 	if (err)
29766e8ad438SGeetha sowjanya 		goto err_detach_rsrc;
297704a21ef3SSunil Goutham 
297834bfe0ebSSunil Goutham 	/* Assign default mac address */
297934bfe0ebSSunil Goutham 	otx2_get_mac_from_af(netdev);
298034bfe0ebSSunil Goutham 
2981c9c12d33SAleksey Makarov 	/* Don't check for error.  Proceed without ptp */
2982c9c12d33SAleksey Makarov 	otx2_ptp_init(pf);
2983c9c12d33SAleksey Makarov 
2984caa2da34SSunil Goutham 	/* NPA's pool is a stack to which SW frees buffer pointers via Aura.
2985caa2da34SSunil Goutham 	 * HW allocates buffer pointer from stack and uses it for DMA'ing
2986caa2da34SSunil Goutham 	 * ingress packet. In some scenarios HW can free back allocated buffer
2987caa2da34SSunil Goutham 	 * pointers to pool. This makes it impossible for SW to maintain a
2988caa2da34SSunil Goutham 	 * parallel list where physical addresses of buffer pointers (IOVAs)
2989caa2da34SSunil Goutham 	 * given to HW can be saved for later reference.
2990caa2da34SSunil Goutham 	 *
2991caa2da34SSunil Goutham 	 * So the only way to convert Rx packet's buffer address is to use
2992caa2da34SSunil Goutham 	 * IOMMU's iova_to_phys() handler which translates the address by
2993caa2da34SSunil Goutham 	 * walking through the translation tables.
2994caa2da34SSunil Goutham 	 */
2995caa2da34SSunil Goutham 	pf->iommu_domain = iommu_get_domain_for_dev(dev);
2996caa2da34SSunil Goutham 
29973ca6c4c8SSunil Goutham 	netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
299885069e95SSunil Goutham 			       NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
2999dc1a9bf2SSunil Goutham 			       NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3000dc1a9bf2SSunil Goutham 			       NETIF_F_GSO_UDP_L4);
3001abe02543SSunil Goutham 	netdev->features |= netdev->hw_features;
3002abe02543SSunil Goutham 
3003f0a1913fSSubbaraya Sundeep 	err = otx2_mcam_flow_init(pf);
3004f0a1913fSSubbaraya Sundeep 	if (err)
3005f0a1913fSSubbaraya Sundeep 		goto err_ptp_destroy;
3006f0a1913fSSubbaraya Sundeep 
3007c54ffc73SSubbaraya Sundeep 	err = cn10k_mcs_init(pf);
3008c54ffc73SSubbaraya Sundeep 	if (err)
3009c54ffc73SSubbaraya Sundeep 		goto err_del_mcam_entries;
3010c54ffc73SSubbaraya Sundeep 
3011f0a1913fSSubbaraya Sundeep 	if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT)
3012f0a1913fSSubbaraya Sundeep 		netdev->hw_features |= NETIF_F_NTUPLE;
3013f0a1913fSSubbaraya Sundeep 
301463ee5157SHariprasad Kelam 	if (pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT)
301563ee5157SHariprasad Kelam 		netdev->priv_flags |= IFF_UNICAST_FLT;
301663ee5157SHariprasad Kelam 
3017fd9d7859SHariprasad Kelam 	/* Support TSO on tag interface */
3018fd9d7859SHariprasad Kelam 	netdev->vlan_features |= netdev->features;
3019fd9d7859SHariprasad Kelam 	netdev->hw_features  |= NETIF_F_HW_VLAN_CTAG_TX |
3020fd9d7859SHariprasad Kelam 				NETIF_F_HW_VLAN_STAG_TX;
3021fd9d7859SHariprasad Kelam 	if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
3022fd9d7859SHariprasad Kelam 		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
3023fd9d7859SHariprasad Kelam 				       NETIF_F_HW_VLAN_STAG_RX;
3024fd9d7859SHariprasad Kelam 	netdev->features |= netdev->hw_features;
3025fd9d7859SHariprasad Kelam 
30261d4d9e42SNaveen Mamindlapalli 	/* HW supports tc offload but mutually exclusive with n-tuple filters */
30271d4d9e42SNaveen Mamindlapalli 	if (pf->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)
30281d4d9e42SNaveen Mamindlapalli 		netdev->hw_features |= NETIF_F_HW_TC;
30291d4d9e42SNaveen Mamindlapalli 
30300b3834aeSSunil Goutham 	netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
30310b3834aeSSunil Goutham 
3032ee8b7a11SJakub Kicinski 	netif_set_tso_max_segs(netdev, OTX2_MAX_GSO_SEGS);
30334ff7d148SGeetha sowjanya 	netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
30344ff7d148SGeetha sowjanya 
303516547577SSunil Goutham 	netdev->netdev_ops = &otx2_netdev_ops;
303666c0e13aSMarek Majtyka 	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
303716547577SSunil Goutham 
303834bfe0ebSSunil Goutham 	netdev->min_mtu = OTX2_MIN_MTU;
3039ab58a416SHariprasad Kelam 	netdev->max_mtu = otx2_get_max_mtu(pf);
304034bfe0ebSSunil Goutham 
304116547577SSunil Goutham 	err = register_netdev(netdev);
304216547577SSunil Goutham 	if (err) {
304316547577SSunil Goutham 		dev_err(dev, "Failed to register netdevice\n");
3044af7d23f9SYang Yingliang 		goto err_mcs_free;
304516547577SSunil Goutham 	}
304616547577SSunil Goutham 
3047e99b7c84SSunil Goutham 	err = otx2_wq_init(pf);
3048e99b7c84SSunil Goutham 	if (err)
3049e99b7c84SSunil Goutham 		goto err_unreg_netdev;
3050e99b7c84SSunil Goutham 
3051d45d8979SChristina Jacob 	otx2_set_ethtool_ops(netdev);
3052d45d8979SChristina Jacob 
30531d4d9e42SNaveen Mamindlapalli 	err = otx2_init_tc(pf);
30541d4d9e42SNaveen Mamindlapalli 	if (err)
30551d4d9e42SNaveen Mamindlapalli 		goto err_mcam_flow_del;
30561d4d9e42SNaveen Mamindlapalli 
30572da48943SSunil Goutham 	err = otx2_register_dl(pf);
30582da48943SSunil Goutham 	if (err)
30592da48943SSunil Goutham 		goto err_mcam_flow_del;
30602da48943SSunil Goutham 
3061b1dc2040SHariprasad Kelam 	/* Initialize SR-IOV resources */
3062b1dc2040SHariprasad Kelam 	err = otx2_sriov_vfcfg_init(pf);
3063b1dc2040SHariprasad Kelam 	if (err)
3064b1dc2040SHariprasad Kelam 		goto err_pf_sriov_init;
3065b1dc2040SHariprasad Kelam 
306650fe6c02SLinu Cherian 	/* Enable link notifications */
306750fe6c02SLinu Cherian 	otx2_cgx_config_linkevents(pf, true);
306850fe6c02SLinu Cherian 
30698e675581SHariprasad Kelam #ifdef CONFIG_DCB
30708e675581SHariprasad Kelam 	err = otx2_dcbnl_set_ops(netdev);
30718e675581SHariprasad Kelam 	if (err)
30728e675581SHariprasad Kelam 		goto err_pf_sriov_init;
30738e675581SHariprasad Kelam #endif
30748e675581SHariprasad Kelam 
3075ab6dddd2SSubbaraya Sundeep 	otx2_qos_init(pf, qos_txqs);
3076ab6dddd2SSubbaraya Sundeep 
307716547577SSunil Goutham 	return 0;
307816547577SSunil Goutham 
3079b1dc2040SHariprasad Kelam err_pf_sriov_init:
3080b1dc2040SHariprasad Kelam 	otx2_shutdown_tc(pf);
30811d4d9e42SNaveen Mamindlapalli err_mcam_flow_del:
30821d4d9e42SNaveen Mamindlapalli 	otx2_mcam_flow_del(pf);
3083e99b7c84SSunil Goutham err_unreg_netdev:
3084e99b7c84SSunil Goutham 	unregister_netdev(netdev);
3085af7d23f9SYang Yingliang err_mcs_free:
3086af7d23f9SYang Yingliang 	cn10k_mcs_free(pf);
3087f0a1913fSSubbaraya Sundeep err_del_mcam_entries:
3088f0a1913fSSubbaraya Sundeep 	otx2_mcam_flow_del(pf);
3089c9c12d33SAleksey Makarov err_ptp_destroy:
3090c9c12d33SAleksey Makarov 	otx2_ptp_destroy(pf);
309105fcc9e0SSunil Goutham err_detach_rsrc:
3092ef6c8da7SGeetha sowjanya 	if (pf->hw.lmt_info)
3093ef6c8da7SGeetha sowjanya 		free_percpu(pf->hw.lmt_info);
30945c051207SGeetha sowjanya 	if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
30955c051207SGeetha sowjanya 		qmem_free(pf->dev, pf->dync_lmt);
309605fcc9e0SSunil Goutham 	otx2_detach_resources(&pf->mbox);
30975a6d7c9dSSunil Goutham err_disable_mbox_intr:
30985a6d7c9dSSunil Goutham 	otx2_disable_mbox_intr(pf);
30995a6d7c9dSSunil Goutham err_mbox_destroy:
31005a6d7c9dSSunil Goutham 	otx2_pfaf_mbox_destroy(pf);
31015a6d7c9dSSunil Goutham err_free_irq_vectors:
31025a6d7c9dSSunil Goutham 	pci_free_irq_vectors(hw->pdev);
310316547577SSunil Goutham err_free_netdev:
310416547577SSunil Goutham 	pci_set_drvdata(pdev, NULL);
310516547577SSunil Goutham 	free_netdev(netdev);
310616547577SSunil Goutham err_release_regions:
310716547577SSunil Goutham 	pci_release_regions(pdev);
310816547577SSunil Goutham 	return err;
310916547577SSunil Goutham }
311016547577SSunil Goutham 
/* Delayed work handler that forwards the PF's current link state to one VF
 * over the PF->VF mailbox "up" channel.  One instance of this work exists
 * per VF (embedded in its otx2_vf_config); the VF index is recovered from
 * the config's position in pf->vf_configs.
 *
 * If the up-mailbox still holds an unacknowledged message for this VF, the
 * work reschedules itself 100 ms later rather than overwriting it.
 */
static void otx2_vf_link_event_task(struct work_struct *work)
{
	struct otx2_vf_config *config;
	struct cgx_link_info_msg *req;
	struct mbox_msghdr *msghdr;
	struct delayed_work *dwork;
	struct otx2_nic *pf;
	int vf_idx;

	config = container_of(work, struct otx2_vf_config,
			      link_event_work.work);
	/* Pointer arithmetic: index of this config within the per-VF array */
	vf_idx = config - config->pf->vf_configs;
	pf = config->pf;

	/* No point notifying a VF whose interface is down */
	if (config->intf_down)
		return;

	/* Serialize against other users of the PF mailbox */
	mutex_lock(&pf->mbox.lock);

	dwork = &config->link_event_work;

	/* Previous up-message to this VF not consumed yet; retry later */
	if (!otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx)) {
		schedule_delayed_work(dwork, msecs_to_jiffies(100));
		mutex_unlock(&pf->mbox.lock);
		return;
	}

	msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
					 sizeof(*req), sizeof(struct msg_rsp));
	if (!msghdr) {
		dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx);
		mutex_unlock(&pf->mbox.lock);
		return;
	}

	/* Fill the CGX link-event message with the cached PF link info */
	req = (struct cgx_link_info_msg *)msghdr;
	req->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
	req->hdr.sig = OTX2_MBOX_REQ_SIG;
	memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));

	otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx);

	/* Send and wait for the VF to process the up-message */
	otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);

	mutex_unlock(&pf->mbox.lock);
}
3157ad513ed9STomasz Duszynski 
/* Bring up SR-IOV: set up the PF<=>VF mailbox, VF FLR handling and their
 * interrupts, then enable the VFs in PCI config space.
 *
 * Returns @numvfs on success, negative errno on failure (with everything
 * acquired so far torn down in reverse order).
 */
static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *pf = netdev_priv(netdev);
	int err;

	/* PF <=> VF mailbox channel must exist before VFs appear */
	err = otx2_pfvf_mbox_init(pf, numvfs);
	if (err)
		return err;

	err = otx2_register_pfvf_mbox_intr(pf, numvfs);
	if (err)
		goto destroy_mbox;

	/* Function-level-reset (FLR) workqueue for the VFs */
	err = otx2_pf_flr_init(pf, numvfs);
	if (err)
		goto disable_mbox_intr;

	err = otx2_register_flr_me_intr(pf, numvfs);
	if (err)
		goto destroy_flr_wq;

	err = pci_enable_sriov(pdev, numvfs);
	if (err)
		goto disable_flr_intr;

	return numvfs;

disable_flr_intr:
	otx2_disable_flr_me_intr(pf);
destroy_flr_wq:
	otx2_flr_wq_destroy(pf);
disable_mbox_intr:
	otx2_disable_pfvf_mbox_intr(pf, numvfs);
destroy_mbox:
	otx2_pfvf_mbox_destroy(pf);
	return err;
}
3196d424b6c0SSunil Goutham 
/* Tear down SR-IOV: disable the VFs, then release FLR and PF<=>VF mailbox
 * resources in reverse order of otx2_sriov_enable().  No-op when no VFs
 * are currently enabled.  Always returns 0.
 */
static int otx2_sriov_disable(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *pf = netdev_priv(netdev);
	int nvfs = pci_num_vf(pdev);

	if (nvfs == 0)
		return 0;

	pci_disable_sriov(pdev);

	otx2_disable_flr_me_intr(pf);
	otx2_flr_wq_destroy(pf);
	otx2_disable_pfvf_mbox_intr(pf, nvfs);
	otx2_pfvf_mbox_destroy(pf);

	return 0;
}
3215d424b6c0SSunil Goutham 
/* sriov_configure callback: numvfs == 0 requests disabling SR-IOV,
 * any other value enables that many VFs.
 */
static int otx2_sriov_configure(struct pci_dev *pdev, int numvfs)
{
	return numvfs ? otx2_sriov_enable(pdev, numvfs)
		      : otx2_sriov_disable(pdev);
}
3223d424b6c0SSunil Goutham 
/* PCI remove/shutdown handler: quiesce the hardware, detach from the AF,
 * and release everything otx2_probe() acquired, in reverse order.
 * Safe to call when probe failed early (netdev may be NULL).
 */
static void otx2_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct otx2_nic *pf;

	if (!netdev)
		return;

	pf = netdev_priv(netdev);

	/* Tell the rest of the driver we are going away */
	pf->flags |= OTX2_FLAG_PF_SHUTDOWN;

	/* Turn off HW timestamping if it was enabled */
	if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED)
		otx2_config_hw_tx_tstamp(pf, false);
	if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
		otx2_config_hw_rx_tstamp(pf, false);

	/* Disable 802.3x pause frames */
	if (pf->flags & OTX2_FLAG_RX_PAUSE_ENABLED ||
	    (pf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) {
		pf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
		pf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
		otx2_config_pause_frm(pf);
	}

#ifdef CONFIG_DCB
	/* Disable PFC config */
	if (pf->pfc_en) {
		pf->pfc_en = 0;
		otx2_config_priority_flow_ctrl(pf);
	}
#endif
	cancel_work_sync(&pf->reset_task);
	/* Disable link notifications */
	otx2_cgx_config_linkevents(pf, false);

	/* Unregister from user-visible interfaces first (devlink, netdev),
	 * then tear down features and SR-IOV.
	 */
	otx2_unregister_dl(pf);
	unregister_netdev(netdev);
	cn10k_mcs_free(pf);
	otx2_sriov_disable(pf->pdev);
	otx2_sriov_vfcfg_cleanup(pf);
	if (pf->otx2_wq)
		destroy_workqueue(pf->otx2_wq);

	otx2_ptp_destroy(pf);
	otx2_mcam_flow_del(pf);
	otx2_shutdown_tc(pf);
	otx2_shutdown_qos(pf);
	/* Give NPA/NIX LFs back to the AF */
	otx2_detach_resources(&pf->mbox);
	if (pf->hw.lmt_info)
		free_percpu(pf->hw.lmt_info);
	if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
		qmem_free(pf->dev, pf->dync_lmt);
	otx2_disable_mbox_intr(pf);
	otx2_pfaf_mbox_destroy(pf);
	pci_free_irq_vectors(pf->pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	pci_release_regions(pdev);
}
328516547577SSunil Goutham 
/* PCI driver descriptor for the RVU NIC PF.  Note that .shutdown reuses
 * the full remove path so the device is fully quiesced on reboot/kexec.
 */
static struct pci_driver otx2_pf_driver = {
	.name = DRV_NAME,
	.id_table = otx2_pf_id_table,
	.probe = otx2_probe,
	.shutdown = otx2_remove,
	.remove = otx2_remove,
	.sriov_configure = otx2_sriov_configure
};
329416547577SSunil Goutham 
otx2_rvupf_init_module(void)329516547577SSunil Goutham static int __init otx2_rvupf_init_module(void)
329616547577SSunil Goutham {
329716547577SSunil Goutham 	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
329816547577SSunil Goutham 
329916547577SSunil Goutham 	return pci_register_driver(&otx2_pf_driver);
330016547577SSunil Goutham }
330116547577SSunil Goutham 
/* Module exit point: unregister the PCI driver, which triggers
 * otx2_remove() for every bound device.
 */
static void __exit otx2_rvupf_cleanup_module(void)
{
	pci_unregister_driver(&otx2_pf_driver);
}
330616547577SSunil Goutham 
330716547577SSunil Goutham module_init(otx2_rvupf_init_module);
330816547577SSunil Goutham module_exit(otx2_rvupf_cleanup_module);
3309