1 // SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Physical Function ethernet driver
3  *
4  * Copyright (C) 2020 Marvell International Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 
11 #include <linux/module.h>
12 #include <linux/interrupt.h>
13 #include <linux/pci.h>
14 #include <linux/etherdevice.h>
15 #include <linux/of.h>
16 #include <linux/if_vlan.h>
17 #include <linux/iommu.h>
18 #include <net/ip.h>
19 
20 #include "otx2_reg.h"
21 #include "otx2_common.h"
22 #include "otx2_txrx.h"
23 #include "otx2_struct.h"
24 
25 #define DRV_NAME	"octeontx2-nicpf"
26 #define DRV_STRING	"Marvell OcteonTX2 NIC Physical Function Driver"
27 #define DRV_VERSION	"1.0"
28 
29 /* Supported devices */
30 static const struct pci_device_id otx2_pf_id_table[] = {
31 	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF) },
32 	{ 0, }  /* end of table */
33 };
34 
35 MODULE_AUTHOR("Marvell International Ltd.");
36 MODULE_DESCRIPTION(DRV_STRING);
37 MODULE_LICENSE("GPL v2");
38 MODULE_VERSION(DRV_VERSION);
39 MODULE_DEVICE_TABLE(pci, otx2_pf_id_table);
40 
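/* Mailbox peer type: TYPE_PFAF identifies the AF <=> PF mailbox,
 * TYPE_PFVF the PF <=> VF mailbox.
 */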
41 enum {
42 	TYPE_PFAF,
43 	TYPE_PFVF,
44 };
45 
46 static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
47 {
48 	bool if_up = netif_running(netdev);
49 	int err = 0;
50 
51 	if (if_up)
52 		otx2_stop(netdev);
53 
54 	netdev_info(netdev, "Changing MTU from %d to %d\n",
55 		    netdev->mtu, new_mtu);
56 	netdev->mtu = new_mtu;
57 
58 	if (if_up)
59 		err = otx2_open(netdev);
60 
61 	return err;
62 }
63 
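/* Scan the per-device mailbox regions indicated by the 'intr' bitmap and
 * queue deferred work for every mailbox that has pending messages, for
 * both the regular and the up (notification) mailbox directions.
 */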
64 static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
65 			    int first, int mdevs, u64 intr, int type)
66 {
67 	struct otx2_mbox_dev *mdev;
68 	struct otx2_mbox *mbox;
69 	struct mbox_hdr *hdr;
70 	int i;
71 
72 	for (i = first; i < mdevs; i++) {
		/* Interrupt bit position in 'intr' is relative to 'first' */
74 		if (!(intr & BIT_ULL(i - first)))
75 			continue;
76 
77 		mbox = &mw->mbox;
78 		mdev = &mbox->dev[i];
79 		if (type == TYPE_PFAF)
80 			otx2_sync_mbox_bbuf(mbox, i);
81 		hdr = mdev->mbase + mbox->rx_start;
		/* The hdr->num_msgs is set to zero immediately in the interrupt
		 * handler to ensure that it holds a correct value the next
		 * time the interrupt handler is called.
		 * pf->mbox.num_msgs holds the data for use in
		 * pfaf_mbox_handler and pf->mbox.up_num_msgs holds the data
		 * for use in pfaf_mbox_up_handler.
		 */
89 		if (hdr->num_msgs) {
90 			mw[i].num_msgs = hdr->num_msgs;
91 			hdr->num_msgs = 0;
92 			if (type == TYPE_PFAF)
93 				memset(mbox->hwbase + mbox->rx_start, 0,
94 				       ALIGN(sizeof(struct mbox_hdr),
95 					     sizeof(u64)));
96 
97 			queue_work(mbox_wq, &mw[i].mbox_wrk);
98 		}
99 
100 		mbox = &mw->mbox_up;
101 		mdev = &mbox->dev[i];
102 		if (type == TYPE_PFAF)
103 			otx2_sync_mbox_bbuf(mbox, i);
104 		hdr = mdev->mbase + mbox->rx_start;
105 		if (hdr->num_msgs) {
106 			mw[i].up_num_msgs = hdr->num_msgs;
107 			hdr->num_msgs = 0;
108 			if (type == TYPE_PFAF)
109 				memset(mbox->hwbase + mbox->rx_start, 0,
110 				       ALIGN(sizeof(struct mbox_hdr),
111 					     sizeof(u64)));
112 
113 			queue_work(mbox_wq, &mw[i].mbox_up_wrk);
114 		}
115 	}
116 }
117 
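/* Dispatch a single response received from the AF to the handler that
 * matches its message ID; unknown IDs and bad signatures are only logged.
 */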
118 static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
119 				       struct mbox_msghdr *msg)
120 {
121 	if (msg->id >= MBOX_MSG_MAX) {
122 		dev_err(pf->dev,
123 			"Mbox msg with unknown ID 0x%x\n", msg->id);
124 		return;
125 	}
126 
127 	if (msg->sig != OTX2_MBOX_RSP_SIG) {
128 		dev_err(pf->dev,
129 			"Mbox msg with wrong signature %x, ID 0x%x\n",
130 			 msg->sig, msg->id);
131 		return;
132 	}
133 
134 	switch (msg->id) {
135 	case MBOX_MSG_READY:
136 		pf->pcifunc = msg->pcifunc;
137 		break;
138 	case MBOX_MSG_MSIX_OFFSET:
139 		mbox_handler_msix_offset(pf, (struct msix_offset_rsp *)msg);
140 		break;
141 	case MBOX_MSG_NPA_LF_ALLOC:
142 		mbox_handler_npa_lf_alloc(pf, (struct npa_lf_alloc_rsp *)msg);
143 		break;
144 	case MBOX_MSG_NIX_LF_ALLOC:
145 		mbox_handler_nix_lf_alloc(pf, (struct nix_lf_alloc_rsp *)msg);
146 		break;
147 	case MBOX_MSG_NIX_TXSCH_ALLOC:
148 		mbox_handler_nix_txsch_alloc(pf,
149 					     (struct nix_txsch_alloc_rsp *)msg);
150 		break;
151 	case MBOX_MSG_NIX_BP_ENABLE:
152 		mbox_handler_nix_bp_enable(pf, (struct nix_bp_cfg_rsp *)msg);
153 		break;
154 	case MBOX_MSG_CGX_STATS:
155 		mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
156 		break;
157 	default:
158 		if (msg->rc)
159 			dev_err(pf->dev,
160 				"Mbox msg response has err %d, ID 0x%x\n",
161 				msg->rc, msg->id);
162 		break;
163 	}
164 }
165 
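/* Bottom half for AF => PF responses: walk the messages queued by the
 * interrupt handler, process each of them and finally reset the mailbox
 * region for the next exchange.
 */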
166 static void otx2_pfaf_mbox_handler(struct work_struct *work)
167 {
168 	struct otx2_mbox_dev *mdev;
169 	struct mbox_hdr *rsp_hdr;
170 	struct mbox_msghdr *msg;
171 	struct otx2_mbox *mbox;
172 	struct mbox *af_mbox;
173 	struct otx2_nic *pf;
174 	int offset, id;
175 
176 	af_mbox = container_of(work, struct mbox, mbox_wrk);
177 	mbox = &af_mbox->mbox;
178 	mdev = &mbox->dev[0];
179 	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
180 
181 	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
182 	pf = af_mbox->pfvf;
183 
184 	for (id = 0; id < af_mbox->num_msgs; id++) {
185 		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
186 		otx2_process_pfaf_mbox_msg(pf, msg);
187 		offset = mbox->rx_start + msg->next_msgoff;
188 		mdev->msgs_acked++;
189 	}
190 
191 	otx2_mbox_reset(mbox, 0);
192 }
193 
194 static void otx2_handle_link_event(struct otx2_nic *pf)
195 {
196 	struct cgx_link_user_info *linfo = &pf->linfo;
197 	struct net_device *netdev = pf->netdev;
198 
199 	pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name,
200 		linfo->link_up ? "UP" : "DOWN", linfo->speed,
201 		linfo->full_duplex ? "Full" : "Half");
202 	if (linfo->link_up) {
203 		netif_carrier_on(netdev);
204 		netif_tx_start_all_queues(netdev);
205 	} else {
206 		netif_tx_stop_all_queues(netdev);
207 		netif_carrier_off(netdev);
208 	}
209 }
210 
211 int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
212 					struct cgx_link_info_msg *msg,
213 					struct msg_rsp *rsp)
214 {
215 	/* Copy the link info sent by AF */
216 	pf->linfo = msg->link_info;
217 
218 	/* interface has not been fully configured yet */
219 	if (pf->flags & OTX2_FLAG_INTF_DOWN)
220 		return 0;
221 
222 	otx2_handle_link_event(pf);
223 	return 0;
224 }
225 
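/* Handle a single AF => PF notification (up) message.  The M() macro
 * below expands MBOX_UP_CGX_MESSAGES into one switch case per message:
 * it allocates a response of the matching type on the up mailbox, fills
 * in the common header and invokes otx2_mbox_up_handler_<fn_name>().
 * For illustration, the cgx_link_event entry expands roughly to:
 *
 *	case MBOX_MSG_CGX_LINK_EVENT: {
 *		struct msg_rsp *rsp = ...allocated on mbox_up...;
 *		...fill rsp->hdr...
 *		return otx2_mbox_up_handler_cgx_link_event(pf,
 *				(struct cgx_link_info_msg *)req, rsp);
 *	}
 */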
226 static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
227 				    struct mbox_msghdr *req)
228 {
	/* Check if valid, if not reply with an invalid msg */
230 	if (req->sig != OTX2_MBOX_REQ_SIG) {
231 		otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
232 		return -ENODEV;
233 	}
234 
235 	switch (req->id) {
236 #define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
237 	case _id: {							\
238 		struct _rsp_type *rsp;					\
239 		int err;						\
240 									\
241 		rsp = (struct _rsp_type *)otx2_mbox_alloc_msg(		\
242 			&pf->mbox.mbox_up, 0,				\
243 			sizeof(struct _rsp_type));			\
244 		if (!rsp)						\
245 			return -ENOMEM;					\
246 									\
247 		rsp->hdr.id = _id;					\
248 		rsp->hdr.sig = OTX2_MBOX_RSP_SIG;			\
249 		rsp->hdr.pcifunc = 0;					\
250 		rsp->hdr.rc = 0;					\
251 									\
252 		err = otx2_mbox_up_handler_ ## _fn_name(		\
253 			pf, (struct _req_type *)req, rsp);		\
254 		return err;						\
255 	}
256 MBOX_UP_CGX_MESSAGES
257 #undef M
258 		break;
259 	default:
260 		otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
261 		return -ENODEV;
262 	}
263 	return 0;
264 }
265 
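/* Bottom half for AF => PF notifications: process every queued up-message
 * (VF originated ones are skipped here) and then send the prepared
 * responses back to the AF.
 */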
266 static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
267 {
268 	struct mbox *af_mbox = container_of(work, struct mbox, mbox_up_wrk);
269 	struct otx2_mbox *mbox = &af_mbox->mbox_up;
270 	struct otx2_mbox_dev *mdev = &mbox->dev[0];
271 	struct otx2_nic *pf = af_mbox->pfvf;
272 	int offset, id, devid = 0;
273 	struct mbox_hdr *rsp_hdr;
274 	struct mbox_msghdr *msg;
275 
276 	rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
277 
278 	offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
279 
280 	for (id = 0; id < af_mbox->up_num_msgs; id++) {
281 		msg = (struct mbox_msghdr *)(mdev->mbase + offset);
282 
283 		devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
284 		/* Skip processing VF's messages */
285 		if (!devid)
286 			otx2_process_mbox_msg_up(pf, msg);
287 		offset = mbox->rx_start + msg->next_msgoff;
288 	}
289 
290 	otx2_mbox_msg_send(mbox, 0);
291 }
292 
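/* AF => PF mailbox IRQ: acknowledge the interrupt and defer the actual
 * message processing to the mailbox workqueue.
 */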
293 static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
294 {
295 	struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
296 	struct mbox *mbox;
297 
298 	/* Clear the IRQ */
299 	otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
300 
301 	mbox = &pf->mbox;
302 	otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF);
303 
304 	return IRQ_HANDLED;
305 }
306 
307 static void otx2_disable_mbox_intr(struct otx2_nic *pf)
308 {
309 	int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX);
310 
311 	/* Disable AF => PF mailbox IRQ */
312 	otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0));
313 	free_irq(vector, pf);
314 }
315 
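/* Hook up the AF => PF mailbox interrupt and, when probing, verify that
 * the AF answers a READY message; if it doesn't, the probe is deferred.
 */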
316 static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
317 {
318 	struct otx2_hw *hw = &pf->hw;
319 	struct msg_req *req;
320 	char *irq_name;
321 	int err;
322 
323 	/* Register mailbox interrupt handler */
324 	irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE];
325 	snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox");
326 	err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX),
327 			  otx2_pfaf_mbox_intr_handler, 0, irq_name, pf);
328 	if (err) {
329 		dev_err(pf->dev,
330 			"RVUPF: IRQ registration failed for PFAF mbox irq\n");
331 		return err;
332 	}
333 
334 	/* Enable mailbox interrupt for msgs coming from AF.
335 	 * First clear to avoid spurious interrupts, if any.
336 	 */
337 	otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
338 	otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));
339 
340 	if (!probe_af)
341 		return 0;
342 
343 	/* Check mailbox communication with AF */
344 	req = otx2_mbox_alloc_msg_ready(&pf->mbox);
345 	if (!req) {
346 		otx2_disable_mbox_intr(pf);
347 		return -ENOMEM;
348 	}
349 	err = otx2_sync_mbox_msg(&pf->mbox);
350 	if (err) {
351 		dev_warn(pf->dev,
352 			 "AF not responding to mailbox, deferring probe\n");
353 		otx2_disable_mbox_intr(pf);
354 		return -EPROBE_DEFER;
355 	}
356 
357 	return 0;
358 }
359 
360 static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
361 {
362 	struct mbox *mbox = &pf->mbox;
363 
364 	if (pf->mbox_wq) {
365 		flush_workqueue(pf->mbox_wq);
366 		destroy_workqueue(pf->mbox_wq);
367 		pf->mbox_wq = NULL;
368 	}
369 
370 	if (mbox->mbox.hwbase)
371 		iounmap((void __iomem *)mbox->mbox.hwbase);
372 
373 	otx2_mbox_destroy(&mbox->mbox);
374 	otx2_mbox_destroy(&mbox->mbox_up);
375 }
376 
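/* Set up the PF <=> AF mailbox: allocate the workqueue, map the shared
 * mailbox memory as write-combining (it is RAM, not device memory),
 * initialize the regular and up mailboxes and their work handlers.
 */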
377 static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
378 {
379 	struct mbox *mbox = &pf->mbox;
380 	void __iomem *hwbase;
381 	int err;
382 
383 	mbox->pfvf = pf;
384 	pf->mbox_wq = alloc_workqueue("otx2_pfaf_mailbox",
385 				      WQ_UNBOUND | WQ_HIGHPRI |
386 				      WQ_MEM_RECLAIM, 1);
387 	if (!pf->mbox_wq)
388 		return -ENOMEM;
389 
	/* Mailbox is a reserved memory (in RAM) region shared between
	 * admin function (i.e. AF) and this PF, and shouldn't be mapped as
	 * device memory, so that unaligned accesses are allowed.
	 */
394 	hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM),
395 			    pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM));
396 	if (!hwbase) {
397 		dev_err(pf->dev, "Unable to map PFAF mailbox region\n");
398 		err = -ENOMEM;
399 		goto exit;
400 	}
401 
402 	err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
403 			     MBOX_DIR_PFAF, 1);
404 	if (err)
405 		goto exit;
406 
407 	err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
408 			     MBOX_DIR_PFAF_UP, 1);
409 	if (err)
410 		goto exit;
411 
412 	err = otx2_mbox_bbuf_init(mbox, pf->pdev);
413 	if (err)
414 		goto exit;
415 
416 	INIT_WORK(&mbox->mbox_wrk, otx2_pfaf_mbox_handler);
417 	INIT_WORK(&mbox->mbox_up_wrk, otx2_pfaf_mbox_up_handler);
418 	otx2_mbox_lock_init(&pf->mbox);
419 
420 	return 0;
421 exit:
422 	otx2_pfaf_mbox_destroy(pf);
423 	return err;
424 }
425 
426 static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable)
427 {
428 	struct msg_req *msg;
429 	int err;
430 
431 	otx2_mbox_lock(&pf->mbox);
432 	if (enable)
433 		msg = otx2_mbox_alloc_msg_cgx_start_linkevents(&pf->mbox);
434 	else
435 		msg = otx2_mbox_alloc_msg_cgx_stop_linkevents(&pf->mbox);
436 
437 	if (!msg) {
438 		otx2_mbox_unlock(&pf->mbox);
439 		return -ENOMEM;
440 	}
441 
442 	err = otx2_sync_mbox_msg(&pf->mbox);
443 	otx2_mbox_unlock(&pf->mbox);
444 	return err;
445 }
446 
447 static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
448 {
449 	struct msg_req *msg;
450 	int err;
451 
452 	otx2_mbox_lock(&pf->mbox);
453 	if (enable)
454 		msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
455 	else
456 		msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox);
457 
458 	if (!msg) {
459 		otx2_mbox_unlock(&pf->mbox);
460 		return -ENOMEM;
461 	}
462 
463 	err = otx2_sync_mbox_msg(&pf->mbox);
464 	otx2_mbox_unlock(&pf->mbox);
465 	return err;
466 }
467 
468 int otx2_set_real_num_queues(struct net_device *netdev,
469 			     int tx_queues, int rx_queues)
470 {
471 	int err;
472 
473 	err = netif_set_real_num_tx_queues(netdev, tx_queues);
474 	if (err) {
475 		netdev_err(netdev,
476 			   "Failed to set no of Tx queues: %d\n", tx_queues);
477 		return err;
478 	}
479 
480 	err = netif_set_real_num_rx_queues(netdev, rx_queues);
481 	if (err)
482 		netdev_err(netdev,
483 			   "Failed to set no of Rx queues: %d\n", rx_queues);
484 	return err;
485 }
486 
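/* QINT IRQ: scan all CQs and SQs for error interrupt bits, log the
 * reported cause and schedule the reset task to recover the interface.
 */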
487 static irqreturn_t otx2_q_intr_handler(int irq, void *data)
488 {
489 	struct otx2_nic *pf = data;
490 	u64 val, *ptr;
491 	u64 qidx = 0;
492 
493 	/* CQ */
494 	for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {
495 		ptr = otx2_get_regaddr(pf, NIX_LF_CQ_OP_INT);
496 		val = otx2_atomic64_add((qidx << 44), ptr);
497 
498 		otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) |
499 			     (val & NIX_CQERRINT_BITS));
500 		if (!(val & (NIX_CQERRINT_BITS | BIT_ULL(42))))
501 			continue;
502 
503 		if (val & BIT_ULL(42)) {
504 			netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
505 				   qidx, otx2_read64(pf, NIX_LF_ERR_INT));
506 		} else {
507 			if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
508 				netdev_err(pf->netdev, "CQ%lld: Doorbell error",
509 					   qidx);
510 			if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
511 				netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM",
512 					   qidx);
513 		}
514 
515 		schedule_work(&pf->reset_task);
516 	}
517 
518 	/* SQ */
519 	for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
520 		ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
521 		val = otx2_atomic64_add((qidx << 44), ptr);
522 		otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
523 			     (val & NIX_SQINT_BITS));
524 
525 		if (!(val & (NIX_SQINT_BITS | BIT_ULL(42))))
526 			continue;
527 
528 		if (val & BIT_ULL(42)) {
529 			netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
530 				   qidx, otx2_read64(pf, NIX_LF_ERR_INT));
531 		} else {
532 			if (val & BIT_ULL(NIX_SQINT_LMT_ERR)) {
533 				netdev_err(pf->netdev, "SQ%lld: LMT store error NIX_LF_SQ_OP_ERR_DBG:0x%llx",
534 					   qidx,
535 					   otx2_read64(pf,
536 						       NIX_LF_SQ_OP_ERR_DBG));
537 				otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG,
538 					     BIT_ULL(44));
539 			}
540 			if (val & BIT_ULL(NIX_SQINT_MNQ_ERR)) {
				netdev_err(pf->netdev, "SQ%lld: Meta-descriptor enqueue error NIX_LF_MNQ_ERR_DBG:0x%llx\n",
542 					   qidx,
543 					   otx2_read64(pf, NIX_LF_MNQ_ERR_DBG));
544 				otx2_write64(pf, NIX_LF_MNQ_ERR_DBG,
545 					     BIT_ULL(44));
546 			}
547 			if (val & BIT_ULL(NIX_SQINT_SEND_ERR)) {
548 				netdev_err(pf->netdev, "SQ%lld: Send error, NIX_LF_SEND_ERR_DBG 0x%llx",
549 					   qidx,
550 					   otx2_read64(pf,
551 						       NIX_LF_SEND_ERR_DBG));
552 				otx2_write64(pf, NIX_LF_SEND_ERR_DBG,
553 					     BIT_ULL(44));
554 			}
555 			if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
556 				netdev_err(pf->netdev, "SQ%lld: SQB allocation failed",
557 					   qidx);
558 		}
559 
560 		schedule_work(&pf->reset_task);
561 	}
562 
563 	return IRQ_HANDLED;
564 }
565 
566 static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
567 {
568 	struct otx2_cq_poll *cq_poll = (struct otx2_cq_poll *)cq_irq;
569 	struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev;
570 	int qidx = cq_poll->cint_idx;
571 
572 	/* Disable interrupts.
573 	 *
	 * Completion interrupts behave like level-triggered interrupts
	 * and hence have to be cleared only after they are serviced.
576 	 */
577 	otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
578 
579 	/* Schedule NAPI */
580 	napi_schedule_irqoff(&cq_poll->napi);
581 
582 	return IRQ_HANDLED;
583 }
584 
585 static void otx2_disable_napi(struct otx2_nic *pf)
586 {
587 	struct otx2_qset *qset = &pf->qset;
588 	struct otx2_cq_poll *cq_poll;
589 	int qidx;
590 
591 	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
592 		cq_poll = &qset->napi[qidx];
593 		napi_disable(&cq_poll->napi);
594 		netif_napi_del(&cq_poll->napi);
595 	}
596 }
597 
598 static void otx2_free_cq_res(struct otx2_nic *pf)
599 {
600 	struct otx2_qset *qset = &pf->qset;
601 	struct otx2_cq_queue *cq;
602 	int qidx;
603 
604 	/* Disable CQs */
605 	otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_CQ, false);
606 	for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
607 		cq = &qset->cq[qidx];
608 		qmem_free(pf->dev, cq->cqe);
609 	}
610 }
611 
612 static void otx2_free_sq_res(struct otx2_nic *pf)
613 {
614 	struct otx2_qset *qset = &pf->qset;
615 	struct otx2_snd_queue *sq;
616 	int qidx;
617 
618 	/* Disable SQs */
619 	otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
620 	/* Free SQB pointers */
621 	otx2_sq_free_sqbs(pf);
622 	for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
623 		sq = &qset->sq[qidx];
624 		qmem_free(pf->dev, sq->sqe);
625 		qmem_free(pf->dev, sq->tso_hdrs);
626 		kfree(sq->sg);
627 		kfree(sq->sqb_ptrs);
628 	}
629 }
630 
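/* Allocate and configure the NPA/NIX LF resources needed to pass traffic:
 * aura/pool counts, NPA and NIX LFs, backpressure, RQ/SQ aura pools,
 * transmit schedulers and NIX queue contexts.  On failure everything
 * acquired so far is torn down in reverse order.
 */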
631 static int otx2_init_hw_resources(struct otx2_nic *pf)
632 {
633 	struct mbox *mbox = &pf->mbox;
634 	struct otx2_hw *hw = &pf->hw;
635 	struct msg_req *req;
636 	int err = 0, lvl;
637 
	/* Set the required NPA LF pool counts.
	 * Auras and pools are used in a 1:1 mapping,
	 * so aura count = pool count.
	 */
642 	hw->rqpool_cnt = hw->rx_queues;
643 	hw->sqpool_cnt = hw->tx_queues;
644 	hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
645 
646 	/* Get the size of receive buffers to allocate */
647 	pf->rbsize = RCV_FRAG_LEN(pf->netdev->mtu + OTX2_ETH_HLEN);
648 
649 	otx2_mbox_lock(mbox);
650 	/* NPA init */
651 	err = otx2_config_npa(pf);
652 	if (err)
653 		goto exit;
654 
655 	/* NIX init */
656 	err = otx2_config_nix(pf);
657 	if (err)
658 		goto err_free_npa_lf;
659 
660 	/* Enable backpressure */
661 	otx2_nix_config_bp(pf, true);
662 
663 	/* Init Auras and pools used by NIX RQ, for free buffer ptrs */
664 	err = otx2_rq_aura_pool_init(pf);
665 	if (err) {
666 		otx2_mbox_unlock(mbox);
667 		goto err_free_nix_lf;
668 	}
669 	/* Init Auras and pools used by NIX SQ, for queueing SQEs */
670 	err = otx2_sq_aura_pool_init(pf);
671 	if (err) {
672 		otx2_mbox_unlock(mbox);
673 		goto err_free_rq_ptrs;
674 	}
675 
676 	err = otx2_txsch_alloc(pf);
677 	if (err) {
678 		otx2_mbox_unlock(mbox);
679 		goto err_free_sq_ptrs;
680 	}
681 
682 	err = otx2_config_nix_queues(pf);
683 	if (err) {
684 		otx2_mbox_unlock(mbox);
685 		goto err_free_txsch;
686 	}
687 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
688 		err = otx2_txschq_config(pf, lvl);
689 		if (err) {
690 			otx2_mbox_unlock(mbox);
691 			goto err_free_nix_queues;
692 		}
693 	}
694 	otx2_mbox_unlock(mbox);
695 	return err;
696 
697 err_free_nix_queues:
698 	otx2_free_sq_res(pf);
699 	otx2_free_cq_res(pf);
700 	otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
701 err_free_txsch:
702 	if (otx2_txschq_stop(pf))
703 		dev_err(pf->dev, "%s failed to stop TX schedulers\n", __func__);
704 err_free_sq_ptrs:
705 	otx2_sq_free_sqbs(pf);
706 err_free_rq_ptrs:
707 	otx2_free_aura_ptr(pf, AURA_NIX_RQ);
708 	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
709 	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
710 	otx2_aura_pool_free(pf);
711 err_free_nix_lf:
712 	otx2_mbox_lock(mbox);
713 	req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
714 	if (req) {
715 		if (otx2_sync_mbox_msg(mbox))
716 			dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
717 	}
718 err_free_npa_lf:
719 	/* Reset NPA LF */
720 	req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
721 	if (req) {
722 		if (otx2_sync_mbox_msg(mbox))
723 			dev_err(pf->dev, "%s failed to free npalf\n", __func__);
724 	}
725 exit:
726 	otx2_mbox_unlock(mbox);
727 	return err;
728 }
729 
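/* Undo otx2_init_hw_resources(): stop TX schedulers, disable
 * backpressure and queue contexts, drain pending CQEs, release queue
 * memory and hand the NIX and NPA LFs back to the AF.
 */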
730 static void otx2_free_hw_resources(struct otx2_nic *pf)
731 {
732 	struct otx2_qset *qset = &pf->qset;
733 	struct mbox *mbox = &pf->mbox;
734 	struct otx2_cq_queue *cq;
735 	struct msg_req *req;
736 	int qidx, err;
737 
738 	/* Ensure all SQE are processed */
739 	otx2_sqb_flush(pf);
740 
741 	/* Stop transmission */
742 	err = otx2_txschq_stop(pf);
743 	if (err)
744 		dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n");
745 
746 	otx2_mbox_lock(mbox);
747 	/* Disable backpressure */
748 	if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK))
749 		otx2_nix_config_bp(pf, false);
750 	otx2_mbox_unlock(mbox);
751 
752 	/* Disable RQs */
753 	otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
754 
	/* Dequeue all CQEs */
756 	for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
757 		cq = &qset->cq[qidx];
758 		if (cq->cq_type == CQ_RX)
759 			otx2_cleanup_rx_cqes(pf, cq);
760 		else
761 			otx2_cleanup_tx_cqes(pf, cq);
762 	}
763 
764 	otx2_free_sq_res(pf);
765 
	/* Free RQ buffer pointers */
767 	otx2_free_aura_ptr(pf, AURA_NIX_RQ);
768 
769 	otx2_free_cq_res(pf);
770 
771 	otx2_mbox_lock(mbox);
772 	/* Reset NIX LF */
773 	req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
774 	if (req) {
775 		if (otx2_sync_mbox_msg(mbox))
776 			dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
777 	}
778 	otx2_mbox_unlock(mbox);
779 
780 	/* Disable NPA Pool and Aura hw context */
781 	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
782 	otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
783 	otx2_aura_pool_free(pf);
784 
785 	otx2_mbox_lock(mbox);
786 	/* Reset NPA LF */
787 	req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
788 	if (req) {
789 		if (otx2_sync_mbox_msg(mbox))
790 			dev_err(pf->dev, "%s failed to free npalf\n", __func__);
791 	}
792 	otx2_mbox_unlock(mbox);
793 }
794 
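/* ndo_open: allocate queue and NAPI state, bring up hardware resources,
 * register QINT and CINT interrupts, then enable packet I/O and report
 * link state if a notification was already received from the AF.
 */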
795 int otx2_open(struct net_device *netdev)
796 {
797 	struct otx2_nic *pf = netdev_priv(netdev);
798 	struct otx2_cq_poll *cq_poll = NULL;
799 	struct otx2_qset *qset = &pf->qset;
800 	int err = 0, qidx, vec;
801 	char *irq_name;
802 
803 	netif_carrier_off(netdev);
804 
805 	pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tx_queues;
	/* RQs and SQs are mapped to different CQs,
	 * so find out the max CQ IRQs (i.e. CINTs) needed.
	 */
809 	pf->hw.cint_cnt = max(pf->hw.rx_queues, pf->hw.tx_queues);
810 	qset->napi = kcalloc(pf->hw.cint_cnt, sizeof(*cq_poll), GFP_KERNEL);
811 	if (!qset->napi)
812 		return -ENOMEM;
813 
814 	/* CQ size of RQ */
815 	qset->rqe_cnt = qset->rqe_cnt ? qset->rqe_cnt : Q_COUNT(Q_SIZE_256);
816 	/* CQ size of SQ */
817 	qset->sqe_cnt = qset->sqe_cnt ? qset->sqe_cnt : Q_COUNT(Q_SIZE_4K);
818 
819 	err = -ENOMEM;
820 	qset->cq = kcalloc(pf->qset.cq_cnt,
821 			   sizeof(struct otx2_cq_queue), GFP_KERNEL);
822 	if (!qset->cq)
823 		goto err_free_mem;
824 
825 	qset->sq = kcalloc(pf->hw.tx_queues,
826 			   sizeof(struct otx2_snd_queue), GFP_KERNEL);
827 	if (!qset->sq)
828 		goto err_free_mem;
829 
830 	qset->rq = kcalloc(pf->hw.rx_queues,
831 			   sizeof(struct otx2_rcv_queue), GFP_KERNEL);
832 	if (!qset->rq)
833 		goto err_free_mem;
834 
835 	err = otx2_init_hw_resources(pf);
836 	if (err)
837 		goto err_free_mem;
838 
839 	/* Register NAPI handler */
840 	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
841 		cq_poll = &qset->napi[qidx];
842 		cq_poll->cint_idx = qidx;
		/* RQ0 & SQ0 are mapped to CINT0 and so on.
		 * 'cq_ids[0]' points to the RQ's CQ and
		 * 'cq_ids[1]' points to the SQ's CQ.
		 */
847 		cq_poll->cq_ids[CQ_RX] =
848 			(qidx <  pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
849 		cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
850 				      qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
851 		cq_poll->dev = (void *)pf;
852 		netif_napi_add(netdev, &cq_poll->napi,
853 			       otx2_napi_handler, NAPI_POLL_WEIGHT);
854 		napi_enable(&cq_poll->napi);
855 	}
856 
857 	/* Set maximum frame size allowed in HW */
858 	err = otx2_hw_set_mtu(pf, netdev->mtu);
859 	if (err)
860 		goto err_disable_napi;
861 
862 	/* Initialize RSS */
863 	err = otx2_rss_init(pf);
864 	if (err)
865 		goto err_disable_napi;
866 
867 	/* Register Queue IRQ handlers */
868 	vec = pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START;
869 	irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
870 
871 	snprintf(irq_name, NAME_SIZE, "%s-qerr", pf->netdev->name);
872 
873 	err = request_irq(pci_irq_vector(pf->pdev, vec),
874 			  otx2_q_intr_handler, 0, irq_name, pf);
875 	if (err) {
876 		dev_err(pf->dev,
877 			"RVUPF%d: IRQ registration failed for QERR\n",
878 			rvu_get_pf(pf->pcifunc));
879 		goto err_disable_napi;
880 	}
881 
882 	/* Enable QINT IRQ */
883 	otx2_write64(pf, NIX_LF_QINTX_ENA_W1S(0), BIT_ULL(0));
884 
885 	/* Register CQ IRQ handlers */
886 	vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
887 	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
888 		irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
889 
890 		snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", pf->netdev->name,
891 			 qidx);
892 
893 		err = request_irq(pci_irq_vector(pf->pdev, vec),
894 				  otx2_cq_intr_handler, 0, irq_name,
895 				  &qset->napi[qidx]);
896 		if (err) {
897 			dev_err(pf->dev,
898 				"RVUPF%d: IRQ registration failed for CQ%d\n",
899 				rvu_get_pf(pf->pcifunc), qidx);
900 			goto err_free_cints;
901 		}
902 		vec++;
903 
904 		otx2_config_irq_coalescing(pf, qidx);
905 
906 		/* Enable CQ IRQ */
907 		otx2_write64(pf, NIX_LF_CINTX_INT(qidx), BIT_ULL(0));
908 		otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));
909 	}
910 
911 	otx2_set_cints_affinity(pf);
912 
913 	pf->flags &= ~OTX2_FLAG_INTF_DOWN;
914 	/* 'intf_down' may be checked on any cpu */
915 	smp_wmb();
916 
917 	/* we have already received link status notification */
918 	if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
919 		otx2_handle_link_event(pf);
920 
921 	err = otx2_rxtx_enable(pf, true);
922 	if (err)
923 		goto err_free_cints;
924 
925 	return 0;
926 
927 err_free_cints:
928 	otx2_free_cints(pf, qidx);
929 	vec = pci_irq_vector(pf->pdev,
930 			     pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
931 	otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
932 	synchronize_irq(vec);
933 	free_irq(vec, pf);
934 err_disable_napi:
935 	otx2_disable_napi(pf);
936 	otx2_free_hw_resources(pf);
937 err_free_mem:
938 	kfree(qset->sq);
939 	kfree(qset->cq);
940 	kfree(qset->rq);
941 	kfree(qset->napi);
942 	return err;
943 }
944 
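/* ndo_stop: mark the interface down, stop packet I/O, release interrupts
 * and NAPI contexts, free hardware resources and queue memory (ring size
 * settings are intentionally preserved for the next open).
 */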
945 int otx2_stop(struct net_device *netdev)
946 {
947 	struct otx2_nic *pf = netdev_priv(netdev);
948 	struct otx2_cq_poll *cq_poll = NULL;
949 	struct otx2_qset *qset = &pf->qset;
950 	int qidx, vec, wrk;
951 
952 	netif_carrier_off(netdev);
953 	netif_tx_stop_all_queues(netdev);
954 
955 	pf->flags |= OTX2_FLAG_INTF_DOWN;
956 	/* 'intf_down' may be checked on any cpu */
957 	smp_wmb();
958 
959 	/* First stop packet Rx/Tx */
960 	otx2_rxtx_enable(pf, false);
961 
962 	/* Cleanup Queue IRQ */
963 	vec = pci_irq_vector(pf->pdev,
964 			     pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
965 	otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
966 	synchronize_irq(vec);
967 	free_irq(vec, pf);
968 
969 	/* Cleanup CQ NAPI and IRQ */
970 	vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
971 	for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
972 		/* Disable interrupt */
973 		otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
974 
975 		synchronize_irq(pci_irq_vector(pf->pdev, vec));
976 
977 		cq_poll = &qset->napi[qidx];
978 		napi_synchronize(&cq_poll->napi);
979 		vec++;
980 	}
981 
982 	netif_tx_disable(netdev);
983 
984 	otx2_free_hw_resources(pf);
985 	otx2_free_cints(pf, pf->hw.cint_cnt);
986 	otx2_disable_napi(pf);
987 
988 	for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
989 		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
990 
991 	for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
992 		cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
993 	devm_kfree(pf->dev, pf->refill_wrk);
994 
995 	kfree(qset->sq);
996 	kfree(qset->cq);
997 	kfree(qset->rq);
998 	kfree(qset->napi);
999 	/* Do not clear RQ/SQ ringsize settings */
1000 	memset((void *)qset + offsetof(struct otx2_qset, sqe_cnt), 0,
1001 	       sizeof(*qset) - offsetof(struct otx2_qset, sqe_cnt));
1002 	return 0;
1003 }
1004 
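/* ndo_start_xmit: drop runt and oversized non-GSO frames, append the skb
 * to the selected send queue and flow-control the queue when SQBs run low.
 */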
1005 static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
1006 {
1007 	struct otx2_nic *pf = netdev_priv(netdev);
1008 	int qidx = skb_get_queue_mapping(skb);
1009 	struct otx2_snd_queue *sq;
1010 	struct netdev_queue *txq;
1011 
1012 	/* Check for minimum and maximum packet length */
1013 	if (skb->len <= ETH_HLEN ||
1014 	    (!skb_shinfo(skb)->gso_size && skb->len > pf->max_frs)) {
1015 		dev_kfree_skb(skb);
1016 		return NETDEV_TX_OK;
1017 	}
1018 
1019 	sq = &pf->qset.sq[qidx];
1020 	txq = netdev_get_tx_queue(netdev, qidx);
1021 
1022 	if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
1023 		netif_tx_stop_queue(txq);
1024 
		/* Check again, in case SQBs got freed up */
1026 		smp_mb();
1027 		if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
1028 							> sq->sqe_thresh)
1029 			netif_tx_wake_queue(txq);
1030 
1031 		return NETDEV_TX_BUSY;
1032 	}
1033 
1034 	return NETDEV_TX_OK;
1035 }
1036 
1037 static void otx2_set_rx_mode(struct net_device *netdev)
1038 {
1039 	struct otx2_nic *pf = netdev_priv(netdev);
1040 	struct nix_rx_mode *req;
1041 
1042 	if (!(netdev->flags & IFF_UP))
1043 		return;
1044 
1045 	otx2_mbox_lock(&pf->mbox);
1046 	req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
1047 	if (!req) {
1048 		otx2_mbox_unlock(&pf->mbox);
1049 		return;
1050 	}
1051 
1052 	req->mode = NIX_RX_MODE_UCAST;
1053 
1054 	/* We don't support MAC address filtering yet */
1055 	if (netdev->flags & IFF_PROMISC)
1056 		req->mode |= NIX_RX_MODE_PROMISC;
1057 	else if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
1058 		req->mode |= NIX_RX_MODE_ALLMULTI;
1059 
1060 	otx2_sync_mbox_msg(&pf->mbox);
1061 	otx2_mbox_unlock(&pf->mbox);
1062 }
1063 
1064 static int otx2_set_features(struct net_device *netdev,
1065 			     netdev_features_t features)
1066 {
1067 	netdev_features_t changed = features ^ netdev->features;
1068 	struct otx2_nic *pf = netdev_priv(netdev);
1069 
1070 	if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
1071 		return otx2_cgx_config_loopback(pf,
1072 						features & NETIF_F_LOOPBACK);
1073 	return 0;
1074 }
1075 
1076 static void otx2_reset_task(struct work_struct *work)
1077 {
1078 	struct otx2_nic *pf = container_of(work, struct otx2_nic, reset_task);
1079 
1080 	if (!netif_running(pf->netdev))
1081 		return;
1082 
1083 	otx2_stop(pf->netdev);
1084 	pf->reset_count++;
1085 	otx2_open(pf->netdev);
1086 	netif_trans_update(pf->netdev);
1087 }
1088 
1089 static const struct net_device_ops otx2_netdev_ops = {
1090 	.ndo_open		= otx2_open,
1091 	.ndo_stop		= otx2_stop,
1092 	.ndo_start_xmit		= otx2_xmit,
1093 	.ndo_set_mac_address    = otx2_set_mac_address,
1094 	.ndo_change_mtu		= otx2_change_mtu,
1095 	.ndo_set_rx_mode	= otx2_set_rx_mode,
1096 	.ndo_set_features	= otx2_set_features,
1097 	.ndo_tx_timeout		= otx2_tx_timeout,
1098 	.ndo_get_stats64	= otx2_get_stats64,
1099 };
1100 
1101 static int otx2_check_pf_usable(struct otx2_nic *nic)
1102 {
1103 	u64 rev;
1104 
1105 	rev = otx2_read64(nic, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
1106 	rev = (rev >> 12) & 0xFF;
	/* Check if the AF has set up the revision for the RVUM block,
	 * otherwise this driver's probe should be deferred until the
	 * AF driver comes up.
	 */
1111 	if (!rev) {
1112 		dev_warn(nic->dev,
1113 			 "AF is not initialized, deferring probe\n");
1114 		return -EPROBE_DEFER;
1115 	}
1116 	return 0;
1117 }
1118 
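/* Once NIX/NPA LFs are attached the MSI-X requirement is known: free the
 * initially allocated vectors, allocate enough to cover the NIX completion
 * interrupts and re-register the mailbox interrupt (AF is already known
 * to be alive at this point).
 */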
1119 static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
1120 {
1121 	struct otx2_hw *hw = &pf->hw;
1122 	int num_vec, err;
1123 
	/* NPA interrupts are not registered, so alloc only
	 * up to the NIX vector offset.
	 */
1127 	num_vec = hw->nix_msixoff;
1128 	num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;
1129 
1130 	otx2_disable_mbox_intr(pf);
	pci_free_irq_vectors(hw->pdev);
1133 	err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
1134 	if (err < 0) {
1135 		dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n",
1136 			__func__, num_vec);
1137 		return err;
1138 	}
1139 
1140 	return otx2_register_mbox_intr(pf, false);
1141 }
1142 
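/* PCI probe: map CSRs, bring up the AF mailbox, attach NPA/NIX LFs,
 * resize the MSI-X vectors accordingly and register the net device.
 */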
1143 static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1144 {
1145 	struct device *dev = &pdev->dev;
1146 	struct net_device *netdev;
1147 	struct otx2_nic *pf;
1148 	struct otx2_hw *hw;
1149 	int err, qcount;
1150 	int num_vec;
1151 
1152 	err = pcim_enable_device(pdev);
1153 	if (err) {
1154 		dev_err(dev, "Failed to enable PCI device\n");
1155 		return err;
1156 	}
1157 
1158 	err = pci_request_regions(pdev, DRV_NAME);
1159 	if (err) {
1160 		dev_err(dev, "PCI request regions failed 0x%x\n", err);
1161 		return err;
1162 	}
1163 
1164 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
1165 	if (err) {
1166 		dev_err(dev, "DMA mask config failed, abort\n");
1167 		goto err_release_regions;
1168 	}
1169 
1170 	pci_set_master(pdev);
1171 
1172 	/* Set number of queues */
1173 	qcount = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT);
1174 
1175 	netdev = alloc_etherdev_mqs(sizeof(*pf), qcount, qcount);
1176 	if (!netdev) {
1177 		err = -ENOMEM;
1178 		goto err_release_regions;
1179 	}
1180 
1181 	pci_set_drvdata(pdev, netdev);
1182 	SET_NETDEV_DEV(netdev, &pdev->dev);
1183 	pf = netdev_priv(netdev);
1184 	pf->netdev = netdev;
1185 	pf->pdev = pdev;
1186 	pf->dev = dev;
1187 	pf->flags |= OTX2_FLAG_INTF_DOWN;
1188 
1189 	hw = &pf->hw;
1190 	hw->pdev = pdev;
1191 	hw->rx_queues = qcount;
1192 	hw->tx_queues = qcount;
1193 	hw->max_queues = qcount;
1194 
1195 	num_vec = pci_msix_vec_count(pdev);
	hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
					  GFP_KERNEL);
	if (!hw->irq_name) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
					 sizeof(cpumask_var_t), GFP_KERNEL);
	if (!hw->affinity_mask) {
		err = -ENOMEM;
		goto err_free_netdev;
	}
1205 
1206 	/* Map CSRs */
1207 	pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
1208 	if (!pf->reg_base) {
1209 		dev_err(dev, "Unable to map physical function CSRs, aborting\n");
1210 		err = -ENOMEM;
1211 		goto err_free_netdev;
1212 	}
1213 
1214 	err = otx2_check_pf_usable(pf);
1215 	if (err)
1216 		goto err_free_netdev;
1217 
1218 	err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
1219 				    RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
1220 	if (err < 0) {
1221 		dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
1222 			__func__, num_vec);
1223 		goto err_free_netdev;
1224 	}
1225 
1226 	/* Init PF <=> AF mailbox stuff */
1227 	err = otx2_pfaf_mbox_init(pf);
1228 	if (err)
1229 		goto err_free_irq_vectors;
1230 
1231 	/* Register mailbox interrupt */
1232 	err = otx2_register_mbox_intr(pf, true);
1233 	if (err)
1234 		goto err_mbox_destroy;
1235 
1236 	/* Request AF to attach NPA and NIX LFs to this PF.
1237 	 * NIX and NPA LFs are needed for this PF to function as a NIC.
1238 	 */
1239 	err = otx2_attach_npa_nix(pf);
1240 	if (err)
1241 		goto err_disable_mbox_intr;
1242 
1243 	err = otx2_realloc_msix_vectors(pf);
1244 	if (err)
1245 		goto err_detach_rsrc;
1246 
1247 	err = otx2_set_real_num_queues(netdev, hw->tx_queues, hw->rx_queues);
1248 	if (err)
1249 		goto err_detach_rsrc;
1250 
1251 	otx2_setup_dev_hw_settings(pf);
1252 
1253 	/* Assign default mac address */
1254 	otx2_get_mac_from_af(netdev);
1255 
1256 	/* NPA's pool is a stack to which SW frees buffer pointers via Aura.
1257 	 * HW allocates buffer pointer from stack and uses it for DMA'ing
	 * ingress packets. In some scenarios HW can free back allocated buffer
1259 	 * pointers to pool. This makes it impossible for SW to maintain a
1260 	 * parallel list where physical addresses of buffer pointers (IOVAs)
1261 	 * given to HW can be saved for later reference.
1262 	 *
1263 	 * So the only way to convert Rx packet's buffer address is to use
1264 	 * IOMMU's iova_to_phys() handler which translates the address by
1265 	 * walking through the translation tables.
1266 	 */
1267 	pf->iommu_domain = iommu_get_domain_for_dev(dev);
1268 
1269 	netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
1270 			       NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
1271 			       NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6);
1272 	netdev->features |= netdev->hw_features;
1273 
1274 	netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
1275 
1276 	netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
1277 	netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
1278 
1279 	netdev->netdev_ops = &otx2_netdev_ops;
1280 
1281 	/* MTU range: 64 - 9190 */
1282 	netdev->min_mtu = OTX2_MIN_MTU;
1283 	netdev->max_mtu = OTX2_MAX_MTU;
1284 
1285 	INIT_WORK(&pf->reset_task, otx2_reset_task);
1286 
1287 	err = register_netdev(netdev);
1288 	if (err) {
1289 		dev_err(dev, "Failed to register netdevice\n");
1290 		goto err_detach_rsrc;
1291 	}
1292 
1293 	otx2_set_ethtool_ops(netdev);
1294 
1295 	/* Enable link notifications */
1296 	otx2_cgx_config_linkevents(pf, true);
1297 
1298 	return 0;
1299 
1300 err_detach_rsrc:
1301 	otx2_detach_resources(&pf->mbox);
1302 err_disable_mbox_intr:
1303 	otx2_disable_mbox_intr(pf);
1304 err_mbox_destroy:
1305 	otx2_pfaf_mbox_destroy(pf);
1306 err_free_irq_vectors:
1307 	pci_free_irq_vectors(hw->pdev);
1308 err_free_netdev:
1309 	pci_set_drvdata(pdev, NULL);
1310 	free_netdev(netdev);
1311 err_release_regions:
1312 	pci_release_regions(pdev);
1313 	return err;
1314 }
1315 
1316 static void otx2_remove(struct pci_dev *pdev)
1317 {
1318 	struct net_device *netdev = pci_get_drvdata(pdev);
1319 	struct otx2_nic *pf;
1320 
1321 	if (!netdev)
1322 		return;
1323 
1324 	pf = netdev_priv(netdev);
1325 
1326 	/* Disable link notifications */
1327 	otx2_cgx_config_linkevents(pf, false);
1328 
1329 	unregister_netdev(netdev);
1330 	otx2_detach_resources(&pf->mbox);
1331 	otx2_disable_mbox_intr(pf);
1332 	otx2_pfaf_mbox_destroy(pf);
1333 	pci_free_irq_vectors(pf->pdev);
1334 	pci_set_drvdata(pdev, NULL);
1335 	free_netdev(netdev);
1336 
1337 	pci_release_regions(pdev);
1338 }
1339 
1340 static struct pci_driver otx2_pf_driver = {
1341 	.name = DRV_NAME,
1342 	.id_table = otx2_pf_id_table,
1343 	.probe = otx2_probe,
1344 	.shutdown = otx2_remove,
1345 	.remove = otx2_remove,
1346 };
1347 
1348 static int __init otx2_rvupf_init_module(void)
1349 {
1350 	pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
1351 
1352 	return pci_register_driver(&otx2_pf_driver);
1353 }
1354 
1355 static void __exit otx2_rvupf_cleanup_module(void)
1356 {
1357 	pci_unregister_driver(&otx2_pf_driver);
1358 }
1359 
1360 module_init(otx2_rvupf_init_module);
1361 module_exit(otx2_rvupf_cleanup_module);
1362