xref: /openbmc/linux/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c (revision 4f2c0a4acffbec01079c28f839422e64ddeff004)
196de2506SJakub Kicinski // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
296de2506SJakub Kicinski /* Copyright (C) 2015-2018 Netronome Systems, Inc. */
34c352362SJakub Kicinski 
44c352362SJakub Kicinski /*
54c352362SJakub Kicinski  * nfp_netvf_main.c
64c352362SJakub Kicinski  * Netronome virtual function network device driver: Main entry point
74c352362SJakub Kicinski  * Author: Jason McMullan <jason.mcmullan@netronome.com>
84c352362SJakub Kicinski  *         Rolf Neugebauer <rolf.neugebauer@netronome.com>
94c352362SJakub Kicinski  */
104c352362SJakub Kicinski 
114c352362SJakub Kicinski #include <linux/module.h>
124c352362SJakub Kicinski #include <linux/kernel.h>
134c352362SJakub Kicinski #include <linux/init.h>
144c352362SJakub Kicinski #include <linux/etherdevice.h>
154c352362SJakub Kicinski 
169423d24bSJakub Kicinski #include "nfpcore/nfp_dev.h"
174c352362SJakub Kicinski #include "nfp_net_ctrl.h"
184c352362SJakub Kicinski #include "nfp_net.h"
19fdace6c2SJakub Kicinski #include "nfp_main.h"
20fdace6c2SJakub Kicinski 
/**
 * struct nfp_net_vf - NFP VF-specific device structure
 * @nn:		NFP Net structure for this device
 * @irq_entries: Pre-allocated array of MSI-X entries, sized for the
 *		 non-queue vectors plus the maximum number of TX rings
 * @q_bar:	Pointer to mapped QC memory (NULL if TX/RX mapped directly)
 * @ddir:	Per-device debugfs directory
 *
 * Stored as the PCI drvdata; allocated in probe and freed in remove.
 */
struct nfp_net_vf {
	struct nfp_net *nn;

	struct msix_entry irq_entries[NFP_NET_NON_Q_VECTORS +
				      NFP_NET_MAX_TX_RINGS];
	u8 __iomem *q_bar;

	struct dentry *ddir;
};
374c352362SJakub Kicinski 
static const char nfp_net_driver_name[] = "nfp_netvf";

/* PCI match table: NFP3800 and NFP6000 VFs under both the Netronome and
 * Corigine vendor IDs.  Entry layout is { vendor, device, subvendor,
 * subdevice, class, class_mask, driver_data }; driver_data is an
 * NFP_DEV_* value used in probe to index the nfp_dev_info[] table.
 */
static const struct pci_device_id nfp_netvf_pci_device_ids[] = {
	{ PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NFP3800_VF,
	  PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
	  PCI_ANY_ID, 0, NFP_DEV_NFP3800_VF,
	},
	{ PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NFP6000_VF,
	  PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
	  PCI_ANY_ID, 0, NFP_DEV_NFP6000_VF,
	},
	{ PCI_VENDOR_ID_CORIGINE, PCI_DEVICE_ID_NFP3800_VF,
	  PCI_VENDOR_ID_CORIGINE, PCI_ANY_ID,
	  PCI_ANY_ID, 0, NFP_DEV_NFP3800_VF,
	},
	{ PCI_VENDOR_ID_CORIGINE, PCI_DEVICE_ID_NFP6000_VF,
	  PCI_VENDOR_ID_CORIGINE, PCI_ANY_ID,
	  PCI_ANY_ID, 0, NFP_DEV_NFP6000_VF,
	},
	{ 0, } /* Required last entry. */
};
MODULE_DEVICE_TABLE(pci, nfp_netvf_pci_device_ids);
604c352362SJakub Kicinski 
nfp_netvf_get_mac_addr(struct nfp_net * nn)614c352362SJakub Kicinski static void nfp_netvf_get_mac_addr(struct nfp_net *nn)
624c352362SJakub Kicinski {
634c352362SJakub Kicinski 	u8 mac_addr[ETH_ALEN];
644c352362SJakub Kicinski 
654c352362SJakub Kicinski 	put_unaligned_be32(nn_readl(nn, NFP_NET_CFG_MACADDR + 0), &mac_addr[0]);
66416db5c1SJakub Kicinski 	put_unaligned_be16(nn_readw(nn, NFP_NET_CFG_MACADDR + 6), &mac_addr[4]);
674c352362SJakub Kicinski 
684c352362SJakub Kicinski 	if (!is_valid_ether_addr(mac_addr)) {
6979c12a75SJakub Kicinski 		eth_hw_addr_random(nn->dp.netdev);
704c352362SJakub Kicinski 		return;
714c352362SJakub Kicinski 	}
724c352362SJakub Kicinski 
73f3956ebbSJakub Kicinski 	eth_hw_addr_set(nn->dp.netdev, mac_addr);
7479c12a75SJakub Kicinski 	ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
754c352362SJakub Kicinski }
764c352362SJakub Kicinski 
/**
 * nfp_netvf_pci_probe() - Probe and set up an NFP VF network device
 * @pdev:	PCI device
 * @pci_id:	Matched entry from nfp_netvf_pci_device_ids
 *
 * Enables the device, maps the control and queue BARs, allocates the
 * nfp_net instance and its MSI-X vectors, and registers the netdev.
 * On failure the goto chain unwinds each step in reverse order.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int nfp_netvf_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *pci_id)
{
	const struct nfp_dev_info *dev_info;
	struct nfp_net_fw_version fw_ver;
	int max_tx_rings, max_rx_rings;
	u32 tx_bar_off, rx_bar_off;
	u32 tx_bar_sz, rx_bar_sz;
	int tx_bar_no, rx_bar_no;
	struct nfp_net_vf *vf;
	unsigned int num_irqs;
	u8 __iomem *ctrl_bar;
	struct nfp_net *nn;
	u32 startq;
	int stride;
	int err;

	/* driver_data from the match table indexes the per-chip info table */
	dev_info = &nfp_dev_info[pci_id->driver_data];

	vf = kzalloc(sizeof(*vf), GFP_KERNEL);
	if (!vf)
		return -ENOMEM;
	pci_set_drvdata(pdev, vf);

	err = pci_enable_device_mem(pdev);
	if (err)
		goto err_free_vf;

	err = pci_request_regions(pdev, nfp_net_driver_name);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate device memory.\n");
		goto err_pci_disable;
	}

	pci_set_master(pdev);

	err = dma_set_mask_and_coherent(&pdev->dev, dev_info->dma_mask);
	if (err)
		goto err_pci_regions;

	/* Map the Control BAR.
	 *
	 * Irrespective of the advertised BAR size we only map the
	 * first NFP_NET_CFG_BAR_SZ of the BAR.  This keeps the code
	 * the identical for PF and VF drivers.
	 */
	ctrl_bar = ioremap(pci_resource_start(pdev, NFP_NET_CTRL_BAR),
				   NFP_NET_CFG_BAR_SZ);
	if (!ctrl_bar) {
		dev_err(&pdev->dev,
			"Failed to map resource %d\n", NFP_NET_CTRL_BAR);
		err = -EIO;
		goto err_pci_regions;
	}

	/* Reject firmware with a reserved extend bit or a non-generic class */
	nfp_net_get_fw_version(&fw_ver, ctrl_bar);
	if (fw_ver.extend & NFP_NET_CFG_VERSION_RESERVED_MASK ||
	    fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
		dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n",
			fw_ver.extend, fw_ver.class,
			fw_ver.major, fw_ver.minor);
		err = -EINVAL;
		goto err_ctrl_unmap;
	}

	/* Determine stride */
	if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
		/* ABI 0.0.0.1: separate TX and RX queue BARs, stride 2 */
		stride = 2;
		tx_bar_no = NFP_NET_Q0_BAR;
		rx_bar_no = NFP_NET_Q1_BAR;
		dev_warn(&pdev->dev, "OBSOLETE Firmware detected - VF isolation not available\n");
	} else {
		switch (fw_ver.major) {
		case 1 ... 5:
			/* ABI majors 1-5: TX and RX share one BAR, stride 4 */
			stride = 4;
			tx_bar_no = NFP_NET_Q0_BAR;
			rx_bar_no = tx_bar_no;
			break;
		default:
			dev_err(&pdev->dev, "Unsupported Firmware ABI %d.%d.%d.%d\n",
				fw_ver.extend, fw_ver.class,
				fw_ver.major, fw_ver.minor);
			err = -EINVAL;
			goto err_ctrl_unmap;
		}
	}

	/* Find out how many rings are supported */
	max_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
	max_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);

	tx_bar_sz = NFP_QCP_QUEUE_ADDR_SZ * max_tx_rings * stride;
	rx_bar_sz = NFP_QCP_QUEUE_ADDR_SZ * max_rx_rings * stride;

	/* Sanity checks */
	if (tx_bar_sz > pci_resource_len(pdev, tx_bar_no)) {
		dev_err(&pdev->dev,
			"TX BAR too small for number of TX rings. Adjusting\n");
		tx_bar_sz = pci_resource_len(pdev, tx_bar_no);
		max_tx_rings = (tx_bar_sz / NFP_QCP_QUEUE_ADDR_SZ) / 2;
	}
	if (rx_bar_sz > pci_resource_len(pdev, rx_bar_no)) {
		dev_err(&pdev->dev,
			"RX BAR too small for number of RX rings. Adjusting\n");
		rx_bar_sz = pci_resource_len(pdev, rx_bar_no);
		max_rx_rings = (rx_bar_sz / NFP_QCP_QUEUE_ADDR_SZ) / 2;
	}

	/* Translate the first TX/RX queue numbers into BAR offsets */
	startq = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
	tx_bar_off = nfp_qcp_queue_offset(dev_info, startq);
	startq = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
	rx_bar_off = nfp_qcp_queue_offset(dev_info, startq);

	/* Allocate and initialise the netdev */
	nn = nfp_net_alloc(pdev, dev_info, ctrl_bar, true,
			   max_tx_rings, max_rx_rings);
	if (IS_ERR(nn)) {
		err = PTR_ERR(nn);
		goto err_ctrl_unmap;
	}
	vf->nn = nn;

	nn->dp.is_vf = 1;
	nn->stride_tx = stride;
	nn->stride_rx = stride;

	if (rx_bar_no == tx_bar_no) {
		u32 bar_off, bar_sz;
		resource_size_t map_addr;

		/* Make a single overlapping BAR mapping */
		if (tx_bar_off < rx_bar_off)
			bar_off = tx_bar_off;
		else
			bar_off = rx_bar_off;

		if ((tx_bar_off + tx_bar_sz) > (rx_bar_off + rx_bar_sz))
			bar_sz = (tx_bar_off + tx_bar_sz) - bar_off;
		else
			bar_sz = (rx_bar_off + rx_bar_sz) - bar_off;

		map_addr = pci_resource_start(pdev, tx_bar_no) + bar_off;
		vf->q_bar = ioremap(map_addr, bar_sz);
		if (!vf->q_bar) {
			nn_err(nn, "Failed to map resource %d\n", tx_bar_no);
			err = -EIO;
			goto err_netdev_free;
		}

		/* TX queues */
		nn->tx_bar = vf->q_bar + (tx_bar_off - bar_off);
		/* RX queues */
		nn->rx_bar = vf->q_bar + (rx_bar_off - bar_off);
	} else {
		resource_size_t map_addr;

		/* TX queues */
		map_addr = pci_resource_start(pdev, tx_bar_no) + tx_bar_off;
		nn->tx_bar = ioremap(map_addr, tx_bar_sz);
		if (!nn->tx_bar) {
			nn_err(nn, "Failed to map resource %d\n", tx_bar_no);
			err = -EIO;
			goto err_netdev_free;
		}

		/* RX queues */
		map_addr = pci_resource_start(pdev, rx_bar_no) + rx_bar_off;
		nn->rx_bar = ioremap(map_addr, rx_bar_sz);
		if (!nn->rx_bar) {
			nn_err(nn, "Failed to map resource %d\n", rx_bar_no);
			err = -EIO;
			goto err_unmap_tx;
		}
	}

	nfp_netvf_get_mac_addr(nn);

	/* Request at least the minimum vNIC IRQs; returns 0 on failure */
	num_irqs = nfp_net_irqs_alloc(pdev, vf->irq_entries,
				      NFP_NET_MIN_VNIC_IRQS,
				      NFP_NET_NON_Q_VECTORS +
				      nn->dp.num_r_vecs);
	if (!num_irqs) {
		nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
		err = -EIO;
		goto err_unmap_rx;
	}
	nfp_net_irqs_assign(nn, vf->irq_entries, num_irqs);

	err = nfp_net_init(nn);
	if (err)
		goto err_irqs_disable;

	nfp_net_info(nn);
	vf->ddir = nfp_net_debugfs_device_add(pdev);
	nfp_net_debugfs_vnic_add(nn, vf->ddir);

	return 0;

err_irqs_disable:
	nfp_net_irqs_disable(pdev);
err_unmap_rx:
	/* When q_bar is set the single combined mapping is unmapped below */
	if (!vf->q_bar)
		iounmap(nn->rx_bar);
err_unmap_tx:
	if (!vf->q_bar)
		iounmap(nn->tx_bar);
	else
		iounmap(vf->q_bar);
err_netdev_free:
	nfp_net_free(nn);
err_ctrl_unmap:
	iounmap(ctrl_bar);
err_pci_regions:
	pci_release_regions(pdev);
err_pci_disable:
	pci_disable_device(pdev);
err_free_vf:
	pci_set_drvdata(pdev, NULL);
	kfree(vf);
	return err;
}
2984c352362SJakub Kicinski 
/**
 * nfp_netvf_pci_remove() - Tear down an NFP VF network device
 * @pdev:	PCI device
 *
 * Undoes probe in reverse order: debugfs, netdev, IRQs, BAR mappings,
 * PCI resources, and finally the vf structure itself.  Also used as the
 * shutdown callback; returns early if drvdata was already cleared.
 */
static void nfp_netvf_pci_remove(struct pci_dev *pdev)
{
	struct nfp_net_vf *vf;
	struct nfp_net *nn;

	/* Guard against a second invocation (remove after shutdown) */
	vf = pci_get_drvdata(pdev);
	if (!vf)
		return;

	nn = vf->nn;

	/* Note, the order is slightly different from above as we need
	 * to keep the nn pointer around till we have freed everything.
	 */
	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
	nfp_net_debugfs_dir_clean(&vf->ddir);

	nfp_net_clean(nn);

	nfp_net_irqs_disable(pdev);

	/* q_bar set means TX/RX shared one combined mapping in probe */
	if (!vf->q_bar) {
		iounmap(nn->rx_bar);
		iounmap(nn->tx_bar);
	} else {
		iounmap(vf->q_bar);
	}
	iounmap(nn->dp.ctrl_bar);

	nfp_net_free(nn);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	pci_set_drvdata(pdev, NULL);
	kfree(vf);
}
3364c352362SJakub Kicinski 
/* VF PCI driver; shutdown reuses the remove path, which tolerates being
 * called twice via the drvdata NULL check.
 */
struct pci_driver nfp_netvf_pci_driver = {
	.name        = nfp_net_driver_name,
	.id_table    = nfp_netvf_pci_device_ids,
	.probe       = nfp_netvf_pci_probe,
	.remove      = nfp_netvf_pci_remove,
	.shutdown    = nfp_netvf_pci_remove,
};
344