1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 /* Copyright (C) 2015-2018 Netronome Systems, Inc. */ 3 4 /* 5 * nfp_netvf_main.c 6 * Netronome virtual function network device driver: Main entry point 7 * Author: Jason McMullan <jason.mcmullan@netronome.com> 8 * Rolf Neugebauer <rolf.neugebauer@netronome.com> 9 */ 10 11 #include <linux/module.h> 12 #include <linux/kernel.h> 13 #include <linux/init.h> 14 #include <linux/etherdevice.h> 15 16 #include "nfp_net_ctrl.h" 17 #include "nfp_net.h" 18 #include "nfp_main.h" 19 20 /** 21 * struct nfp_net_vf - NFP VF-specific device structure 22 * @nn: NFP Net structure for this device 23 * @irq_entries: Pre-allocated array of MSI-X entries 24 * @q_bar: Pointer to mapped QC memory (NULL if TX/RX mapped directly) 25 * @ddir: Per-device debugfs directory 26 */ 27 struct nfp_net_vf { 28 struct nfp_net *nn; 29 30 struct msix_entry irq_entries[NFP_NET_NON_Q_VECTORS + 31 NFP_NET_MAX_TX_RINGS]; 32 u8 __iomem *q_bar; 33 34 struct dentry *ddir; 35 }; 36 37 static const char nfp_net_driver_name[] = "nfp_netvf"; 38 39 #define PCI_DEVICE_NFP6000VF 0x6003 40 static const struct pci_device_id nfp_netvf_pci_device_ids[] = { 41 { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_NFP6000VF, 42 PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, 43 PCI_ANY_ID, 0, 44 }, 45 { 0, } /* Required last entry. */ 46 }; 47 MODULE_DEVICE_TABLE(pci, nfp_netvf_pci_device_ids); 48 49 static void nfp_netvf_get_mac_addr(struct nfp_net *nn) 50 { 51 u8 mac_addr[ETH_ALEN]; 52 53 put_unaligned_be32(nn_readl(nn, NFP_NET_CFG_MACADDR + 0), &mac_addr[0]); 54 put_unaligned_be16(nn_readw(nn, NFP_NET_CFG_MACADDR + 6), &mac_addr[4]); 55 56 if (!is_valid_ether_addr(mac_addr)) { 57 eth_hw_addr_random(nn->dp.netdev); 58 return; 59 } 60 61 ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr); 62 ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr); 63 } 64 65 static int nfp_netvf_pci_probe(struct pci_dev *pdev, 66 const struct pci_device_id *pci_id) 67 { 68 struct nfp_net_fw_version fw_ver; 69 int max_tx_rings, max_rx_rings; 70 u32 tx_bar_off, rx_bar_off; 71 u32 tx_bar_sz, rx_bar_sz; 72 int tx_bar_no, rx_bar_no; 73 struct nfp_net_vf *vf; 74 unsigned int num_irqs; 75 u8 __iomem *ctrl_bar; 76 struct nfp_net *nn; 77 u32 startq; 78 int stride; 79 int err; 80 81 vf = kzalloc(sizeof(*vf), GFP_KERNEL); 82 if (!vf) 83 return -ENOMEM; 84 pci_set_drvdata(pdev, vf); 85 86 err = pci_enable_device_mem(pdev); 87 if (err) 88 goto err_free_vf; 89 90 err = pci_request_regions(pdev, nfp_net_driver_name); 91 if (err) { 92 dev_err(&pdev->dev, "Unable to allocate device memory.\n"); 93 goto err_pci_disable; 94 } 95 96 pci_set_master(pdev); 97 98 err = dma_set_mask_and_coherent(&pdev->dev, 99 DMA_BIT_MASK(NFP_NET_MAX_DMA_BITS)); 100 if (err) 101 goto err_pci_regions; 102 103 /* Map the Control BAR. 104 * 105 * Irrespective of the advertised BAR size we only map the 106 * first NFP_NET_CFG_BAR_SZ of the BAR. This keeps the code 107 * the identical for PF and VF drivers. 108 */ 109 ctrl_bar = ioremap_nocache(pci_resource_start(pdev, NFP_NET_CTRL_BAR), 110 NFP_NET_CFG_BAR_SZ); 111 if (!ctrl_bar) { 112 dev_err(&pdev->dev, 113 "Failed to map resource %d\n", NFP_NET_CTRL_BAR); 114 err = -EIO; 115 goto err_pci_regions; 116 } 117 118 nfp_net_get_fw_version(&fw_ver, ctrl_bar); 119 if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) { 120 dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n", 121 fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor); 122 err = -EINVAL; 123 goto err_ctrl_unmap; 124 } 125 126 /* Determine stride */ 127 if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) { 128 stride = 2; 129 tx_bar_no = NFP_NET_Q0_BAR; 130 rx_bar_no = NFP_NET_Q1_BAR; 131 dev_warn(&pdev->dev, "OBSOLETE Firmware detected - VF isolation not available\n"); 132 } else { 133 switch (fw_ver.major) { 134 case 1 ... 5: 135 stride = 4; 136 tx_bar_no = NFP_NET_Q0_BAR; 137 rx_bar_no = tx_bar_no; 138 break; 139 default: 140 dev_err(&pdev->dev, "Unsupported Firmware ABI %d.%d.%d.%d\n", 141 fw_ver.resv, fw_ver.class, 142 fw_ver.major, fw_ver.minor); 143 err = -EINVAL; 144 goto err_ctrl_unmap; 145 } 146 } 147 148 /* Find out how many rings are supported */ 149 max_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS); 150 max_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS); 151 152 tx_bar_sz = NFP_QCP_QUEUE_ADDR_SZ * max_tx_rings * stride; 153 rx_bar_sz = NFP_QCP_QUEUE_ADDR_SZ * max_rx_rings * stride; 154 155 /* Sanity checks */ 156 if (tx_bar_sz > pci_resource_len(pdev, tx_bar_no)) { 157 dev_err(&pdev->dev, 158 "TX BAR too small for number of TX rings. Adjusting\n"); 159 tx_bar_sz = pci_resource_len(pdev, tx_bar_no); 160 max_tx_rings = (tx_bar_sz / NFP_QCP_QUEUE_ADDR_SZ) / 2; 161 } 162 if (rx_bar_sz > pci_resource_len(pdev, rx_bar_no)) { 163 dev_err(&pdev->dev, 164 "RX BAR too small for number of RX rings. Adjusting\n"); 165 rx_bar_sz = pci_resource_len(pdev, rx_bar_no); 166 max_rx_rings = (rx_bar_sz / NFP_QCP_QUEUE_ADDR_SZ) / 2; 167 } 168 169 startq = readl(ctrl_bar + NFP_NET_CFG_START_TXQ); 170 tx_bar_off = NFP_PCIE_QUEUE(startq); 171 startq = readl(ctrl_bar + NFP_NET_CFG_START_RXQ); 172 rx_bar_off = NFP_PCIE_QUEUE(startq); 173 174 /* Allocate and initialise the netdev */ 175 nn = nfp_net_alloc(pdev, true, max_tx_rings, max_rx_rings); 176 if (IS_ERR(nn)) { 177 err = PTR_ERR(nn); 178 goto err_ctrl_unmap; 179 } 180 vf->nn = nn; 181 182 nn->fw_ver = fw_ver; 183 nn->dp.ctrl_bar = ctrl_bar; 184 nn->dp.is_vf = 1; 185 nn->stride_tx = stride; 186 nn->stride_rx = stride; 187 188 if (rx_bar_no == tx_bar_no) { 189 u32 bar_off, bar_sz; 190 resource_size_t map_addr; 191 192 /* Make a single overlapping BAR mapping */ 193 if (tx_bar_off < rx_bar_off) 194 bar_off = tx_bar_off; 195 else 196 bar_off = rx_bar_off; 197 198 if ((tx_bar_off + tx_bar_sz) > (rx_bar_off + rx_bar_sz)) 199 bar_sz = (tx_bar_off + tx_bar_sz) - bar_off; 200 else 201 bar_sz = (rx_bar_off + rx_bar_sz) - bar_off; 202 203 map_addr = pci_resource_start(pdev, tx_bar_no) + bar_off; 204 vf->q_bar = ioremap_nocache(map_addr, bar_sz); 205 if (!vf->q_bar) { 206 nn_err(nn, "Failed to map resource %d\n", tx_bar_no); 207 err = -EIO; 208 goto err_netdev_free; 209 } 210 211 /* TX queues */ 212 nn->tx_bar = vf->q_bar + (tx_bar_off - bar_off); 213 /* RX queues */ 214 nn->rx_bar = vf->q_bar + (rx_bar_off - bar_off); 215 } else { 216 resource_size_t map_addr; 217 218 /* TX queues */ 219 map_addr = pci_resource_start(pdev, tx_bar_no) + tx_bar_off; 220 nn->tx_bar = ioremap_nocache(map_addr, tx_bar_sz); 221 if (!nn->tx_bar) { 222 nn_err(nn, "Failed to map resource %d\n", tx_bar_no); 223 err = -EIO; 224 goto err_netdev_free; 225 } 226 227 /* RX queues */ 228 map_addr = pci_resource_start(pdev, rx_bar_no) + rx_bar_off; 229 nn->rx_bar = ioremap_nocache(map_addr, rx_bar_sz); 230 if (!nn->rx_bar) { 231 nn_err(nn, "Failed to map resource %d\n", rx_bar_no); 232 err = -EIO; 233 goto err_unmap_tx; 234 } 235 } 236 237 nfp_netvf_get_mac_addr(nn); 238 239 num_irqs = nfp_net_irqs_alloc(pdev, vf->irq_entries, 240 NFP_NET_MIN_VNIC_IRQS, 241 NFP_NET_NON_Q_VECTORS + 242 nn->dp.num_r_vecs); 243 if (!num_irqs) { 244 nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n"); 245 err = -EIO; 246 goto err_unmap_rx; 247 } 248 nfp_net_irqs_assign(nn, vf->irq_entries, num_irqs); 249 250 err = nfp_net_init(nn); 251 if (err) 252 goto err_irqs_disable; 253 254 nfp_net_info(nn); 255 vf->ddir = nfp_net_debugfs_device_add(pdev); 256 nfp_net_debugfs_vnic_add(nn, vf->ddir); 257 258 return 0; 259 260 err_irqs_disable: 261 nfp_net_irqs_disable(pdev); 262 err_unmap_rx: 263 if (!vf->q_bar) 264 iounmap(nn->rx_bar); 265 err_unmap_tx: 266 if (!vf->q_bar) 267 iounmap(nn->tx_bar); 268 else 269 iounmap(vf->q_bar); 270 err_netdev_free: 271 nfp_net_free(nn); 272 err_ctrl_unmap: 273 iounmap(ctrl_bar); 274 err_pci_regions: 275 pci_release_regions(pdev); 276 err_pci_disable: 277 pci_disable_device(pdev); 278 err_free_vf: 279 pci_set_drvdata(pdev, NULL); 280 kfree(vf); 281 return err; 282 } 283 284 static void nfp_netvf_pci_remove(struct pci_dev *pdev) 285 { 286 struct nfp_net_vf *vf = pci_get_drvdata(pdev); 287 struct nfp_net *nn = vf->nn; 288 289 /* Note, the order is slightly different from above as we need 290 * to keep the nn pointer around till we have freed everything. 291 */ 292 nfp_net_debugfs_dir_clean(&nn->debugfs_dir); 293 nfp_net_debugfs_dir_clean(&vf->ddir); 294 295 nfp_net_clean(nn); 296 297 nfp_net_irqs_disable(pdev); 298 299 if (!vf->q_bar) { 300 iounmap(nn->rx_bar); 301 iounmap(nn->tx_bar); 302 } else { 303 iounmap(vf->q_bar); 304 } 305 iounmap(nn->dp.ctrl_bar); 306 307 nfp_net_free(nn); 308 309 pci_release_regions(pdev); 310 pci_disable_device(pdev); 311 312 pci_set_drvdata(pdev, NULL); 313 kfree(vf); 314 } 315 316 struct pci_driver nfp_netvf_pci_driver = { 317 .name = nfp_net_driver_name, 318 .id_table = nfp_netvf_pci_device_ids, 319 .probe = nfp_netvf_pci_probe, 320 .remove = nfp_netvf_pci_remove, 321 }; 322