/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

/* File aq_pci_func.c: Definition of PCI functions. */

#include <linux/interrupt.h>
#include <linux/module.h>

#include "aq_main.h"
#include "aq_nic.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "hw_atl/hw_atl_a0.h"
#include "hw_atl/hw_atl_b0.h"

static const struct pci_device_id aq_pci_tbl[] = {
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D100), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D107), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D108), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D109), },

	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112), },

	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112S), },

	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111E), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112E), },

	{}
};

static const struct aq_board_revision_s hw_atl_boards[] = {
	{ AQ_DEVICE_ID_0001, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
	{ AQ_DEVICE_ID_D100, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc100, },
	{ AQ_DEVICE_ID_D107, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
	{ AQ_DEVICE_ID_D108, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc108, },
	{ AQ_DEVICE_ID_D109, AQ_HWREV_1, &hw_atl_ops_a0, &hw_atl_a0_caps_aqc109, },

	{ AQ_DEVICE_ID_0001, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_D100, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc100, },
	{ AQ_DEVICE_ID_D107, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_D108, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc108, },
	{ AQ_DEVICE_ID_D109, AQ_HWREV_2, &hw_atl_ops_b0, &hw_atl_b0_caps_aqc109, },

	{ AQ_DEVICE_ID_AQC100, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_AQC107, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_AQC108, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108, },
	{ AQ_DEVICE_ID_AQC109, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109, },
	{ AQ_DEVICE_ID_AQC111, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111, },
	{ AQ_DEVICE_ID_AQC112, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112, },

	{ AQ_DEVICE_ID_AQC100S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc100s, },
	{ AQ_DEVICE_ID_AQC107S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107s, },
	{ AQ_DEVICE_ID_AQC108S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108s, },
	{ AQ_DEVICE_ID_AQC109S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109s, },
	{ AQ_DEVICE_ID_AQC111S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111s, },
	{ AQ_DEVICE_ID_AQC112S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112s, },

	{ AQ_DEVICE_ID_AQC111E, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111e, },
	{ AQ_DEVICE_ID_AQC112E, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112e, },
};

MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
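
/* Look up the hw_atl ops and capability table matching this PCI device ID
 * and silicon revision; AQ_HWREV_ANY entries accept any revision.
 */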
static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev,
				     const struct aq_hw_ops **ops,
				     const struct aq_hw_caps_s **caps)
{
	int i = 0;

	if (pdev->vendor != PCI_VENDOR_ID_AQUANTIA)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(hw_atl_boards); i++) {
		if (hw_atl_boards[i].devid == pdev->device &&
		    (hw_atl_boards[i].revision == AQ_HWREV_ANY ||
		     hw_atl_boards[i].revision == pdev->revision)) {
			*ops = hw_atl_boards[i].ops;
			*caps = hw_atl_boards[i].caps;
			break;
		}
	}

	if (i == ARRAY_SIZE(hw_atl_boards))
		return -EINVAL;

	return 0;
}

int aq_pci_func_init(struct pci_dev *pdev)
{
	int err = 0;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
	}
	if (err != 0) {
		err = -ENOSR;
		goto err_exit;
	}

	err = pci_request_regions(pdev, AQ_CFG_DRV_NAME "_mmio");
	if (err < 0)
		goto err_exit;

	pci_set_master(pdev);

	return 0;

err_exit:
	return err;
}

int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
			  char *name, void *aq_vec, cpumask_t *affinity_mask)
{
	struct pci_dev *pdev = self->pdev;
	int err = 0;

	if (pdev->msix_enabled || pdev->msi_enabled)
		err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr, 0,
				  name, aq_vec);
	else
		err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr_legacy,
				  IRQF_SHARED, name, aq_vec);

	if (err >= 0) {
		self->msix_entry_mask |= (1 << i);
		self->aq_vec[i] = aq_vec;

		if (pdev->msix_enabled)
			irq_set_affinity_hint(pci_irq_vector(pdev, i),
					      affinity_mask);
	}
	return err;
}

void aq_pci_func_free_irqs(struct aq_nic_s *self)
{
	struct pci_dev *pdev = self->pdev;
	unsigned int i = 0U;

	for (i = 32U; i--;) {
		if (!((1U << i) & self->msix_entry_mask))
			continue;

		if (pdev->msix_enabled)
			irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
		free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]);
		self->msix_entry_mask &= ~(1U << i);
	}
}

unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self)
{
	if (self->pdev->msix_enabled)
		return AQ_HW_IRQ_MSIX;
	if (self->pdev->msi_enabled)
		return AQ_HW_IRQ_MSI;
	return AQ_HW_IRQ_LEGACY;
}

static void aq_pci_free_irq_vectors(struct aq_nic_s *self)
{
	pci_free_irq_vectors(self->pdev);
}
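
/* Probe: enable the device, map the first memory BAR as the register window,
 * allocate MSI-X interrupt vectors (falling back to MSI/legacy) and register
 * the net_device.
 */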
static int aq_pci_probe(struct pci_dev *pdev,
			const struct pci_device_id *pci_id)
{
	struct aq_nic_s *self = NULL;
	int err = 0;
	struct net_device *ndev;
	resource_size_t mmio_pa;
	u32 bar;
	u32 numvecs;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = aq_pci_func_init(pdev);
	if (err)
		goto err_pci_func;

	ndev = aq_ndev_alloc();
	if (!ndev) {
		err = -ENOMEM;
		goto err_ndev;
	}

	self = netdev_priv(ndev);
	self->pdev = pdev;
	SET_NETDEV_DEV(ndev, &pdev->dev);
	pci_set_drvdata(pdev, self);

	err = aq_pci_probe_get_hw_by_id(pdev, &self->aq_hw_ops,
					&aq_nic_get_cfg(self)->aq_hw_caps);
	if (err)
		goto err_ioremap;

	self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL);
	if (!self->aq_hw) {
		err = -ENOMEM;
		goto err_ioremap;
	}
	self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self);

	for (bar = 0; bar < 4; ++bar) {
		if (IORESOURCE_MEM & pci_resource_flags(pdev, bar)) {
			resource_size_t reg_sz;

			mmio_pa = pci_resource_start(pdev, bar);
			if (mmio_pa == 0U) {
				err = -EIO;
				goto err_free_aq_hw;
			}

			reg_sz = pci_resource_len(pdev, bar);
			if (reg_sz <= 24 /*ATL_REGS_SIZE*/) {
				err = -EIO;
				goto err_free_aq_hw;
			}

			self->aq_hw->mmio = ioremap_nocache(mmio_pa, reg_sz);
			if (!self->aq_hw->mmio) {
				err = -EIO;
				goto err_free_aq_hw;
			}
			break;
		}
	}

	if (bar == 4) {
		err = -EIO;
		goto err_free_aq_hw;
	}

	numvecs = min((u8)AQ_CFG_VECS_DEF,
		      aq_nic_get_cfg(self)->aq_hw_caps->msix_irqs);
	numvecs = min(numvecs, num_online_cpus());
	/* Enable interrupts: try MSI-X first, then fall back to MSI/legacy. */
#if !AQ_CFG_FORCE_LEGACY_INT
	err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs,
				    PCI_IRQ_MSIX);
	if (err < 0) {
		err = pci_alloc_irq_vectors(self->pdev, 1, 1,
					    PCI_IRQ_MSI | PCI_IRQ_LEGACY);
		if (err < 0)
			goto err_hwinit;
	}
#endif

	/* net device init */
	aq_nic_cfg_start(self);

	aq_nic_ndev_init(self);

	err = aq_nic_ndev_register(self);
	if (err < 0)
		goto err_register;

	return 0;

err_register:
	aq_nic_free_vectors(self);
	aq_pci_free_irq_vectors(self);
err_hwinit:
	iounmap(self->aq_hw->mmio);
err_free_aq_hw:
	kfree(self->aq_hw);
err_ioremap:
	free_netdev(ndev);
err_ndev:
	pci_release_regions(pdev);
err_pci_func:
	pci_disable_device(pdev);
	return err;
}

static void aq_pci_remove(struct pci_dev *pdev)
{
	struct aq_nic_s *self = pci_get_drvdata(pdev);

	if (self->ndev) {
		if (self->ndev->reg_state == NETREG_REGISTERED)
			unregister_netdev(self->ndev);
		aq_nic_free_vectors(self);
		aq_pci_free_irq_vectors(self);
		iounmap(self->aq_hw->mmio);
		kfree(self->aq_hw);
		pci_release_regions(pdev);
		free_netdev(self->ndev);
	}

	pci_disable_device(pdev);
}

static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
{
	struct aq_nic_s *self = pci_get_drvdata(pdev);

	return aq_nic_change_pm_state(self, &pm_msg);
}

static int aq_pci_resume(struct pci_dev *pdev)
{
	struct aq_nic_s *self = pci_get_drvdata(pdev);
	pm_message_t pm_msg = PMSG_RESTORE;

	return aq_nic_change_pm_state(self, &pm_msg);
}

static struct pci_driver aq_pci_ops = {
	.name = AQ_CFG_DRV_NAME,
	.id_table = aq_pci_tbl,
	.probe = aq_pci_probe,
	.remove = aq_pci_remove,
	.suspend = aq_pci_suspend,
	.resume = aq_pci_resume,
};

module_pci_driver(aq_pci_ops);