1 // SPDX-License-Identifier: GPL-2.0+ 2 /* Copyright (c) 2018 Quantenna Communications, Inc. All rights reserved. */ 3 4 #include <linux/module.h> 5 #include <linux/printk.h> 6 #include <linux/pci.h> 7 #include <linux/spinlock.h> 8 #include <linux/mutex.h> 9 #include <linux/netdevice.h> 10 #include <linux/seq_file.h> 11 #include <linux/workqueue.h> 12 #include <linux/completion.h> 13 14 #include "pcie_priv.h" 15 #include "bus.h" 16 #include "shm_ipc.h" 17 #include "core.h" 18 #include "debug.h" 19 #include "util.h" 20 #include "qtn_hw_ids.h" 21 22 #define QTN_SYSCTL_BAR 0 23 #define QTN_SHMEM_BAR 2 24 #define QTN_DMA_BAR 3 25 26 #define QTN_PCIE_MAX_FW_BUFSZ (1 * 1024 * 1024) 27 28 static bool use_msi = true; 29 module_param(use_msi, bool, 0644); 30 MODULE_PARM_DESC(use_msi, "set 0 to use legacy interrupt"); 31 32 static unsigned int tx_bd_size_param; 33 module_param(tx_bd_size_param, uint, 0644); 34 MODULE_PARM_DESC(tx_bd_size_param, "Tx descriptors queue size"); 35 36 static unsigned int rx_bd_size_param; 37 module_param(rx_bd_size_param, uint, 0644); 38 MODULE_PARM_DESC(rx_bd_size_param, "Rx descriptors queue size"); 39 40 static u8 flashboot = 1; 41 module_param(flashboot, byte, 0644); 42 MODULE_PARM_DESC(flashboot, "set to 0 to use FW binary file on FS"); 43 44 static unsigned int fw_blksize_param = QTN_PCIE_MAX_FW_BUFSZ; 45 module_param(fw_blksize_param, uint, 0644); 46 MODULE_PARM_DESC(fw_blksize_param, "firmware loading block size in bytes"); 47 48 #define DRV_NAME "qtnfmac_pcie" 49 50 int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb) 51 { 52 struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); 53 int ret; 54 55 ret = qtnf_shm_ipc_send(&priv->shm_ipc_ep_in, skb->data, skb->len); 56 57 if (ret == -ETIMEDOUT) { 58 pr_err("EP firmware is dead\n"); 59 bus->fw_state = QTNF_FW_STATE_DEAD; 60 } 61 62 return ret; 63 } 64 65 int qtnf_pcie_alloc_skb_array(struct qtnf_pcie_bus_priv *priv) 66 { 67 struct sk_buff **vaddr; 68 int len; 69 70 len = priv->tx_bd_num * sizeof(*priv->tx_skb) + 71 priv->rx_bd_num * sizeof(*priv->rx_skb); 72 vaddr = devm_kzalloc(&priv->pdev->dev, len, GFP_KERNEL); 73 74 if (!vaddr) 75 return -ENOMEM; 76 77 priv->tx_skb = vaddr; 78 79 vaddr += priv->tx_bd_num; 80 priv->rx_skb = vaddr; 81 82 return 0; 83 } 84 85 static void qtnf_pcie_bringup_fw_async(struct qtnf_bus *bus) 86 { 87 struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); 88 struct pci_dev *pdev = priv->pdev; 89 90 get_device(&pdev->dev); 91 schedule_work(&bus->fw_work); 92 } 93 94 static int qtnf_dbg_mps_show(struct seq_file *s, void *data) 95 { 96 struct qtnf_bus *bus = dev_get_drvdata(s->private); 97 struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); 98 99 seq_printf(s, "%d\n", pcie_get_mps(priv->pdev)); 100 101 return 0; 102 } 103 104 static int qtnf_dbg_msi_show(struct seq_file *s, void *data) 105 { 106 struct qtnf_bus *bus = dev_get_drvdata(s->private); 107 struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); 108 109 seq_printf(s, "%u\n", priv->msi_enabled); 110 111 return 0; 112 } 113 114 static int qtnf_dbg_shm_stats(struct seq_file *s, void *data) 115 { 116 struct qtnf_bus *bus = dev_get_drvdata(s->private); 117 struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); 118 119 seq_printf(s, "shm_ipc_ep_in.tx_packet_count(%zu)\n", 120 priv->shm_ipc_ep_in.tx_packet_count); 121 seq_printf(s, "shm_ipc_ep_in.rx_packet_count(%zu)\n", 122 priv->shm_ipc_ep_in.rx_packet_count); 123 seq_printf(s, "shm_ipc_ep_out.tx_packet_count(%zu)\n", 124 priv->shm_ipc_ep_out.tx_timeout_count); 125 seq_printf(s, "shm_ipc_ep_out.rx_packet_count(%zu)\n", 126 priv->shm_ipc_ep_out.rx_packet_count); 127 128 return 0; 129 } 130 131 int qtnf_pcie_fw_boot_done(struct qtnf_bus *bus) 132 { 133 struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); 134 char card_id[64]; 135 int ret; 136 137 bus->fw_state = QTNF_FW_STATE_BOOT_DONE; 138 ret = qtnf_core_attach(bus); 139 if (ret) { 140 pr_err("failed to attach core\n"); 141 } else { 142 snprintf(card_id, sizeof(card_id), "%s:%s", 143 DRV_NAME, pci_name(priv->pdev)); 144 qtnf_debugfs_init(bus, card_id); 145 qtnf_debugfs_add_entry(bus, "mps", qtnf_dbg_mps_show); 146 qtnf_debugfs_add_entry(bus, "msi_enabled", qtnf_dbg_msi_show); 147 qtnf_debugfs_add_entry(bus, "shm_stats", qtnf_dbg_shm_stats); 148 } 149 150 return ret; 151 } 152 153 static void qtnf_tune_pcie_mps(struct pci_dev *pdev) 154 { 155 struct pci_dev *parent; 156 int mps_p, mps_o, mps_m, mps; 157 int ret; 158 159 /* current mps */ 160 mps_o = pcie_get_mps(pdev); 161 162 /* maximum supported mps */ 163 mps_m = 128 << pdev->pcie_mpss; 164 165 /* suggested new mps value */ 166 mps = mps_m; 167 168 if (pdev->bus && pdev->bus->self) { 169 /* parent (bus) mps */ 170 parent = pdev->bus->self; 171 172 if (pci_is_pcie(parent)) { 173 mps_p = pcie_get_mps(parent); 174 mps = min(mps_m, mps_p); 175 } 176 } 177 178 ret = pcie_set_mps(pdev, mps); 179 if (ret) { 180 pr_err("failed to set mps to %d, keep using current %d\n", 181 mps, mps_o); 182 return; 183 } 184 185 pr_debug("set mps to %d (was %d, max %d)\n", mps, mps_o, mps_m); 186 } 187 188 static void qtnf_pcie_init_irq(struct qtnf_pcie_bus_priv *priv, bool use_msi) 189 { 190 struct pci_dev *pdev = priv->pdev; 191 192 /* fall back to legacy INTx interrupts by default */ 193 priv->msi_enabled = 0; 194 195 /* check if MSI capability is available */ 196 if (use_msi) { 197 if (!pci_enable_msi(pdev)) { 198 pr_debug("enabled MSI interrupt\n"); 199 priv->msi_enabled = 1; 200 } else { 201 pr_warn("failed to enable MSI interrupts"); 202 } 203 } 204 205 if (!priv->msi_enabled) { 206 pr_warn("legacy PCIE interrupts enabled\n"); 207 pci_intx(pdev, 1); 208 } 209 } 210 211 static void __iomem *qtnf_map_bar(struct pci_dev *pdev, u8 index) 212 { 213 void __iomem *vaddr; 214 dma_addr_t busaddr; 215 size_t len; 216 int ret; 217 218 ret = pcim_iomap_regions(pdev, 1 << index, "qtnfmac_pcie"); 219 if (ret) 220 return IOMEM_ERR_PTR(ret); 221 222 busaddr = pci_resource_start(pdev, index); 223 len = pci_resource_len(pdev, index); 224 vaddr = pcim_iomap_table(pdev)[index]; 225 if (!vaddr) 226 return IOMEM_ERR_PTR(-ENOMEM); 227 228 pr_debug("BAR%u vaddr=0x%p busaddr=%pad len=%u\n", 229 index, vaddr, &busaddr, (int)len); 230 231 return vaddr; 232 } 233 234 static void qtnf_pcie_control_rx_callback(void *arg, const u8 __iomem *buf, 235 size_t len) 236 { 237 struct qtnf_pcie_bus_priv *priv = arg; 238 struct qtnf_bus *bus = pci_get_drvdata(priv->pdev); 239 struct sk_buff *skb; 240 241 if (unlikely(len == 0)) { 242 pr_warn("zero length packet received\n"); 243 return; 244 } 245 246 skb = __dev_alloc_skb(len, GFP_KERNEL); 247 248 if (unlikely(!skb)) { 249 pr_err("failed to allocate skb\n"); 250 return; 251 } 252 253 memcpy_fromio(skb_put(skb, len), buf, len); 254 255 qtnf_trans_handle_rx_ctl_packet(bus, skb); 256 } 257 258 void qtnf_pcie_init_shm_ipc(struct qtnf_pcie_bus_priv *priv, 259 struct qtnf_shm_ipc_region __iomem *ipc_tx_reg, 260 struct qtnf_shm_ipc_region __iomem *ipc_rx_reg, 261 const struct qtnf_shm_ipc_int *ipc_int) 262 { 263 const struct qtnf_shm_ipc_rx_callback rx_callback = { 264 qtnf_pcie_control_rx_callback, priv }; 265 266 qtnf_shm_ipc_init(&priv->shm_ipc_ep_in, QTNF_SHM_IPC_OUTBOUND, 267 ipc_tx_reg, priv->workqueue, 268 ipc_int, &rx_callback); 269 qtnf_shm_ipc_init(&priv->shm_ipc_ep_out, QTNF_SHM_IPC_INBOUND, 270 ipc_rx_reg, priv->workqueue, 271 ipc_int, &rx_callback); 272 } 273 274 static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) 275 { 276 struct qtnf_pcie_bus_priv *pcie_priv; 277 struct qtnf_bus *bus; 278 void __iomem *sysctl_bar; 279 void __iomem *epmem_bar; 280 void __iomem *dmareg_bar; 281 unsigned int chipid; 282 int ret; 283 284 if (!pci_is_pcie(pdev)) { 285 pr_err("device %s is not PCI Express\n", pci_name(pdev)); 286 return -EIO; 287 } 288 289 qtnf_tune_pcie_mps(pdev); 290 291 ret = pcim_enable_device(pdev); 292 if (ret) { 293 pr_err("failed to init PCI device %x\n", pdev->device); 294 return ret; 295 } 296 297 pci_set_master(pdev); 298 299 sysctl_bar = qtnf_map_bar(pdev, QTN_SYSCTL_BAR); 300 if (IS_ERR(sysctl_bar)) { 301 pr_err("failed to map BAR%u\n", QTN_SYSCTL_BAR); 302 return ret; 303 } 304 305 dmareg_bar = qtnf_map_bar(pdev, QTN_DMA_BAR); 306 if (IS_ERR(dmareg_bar)) { 307 pr_err("failed to map BAR%u\n", QTN_DMA_BAR); 308 return ret; 309 } 310 311 epmem_bar = qtnf_map_bar(pdev, QTN_SHMEM_BAR); 312 if (IS_ERR(epmem_bar)) { 313 pr_err("failed to map BAR%u\n", QTN_SHMEM_BAR); 314 return ret; 315 } 316 317 chipid = qtnf_chip_id_get(sysctl_bar); 318 319 pr_info("identified device: %s\n", qtnf_chipid_to_string(chipid)); 320 321 switch (chipid) { 322 case QTN_CHIP_ID_PEARL: 323 case QTN_CHIP_ID_PEARL_B: 324 case QTN_CHIP_ID_PEARL_C: 325 bus = qtnf_pcie_pearl_alloc(pdev); 326 break; 327 case QTN_CHIP_ID_TOPAZ: 328 bus = qtnf_pcie_topaz_alloc(pdev); 329 break; 330 default: 331 pr_err("unsupported chip ID 0x%x\n", chipid); 332 return -ENOTSUPP; 333 } 334 335 if (!bus) 336 return -ENOMEM; 337 338 pcie_priv = get_bus_priv(bus); 339 pci_set_drvdata(pdev, bus); 340 bus->dev = &pdev->dev; 341 bus->fw_state = QTNF_FW_STATE_DETACHED; 342 pcie_priv->pdev = pdev; 343 pcie_priv->tx_stopped = 0; 344 pcie_priv->flashboot = flashboot; 345 346 if (fw_blksize_param > QTN_PCIE_MAX_FW_BUFSZ) 347 pcie_priv->fw_blksize = QTN_PCIE_MAX_FW_BUFSZ; 348 else 349 pcie_priv->fw_blksize = fw_blksize_param; 350 351 mutex_init(&bus->bus_lock); 352 spin_lock_init(&pcie_priv->tx_lock); 353 spin_lock_init(&pcie_priv->tx_reclaim_lock); 354 355 pcie_priv->tx_full_count = 0; 356 pcie_priv->tx_done_count = 0; 357 pcie_priv->pcie_irq_count = 0; 358 pcie_priv->tx_reclaim_done = 0; 359 pcie_priv->tx_reclaim_req = 0; 360 361 pcie_priv->workqueue = create_singlethread_workqueue("QTNF_PCIE"); 362 if (!pcie_priv->workqueue) { 363 pr_err("failed to alloc bus workqueue\n"); 364 return -ENODEV; 365 } 366 367 ret = dma_set_mask_and_coherent(&pdev->dev, 368 pcie_priv->dma_mask_get_cb()); 369 if (ret) { 370 pr_err("PCIE DMA coherent mask init failed 0x%llx\n", 371 pcie_priv->dma_mask_get_cb()); 372 goto error; 373 } 374 375 init_dummy_netdev(&bus->mux_dev); 376 qtnf_pcie_init_irq(pcie_priv, use_msi); 377 pcie_priv->sysctl_bar = sysctl_bar; 378 pcie_priv->dmareg_bar = dmareg_bar; 379 pcie_priv->epmem_bar = epmem_bar; 380 pci_save_state(pdev); 381 382 ret = pcie_priv->probe_cb(bus, tx_bd_size_param, rx_bd_size_param); 383 if (ret) 384 goto error; 385 386 qtnf_pcie_bringup_fw_async(bus); 387 return 0; 388 389 error: 390 flush_workqueue(pcie_priv->workqueue); 391 destroy_workqueue(pcie_priv->workqueue); 392 pci_set_drvdata(pdev, NULL); 393 return ret; 394 } 395 396 static void qtnf_pcie_free_shm_ipc(struct qtnf_pcie_bus_priv *priv) 397 { 398 qtnf_shm_ipc_free(&priv->shm_ipc_ep_in); 399 qtnf_shm_ipc_free(&priv->shm_ipc_ep_out); 400 } 401 402 static void qtnf_pcie_remove(struct pci_dev *dev) 403 { 404 struct qtnf_pcie_bus_priv *priv; 405 struct qtnf_bus *bus; 406 407 bus = pci_get_drvdata(dev); 408 if (!bus) 409 return; 410 411 priv = get_bus_priv(bus); 412 413 cancel_work_sync(&bus->fw_work); 414 415 if (qtnf_fw_is_attached(bus)) 416 qtnf_core_detach(bus); 417 418 netif_napi_del(&bus->mux_napi); 419 flush_workqueue(priv->workqueue); 420 destroy_workqueue(priv->workqueue); 421 tasklet_kill(&priv->reclaim_tq); 422 423 qtnf_pcie_free_shm_ipc(priv); 424 qtnf_debugfs_remove(bus); 425 priv->remove_cb(bus); 426 pci_set_drvdata(priv->pdev, NULL); 427 } 428 429 #ifdef CONFIG_PM_SLEEP 430 static int qtnf_pcie_suspend(struct device *dev) 431 { 432 struct qtnf_pcie_bus_priv *priv; 433 struct qtnf_bus *bus; 434 435 bus = dev_get_drvdata(dev); 436 if (!bus) 437 return -EFAULT; 438 439 priv = get_bus_priv(bus); 440 return priv->suspend_cb(bus); 441 } 442 443 static int qtnf_pcie_resume(struct device *dev) 444 { 445 struct qtnf_pcie_bus_priv *priv; 446 struct qtnf_bus *bus; 447 448 bus = dev_get_drvdata(dev); 449 if (!bus) 450 return -EFAULT; 451 452 priv = get_bus_priv(bus); 453 return priv->resume_cb(bus); 454 } 455 456 /* Power Management Hooks */ 457 static SIMPLE_DEV_PM_OPS(qtnf_pcie_pm_ops, qtnf_pcie_suspend, 458 qtnf_pcie_resume); 459 #endif 460 461 static const struct pci_device_id qtnf_pcie_devid_table[] = { 462 { 463 PCIE_VENDOR_ID_QUANTENNA, PCIE_DEVICE_ID_QSR, 464 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 465 }, 466 { }, 467 }; 468 469 MODULE_DEVICE_TABLE(pci, qtnf_pcie_devid_table); 470 471 static struct pci_driver qtnf_pcie_drv_data = { 472 .name = DRV_NAME, 473 .id_table = qtnf_pcie_devid_table, 474 .probe = qtnf_pcie_probe, 475 .remove = qtnf_pcie_remove, 476 #ifdef CONFIG_PM_SLEEP 477 .driver = { 478 .pm = &qtnf_pcie_pm_ops, 479 }, 480 #endif 481 }; 482 483 static int __init qtnf_pcie_register(void) 484 { 485 return pci_register_driver(&qtnf_pcie_drv_data); 486 } 487 488 static void __exit qtnf_pcie_exit(void) 489 { 490 pci_unregister_driver(&qtnf_pcie_drv_data); 491 } 492 493 module_init(qtnf_pcie_register); 494 module_exit(qtnf_pcie_exit); 495 496 MODULE_AUTHOR("Quantenna Communications"); 497 MODULE_DESCRIPTION("Quantenna PCIe bus driver for 802.11 wireless LAN."); 498 MODULE_LICENSE("GPL"); 499