1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 /* 3 * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. 4 */ 5 6 #include <linux/module.h> 7 #include <linux/msi.h> 8 #include <linux/pci.h> 9 10 #include "pci.h" 11 #include "core.h" 12 #include "hif.h" 13 #include "mhi.h" 14 #include "debug.h" 15 16 #define ATH11K_PCI_BAR_NUM 0 17 #define ATH11K_PCI_DMA_MASK 32 18 19 #define ATH11K_PCI_IRQ_CE0_OFFSET 3 20 21 #define WINDOW_ENABLE_BIT 0x40000000 22 #define WINDOW_REG_ADDRESS 0x310c 23 #define WINDOW_VALUE_MASK GENMASK(24, 19) 24 #define WINDOW_START 0x80000 25 #define WINDOW_RANGE_MASK GENMASK(18, 0) 26 27 #define QCA6390_DEVICE_ID 0x1101 28 29 static const struct pci_device_id ath11k_pci_id_table[] = { 30 { PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) }, 31 {0} 32 }; 33 34 MODULE_DEVICE_TABLE(pci, ath11k_pci_id_table); 35 36 static const struct ath11k_bus_params ath11k_pci_bus_params = { 37 .mhi_support = true, 38 .m3_fw_support = true, 39 .fixed_bdf_addr = false, 40 .fixed_mem_region = false, 41 }; 42 43 static const struct ath11k_msi_config msi_config = { 44 .total_vectors = 32, 45 .total_users = 4, 46 .users = (struct ath11k_msi_user[]) { 47 { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, 48 { .name = "CE", .num_vectors = 10, .base_vector = 3 }, 49 { .name = "WAKE", .num_vectors = 1, .base_vector = 13 }, 50 { .name = "DP", .num_vectors = 18, .base_vector = 14 }, 51 }, 52 }; 53 54 /* Target firmware's Copy Engine configuration. */ 55 static const struct ce_pipe_config target_ce_config_wlan[] = { 56 /* CE0: host->target HTC control and raw streams */ 57 { 58 .pipenum = __cpu_to_le32(0), 59 .pipedir = __cpu_to_le32(PIPEDIR_OUT), 60 .nentries = __cpu_to_le32(32), 61 .nbytes_max = __cpu_to_le32(2048), 62 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 63 .reserved = __cpu_to_le32(0), 64 }, 65 66 /* CE1: target->host HTT + HTC control */ 67 { 68 .pipenum = __cpu_to_le32(1), 69 .pipedir = __cpu_to_le32(PIPEDIR_IN), 70 .nentries = __cpu_to_le32(32), 71 .nbytes_max = __cpu_to_le32(2048), 72 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 73 .reserved = __cpu_to_le32(0), 74 }, 75 76 /* CE2: target->host WMI */ 77 { 78 .pipenum = __cpu_to_le32(2), 79 .pipedir = __cpu_to_le32(PIPEDIR_IN), 80 .nentries = __cpu_to_le32(32), 81 .nbytes_max = __cpu_to_le32(2048), 82 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 83 .reserved = __cpu_to_le32(0), 84 }, 85 86 /* CE3: host->target WMI */ 87 { 88 .pipenum = __cpu_to_le32(3), 89 .pipedir = __cpu_to_le32(PIPEDIR_OUT), 90 .nentries = __cpu_to_le32(32), 91 .nbytes_max = __cpu_to_le32(2048), 92 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 93 .reserved = __cpu_to_le32(0), 94 }, 95 96 /* CE4: host->target HTT */ 97 { 98 .pipenum = __cpu_to_le32(4), 99 .pipedir = __cpu_to_le32(PIPEDIR_OUT), 100 .nentries = __cpu_to_le32(256), 101 .nbytes_max = __cpu_to_le32(256), 102 .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), 103 .reserved = __cpu_to_le32(0), 104 }, 105 106 /* CE5: target->host Pktlog */ 107 { 108 .pipenum = __cpu_to_le32(5), 109 .pipedir = __cpu_to_le32(PIPEDIR_IN), 110 .nentries = __cpu_to_le32(32), 111 .nbytes_max = __cpu_to_le32(2048), 112 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 113 .reserved = __cpu_to_le32(0), 114 }, 115 116 /* CE6: Reserved for target autonomous hif_memcpy */ 117 { 118 .pipenum = __cpu_to_le32(6), 119 .pipedir = __cpu_to_le32(PIPEDIR_INOUT), 120 .nentries = __cpu_to_le32(32), 121 .nbytes_max = __cpu_to_le32(16384), 122 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 123 .reserved = __cpu_to_le32(0), 124 }, 125 126 /* CE7 used only by Host */ 127 { 128 .pipenum = __cpu_to_le32(7), 129 .pipedir = __cpu_to_le32(PIPEDIR_INOUT_H2H), 130 .nentries = __cpu_to_le32(0), 131 .nbytes_max = __cpu_to_le32(0), 132 .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), 133 .reserved = __cpu_to_le32(0), 134 }, 135 136 /* CE8 target->host used only by IPA */ 137 { 138 .pipenum = __cpu_to_le32(8), 139 .pipedir = __cpu_to_le32(PIPEDIR_INOUT), 140 .nentries = __cpu_to_le32(32), 141 .nbytes_max = __cpu_to_le32(16384), 142 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 143 .reserved = __cpu_to_le32(0), 144 }, 145 /* CE 9, 10, 11 are used by MHI driver */ 146 }; 147 148 /* Map from service/endpoint to Copy Engine. 149 * This table is derived from the CE_PCI TABLE, above. 150 * It is passed to the Target at startup for use by firmware. 151 */ 152 static const struct service_to_pipe target_service_to_ce_map_wlan[] = { 153 { 154 __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO), 155 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 156 __cpu_to_le32(3), 157 }, 158 { 159 __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO), 160 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 161 __cpu_to_le32(2), 162 }, 163 { 164 __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK), 165 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 166 __cpu_to_le32(3), 167 }, 168 { 169 __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK), 170 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 171 __cpu_to_le32(2), 172 }, 173 { 174 __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE), 175 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 176 __cpu_to_le32(3), 177 }, 178 { 179 __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE), 180 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 181 __cpu_to_le32(2), 182 }, 183 { 184 __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI), 185 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 186 __cpu_to_le32(3), 187 }, 188 { 189 __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI), 190 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 191 __cpu_to_le32(2), 192 }, 193 { 194 __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL), 195 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 196 __cpu_to_le32(3), 197 }, 198 { 199 __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL), 200 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 201 __cpu_to_le32(2), 202 }, 203 204 { 205 __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL), 206 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 207 __cpu_to_le32(0), 208 }, 209 { 210 __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL), 211 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 212 __cpu_to_le32(2), 213 }, 214 215 { 216 __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG), 217 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 218 __cpu_to_le32(4), 219 }, 220 { 221 __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG), 222 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 223 __cpu_to_le32(1), 224 }, 225 226 /* (Additions here) */ 227 228 { /* must be last */ 229 __cpu_to_le32(0), 230 __cpu_to_le32(0), 231 __cpu_to_le32(0), 232 }, 233 }; 234 235 static const char *irq_name[ATH11K_IRQ_NUM_MAX] = { 236 "bhi", 237 "mhi-er0", 238 "mhi-er1", 239 "ce0", 240 "ce1", 241 "ce2", 242 "ce3", 243 "ce4", 244 "ce5", 245 "ce6", 246 "ce7", 247 "ce8", 248 "ce9", 249 "ce10", 250 "ce11", 251 "host2wbm-desc-feed", 252 "host2reo-re-injection", 253 "host2reo-command", 254 "host2rxdma-monitor-ring3", 255 "host2rxdma-monitor-ring2", 256 "host2rxdma-monitor-ring1", 257 "reo2ost-exception", 258 "wbm2host-rx-release", 259 "reo2host-status", 260 "reo2host-destination-ring4", 261 "reo2host-destination-ring3", 262 "reo2host-destination-ring2", 263 "reo2host-destination-ring1", 264 "rxdma2host-monitor-destination-mac3", 265 "rxdma2host-monitor-destination-mac2", 266 "rxdma2host-monitor-destination-mac1", 267 "ppdu-end-interrupts-mac3", 268 "ppdu-end-interrupts-mac2", 269 "ppdu-end-interrupts-mac1", 270 "rxdma2host-monitor-status-ring-mac3", 271 "rxdma2host-monitor-status-ring-mac2", 272 "rxdma2host-monitor-status-ring-mac1", 273 "host2rxdma-host-buf-ring-mac3", 274 "host2rxdma-host-buf-ring-mac2", 275 "host2rxdma-host-buf-ring-mac1", 276 "rxdma2host-destination-ring-mac3", 277 "rxdma2host-destination-ring-mac2", 278 "rxdma2host-destination-ring-mac1", 279 "host2tcl-input-ring4", 280 "host2tcl-input-ring3", 281 "host2tcl-input-ring2", 282 "host2tcl-input-ring1", 283 "wbm2host-tx-completions-ring3", 284 "wbm2host-tx-completions-ring2", 285 "wbm2host-tx-completions-ring1", 286 "tcl2host-status-ring", 287 }; 288 289 static inline void ath11k_pci_select_window(struct ath11k_pci *ab_pci, u32 offset) 290 { 291 struct ath11k_base *ab = ab_pci->ab; 292 293 u32 window = FIELD_GET(WINDOW_VALUE_MASK, offset); 294 295 lockdep_assert_held(&ab_pci->window_lock); 296 297 if (window != ab_pci->register_window) { 298 iowrite32(WINDOW_ENABLE_BIT | window, 299 ab->mem + WINDOW_REG_ADDRESS); 300 ab_pci->register_window = window; 301 } 302 } 303 304 void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value) 305 { 306 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); 307 308 if (offset < WINDOW_START) { 309 iowrite32(value, ab->mem + offset); 310 } else { 311 spin_lock_bh(&ab_pci->window_lock); 312 ath11k_pci_select_window(ab_pci, offset); 313 iowrite32(value, ab->mem + WINDOW_START + (offset & WINDOW_RANGE_MASK)); 314 spin_unlock_bh(&ab_pci->window_lock); 315 } 316 } 317 318 u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset) 319 { 320 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); 321 u32 val; 322 323 if (offset < WINDOW_START) { 324 val = ioread32(ab->mem + offset); 325 } else { 326 spin_lock_bh(&ab_pci->window_lock); 327 ath11k_pci_select_window(ab_pci, offset); 328 val = ioread32(ab->mem + WINDOW_START + (offset & WINDOW_RANGE_MASK)); 329 spin_unlock_bh(&ab_pci->window_lock); 330 } 331 332 return val; 333 } 334 335 static void ath11k_pci_soc_global_reset(struct ath11k_base *ab) 336 { 337 u32 val, delay; 338 339 val = ath11k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET); 340 341 val |= PCIE_SOC_GLOBAL_RESET_V; 342 343 ath11k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val); 344 345 /* TODO: exact time to sleep is uncertain */ 346 delay = 10; 347 mdelay(delay); 348 349 /* Need to toggle V bit back otherwise stuck in reset status */ 350 val &= ~PCIE_SOC_GLOBAL_RESET_V; 351 352 ath11k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val); 353 354 mdelay(delay); 355 356 val = ath11k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET); 357 if (val == 0xffffffff) 358 ath11k_warn(ab, "link down error during global reset\n"); 359 } 360 361 static void ath11k_pci_clear_dbg_registers(struct ath11k_base *ab) 362 { 363 u32 val; 364 365 /* read cookie */ 366 val = ath11k_pci_read32(ab, PCIE_Q6_COOKIE_ADDR); 367 ath11k_dbg(ab, ATH11K_DBG_PCI, "cookie:0x%x\n", val); 368 369 val = ath11k_pci_read32(ab, WLAON_WARM_SW_ENTRY); 370 ath11k_dbg(ab, ATH11K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val); 371 372 /* TODO: exact time to sleep is uncertain */ 373 mdelay(10); 374 375 /* write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from 376 * continuing warm path and entering dead loop. 377 */ 378 ath11k_pci_write32(ab, WLAON_WARM_SW_ENTRY, 0); 379 mdelay(10); 380 381 val = ath11k_pci_read32(ab, WLAON_WARM_SW_ENTRY); 382 ath11k_dbg(ab, ATH11K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val); 383 384 /* A read clear register. clear the register to prevent 385 * Q6 from entering wrong code path. 386 */ 387 val = ath11k_pci_read32(ab, WLAON_SOC_RESET_CAUSE_REG); 388 ath11k_dbg(ab, ATH11K_DBG_PCI, "soc reset cause:%d\n", val); 389 } 390 391 static void ath11k_pci_force_wake(struct ath11k_base *ab) 392 { 393 ath11k_pci_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1); 394 mdelay(5); 395 } 396 397 static void ath11k_pci_sw_reset(struct ath11k_base *ab) 398 { 399 ath11k_pci_soc_global_reset(ab); 400 ath11k_mhi_clear_vector(ab); 401 ath11k_pci_soc_global_reset(ab); 402 ath11k_mhi_set_mhictrl_reset(ab); 403 ath11k_pci_clear_dbg_registers(ab); 404 } 405 406 int ath11k_pci_get_msi_irq(struct device *dev, unsigned int vector) 407 { 408 struct pci_dev *pci_dev = to_pci_dev(dev); 409 410 return pci_irq_vector(pci_dev, vector); 411 } 412 413 static void ath11k_pci_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo, 414 u32 *msi_addr_hi) 415 { 416 struct pci_dev *pci_dev = to_pci_dev(ab->dev); 417 418 pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO, 419 msi_addr_lo); 420 421 pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI, 422 msi_addr_hi); 423 } 424 425 int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ab_pci, char *user_name, 426 int *num_vectors, u32 *user_base_data, 427 u32 *base_vector) 428 { 429 struct ath11k_base *ab = ab_pci->ab; 430 int idx; 431 432 for (idx = 0; idx < msi_config.total_users; idx++) { 433 if (strcmp(user_name, msi_config.users[idx].name) == 0) { 434 *num_vectors = msi_config.users[idx].num_vectors; 435 *user_base_data = msi_config.users[idx].base_vector 436 + ab_pci->msi_ep_base_data; 437 *base_vector = msi_config.users[idx].base_vector; 438 439 ath11k_dbg(ab, ATH11K_DBG_PCI, "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n", 440 user_name, *num_vectors, *user_base_data, 441 *base_vector); 442 443 return 0; 444 } 445 } 446 447 ath11k_err(ab, "Failed to find MSI assignment for %s!\n", user_name); 448 449 return -EINVAL; 450 } 451 452 static int ath11k_get_user_msi_assignment(struct ath11k_base *ab, char *user_name, 453 int *num_vectors, u32 *user_base_data, 454 u32 *base_vector) 455 { 456 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); 457 458 return ath11k_pci_get_user_msi_assignment(ab_pci, user_name, 459 num_vectors, user_base_data, 460 base_vector); 461 } 462 463 static void ath11k_pci_free_ext_irq(struct ath11k_base *ab) 464 { 465 int i, j; 466 467 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { 468 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; 469 470 for (j = 0; j < irq_grp->num_irq; j++) 471 free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp); 472 473 netif_napi_del(&irq_grp->napi); 474 } 475 } 476 477 static void ath11k_pci_free_irq(struct ath11k_base *ab) 478 { 479 int i, irq_idx; 480 481 for (i = 0; i < ab->hw_params.ce_count; i++) { 482 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 483 continue; 484 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i; 485 free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]); 486 } 487 488 ath11k_pci_free_ext_irq(ab); 489 } 490 491 static void ath11k_pci_ce_irq_enable(struct ath11k_base *ab, u16 ce_id) 492 { 493 u32 irq_idx; 494 495 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id; 496 enable_irq(ab->irq_num[irq_idx]); 497 } 498 499 static void ath11k_pci_ce_irq_disable(struct ath11k_base *ab, u16 ce_id) 500 { 501 u32 irq_idx; 502 503 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id; 504 disable_irq_nosync(ab->irq_num[irq_idx]); 505 } 506 507 static void ath11k_pci_ce_irqs_disable(struct ath11k_base *ab) 508 { 509 int i; 510 511 for (i = 0; i < ab->hw_params.ce_count; i++) { 512 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 513 continue; 514 ath11k_pci_ce_irq_disable(ab, i); 515 } 516 } 517 518 static void ath11k_pci_sync_ce_irqs(struct ath11k_base *ab) 519 { 520 int i; 521 int irq_idx; 522 523 for (i = 0; i < ab->hw_params.ce_count; i++) { 524 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 525 continue; 526 527 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i; 528 synchronize_irq(ab->irq_num[irq_idx]); 529 } 530 } 531 532 static void ath11k_pci_ce_tasklet(unsigned long data) 533 { 534 struct ath11k_ce_pipe *ce_pipe = (struct ath11k_ce_pipe *)data; 535 536 ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num); 537 538 ath11k_pci_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num); 539 } 540 541 static irqreturn_t ath11k_pci_ce_interrupt_handler(int irq, void *arg) 542 { 543 struct ath11k_ce_pipe *ce_pipe = arg; 544 545 ath11k_pci_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num); 546 tasklet_schedule(&ce_pipe->intr_tq); 547 548 return IRQ_HANDLED; 549 } 550 551 static void ath11k_pci_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp) 552 { 553 int i; 554 555 for (i = 0; i < irq_grp->num_irq; i++) 556 disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]); 557 } 558 559 static void __ath11k_pci_ext_irq_disable(struct ath11k_base *sc) 560 { 561 int i; 562 563 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { 564 struct ath11k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i]; 565 566 ath11k_pci_ext_grp_disable(irq_grp); 567 568 napi_synchronize(&irq_grp->napi); 569 napi_disable(&irq_grp->napi); 570 } 571 } 572 573 static void ath11k_pci_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp) 574 { 575 int i; 576 577 for (i = 0; i < irq_grp->num_irq; i++) 578 enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]); 579 } 580 581 static void ath11k_pci_ext_irq_enable(struct ath11k_base *ab) 582 { 583 int i; 584 585 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { 586 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; 587 588 napi_enable(&irq_grp->napi); 589 ath11k_pci_ext_grp_enable(irq_grp); 590 } 591 } 592 593 static void ath11k_pci_sync_ext_irqs(struct ath11k_base *ab) 594 { 595 int i, j, irq_idx; 596 597 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { 598 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; 599 600 for (j = 0; j < irq_grp->num_irq; j++) { 601 irq_idx = irq_grp->irqs[j]; 602 synchronize_irq(ab->irq_num[irq_idx]); 603 } 604 } 605 } 606 607 static void ath11k_pci_ext_irq_disable(struct ath11k_base *ab) 608 { 609 __ath11k_pci_ext_irq_disable(ab); 610 ath11k_pci_sync_ext_irqs(ab); 611 } 612 613 static int ath11k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget) 614 { 615 struct ath11k_ext_irq_grp *irq_grp = container_of(napi, 616 struct ath11k_ext_irq_grp, 617 napi); 618 struct ath11k_base *ab = irq_grp->ab; 619 int work_done; 620 621 work_done = ath11k_dp_service_srng(ab, irq_grp, budget); 622 if (work_done < budget) { 623 napi_complete_done(napi, work_done); 624 ath11k_pci_ext_grp_enable(irq_grp); 625 } 626 627 if (work_done > budget) 628 work_done = budget; 629 630 return work_done; 631 } 632 633 static irqreturn_t ath11k_pci_ext_interrupt_handler(int irq, void *arg) 634 { 635 struct ath11k_ext_irq_grp *irq_grp = arg; 636 637 ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq:%d\n", irq); 638 639 ath11k_pci_ext_grp_disable(irq_grp); 640 641 napi_schedule(&irq_grp->napi); 642 643 return IRQ_HANDLED; 644 } 645 646 static int ath11k_pci_ext_irq_config(struct ath11k_base *ab) 647 { 648 int i, j, ret, num_vectors = 0; 649 u32 user_base_data = 0, base_vector = 0; 650 651 ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab), "DP", 652 &num_vectors, &user_base_data, 653 &base_vector); 654 655 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { 656 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; 657 u32 num_irq = 0; 658 659 irq_grp->ab = ab; 660 irq_grp->grp_id = i; 661 init_dummy_netdev(&irq_grp->napi_ndev); 662 netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi, 663 ath11k_pci_ext_grp_napi_poll, NAPI_POLL_WEIGHT); 664 665 if (ab->hw_params.ring_mask->tx[i] || 666 ab->hw_params.ring_mask->rx[i] || 667 ab->hw_params.ring_mask->rx_err[i] || 668 ab->hw_params.ring_mask->rx_wbm_rel[i] || 669 ab->hw_params.ring_mask->reo_status[i] || 670 ab->hw_params.ring_mask->rxdma2host[i] || 671 ab->hw_params.ring_mask->host2rxdma[i] || 672 ab->hw_params.ring_mask->rx_mon_status[i]) { 673 num_irq = 1; 674 } 675 676 irq_grp->num_irq = num_irq; 677 irq_grp->irqs[0] = base_vector + i; 678 679 for (j = 0; j < irq_grp->num_irq; j++) { 680 int irq_idx = irq_grp->irqs[j]; 681 int vector = (i % num_vectors) + base_vector; 682 int irq = ath11k_pci_get_msi_irq(ab->dev, vector); 683 684 ab->irq_num[irq_idx] = irq; 685 686 ath11k_dbg(ab, ATH11K_DBG_PCI, 687 "irq:%d group:%d\n", irq, i); 688 ret = request_irq(irq, ath11k_pci_ext_interrupt_handler, 689 IRQF_SHARED, 690 "DP_EXT_IRQ", irq_grp); 691 if (ret) { 692 ath11k_err(ab, "failed request irq %d: %d\n", 693 vector, ret); 694 return ret; 695 } 696 697 disable_irq_nosync(ab->irq_num[irq_idx]); 698 } 699 } 700 701 return 0; 702 } 703 704 static int ath11k_pci_config_irq(struct ath11k_base *ab) 705 { 706 struct ath11k_ce_pipe *ce_pipe; 707 u32 msi_data_start; 708 u32 msi_data_count; 709 u32 msi_irq_start; 710 unsigned int msi_data; 711 int irq, i, ret, irq_idx; 712 713 ret = ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab), 714 "CE", &msi_data_count, 715 &msi_data_start, &msi_irq_start); 716 if (ret) 717 return ret; 718 719 /* Configure CE irqs */ 720 for (i = 0; i < ab->hw_params.ce_count; i++) { 721 msi_data = (i % msi_data_count) + msi_irq_start; 722 irq = ath11k_pci_get_msi_irq(ab->dev, msi_data); 723 ce_pipe = &ab->ce.ce_pipe[i]; 724 725 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 726 continue; 727 728 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i; 729 730 tasklet_init(&ce_pipe->intr_tq, ath11k_pci_ce_tasklet, 731 (unsigned long)ce_pipe); 732 733 ret = request_irq(irq, ath11k_pci_ce_interrupt_handler, 734 IRQF_SHARED, irq_name[irq_idx], 735 ce_pipe); 736 if (ret) { 737 ath11k_err(ab, "failed to request irq %d: %d\n", 738 irq_idx, ret); 739 return ret; 740 } 741 742 ab->irq_num[irq_idx] = irq; 743 ath11k_pci_ce_irq_disable(ab, i); 744 } 745 746 ret = ath11k_pci_ext_irq_config(ab); 747 if (ret) 748 return ret; 749 750 return 0; 751 } 752 753 static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab) 754 { 755 struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg; 756 757 cfg->tgt_ce = target_ce_config_wlan; 758 cfg->tgt_ce_len = ARRAY_SIZE(target_ce_config_wlan); 759 760 cfg->svc_to_ce_map = target_service_to_ce_map_wlan; 761 cfg->svc_to_ce_map_len = ARRAY_SIZE(target_service_to_ce_map_wlan); 762 ab->qmi.service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390; 763 } 764 765 static void ath11k_pci_ce_irqs_enable(struct ath11k_base *ab) 766 { 767 int i; 768 769 for (i = 0; i < ab->hw_params.ce_count; i++) { 770 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 771 continue; 772 ath11k_pci_ce_irq_enable(ab, i); 773 } 774 } 775 776 static int ath11k_pci_enable_msi(struct ath11k_pci *ab_pci) 777 { 778 struct ath11k_base *ab = ab_pci->ab; 779 struct msi_desc *msi_desc; 780 int num_vectors; 781 int ret; 782 783 num_vectors = pci_alloc_irq_vectors(ab_pci->pdev, 784 msi_config.total_vectors, 785 msi_config.total_vectors, 786 PCI_IRQ_MSI); 787 if (num_vectors != msi_config.total_vectors) { 788 ath11k_err(ab, "failed to get %d MSI vectors, only %d available", 789 msi_config.total_vectors, num_vectors); 790 791 if (num_vectors >= 0) 792 return -EINVAL; 793 else 794 return num_vectors; 795 } 796 797 msi_desc = irq_get_msi_desc(ab_pci->pdev->irq); 798 if (!msi_desc) { 799 ath11k_err(ab, "msi_desc is NULL!\n"); 800 ret = -EINVAL; 801 goto free_msi_vector; 802 } 803 804 ab_pci->msi_ep_base_data = msi_desc->msg.data; 805 806 ath11k_dbg(ab, ATH11K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data); 807 808 return 0; 809 810 free_msi_vector: 811 pci_free_irq_vectors(ab_pci->pdev); 812 813 return ret; 814 } 815 816 static void ath11k_pci_disable_msi(struct ath11k_pci *ab_pci) 817 { 818 pci_free_irq_vectors(ab_pci->pdev); 819 } 820 821 static int ath11k_pci_claim(struct ath11k_pci *ab_pci, struct pci_dev *pdev) 822 { 823 struct ath11k_base *ab = ab_pci->ab; 824 u16 device_id; 825 int ret = 0; 826 827 pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id); 828 if (device_id != ab_pci->dev_id) { 829 ath11k_err(ab, "pci device id mismatch: 0x%x 0x%x\n", 830 device_id, ab_pci->dev_id); 831 ret = -EIO; 832 goto out; 833 } 834 835 ret = pci_assign_resource(pdev, ATH11K_PCI_BAR_NUM); 836 if (ret) { 837 ath11k_err(ab, "failed to assign pci resource: %d\n", ret); 838 goto out; 839 } 840 841 ret = pci_enable_device(pdev); 842 if (ret) { 843 ath11k_err(ab, "failed to enable pci device: %d\n", ret); 844 goto out; 845 } 846 847 ret = pci_request_region(pdev, ATH11K_PCI_BAR_NUM, "ath11k_pci"); 848 if (ret) { 849 ath11k_err(ab, "failed to request pci region: %d\n", ret); 850 goto disable_device; 851 } 852 853 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(ATH11K_PCI_DMA_MASK)); 854 if (ret) { 855 ath11k_err(ab, "failed to set pci dma mask to %d: %d\n", 856 ATH11K_PCI_DMA_MASK, ret); 857 goto release_region; 858 } 859 860 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ATH11K_PCI_DMA_MASK)); 861 if (ret) { 862 ath11k_err(ab, "failed to set pci consistent dma mask to %d: %d\n", 863 ATH11K_PCI_DMA_MASK, ret); 864 goto release_region; 865 } 866 867 pci_set_master(pdev); 868 869 ab->mem_len = pci_resource_len(pdev, ATH11K_PCI_BAR_NUM); 870 ab->mem = pci_iomap(pdev, ATH11K_PCI_BAR_NUM, 0); 871 if (!ab->mem) { 872 ath11k_err(ab, "failed to map pci bar %d\n", ATH11K_PCI_BAR_NUM); 873 ret = -EIO; 874 goto clear_master; 875 } 876 877 ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot pci_mem 0x%pK\n", ab->mem); 878 return 0; 879 880 clear_master: 881 pci_clear_master(pdev); 882 release_region: 883 pci_release_region(pdev, ATH11K_PCI_BAR_NUM); 884 disable_device: 885 pci_disable_device(pdev); 886 out: 887 return ret; 888 } 889 890 static void ath11k_pci_free_region(struct ath11k_pci *ab_pci) 891 { 892 struct ath11k_base *ab = ab_pci->ab; 893 struct pci_dev *pci_dev = ab_pci->pdev; 894 895 pci_iounmap(pci_dev, ab->mem); 896 ab->mem = NULL; 897 pci_clear_master(pci_dev); 898 pci_release_region(pci_dev, ATH11K_PCI_BAR_NUM); 899 if (pci_is_enabled(pci_dev)) 900 pci_disable_device(pci_dev); 901 } 902 903 static int ath11k_pci_power_up(struct ath11k_base *ab) 904 { 905 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); 906 int ret; 907 908 ath11k_pci_sw_reset(ab_pci->ab); 909 910 ret = ath11k_mhi_start(ab_pci); 911 if (ret) { 912 ath11k_err(ab, "failed to start mhi: %d\n", ret); 913 return ret; 914 } 915 916 return 0; 917 } 918 919 static void ath11k_pci_power_down(struct ath11k_base *ab) 920 { 921 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); 922 923 ath11k_mhi_stop(ab_pci); 924 ath11k_pci_force_wake(ab_pci->ab); 925 ath11k_pci_sw_reset(ab_pci->ab); 926 } 927 928 static void ath11k_pci_kill_tasklets(struct ath11k_base *ab) 929 { 930 int i; 931 932 for (i = 0; i < ab->hw_params.ce_count; i++) { 933 struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i]; 934 935 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 936 continue; 937 938 tasklet_kill(&ce_pipe->intr_tq); 939 } 940 } 941 942 static void ath11k_pci_stop(struct ath11k_base *ab) 943 { 944 ath11k_pci_ce_irqs_disable(ab); 945 ath11k_pci_sync_ce_irqs(ab); 946 ath11k_pci_kill_tasklets(ab); 947 ath11k_ce_cleanup_pipes(ab); 948 } 949 950 static int ath11k_pci_start(struct ath11k_base *ab) 951 { 952 ath11k_pci_ce_irqs_enable(ab); 953 ath11k_ce_rx_post_buf(ab); 954 955 return 0; 956 } 957 958 static int ath11k_pci_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, 959 u8 *ul_pipe, u8 *dl_pipe) 960 { 961 const struct service_to_pipe *entry; 962 bool ul_set = false, dl_set = false; 963 int i; 964 965 for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) { 966 entry = &target_service_to_ce_map_wlan[i]; 967 968 if (__le32_to_cpu(entry->service_id) != service_id) 969 continue; 970 971 switch (__le32_to_cpu(entry->pipedir)) { 972 case PIPEDIR_NONE: 973 break; 974 case PIPEDIR_IN: 975 WARN_ON(dl_set); 976 *dl_pipe = __le32_to_cpu(entry->pipenum); 977 dl_set = true; 978 break; 979 case PIPEDIR_OUT: 980 WARN_ON(ul_set); 981 *ul_pipe = __le32_to_cpu(entry->pipenum); 982 ul_set = true; 983 break; 984 case PIPEDIR_INOUT: 985 WARN_ON(dl_set); 986 WARN_ON(ul_set); 987 *dl_pipe = __le32_to_cpu(entry->pipenum); 988 *ul_pipe = __le32_to_cpu(entry->pipenum); 989 dl_set = true; 990 ul_set = true; 991 break; 992 } 993 } 994 995 if (WARN_ON(!ul_set || !dl_set)) 996 return -ENOENT; 997 998 return 0; 999 } 1000 1001 static const struct ath11k_hif_ops ath11k_pci_hif_ops = { 1002 .start = ath11k_pci_start, 1003 .stop = ath11k_pci_stop, 1004 .read32 = ath11k_pci_read32, 1005 .write32 = ath11k_pci_write32, 1006 .power_down = ath11k_pci_power_down, 1007 .power_up = ath11k_pci_power_up, 1008 .irq_enable = ath11k_pci_ext_irq_enable, 1009 .irq_disable = ath11k_pci_ext_irq_disable, 1010 .get_msi_address = ath11k_pci_get_msi_address, 1011 .get_user_msi_vector = ath11k_get_user_msi_assignment, 1012 .map_service_to_pipe = ath11k_pci_map_service_to_pipe, 1013 }; 1014 1015 static int ath11k_pci_probe(struct pci_dev *pdev, 1016 const struct pci_device_id *pci_dev) 1017 { 1018 struct ath11k_base *ab; 1019 struct ath11k_pci *ab_pci; 1020 enum ath11k_hw_rev hw_rev; 1021 int ret; 1022 1023 dev_warn(&pdev->dev, "WARNING: ath11k PCI support is experimental!\n"); 1024 1025 switch (pci_dev->device) { 1026 case QCA6390_DEVICE_ID: 1027 hw_rev = ATH11K_HW_QCA6390_HW20; 1028 break; 1029 default: 1030 dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n", 1031 pci_dev->device); 1032 return -ENOTSUPP; 1033 } 1034 1035 ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI, 1036 &ath11k_pci_bus_params); 1037 if (!ab) { 1038 dev_err(&pdev->dev, "failed to allocate ath11k base\n"); 1039 return -ENOMEM; 1040 } 1041 1042 ab->dev = &pdev->dev; 1043 ab->hw_rev = hw_rev; 1044 pci_set_drvdata(pdev, ab); 1045 ab_pci = ath11k_pci_priv(ab); 1046 ab_pci->dev_id = pci_dev->device; 1047 ab_pci->ab = ab; 1048 ab_pci->pdev = pdev; 1049 ab->hif.ops = &ath11k_pci_hif_ops; 1050 pci_set_drvdata(pdev, ab); 1051 spin_lock_init(&ab_pci->window_lock); 1052 1053 ret = ath11k_pci_claim(ab_pci, pdev); 1054 if (ret) { 1055 ath11k_err(ab, "failed to claim device: %d\n", ret); 1056 goto err_free_core; 1057 } 1058 1059 ret = ath11k_pci_enable_msi(ab_pci); 1060 if (ret) { 1061 ath11k_err(ab, "failed to enable msi: %d\n", ret); 1062 goto err_pci_free_region; 1063 } 1064 1065 ret = ath11k_core_pre_init(ab); 1066 if (ret) 1067 goto err_pci_disable_msi; 1068 1069 ret = ath11k_mhi_register(ab_pci); 1070 if (ret) { 1071 ath11k_err(ab, "failed to register mhi: %d\n", ret); 1072 goto err_pci_disable_msi; 1073 } 1074 1075 ret = ath11k_hal_srng_init(ab); 1076 if (ret) 1077 goto err_mhi_unregister; 1078 1079 ret = ath11k_ce_alloc_pipes(ab); 1080 if (ret) { 1081 ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret); 1082 goto err_hal_srng_deinit; 1083 } 1084 1085 ath11k_pci_init_qmi_ce_config(ab); 1086 1087 ret = ath11k_pci_config_irq(ab); 1088 if (ret) { 1089 ath11k_err(ab, "failed to config irq: %d\n", ret); 1090 goto err_ce_free; 1091 } 1092 1093 ret = ath11k_core_init(ab); 1094 if (ret) { 1095 ath11k_err(ab, "failed to init core: %d\n", ret); 1096 goto err_free_irq; 1097 } 1098 return 0; 1099 1100 err_free_irq: 1101 ath11k_pci_free_irq(ab); 1102 1103 err_ce_free: 1104 ath11k_ce_free_pipes(ab); 1105 1106 err_hal_srng_deinit: 1107 ath11k_hal_srng_deinit(ab); 1108 1109 err_mhi_unregister: 1110 ath11k_mhi_unregister(ab_pci); 1111 1112 err_pci_disable_msi: 1113 ath11k_pci_disable_msi(ab_pci); 1114 1115 err_pci_free_region: 1116 ath11k_pci_free_region(ab_pci); 1117 1118 err_free_core: 1119 ath11k_core_free(ab); 1120 1121 return ret; 1122 } 1123 1124 static void ath11k_pci_remove(struct pci_dev *pdev) 1125 { 1126 struct ath11k_base *ab = pci_get_drvdata(pdev); 1127 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); 1128 1129 set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags); 1130 ath11k_mhi_unregister(ab_pci); 1131 ath11k_pci_disable_msi(ab_pci); 1132 ath11k_pci_free_region(ab_pci); 1133 ath11k_pci_free_irq(ab); 1134 ath11k_core_free(ab); 1135 } 1136 1137 static void ath11k_pci_shutdown(struct pci_dev *pdev) 1138 { 1139 struct ath11k_base *ab = pci_get_drvdata(pdev); 1140 1141 ath11k_pci_power_down(ab); 1142 } 1143 1144 static struct pci_driver ath11k_pci_driver = { 1145 .name = "ath11k_pci", 1146 .id_table = ath11k_pci_id_table, 1147 .probe = ath11k_pci_probe, 1148 .remove = ath11k_pci_remove, 1149 .shutdown = ath11k_pci_shutdown, 1150 }; 1151 1152 static int ath11k_pci_init(void) 1153 { 1154 int ret; 1155 1156 ret = pci_register_driver(&ath11k_pci_driver); 1157 if (ret) 1158 pr_err("failed to register ath11k pci driver: %d\n", 1159 ret); 1160 1161 return ret; 1162 } 1163 module_init(ath11k_pci_init); 1164 1165 static void ath11k_pci_exit(void) 1166 { 1167 pci_unregister_driver(&ath11k_pci_driver); 1168 } 1169 1170 module_exit(ath11k_pci_exit); 1171 1172 MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN PCIe devices"); 1173 MODULE_LICENSE("Dual BSD/GPL"); 1174