1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 /* 3 * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. 4 */ 5 6 #include <linux/module.h> 7 #include <linux/msi.h> 8 #include <linux/pci.h> 9 10 #include "pci.h" 11 #include "core.h" 12 #include "hif.h" 13 #include "mhi.h" 14 #include "debug.h" 15 16 #define ATH11K_PCI_BAR_NUM 0 17 #define ATH11K_PCI_DMA_MASK 32 18 19 #define ATH11K_PCI_IRQ_CE0_OFFSET 3 20 21 #define WINDOW_ENABLE_BIT 0x40000000 22 #define WINDOW_REG_ADDRESS 0x310c 23 #define WINDOW_VALUE_MASK GENMASK(24, 19) 24 #define WINDOW_START 0x80000 25 #define WINDOW_RANGE_MASK GENMASK(18, 0) 26 27 #define TCSR_SOC_HW_VERSION 0x0224 28 #define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(16, 8) 29 #define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 0) 30 31 /* BAR0 + 4k is always accessible, and no 32 * need to force wakeup. 33 * 4K - 32 = 0xFE0 34 */ 35 #define ACCESS_ALWAYS_OFF 0xFE0 36 37 #define QCA6390_DEVICE_ID 0x1101 38 #define QCN9074_DEVICE_ID 0x1104 39 #define WCN6855_DEVICE_ID 0x1103 40 41 static const struct pci_device_id ath11k_pci_id_table[] = { 42 { PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) }, 43 { PCI_VDEVICE(QCOM, WCN6855_DEVICE_ID) }, 44 { PCI_VDEVICE(QCOM, QCN9074_DEVICE_ID) }, 45 {0} 46 }; 47 48 MODULE_DEVICE_TABLE(pci, ath11k_pci_id_table); 49 50 static const struct ath11k_bus_params ath11k_pci_bus_params = { 51 .mhi_support = true, 52 .m3_fw_support = true, 53 .fixed_bdf_addr = false, 54 .fixed_mem_region = false, 55 }; 56 57 static const struct ath11k_msi_config ath11k_msi_config[] = { 58 { 59 .total_vectors = 32, 60 .total_users = 4, 61 .users = (struct ath11k_msi_user[]) { 62 { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, 63 { .name = "CE", .num_vectors = 10, .base_vector = 3 }, 64 { .name = "WAKE", .num_vectors = 1, .base_vector = 13 }, 65 { .name = "DP", .num_vectors = 18, .base_vector = 14 }, 66 }, 67 }, 68 { 69 .total_vectors = 16, 70 .total_users = 3, 71 .users = (struct ath11k_msi_user[]) { 72 { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, 73 { .name = "CE", .num_vectors = 5, .base_vector = 3 }, 74 { .name = "DP", .num_vectors = 8, .base_vector = 8 }, 75 }, 76 }, 77 }; 78 79 static const char *irq_name[ATH11K_IRQ_NUM_MAX] = { 80 "bhi", 81 "mhi-er0", 82 "mhi-er1", 83 "ce0", 84 "ce1", 85 "ce2", 86 "ce3", 87 "ce4", 88 "ce5", 89 "ce6", 90 "ce7", 91 "ce8", 92 "ce9", 93 "ce10", 94 "ce11", 95 "host2wbm-desc-feed", 96 "host2reo-re-injection", 97 "host2reo-command", 98 "host2rxdma-monitor-ring3", 99 "host2rxdma-monitor-ring2", 100 "host2rxdma-monitor-ring1", 101 "reo2ost-exception", 102 "wbm2host-rx-release", 103 "reo2host-status", 104 "reo2host-destination-ring4", 105 "reo2host-destination-ring3", 106 "reo2host-destination-ring2", 107 "reo2host-destination-ring1", 108 "rxdma2host-monitor-destination-mac3", 109 "rxdma2host-monitor-destination-mac2", 110 "rxdma2host-monitor-destination-mac1", 111 "ppdu-end-interrupts-mac3", 112 "ppdu-end-interrupts-mac2", 113 "ppdu-end-interrupts-mac1", 114 "rxdma2host-monitor-status-ring-mac3", 115 "rxdma2host-monitor-status-ring-mac2", 116 "rxdma2host-monitor-status-ring-mac1", 117 "host2rxdma-host-buf-ring-mac3", 118 "host2rxdma-host-buf-ring-mac2", 119 "host2rxdma-host-buf-ring-mac1", 120 "rxdma2host-destination-ring-mac3", 121 "rxdma2host-destination-ring-mac2", 122 "rxdma2host-destination-ring-mac1", 123 "host2tcl-input-ring4", 124 "host2tcl-input-ring3", 125 "host2tcl-input-ring2", 126 "host2tcl-input-ring1", 127 "wbm2host-tx-completions-ring3", 128 "wbm2host-tx-completions-ring2", 129 "wbm2host-tx-completions-ring1", 130 "tcl2host-status-ring", 131 }; 132 133 static inline void ath11k_pci_select_window(struct ath11k_pci *ab_pci, u32 offset) 134 { 135 struct ath11k_base *ab = ab_pci->ab; 136 137 u32 window = FIELD_GET(WINDOW_VALUE_MASK, offset); 138 139 lockdep_assert_held(&ab_pci->window_lock); 140 141 if (window != ab_pci->register_window) { 142 iowrite32(WINDOW_ENABLE_BIT | window, 143 ab->mem + WINDOW_REG_ADDRESS); 144 ioread32(ab->mem + WINDOW_REG_ADDRESS); 145 ab_pci->register_window = window; 146 } 147 } 148 149 static inline void ath11k_pci_select_static_window(struct ath11k_pci *ab_pci) 150 { 151 u32 umac_window = FIELD_GET(WINDOW_VALUE_MASK, HAL_SEQ_WCSS_UMAC_OFFSET); 152 u32 ce_window = FIELD_GET(WINDOW_VALUE_MASK, HAL_CE_WFSS_CE_REG_BASE); 153 u32 window; 154 155 window = (umac_window << 12) | (ce_window << 6); 156 157 iowrite32(WINDOW_ENABLE_BIT | window, ab_pci->ab->mem + WINDOW_REG_ADDRESS); 158 } 159 160 static inline u32 ath11k_pci_get_window_start(struct ath11k_base *ab, 161 u32 offset) 162 { 163 u32 window_start; 164 165 /* If offset lies within DP register range, use 3rd window */ 166 if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < WINDOW_RANGE_MASK) 167 window_start = 3 * WINDOW_START; 168 /* If offset lies within CE register range, use 2nd window */ 169 else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK) 170 window_start = 2 * WINDOW_START; 171 else 172 window_start = WINDOW_START; 173 174 return window_start; 175 } 176 177 void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value) 178 { 179 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); 180 u32 window_start; 181 182 /* for offset beyond BAR + 4K - 32, may 183 * need to wakeup MHI to access. 184 */ 185 if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && 186 offset >= ACCESS_ALWAYS_OFF) 187 mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev); 188 189 if (offset < WINDOW_START) { 190 iowrite32(value, ab->mem + offset); 191 } else { 192 if (ab->bus_params.static_window_map) 193 window_start = ath11k_pci_get_window_start(ab, offset); 194 else 195 window_start = WINDOW_START; 196 197 if (window_start == WINDOW_START) { 198 spin_lock_bh(&ab_pci->window_lock); 199 ath11k_pci_select_window(ab_pci, offset); 200 iowrite32(value, ab->mem + window_start + 201 (offset & WINDOW_RANGE_MASK)); 202 spin_unlock_bh(&ab_pci->window_lock); 203 } else { 204 iowrite32(value, ab->mem + window_start + 205 (offset & WINDOW_RANGE_MASK)); 206 } 207 } 208 209 if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && 210 offset >= ACCESS_ALWAYS_OFF) 211 mhi_device_put(ab_pci->mhi_ctrl->mhi_dev); 212 } 213 214 u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset) 215 { 216 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); 217 u32 val, window_start; 218 219 /* for offset beyond BAR + 4K - 32, may 220 * need to wakeup MHI to access. 221 */ 222 if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && 223 offset >= ACCESS_ALWAYS_OFF) 224 mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev); 225 226 if (offset < WINDOW_START) { 227 val = ioread32(ab->mem + offset); 228 } else { 229 if (ab->bus_params.static_window_map) 230 window_start = ath11k_pci_get_window_start(ab, offset); 231 else 232 window_start = WINDOW_START; 233 234 if (window_start == WINDOW_START) { 235 spin_lock_bh(&ab_pci->window_lock); 236 ath11k_pci_select_window(ab_pci, offset); 237 val = ioread32(ab->mem + window_start + 238 (offset & WINDOW_RANGE_MASK)); 239 spin_unlock_bh(&ab_pci->window_lock); 240 } else { 241 val = ioread32(ab->mem + window_start + 242 (offset & WINDOW_RANGE_MASK)); 243 } 244 } 245 246 if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && 247 offset >= ACCESS_ALWAYS_OFF) 248 mhi_device_put(ab_pci->mhi_ctrl->mhi_dev); 249 250 return val; 251 } 252 253 static void ath11k_pci_soc_global_reset(struct ath11k_base *ab) 254 { 255 u32 val, delay; 256 257 val = ath11k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET); 258 259 val |= PCIE_SOC_GLOBAL_RESET_V; 260 261 ath11k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val); 262 263 /* TODO: exact time to sleep is uncertain */ 264 delay = 10; 265 mdelay(delay); 266 267 /* Need to toggle V bit back otherwise stuck in reset status */ 268 val &= ~PCIE_SOC_GLOBAL_RESET_V; 269 270 ath11k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val); 271 272 mdelay(delay); 273 274 val = ath11k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET); 275 if (val == 0xffffffff) 276 ath11k_warn(ab, "link down error during global reset\n"); 277 } 278 279 static void ath11k_pci_clear_dbg_registers(struct ath11k_base *ab) 280 { 281 u32 val; 282 283 /* read cookie */ 284 val = ath11k_pci_read32(ab, PCIE_Q6_COOKIE_ADDR); 285 ath11k_dbg(ab, ATH11K_DBG_PCI, "cookie:0x%x\n", val); 286 287 val = ath11k_pci_read32(ab, WLAON_WARM_SW_ENTRY); 288 ath11k_dbg(ab, ATH11K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val); 289 290 /* TODO: exact time to sleep is uncertain */ 291 mdelay(10); 292 293 /* write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from 294 * continuing warm path and entering dead loop. 295 */ 296 ath11k_pci_write32(ab, WLAON_WARM_SW_ENTRY, 0); 297 mdelay(10); 298 299 val = ath11k_pci_read32(ab, WLAON_WARM_SW_ENTRY); 300 ath11k_dbg(ab, ATH11K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val); 301 302 /* A read clear register. clear the register to prevent 303 * Q6 from entering wrong code path. 304 */ 305 val = ath11k_pci_read32(ab, WLAON_SOC_RESET_CAUSE_REG); 306 ath11k_dbg(ab, ATH11K_DBG_PCI, "soc reset cause:%d\n", val); 307 } 308 309 static int ath11k_pci_set_link_reg(struct ath11k_base *ab, 310 u32 offset, u32 value, u32 mask) 311 { 312 u32 v; 313 int i; 314 315 v = ath11k_pci_read32(ab, offset); 316 if ((v & mask) == value) 317 return 0; 318 319 for (i = 0; i < 10; i++) { 320 ath11k_pci_write32(ab, offset, (v & ~mask) | value); 321 322 v = ath11k_pci_read32(ab, offset); 323 if ((v & mask) == value) 324 return 0; 325 326 mdelay(2); 327 } 328 329 ath11k_warn(ab, "failed to set pcie link register 0x%08x: 0x%08x != 0x%08x\n", 330 offset, v & mask, value); 331 332 return -ETIMEDOUT; 333 } 334 335 static int ath11k_pci_fix_l1ss(struct ath11k_base *ab) 336 { 337 int ret; 338 339 ret = ath11k_pci_set_link_reg(ab, 340 PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(ab), 341 PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL, 342 PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK); 343 if (ret) { 344 ath11k_warn(ab, "failed to set sysclk: %d\n", ret); 345 return ret; 346 } 347 348 ret = ath11k_pci_set_link_reg(ab, 349 PCIE_PCS_OSC_DTCT_CONFIG1_REG(ab), 350 PCIE_PCS_OSC_DTCT_CONFIG1_VAL, 351 PCIE_PCS_OSC_DTCT_CONFIG_MSK); 352 if (ret) { 353 ath11k_warn(ab, "failed to set dtct config1 error: %d\n", ret); 354 return ret; 355 } 356 357 ret = ath11k_pci_set_link_reg(ab, 358 PCIE_PCS_OSC_DTCT_CONFIG2_REG(ab), 359 PCIE_PCS_OSC_DTCT_CONFIG2_VAL, 360 PCIE_PCS_OSC_DTCT_CONFIG_MSK); 361 if (ret) { 362 ath11k_warn(ab, "failed to set dtct config2: %d\n", ret); 363 return ret; 364 } 365 366 ret = ath11k_pci_set_link_reg(ab, 367 PCIE_PCS_OSC_DTCT_CONFIG4_REG(ab), 368 PCIE_PCS_OSC_DTCT_CONFIG4_VAL, 369 PCIE_PCS_OSC_DTCT_CONFIG_MSK); 370 if (ret) { 371 ath11k_warn(ab, "failed to set dtct config4: %d\n", ret); 372 return ret; 373 } 374 375 return 0; 376 } 377 378 static void ath11k_pci_enable_ltssm(struct ath11k_base *ab) 379 { 380 u32 val; 381 int i; 382 383 val = ath11k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM); 384 385 /* PCIE link seems very unstable after the Hot Reset*/ 386 for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) { 387 if (val == 0xffffffff) 388 mdelay(5); 389 390 ath11k_pci_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE); 391 val = ath11k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM); 392 } 393 394 ath11k_dbg(ab, ATH11K_DBG_PCI, "pci ltssm 0x%x\n", val); 395 396 val = ath11k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST); 397 val |= GCC_GCC_PCIE_HOT_RST_VAL; 398 ath11k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST, val); 399 val = ath11k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST); 400 401 ath11k_dbg(ab, ATH11K_DBG_PCI, "pci pcie_hot_rst 0x%x\n", val); 402 403 mdelay(5); 404 } 405 406 static void ath11k_pci_clear_all_intrs(struct ath11k_base *ab) 407 { 408 /* This is a WAR for PCIE Hotreset. 409 * When target receive Hotreset, but will set the interrupt. 410 * So when download SBL again, SBL will open Interrupt and 411 * receive it, and crash immediately. 412 */ 413 ath11k_pci_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL); 414 } 415 416 static void ath11k_pci_set_wlaon_pwr_ctrl(struct ath11k_base *ab) 417 { 418 u32 val; 419 420 val = ath11k_pci_read32(ab, WLAON_QFPROM_PWR_CTRL_REG); 421 val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK; 422 ath11k_pci_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val); 423 } 424 425 static void ath11k_pci_force_wake(struct ath11k_base *ab) 426 { 427 ath11k_pci_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1); 428 mdelay(5); 429 } 430 431 static void ath11k_pci_sw_reset(struct ath11k_base *ab, bool power_on) 432 { 433 mdelay(100); 434 435 if (power_on) { 436 ath11k_pci_enable_ltssm(ab); 437 ath11k_pci_clear_all_intrs(ab); 438 ath11k_pci_set_wlaon_pwr_ctrl(ab); 439 if (ab->hw_params.fix_l1ss) 440 ath11k_pci_fix_l1ss(ab); 441 } 442 443 ath11k_mhi_clear_vector(ab); 444 ath11k_pci_clear_dbg_registers(ab); 445 ath11k_pci_soc_global_reset(ab); 446 ath11k_mhi_set_mhictrl_reset(ab); 447 } 448 449 int ath11k_pci_get_msi_irq(struct device *dev, unsigned int vector) 450 { 451 struct pci_dev *pci_dev = to_pci_dev(dev); 452 453 return pci_irq_vector(pci_dev, vector); 454 } 455 456 static void ath11k_pci_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo, 457 u32 *msi_addr_hi) 458 { 459 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); 460 struct pci_dev *pci_dev = to_pci_dev(ab->dev); 461 462 pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO, 463 msi_addr_lo); 464 465 if (test_bit(ATH11K_PCI_FLAG_IS_MSI_64, &ab_pci->flags)) { 466 pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI, 467 msi_addr_hi); 468 } else { 469 *msi_addr_hi = 0; 470 } 471 } 472 473 int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ab_pci, char *user_name, 474 int *num_vectors, u32 *user_base_data, 475 u32 *base_vector) 476 { 477 struct ath11k_base *ab = ab_pci->ab; 478 const struct ath11k_msi_config *msi_config = ab_pci->msi_config; 479 int idx; 480 481 for (idx = 0; idx < msi_config->total_users; idx++) { 482 if (strcmp(user_name, msi_config->users[idx].name) == 0) { 483 *num_vectors = msi_config->users[idx].num_vectors; 484 *user_base_data = msi_config->users[idx].base_vector 485 + ab_pci->msi_ep_base_data; 486 *base_vector = msi_config->users[idx].base_vector; 487 488 ath11k_dbg(ab, ATH11K_DBG_PCI, "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n", 489 user_name, *num_vectors, *user_base_data, 490 *base_vector); 491 492 return 0; 493 } 494 } 495 496 ath11k_err(ab, "Failed to find MSI assignment for %s!\n", user_name); 497 498 return -EINVAL; 499 } 500 501 static void ath11k_pci_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, 502 u32 *msi_idx) 503 { 504 u32 i, msi_data_idx; 505 506 for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) { 507 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 508 continue; 509 510 if (ce_id == i) 511 break; 512 513 msi_data_idx++; 514 } 515 *msi_idx = msi_data_idx; 516 } 517 518 static int ath11k_get_user_msi_assignment(struct ath11k_base *ab, char *user_name, 519 int *num_vectors, u32 *user_base_data, 520 u32 *base_vector) 521 { 522 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); 523 524 return ath11k_pci_get_user_msi_assignment(ab_pci, user_name, 525 num_vectors, user_base_data, 526 base_vector); 527 } 528 529 static void ath11k_pci_free_ext_irq(struct ath11k_base *ab) 530 { 531 int i, j; 532 533 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { 534 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; 535 536 for (j = 0; j < irq_grp->num_irq; j++) 537 free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp); 538 539 netif_napi_del(&irq_grp->napi); 540 } 541 } 542 543 static void ath11k_pci_free_irq(struct ath11k_base *ab) 544 { 545 int i, irq_idx; 546 547 for (i = 0; i < ab->hw_params.ce_count; i++) { 548 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 549 continue; 550 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i; 551 free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]); 552 } 553 554 ath11k_pci_free_ext_irq(ab); 555 } 556 557 static void ath11k_pci_ce_irq_enable(struct ath11k_base *ab, u16 ce_id) 558 { 559 u32 irq_idx; 560 561 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id; 562 enable_irq(ab->irq_num[irq_idx]); 563 } 564 565 static void ath11k_pci_ce_irq_disable(struct ath11k_base *ab, u16 ce_id) 566 { 567 u32 irq_idx; 568 569 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id; 570 disable_irq_nosync(ab->irq_num[irq_idx]); 571 } 572 573 static void ath11k_pci_ce_irqs_disable(struct ath11k_base *ab) 574 { 575 int i; 576 577 for (i = 0; i < ab->hw_params.ce_count; i++) { 578 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 579 continue; 580 ath11k_pci_ce_irq_disable(ab, i); 581 } 582 } 583 584 static void ath11k_pci_sync_ce_irqs(struct ath11k_base *ab) 585 { 586 int i; 587 int irq_idx; 588 589 for (i = 0; i < ab->hw_params.ce_count; i++) { 590 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 591 continue; 592 593 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i; 594 synchronize_irq(ab->irq_num[irq_idx]); 595 } 596 } 597 598 static void ath11k_pci_ce_tasklet(struct tasklet_struct *t) 599 { 600 struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq); 601 602 ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num); 603 604 ath11k_pci_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num); 605 } 606 607 static irqreturn_t ath11k_pci_ce_interrupt_handler(int irq, void *arg) 608 { 609 struct ath11k_ce_pipe *ce_pipe = arg; 610 611 /* last interrupt received for this CE */ 612 ce_pipe->timestamp = jiffies; 613 614 ath11k_pci_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num); 615 tasklet_schedule(&ce_pipe->intr_tq); 616 617 return IRQ_HANDLED; 618 } 619 620 static void ath11k_pci_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp) 621 { 622 int i; 623 624 for (i = 0; i < irq_grp->num_irq; i++) 625 disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]); 626 } 627 628 static void __ath11k_pci_ext_irq_disable(struct ath11k_base *sc) 629 { 630 int i; 631 632 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { 633 struct ath11k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i]; 634 635 ath11k_pci_ext_grp_disable(irq_grp); 636 637 napi_synchronize(&irq_grp->napi); 638 napi_disable(&irq_grp->napi); 639 } 640 } 641 642 static void ath11k_pci_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp) 643 { 644 int i; 645 646 for (i = 0; i < irq_grp->num_irq; i++) 647 enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]); 648 } 649 650 static void ath11k_pci_ext_irq_enable(struct ath11k_base *ab) 651 { 652 int i; 653 654 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { 655 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; 656 657 napi_enable(&irq_grp->napi); 658 ath11k_pci_ext_grp_enable(irq_grp); 659 } 660 } 661 662 static void ath11k_pci_sync_ext_irqs(struct ath11k_base *ab) 663 { 664 int i, j, irq_idx; 665 666 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { 667 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; 668 669 for (j = 0; j < irq_grp->num_irq; j++) { 670 irq_idx = irq_grp->irqs[j]; 671 synchronize_irq(ab->irq_num[irq_idx]); 672 } 673 } 674 } 675 676 static void ath11k_pci_ext_irq_disable(struct ath11k_base *ab) 677 { 678 __ath11k_pci_ext_irq_disable(ab); 679 ath11k_pci_sync_ext_irqs(ab); 680 } 681 682 static int ath11k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget) 683 { 684 struct ath11k_ext_irq_grp *irq_grp = container_of(napi, 685 struct ath11k_ext_irq_grp, 686 napi); 687 struct ath11k_base *ab = irq_grp->ab; 688 int work_done; 689 690 work_done = ath11k_dp_service_srng(ab, irq_grp, budget); 691 if (work_done < budget) { 692 napi_complete_done(napi, work_done); 693 ath11k_pci_ext_grp_enable(irq_grp); 694 } 695 696 if (work_done > budget) 697 work_done = budget; 698 699 return work_done; 700 } 701 702 static irqreturn_t ath11k_pci_ext_interrupt_handler(int irq, void *arg) 703 { 704 struct ath11k_ext_irq_grp *irq_grp = arg; 705 706 ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq:%d\n", irq); 707 708 /* last interrupt received for this group */ 709 irq_grp->timestamp = jiffies; 710 711 ath11k_pci_ext_grp_disable(irq_grp); 712 713 napi_schedule(&irq_grp->napi); 714 715 return IRQ_HANDLED; 716 } 717 718 static int ath11k_pci_ext_irq_config(struct ath11k_base *ab) 719 { 720 int i, j, ret, num_vectors = 0; 721 u32 user_base_data = 0, base_vector = 0, base_idx; 722 723 base_idx = ATH11K_PCI_IRQ_CE0_OFFSET + CE_COUNT_MAX; 724 ret = ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab), "DP", 725 &num_vectors, 726 &user_base_data, 727 &base_vector); 728 if (ret < 0) 729 return ret; 730 731 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { 732 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; 733 u32 num_irq = 0; 734 735 irq_grp->ab = ab; 736 irq_grp->grp_id = i; 737 init_dummy_netdev(&irq_grp->napi_ndev); 738 netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi, 739 ath11k_pci_ext_grp_napi_poll, NAPI_POLL_WEIGHT); 740 741 if (ab->hw_params.ring_mask->tx[i] || 742 ab->hw_params.ring_mask->rx[i] || 743 ab->hw_params.ring_mask->rx_err[i] || 744 ab->hw_params.ring_mask->rx_wbm_rel[i] || 745 ab->hw_params.ring_mask->reo_status[i] || 746 ab->hw_params.ring_mask->rxdma2host[i] || 747 ab->hw_params.ring_mask->host2rxdma[i] || 748 ab->hw_params.ring_mask->rx_mon_status[i]) { 749 num_irq = 1; 750 } 751 752 irq_grp->num_irq = num_irq; 753 irq_grp->irqs[0] = base_idx + i; 754 755 for (j = 0; j < irq_grp->num_irq; j++) { 756 int irq_idx = irq_grp->irqs[j]; 757 int vector = (i % num_vectors) + base_vector; 758 int irq = ath11k_pci_get_msi_irq(ab->dev, vector); 759 760 ab->irq_num[irq_idx] = irq; 761 762 ath11k_dbg(ab, ATH11K_DBG_PCI, 763 "irq:%d group:%d\n", irq, i); 764 765 irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY); 766 ret = request_irq(irq, ath11k_pci_ext_interrupt_handler, 767 IRQF_SHARED, 768 "DP_EXT_IRQ", irq_grp); 769 if (ret) { 770 ath11k_err(ab, "failed request irq %d: %d\n", 771 vector, ret); 772 return ret; 773 } 774 775 disable_irq_nosync(ab->irq_num[irq_idx]); 776 } 777 } 778 779 return 0; 780 } 781 782 static int ath11k_pci_config_irq(struct ath11k_base *ab) 783 { 784 struct ath11k_ce_pipe *ce_pipe; 785 u32 msi_data_start; 786 u32 msi_data_count, msi_data_idx; 787 u32 msi_irq_start; 788 unsigned int msi_data; 789 int irq, i, ret, irq_idx; 790 791 ret = ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab), 792 "CE", &msi_data_count, 793 &msi_data_start, &msi_irq_start); 794 if (ret) 795 return ret; 796 797 /* Configure CE irqs */ 798 for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) { 799 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 800 continue; 801 802 msi_data = (msi_data_idx % msi_data_count) + msi_irq_start; 803 irq = ath11k_pci_get_msi_irq(ab->dev, msi_data); 804 ce_pipe = &ab->ce.ce_pipe[i]; 805 806 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i; 807 808 tasklet_setup(&ce_pipe->intr_tq, ath11k_pci_ce_tasklet); 809 810 ret = request_irq(irq, ath11k_pci_ce_interrupt_handler, 811 IRQF_SHARED, irq_name[irq_idx], 812 ce_pipe); 813 if (ret) { 814 ath11k_err(ab, "failed to request irq %d: %d\n", 815 irq_idx, ret); 816 return ret; 817 } 818 819 ab->irq_num[irq_idx] = irq; 820 msi_data_idx++; 821 822 ath11k_pci_ce_irq_disable(ab, i); 823 } 824 825 ret = ath11k_pci_ext_irq_config(ab); 826 if (ret) 827 return ret; 828 829 return 0; 830 } 831 832 static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab) 833 { 834 struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg; 835 836 cfg->tgt_ce = ab->hw_params.target_ce_config; 837 cfg->tgt_ce_len = ab->hw_params.target_ce_count; 838 839 cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map; 840 cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len; 841 ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id; 842 843 ath11k_ce_get_shadow_config(ab, &cfg->shadow_reg_v2, 844 &cfg->shadow_reg_v2_len); 845 } 846 847 static void ath11k_pci_ce_irqs_enable(struct ath11k_base *ab) 848 { 849 int i; 850 851 for (i = 0; i < ab->hw_params.ce_count; i++) { 852 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 853 continue; 854 ath11k_pci_ce_irq_enable(ab, i); 855 } 856 } 857 858 static void ath11k_pci_msi_config(struct ath11k_pci *ab_pci, bool enable) 859 { 860 struct pci_dev *dev = ab_pci->pdev; 861 u16 control; 862 863 pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); 864 865 if (enable) 866 control |= PCI_MSI_FLAGS_ENABLE; 867 else 868 control &= ~PCI_MSI_FLAGS_ENABLE; 869 870 pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); 871 } 872 873 static void ath11k_pci_msi_enable(struct ath11k_pci *ab_pci) 874 { 875 ath11k_pci_msi_config(ab_pci, true); 876 } 877 878 static void ath11k_pci_msi_disable(struct ath11k_pci *ab_pci) 879 { 880 ath11k_pci_msi_config(ab_pci, false); 881 } 882 883 static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci) 884 { 885 struct ath11k_base *ab = ab_pci->ab; 886 const struct ath11k_msi_config *msi_config = ab_pci->msi_config; 887 struct msi_desc *msi_desc; 888 int num_vectors; 889 int ret; 890 891 num_vectors = pci_alloc_irq_vectors(ab_pci->pdev, 892 msi_config->total_vectors, 893 msi_config->total_vectors, 894 PCI_IRQ_MSI); 895 if (num_vectors != msi_config->total_vectors) { 896 ath11k_err(ab, "failed to get %d MSI vectors, only %d available", 897 msi_config->total_vectors, num_vectors); 898 899 if (num_vectors >= 0) 900 return -EINVAL; 901 else 902 return num_vectors; 903 } 904 ath11k_pci_msi_disable(ab_pci); 905 906 msi_desc = irq_get_msi_desc(ab_pci->pdev->irq); 907 if (!msi_desc) { 908 ath11k_err(ab, "msi_desc is NULL!\n"); 909 ret = -EINVAL; 910 goto free_msi_vector; 911 } 912 913 ab_pci->msi_ep_base_data = msi_desc->msg.data; 914 if (msi_desc->msi_attrib.is_64) 915 set_bit(ATH11K_PCI_FLAG_IS_MSI_64, &ab_pci->flags); 916 917 ath11k_dbg(ab, ATH11K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data); 918 919 return 0; 920 921 free_msi_vector: 922 pci_free_irq_vectors(ab_pci->pdev); 923 924 return ret; 925 } 926 927 static void ath11k_pci_free_msi(struct ath11k_pci *ab_pci) 928 { 929 pci_free_irq_vectors(ab_pci->pdev); 930 } 931 932 static int ath11k_pci_claim(struct ath11k_pci *ab_pci, struct pci_dev *pdev) 933 { 934 struct ath11k_base *ab = ab_pci->ab; 935 u16 device_id; 936 int ret = 0; 937 938 pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id); 939 if (device_id != ab_pci->dev_id) { 940 ath11k_err(ab, "pci device id mismatch: 0x%x 0x%x\n", 941 device_id, ab_pci->dev_id); 942 ret = -EIO; 943 goto out; 944 } 945 946 ret = pci_assign_resource(pdev, ATH11K_PCI_BAR_NUM); 947 if (ret) { 948 ath11k_err(ab, "failed to assign pci resource: %d\n", ret); 949 goto out; 950 } 951 952 ret = pci_enable_device(pdev); 953 if (ret) { 954 ath11k_err(ab, "failed to enable pci device: %d\n", ret); 955 goto out; 956 } 957 958 ret = pci_request_region(pdev, ATH11K_PCI_BAR_NUM, "ath11k_pci"); 959 if (ret) { 960 ath11k_err(ab, "failed to request pci region: %d\n", ret); 961 goto disable_device; 962 } 963 964 ret = dma_set_mask_and_coherent(&pdev->dev, 965 DMA_BIT_MASK(ATH11K_PCI_DMA_MASK)); 966 if (ret) { 967 ath11k_err(ab, "failed to set pci dma mask to %d: %d\n", 968 ATH11K_PCI_DMA_MASK, ret); 969 goto release_region; 970 } 971 972 pci_set_master(pdev); 973 974 ab->mem_len = pci_resource_len(pdev, ATH11K_PCI_BAR_NUM); 975 ab->mem = pci_iomap(pdev, ATH11K_PCI_BAR_NUM, 0); 976 if (!ab->mem) { 977 ath11k_err(ab, "failed to map pci bar %d\n", ATH11K_PCI_BAR_NUM); 978 ret = -EIO; 979 goto clear_master; 980 } 981 982 ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot pci_mem 0x%pK\n", ab->mem); 983 return 0; 984 985 clear_master: 986 pci_clear_master(pdev); 987 release_region: 988 pci_release_region(pdev, ATH11K_PCI_BAR_NUM); 989 disable_device: 990 pci_disable_device(pdev); 991 out: 992 return ret; 993 } 994 995 static void ath11k_pci_free_region(struct ath11k_pci *ab_pci) 996 { 997 struct ath11k_base *ab = ab_pci->ab; 998 struct pci_dev *pci_dev = ab_pci->pdev; 999 1000 pci_iounmap(pci_dev, ab->mem); 1001 ab->mem = NULL; 1002 pci_clear_master(pci_dev); 1003 pci_release_region(pci_dev, ATH11K_PCI_BAR_NUM); 1004 if (pci_is_enabled(pci_dev)) 1005 pci_disable_device(pci_dev); 1006 } 1007 1008 static void ath11k_pci_aspm_disable(struct ath11k_pci *ab_pci) 1009 { 1010 struct ath11k_base *ab = ab_pci->ab; 1011 1012 pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL, 1013 &ab_pci->link_ctl); 1014 1015 ath11k_dbg(ab, ATH11K_DBG_PCI, "pci link_ctl 0x%04x L0s %d L1 %d\n", 1016 ab_pci->link_ctl, 1017 u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S), 1018 u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1)); 1019 1020 /* disable L0s and L1 */ 1021 pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL, 1022 ab_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC); 1023 1024 set_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags); 1025 } 1026 1027 static void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci) 1028 { 1029 if (test_and_clear_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags)) 1030 pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL, 1031 ab_pci->link_ctl); 1032 } 1033 1034 static int ath11k_pci_power_up(struct ath11k_base *ab) 1035 { 1036 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); 1037 int ret; 1038 1039 ab_pci->register_window = 0; 1040 clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags); 1041 ath11k_pci_sw_reset(ab_pci->ab, true); 1042 1043 /* Disable ASPM during firmware download due to problems switching 1044 * to AMSS state. 1045 */ 1046 ath11k_pci_aspm_disable(ab_pci); 1047 1048 ath11k_pci_msi_enable(ab_pci); 1049 1050 ret = ath11k_mhi_start(ab_pci); 1051 if (ret) { 1052 ath11k_err(ab, "failed to start mhi: %d\n", ret); 1053 return ret; 1054 } 1055 1056 if (ab->bus_params.static_window_map) 1057 ath11k_pci_select_static_window(ab_pci); 1058 1059 return 0; 1060 } 1061 1062 static void ath11k_pci_power_down(struct ath11k_base *ab) 1063 { 1064 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); 1065 1066 /* restore aspm in case firmware bootup fails */ 1067 ath11k_pci_aspm_restore(ab_pci); 1068 1069 ath11k_pci_force_wake(ab_pci->ab); 1070 1071 ath11k_pci_msi_disable(ab_pci); 1072 1073 ath11k_mhi_stop(ab_pci); 1074 clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags); 1075 ath11k_pci_sw_reset(ab_pci->ab, false); 1076 } 1077 1078 static int ath11k_pci_hif_suspend(struct ath11k_base *ab) 1079 { 1080 struct ath11k_pci *ar_pci = ath11k_pci_priv(ab); 1081 1082 ath11k_mhi_suspend(ar_pci); 1083 1084 return 0; 1085 } 1086 1087 static int ath11k_pci_hif_resume(struct ath11k_base *ab) 1088 { 1089 struct ath11k_pci *ar_pci = ath11k_pci_priv(ab); 1090 1091 ath11k_mhi_resume(ar_pci); 1092 1093 return 0; 1094 } 1095 1096 static void ath11k_pci_kill_tasklets(struct ath11k_base *ab) 1097 { 1098 int i; 1099 1100 for (i = 0; i < ab->hw_params.ce_count; i++) { 1101 struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i]; 1102 1103 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 1104 continue; 1105 1106 tasklet_kill(&ce_pipe->intr_tq); 1107 } 1108 } 1109 1110 static void ath11k_pci_ce_irq_disable_sync(struct ath11k_base *ab) 1111 { 1112 ath11k_pci_ce_irqs_disable(ab); 1113 ath11k_pci_sync_ce_irqs(ab); 1114 ath11k_pci_kill_tasklets(ab); 1115 } 1116 1117 static void ath11k_pci_stop(struct ath11k_base *ab) 1118 { 1119 ath11k_pci_ce_irq_disable_sync(ab); 1120 ath11k_ce_cleanup_pipes(ab); 1121 } 1122 1123 static int ath11k_pci_start(struct ath11k_base *ab) 1124 { 1125 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); 1126 1127 set_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags); 1128 1129 ath11k_pci_aspm_restore(ab_pci); 1130 1131 ath11k_pci_ce_irqs_enable(ab); 1132 ath11k_ce_rx_post_buf(ab); 1133 1134 return 0; 1135 } 1136 1137 static void ath11k_pci_hif_ce_irq_enable(struct ath11k_base *ab) 1138 { 1139 ath11k_pci_ce_irqs_enable(ab); 1140 } 1141 1142 static void ath11k_pci_hif_ce_irq_disable(struct ath11k_base *ab) 1143 { 1144 ath11k_pci_ce_irq_disable_sync(ab); 1145 } 1146 1147 static int ath11k_pci_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, 1148 u8 *ul_pipe, u8 *dl_pipe) 1149 { 1150 const struct service_to_pipe *entry; 1151 bool ul_set = false, dl_set = false; 1152 int i; 1153 1154 for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) { 1155 entry = &ab->hw_params.svc_to_ce_map[i]; 1156 1157 if (__le32_to_cpu(entry->service_id) != service_id) 1158 continue; 1159 1160 switch (__le32_to_cpu(entry->pipedir)) { 1161 case PIPEDIR_NONE: 1162 break; 1163 case PIPEDIR_IN: 1164 WARN_ON(dl_set); 1165 *dl_pipe = __le32_to_cpu(entry->pipenum); 1166 dl_set = true; 1167 break; 1168 case PIPEDIR_OUT: 1169 WARN_ON(ul_set); 1170 *ul_pipe = __le32_to_cpu(entry->pipenum); 1171 ul_set = true; 1172 break; 1173 case PIPEDIR_INOUT: 1174 WARN_ON(dl_set); 1175 WARN_ON(ul_set); 1176 *dl_pipe = __le32_to_cpu(entry->pipenum); 1177 *ul_pipe = __le32_to_cpu(entry->pipenum); 1178 dl_set = true; 1179 ul_set = true; 1180 break; 1181 } 1182 } 1183 1184 if (WARN_ON(!ul_set || !dl_set)) 1185 return -ENOENT; 1186 1187 return 0; 1188 } 1189 1190 static const struct ath11k_hif_ops ath11k_pci_hif_ops = { 1191 .start = ath11k_pci_start, 1192 .stop = ath11k_pci_stop, 1193 .read32 = ath11k_pci_read32, 1194 .write32 = ath11k_pci_write32, 1195 .power_down = ath11k_pci_power_down, 1196 .power_up = ath11k_pci_power_up, 1197 .suspend = ath11k_pci_hif_suspend, 1198 .resume = ath11k_pci_hif_resume, 1199 .irq_enable = ath11k_pci_ext_irq_enable, 1200 .irq_disable = ath11k_pci_ext_irq_disable, 1201 .get_msi_address = ath11k_pci_get_msi_address, 1202 .get_user_msi_vector = ath11k_get_user_msi_assignment, 1203 .map_service_to_pipe = ath11k_pci_map_service_to_pipe, 1204 .ce_irq_enable = ath11k_pci_hif_ce_irq_enable, 1205 .ce_irq_disable = ath11k_pci_hif_ce_irq_disable, 1206 .get_ce_msi_idx = ath11k_pci_get_ce_msi_idx, 1207 }; 1208 1209 static void ath11k_pci_read_hw_version(struct ath11k_base *ab, u32 *major, u32 *minor) 1210 { 1211 u32 soc_hw_version; 1212 1213 soc_hw_version = ath11k_pci_read32(ab, TCSR_SOC_HW_VERSION); 1214 *major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK, 1215 soc_hw_version); 1216 *minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK, 1217 soc_hw_version); 1218 1219 ath11k_dbg(ab, ATH11K_DBG_PCI, "pci tcsr_soc_hw_version major %d minor %d\n", 1220 *major, *minor); 1221 } 1222 1223 static int ath11k_pci_probe(struct pci_dev *pdev, 1224 const struct pci_device_id *pci_dev) 1225 { 1226 struct ath11k_base *ab; 1227 struct ath11k_pci *ab_pci; 1228 u32 soc_hw_version_major, soc_hw_version_minor; 1229 int ret; 1230 1231 ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI, 1232 &ath11k_pci_bus_params); 1233 if (!ab) { 1234 dev_err(&pdev->dev, "failed to allocate ath11k base\n"); 1235 return -ENOMEM; 1236 } 1237 1238 ab->dev = &pdev->dev; 1239 pci_set_drvdata(pdev, ab); 1240 ab_pci = ath11k_pci_priv(ab); 1241 ab_pci->dev_id = pci_dev->device; 1242 ab_pci->ab = ab; 1243 ab_pci->pdev = pdev; 1244 ab->hif.ops = &ath11k_pci_hif_ops; 1245 pci_set_drvdata(pdev, ab); 1246 spin_lock_init(&ab_pci->window_lock); 1247 1248 ret = ath11k_pci_claim(ab_pci, pdev); 1249 if (ret) { 1250 ath11k_err(ab, "failed to claim device: %d\n", ret); 1251 goto err_free_core; 1252 } 1253 1254 switch (pci_dev->device) { 1255 case QCA6390_DEVICE_ID: 1256 ath11k_pci_read_hw_version(ab, &soc_hw_version_major, 1257 &soc_hw_version_minor); 1258 switch (soc_hw_version_major) { 1259 case 2: 1260 ab->hw_rev = ATH11K_HW_QCA6390_HW20; 1261 break; 1262 default: 1263 dev_err(&pdev->dev, "Unsupported QCA6390 SOC hardware version: %d %d\n", 1264 soc_hw_version_major, soc_hw_version_minor); 1265 ret = -EOPNOTSUPP; 1266 goto err_pci_free_region; 1267 } 1268 ab_pci->msi_config = &ath11k_msi_config[0]; 1269 break; 1270 case QCN9074_DEVICE_ID: 1271 ab_pci->msi_config = &ath11k_msi_config[1]; 1272 ab->bus_params.static_window_map = true; 1273 ab->hw_rev = ATH11K_HW_QCN9074_HW10; 1274 break; 1275 case WCN6855_DEVICE_ID: 1276 ath11k_pci_read_hw_version(ab, &soc_hw_version_major, 1277 &soc_hw_version_minor); 1278 switch (soc_hw_version_major) { 1279 case 2: 1280 ab->hw_rev = ATH11K_HW_WCN6855_HW20; 1281 break; 1282 default: 1283 dev_err(&pdev->dev, "Unsupported WCN6855 SOC hardware version: %d %d\n", 1284 soc_hw_version_major, soc_hw_version_minor); 1285 ret = -EOPNOTSUPP; 1286 goto err_pci_free_region; 1287 } 1288 ab_pci->msi_config = &ath11k_msi_config[0]; 1289 break; 1290 default: 1291 dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n", 1292 pci_dev->device); 1293 ret = -EOPNOTSUPP; 1294 goto err_pci_free_region; 1295 } 1296 1297 ret = ath11k_pci_alloc_msi(ab_pci); 1298 if (ret) { 1299 ath11k_err(ab, "failed to enable msi: %d\n", ret); 1300 goto err_pci_free_region; 1301 } 1302 1303 ret = ath11k_core_pre_init(ab); 1304 if (ret) 1305 goto err_pci_disable_msi; 1306 1307 ret = ath11k_mhi_register(ab_pci); 1308 if (ret) { 1309 ath11k_err(ab, "failed to register mhi: %d\n", ret); 1310 goto err_pci_disable_msi; 1311 } 1312 1313 ret = ath11k_hal_srng_init(ab); 1314 if (ret) 1315 goto err_mhi_unregister; 1316 1317 ret = ath11k_ce_alloc_pipes(ab); 1318 if (ret) { 1319 ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret); 1320 goto err_hal_srng_deinit; 1321 } 1322 1323 ath11k_pci_init_qmi_ce_config(ab); 1324 1325 ret = ath11k_pci_config_irq(ab); 1326 if (ret) { 1327 ath11k_err(ab, "failed to config irq: %d\n", ret); 1328 goto err_ce_free; 1329 } 1330 1331 ret = ath11k_core_init(ab); 1332 if (ret) { 1333 ath11k_err(ab, "failed to init core: %d\n", ret); 1334 goto err_free_irq; 1335 } 1336 return 0; 1337 1338 err_free_irq: 1339 ath11k_pci_free_irq(ab); 1340 1341 err_ce_free: 1342 ath11k_ce_free_pipes(ab); 1343 1344 err_hal_srng_deinit: 1345 ath11k_hal_srng_deinit(ab); 1346 1347 err_mhi_unregister: 1348 ath11k_mhi_unregister(ab_pci); 1349 1350 err_pci_disable_msi: 1351 ath11k_pci_free_msi(ab_pci); 1352 1353 err_pci_free_region: 1354 ath11k_pci_free_region(ab_pci); 1355 1356 err_free_core: 1357 ath11k_core_free(ab); 1358 1359 return ret; 1360 } 1361 1362 static void ath11k_pci_remove(struct pci_dev *pdev) 1363 { 1364 struct ath11k_base *ab = pci_get_drvdata(pdev); 1365 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab); 1366 1367 if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) { 1368 ath11k_pci_power_down(ab); 1369 ath11k_debugfs_soc_destroy(ab); 1370 ath11k_qmi_deinit_service(ab); 1371 goto qmi_fail; 1372 } 1373 1374 set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags); 1375 1376 ath11k_core_deinit(ab); 1377 1378 qmi_fail: 1379 ath11k_mhi_unregister(ab_pci); 1380 1381 ath11k_pci_free_irq(ab); 1382 ath11k_pci_free_msi(ab_pci); 1383 ath11k_pci_free_region(ab_pci); 1384 1385 ath11k_hal_srng_deinit(ab); 1386 ath11k_ce_free_pipes(ab); 1387 ath11k_core_free(ab); 1388 } 1389 1390 static void ath11k_pci_shutdown(struct pci_dev *pdev) 1391 { 1392 struct ath11k_base *ab = pci_get_drvdata(pdev); 1393 1394 ath11k_pci_power_down(ab); 1395 } 1396 1397 static __maybe_unused int ath11k_pci_pm_suspend(struct device *dev) 1398 { 1399 struct ath11k_base *ab = dev_get_drvdata(dev); 1400 int ret; 1401 1402 ret = ath11k_core_suspend(ab); 1403 if (ret) 1404 ath11k_warn(ab, "failed to suspend core: %d\n", ret); 1405 1406 return ret; 1407 } 1408 1409 static __maybe_unused int ath11k_pci_pm_resume(struct device *dev) 1410 { 1411 struct ath11k_base *ab = dev_get_drvdata(dev); 1412 int ret; 1413 1414 ret = ath11k_core_resume(ab); 1415 if (ret) 1416 ath11k_warn(ab, "failed to resume core: %d\n", ret); 1417 1418 return ret; 1419 } 1420 1421 static SIMPLE_DEV_PM_OPS(ath11k_pci_pm_ops, 1422 ath11k_pci_pm_suspend, 1423 ath11k_pci_pm_resume); 1424 1425 static struct pci_driver ath11k_pci_driver = { 1426 .name = "ath11k_pci", 1427 .id_table = ath11k_pci_id_table, 1428 .probe = ath11k_pci_probe, 1429 .remove = ath11k_pci_remove, 1430 .shutdown = ath11k_pci_shutdown, 1431 #ifdef CONFIG_PM 1432 .driver.pm = &ath11k_pci_pm_ops, 1433 #endif 1434 }; 1435 1436 static int ath11k_pci_init(void) 1437 { 1438 int ret; 1439 1440 ret = pci_register_driver(&ath11k_pci_driver); 1441 if (ret) 1442 pr_err("failed to register ath11k pci driver: %d\n", 1443 ret); 1444 1445 return ret; 1446 } 1447 module_init(ath11k_pci_init); 1448 1449 static void ath11k_pci_exit(void) 1450 { 1451 pci_unregister_driver(&ath11k_pci_driver); 1452 } 1453 1454 module_exit(ath11k_pci_exit); 1455 1456 MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN PCIe devices"); 1457 MODULE_LICENSE("Dual BSD/GPL"); 1458 1459 /* QCA639x 2.0 firmware files */ 1460 MODULE_FIRMWARE(ATH11K_FW_DIR "/QCA6390/hw2.0/" ATH11K_BOARD_API2_FILE); 1461 MODULE_FIRMWARE(ATH11K_FW_DIR "/QCA6390/hw2.0/" ATH11K_AMSS_FILE); 1462 MODULE_FIRMWARE(ATH11K_FW_DIR "/QCA6390/hw2.0/" ATH11K_M3_FILE); 1463