1 /* 2 * Copyright (c) 2005-2011 Atheros Communications Inc. 3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for any 6 * purpose with or without fee is hereby granted, provided that the above 7 * copyright notice and this permission notice appear in all copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 */ 17 18 #include <linux/pci.h> 19 #include <linux/module.h> 20 #include <linux/interrupt.h> 21 #include <linux/spinlock.h> 22 #include <linux/bitops.h> 23 24 #include "core.h" 25 #include "debug.h" 26 27 #include "targaddrs.h" 28 #include "bmi.h" 29 30 #include "hif.h" 31 #include "htc.h" 32 33 #include "ce.h" 34 #include "pci.h" 35 36 enum ath10k_pci_irq_mode { 37 ATH10K_PCI_IRQ_AUTO = 0, 38 ATH10K_PCI_IRQ_LEGACY = 1, 39 ATH10K_PCI_IRQ_MSI = 2, 40 }; 41 42 enum ath10k_pci_reset_mode { 43 ATH10K_PCI_RESET_AUTO = 0, 44 ATH10K_PCI_RESET_WARM_ONLY = 1, 45 }; 46 47 static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO; 48 static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO; 49 50 module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644); 51 MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)"); 52 53 module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644); 54 MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)"); 55 56 /* how long wait to wait for target to initialise, in ms */ 57 #define ATH10K_PCI_TARGET_WAIT 3000 58 #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3 59 60 #define QCA988X_2_0_DEVICE_ID (0x003c) 61 #define QCA6164_2_1_DEVICE_ID (0x0041) 62 #define QCA6174_2_1_DEVICE_ID (0x003e) 63 #define QCA99X0_2_0_DEVICE_ID (0x0040) 64 65 static const struct pci_device_id ath10k_pci_id_table[] = { 66 { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */ 67 { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */ 68 { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */ 69 { PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */ 70 {0} 71 }; 72 73 static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = { 74 /* QCA988X pre 2.0 chips are not supported because they need some nasty 75 * hacks. ath10k doesn't have them and these devices crash horribly 76 * because of that. 
77 */ 78 { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV }, 79 80 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV }, 81 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV }, 82 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV }, 83 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV }, 84 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV }, 85 86 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV }, 87 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV }, 88 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV }, 89 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV }, 90 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV }, 91 92 { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV }, 93 }; 94 95 static void ath10k_pci_buffer_cleanup(struct ath10k *ar); 96 static int ath10k_pci_cold_reset(struct ath10k *ar); 97 static int ath10k_pci_safe_chip_reset(struct ath10k *ar); 98 static int ath10k_pci_wait_for_target_init(struct ath10k *ar); 99 static int ath10k_pci_init_irq(struct ath10k *ar); 100 static int ath10k_pci_deinit_irq(struct ath10k *ar); 101 static int ath10k_pci_request_irq(struct ath10k *ar); 102 static void ath10k_pci_free_irq(struct ath10k *ar); 103 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe, 104 struct ath10k_ce_pipe *rx_pipe, 105 struct bmi_xfer *xfer); 106 static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar); 107 108 static const struct ce_attr host_ce_config_wlan[] = { 109 /* CE0: host->target HTC control and raw streams */ 110 { 111 .flags = CE_ATTR_FLAGS, 112 .src_nentries = 16, 113 .src_sz_max = 256, 114 .dest_nentries = 0, 115 }, 116 117 /* CE1: target->host HTT + HTC control */ 118 { 119 .flags = CE_ATTR_FLAGS, 120 .src_nentries = 0, 121 .src_sz_max = 2048, 122 .dest_nentries = 512, 123 }, 124 125 /* CE2: target->host WMI */ 126 { 127 .flags = CE_ATTR_FLAGS, 128 .src_nentries = 0, 129 .src_sz_max = 2048, 130 .dest_nentries = 128, 131 }, 132 133 /* CE3: host->target WMI */ 134 { 135 .flags = CE_ATTR_FLAGS, 136 .src_nentries = 32, 137 .src_sz_max = 2048, 138 .dest_nentries = 0, 139 }, 140 141 /* CE4: host->target HTT */ 142 { 143 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 144 .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES, 145 .src_sz_max = 256, 146 .dest_nentries = 0, 147 }, 148 149 /* CE5: unused */ 150 { 151 .flags = CE_ATTR_FLAGS, 152 .src_nentries = 0, 153 .src_sz_max = 0, 154 .dest_nentries = 0, 155 }, 156 157 /* CE6: target autonomous hif_memcpy */ 158 { 159 .flags = CE_ATTR_FLAGS, 160 .src_nentries = 0, 161 .src_sz_max = 0, 162 .dest_nentries = 0, 163 }, 164 165 /* CE7: ce_diag, the Diagnostic Window */ 166 { 167 .flags = CE_ATTR_FLAGS, 168 .src_nentries = 2, 169 .src_sz_max = DIAG_TRANSFER_LIMIT, 170 .dest_nentries = 2, 171 }, 172 173 /* CE8: target->host pktlog */ 174 { 175 .flags = CE_ATTR_FLAGS, 176 .src_nentries = 0, 177 .src_sz_max = 2048, 178 .dest_nentries = 128, 179 }, 180 181 /* CE9 target autonomous qcache memcpy */ 182 { 183 .flags = CE_ATTR_FLAGS, 184 .src_nentries = 0, 185 .src_sz_max = 0, 186 .dest_nentries = 0, 187 }, 188 189 /* CE10: target autonomous hif memcpy */ 190 { 191 .flags = CE_ATTR_FLAGS, 192 .src_nentries = 0, 193 .src_sz_max = 0, 194 .dest_nentries = 0, 195 }, 196 197 /* CE11: target autonomous hif memcpy */ 198 { 199 .flags = CE_ATTR_FLAGS, 200 .src_nentries = 0, 201 .src_sz_max = 0, 202 .dest_nentries = 0, 203 }, 204 }; 205 206 /* Target firmware's Copy Engine configuration. 
*/ 207 static const struct ce_pipe_config target_ce_config_wlan[] = { 208 /* CE0: host->target HTC control and raw streams */ 209 { 210 .pipenum = __cpu_to_le32(0), 211 .pipedir = __cpu_to_le32(PIPEDIR_OUT), 212 .nentries = __cpu_to_le32(32), 213 .nbytes_max = __cpu_to_le32(256), 214 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 215 .reserved = __cpu_to_le32(0), 216 }, 217 218 /* CE1: target->host HTT + HTC control */ 219 { 220 .pipenum = __cpu_to_le32(1), 221 .pipedir = __cpu_to_le32(PIPEDIR_IN), 222 .nentries = __cpu_to_le32(32), 223 .nbytes_max = __cpu_to_le32(2048), 224 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 225 .reserved = __cpu_to_le32(0), 226 }, 227 228 /* CE2: target->host WMI */ 229 { 230 .pipenum = __cpu_to_le32(2), 231 .pipedir = __cpu_to_le32(PIPEDIR_IN), 232 .nentries = __cpu_to_le32(64), 233 .nbytes_max = __cpu_to_le32(2048), 234 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 235 .reserved = __cpu_to_le32(0), 236 }, 237 238 /* CE3: host->target WMI */ 239 { 240 .pipenum = __cpu_to_le32(3), 241 .pipedir = __cpu_to_le32(PIPEDIR_OUT), 242 .nentries = __cpu_to_le32(32), 243 .nbytes_max = __cpu_to_le32(2048), 244 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 245 .reserved = __cpu_to_le32(0), 246 }, 247 248 /* CE4: host->target HTT */ 249 { 250 .pipenum = __cpu_to_le32(4), 251 .pipedir = __cpu_to_le32(PIPEDIR_OUT), 252 .nentries = __cpu_to_le32(256), 253 .nbytes_max = __cpu_to_le32(256), 254 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 255 .reserved = __cpu_to_le32(0), 256 }, 257 258 /* NB: 50% of src nentries, since tx has 2 frags */ 259 260 /* CE5: unused */ 261 { 262 .pipenum = __cpu_to_le32(5), 263 .pipedir = __cpu_to_le32(PIPEDIR_OUT), 264 .nentries = __cpu_to_le32(32), 265 .nbytes_max = __cpu_to_le32(2048), 266 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 267 .reserved = __cpu_to_le32(0), 268 }, 269 270 /* CE6: Reserved for target autonomous hif_memcpy */ 271 { 272 .pipenum = __cpu_to_le32(6), 273 .pipedir = __cpu_to_le32(PIPEDIR_INOUT), 274 .nentries = __cpu_to_le32(32), 275 .nbytes_max = __cpu_to_le32(4096), 276 .flags = __cpu_to_le32(CE_ATTR_FLAGS), 277 .reserved = __cpu_to_le32(0), 278 }, 279 280 /* CE7 used only by Host */ 281 { 282 .pipenum = __cpu_to_le32(7), 283 .pipedir = __cpu_to_le32(PIPEDIR_INOUT), 284 .nentries = __cpu_to_le32(0), 285 .nbytes_max = __cpu_to_le32(0), 286 .flags = __cpu_to_le32(0), 287 .reserved = __cpu_to_le32(0), 288 }, 289 290 /* CE8 target->host packtlog */ 291 { 292 .pipenum = __cpu_to_le32(8), 293 .pipedir = __cpu_to_le32(PIPEDIR_IN), 294 .nentries = __cpu_to_le32(64), 295 .nbytes_max = __cpu_to_le32(2048), 296 .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), 297 .reserved = __cpu_to_le32(0), 298 }, 299 300 /* CE9 target autonomous qcache memcpy */ 301 { 302 .pipenum = __cpu_to_le32(9), 303 .pipedir = __cpu_to_le32(PIPEDIR_INOUT), 304 .nentries = __cpu_to_le32(32), 305 .nbytes_max = __cpu_to_le32(2048), 306 .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR), 307 .reserved = __cpu_to_le32(0), 308 }, 309 310 /* It not necessary to send target wlan configuration for CE10 & CE11 311 * as these CEs are not actively used in target. 312 */ 313 }; 314 315 /* 316 * Map from service/endpoint to Copy Engine. 317 * This table is derived from the CE_PCI TABLE, above. 318 * It is passed to the Target at startup for use by firmware. 
319 */ 320 static const struct service_to_pipe target_service_to_ce_map_wlan[] = { 321 { 322 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), 323 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 324 __cpu_to_le32(3), 325 }, 326 { 327 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), 328 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 329 __cpu_to_le32(2), 330 }, 331 { 332 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK), 333 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 334 __cpu_to_le32(3), 335 }, 336 { 337 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK), 338 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 339 __cpu_to_le32(2), 340 }, 341 { 342 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE), 343 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 344 __cpu_to_le32(3), 345 }, 346 { 347 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE), 348 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 349 __cpu_to_le32(2), 350 }, 351 { 352 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI), 353 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 354 __cpu_to_le32(3), 355 }, 356 { 357 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI), 358 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 359 __cpu_to_le32(2), 360 }, 361 { 362 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL), 363 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 364 __cpu_to_le32(3), 365 }, 366 { 367 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL), 368 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 369 __cpu_to_le32(2), 370 }, 371 { 372 __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL), 373 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 374 __cpu_to_le32(0), 375 }, 376 { 377 __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL), 378 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 379 __cpu_to_le32(1), 380 }, 381 { /* not used */ 382 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS), 383 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 384 __cpu_to_le32(0), 385 }, 386 { /* not used */ 387 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS), 388 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 389 __cpu_to_le32(1), 390 }, 391 { 392 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG), 393 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ 394 __cpu_to_le32(4), 395 }, 396 { 397 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG), 398 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */ 399 __cpu_to_le32(1), 400 }, 401 402 /* (Additions here) */ 403 404 { /* must be last */ 405 __cpu_to_le32(0), 406 __cpu_to_le32(0), 407 __cpu_to_le32(0), 408 }, 409 }; 410 411 static bool ath10k_pci_is_awake(struct ath10k *ar) 412 { 413 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 414 u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + 415 RTC_STATE_ADDRESS); 416 417 return RTC_STATE_V_GET(val) == RTC_STATE_V_ON; 418 } 419 420 static void __ath10k_pci_wake(struct ath10k *ar) 421 { 422 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 423 424 lockdep_assert_held(&ar_pci->ps_lock); 425 426 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n", 427 ar_pci->ps_wake_refcount, ar_pci->ps_awake); 428 429 iowrite32(PCIE_SOC_WAKE_V_MASK, 430 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + 431 PCIE_SOC_WAKE_ADDRESS); 432 } 433 434 static void __ath10k_pci_sleep(struct ath10k *ar) 435 { 436 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 437 438 lockdep_assert_held(&ar_pci->ps_lock); 439 440 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake 
%d\n", 441 ar_pci->ps_wake_refcount, ar_pci->ps_awake); 442 443 iowrite32(PCIE_SOC_WAKE_RESET, 444 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + 445 PCIE_SOC_WAKE_ADDRESS); 446 ar_pci->ps_awake = false; 447 } 448 449 static int ath10k_pci_wake_wait(struct ath10k *ar) 450 { 451 int tot_delay = 0; 452 int curr_delay = 5; 453 454 while (tot_delay < PCIE_WAKE_TIMEOUT) { 455 if (ath10k_pci_is_awake(ar)) 456 return 0; 457 458 udelay(curr_delay); 459 tot_delay += curr_delay; 460 461 if (curr_delay < 50) 462 curr_delay += 5; 463 } 464 465 return -ETIMEDOUT; 466 } 467 468 static int ath10k_pci_wake(struct ath10k *ar) 469 { 470 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 471 unsigned long flags; 472 int ret = 0; 473 474 spin_lock_irqsave(&ar_pci->ps_lock, flags); 475 476 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n", 477 ar_pci->ps_wake_refcount, ar_pci->ps_awake); 478 479 /* This function can be called very frequently. To avoid excessive 480 * CPU stalls for MMIO reads use a cache var to hold the device state. 481 */ 482 if (!ar_pci->ps_awake) { 483 __ath10k_pci_wake(ar); 484 485 ret = ath10k_pci_wake_wait(ar); 486 if (ret == 0) 487 ar_pci->ps_awake = true; 488 } 489 490 if (ret == 0) { 491 ar_pci->ps_wake_refcount++; 492 WARN_ON(ar_pci->ps_wake_refcount == 0); 493 } 494 495 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); 496 497 return ret; 498 } 499 500 static void ath10k_pci_sleep(struct ath10k *ar) 501 { 502 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 503 unsigned long flags; 504 505 spin_lock_irqsave(&ar_pci->ps_lock, flags); 506 507 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n", 508 ar_pci->ps_wake_refcount, ar_pci->ps_awake); 509 510 if (WARN_ON(ar_pci->ps_wake_refcount == 0)) 511 goto skip; 512 513 ar_pci->ps_wake_refcount--; 514 515 mod_timer(&ar_pci->ps_timer, jiffies + 516 msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC)); 517 518 skip: 519 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); 520 } 521 522 static void ath10k_pci_ps_timer(unsigned long ptr) 523 { 524 struct ath10k *ar = (void *)ptr; 525 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 526 unsigned long flags; 527 528 spin_lock_irqsave(&ar_pci->ps_lock, flags); 529 530 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n", 531 ar_pci->ps_wake_refcount, ar_pci->ps_awake); 532 533 if (ar_pci->ps_wake_refcount > 0) 534 goto skip; 535 536 __ath10k_pci_sleep(ar); 537 538 skip: 539 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); 540 } 541 542 static void ath10k_pci_sleep_sync(struct ath10k *ar) 543 { 544 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 545 unsigned long flags; 546 547 del_timer_sync(&ar_pci->ps_timer); 548 549 spin_lock_irqsave(&ar_pci->ps_lock, flags); 550 WARN_ON(ar_pci->ps_wake_refcount > 0); 551 __ath10k_pci_sleep(ar); 552 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); 553 } 554 555 void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value) 556 { 557 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 558 int ret; 559 560 if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) { 561 ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n", 562 offset, offset + sizeof(value), ar_pci->mem_len); 563 return; 564 } 565 566 ret = ath10k_pci_wake(ar); 567 if (ret) { 568 ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n", 569 value, offset, ret); 570 return; 571 } 572 573 iowrite32(value, ar_pci->mem + offset); 574 ath10k_pci_sleep(ar); 575 } 576 577 u32 
ath10k_pci_read32(struct ath10k *ar, u32 offset) 578 { 579 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 580 u32 val; 581 int ret; 582 583 if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) { 584 ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n", 585 offset, offset + sizeof(val), ar_pci->mem_len); 586 return 0; 587 } 588 589 ret = ath10k_pci_wake(ar); 590 if (ret) { 591 ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n", 592 offset, ret); 593 return 0xffffffff; 594 } 595 596 val = ioread32(ar_pci->mem + offset); 597 ath10k_pci_sleep(ar); 598 599 return val; 600 } 601 602 u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr) 603 { 604 return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr); 605 } 606 607 void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val) 608 { 609 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val); 610 } 611 612 u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr) 613 { 614 return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr); 615 } 616 617 void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val) 618 { 619 ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val); 620 } 621 622 static bool ath10k_pci_irq_pending(struct ath10k *ar) 623 { 624 u32 cause; 625 626 /* Check if the shared legacy irq is for us */ 627 cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 628 PCIE_INTR_CAUSE_ADDRESS); 629 if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL)) 630 return true; 631 632 return false; 633 } 634 635 static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar) 636 { 637 /* IMPORTANT: INTR_CLR register has to be set after 638 * INTR_ENABLE is set to 0, otherwise interrupt can not be 639 * really cleared. */ 640 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, 641 0); 642 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS, 643 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); 644 645 /* IMPORTANT: this extra read transaction is required to 646 * flush the posted write buffer. */ 647 (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 648 PCIE_INTR_ENABLE_ADDRESS); 649 } 650 651 static void ath10k_pci_enable_legacy_irq(struct ath10k *ar) 652 { 653 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + 654 PCIE_INTR_ENABLE_ADDRESS, 655 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); 656 657 /* IMPORTANT: this extra read transaction is required to 658 * flush the posted write buffer. 
*/ 659 (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 660 PCIE_INTR_ENABLE_ADDRESS); 661 } 662 663 static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar) 664 { 665 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 666 667 if (ar_pci->num_msi_intrs > 1) 668 return "msi-x"; 669 670 if (ar_pci->num_msi_intrs == 1) 671 return "msi"; 672 673 return "legacy"; 674 } 675 676 static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe) 677 { 678 struct ath10k *ar = pipe->hif_ce_state; 679 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 680 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; 681 struct sk_buff *skb; 682 dma_addr_t paddr; 683 int ret; 684 685 lockdep_assert_held(&ar_pci->ce_lock); 686 687 skb = dev_alloc_skb(pipe->buf_sz); 688 if (!skb) 689 return -ENOMEM; 690 691 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb"); 692 693 paddr = dma_map_single(ar->dev, skb->data, 694 skb->len + skb_tailroom(skb), 695 DMA_FROM_DEVICE); 696 if (unlikely(dma_mapping_error(ar->dev, paddr))) { 697 ath10k_warn(ar, "failed to dma map pci rx buf\n"); 698 dev_kfree_skb_any(skb); 699 return -EIO; 700 } 701 702 ATH10K_SKB_RXCB(skb)->paddr = paddr; 703 704 ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr); 705 if (ret) { 706 ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret); 707 dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb), 708 DMA_FROM_DEVICE); 709 dev_kfree_skb_any(skb); 710 return ret; 711 } 712 713 return 0; 714 } 715 716 static void __ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe) 717 { 718 struct ath10k *ar = pipe->hif_ce_state; 719 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 720 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; 721 int ret, num; 722 723 lockdep_assert_held(&ar_pci->ce_lock); 724 725 if (pipe->buf_sz == 0) 726 return; 727 728 if (!ce_pipe->dest_ring) 729 return; 730 731 num = __ath10k_ce_rx_num_free_bufs(ce_pipe); 732 while (num--) { 733 ret = __ath10k_pci_rx_post_buf(pipe); 734 if (ret) { 735 ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret); 736 mod_timer(&ar_pci->rx_post_retry, jiffies + 737 ATH10K_PCI_RX_POST_RETRY_MS); 738 break; 739 } 740 } 741 } 742 743 static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe) 744 { 745 struct ath10k *ar = pipe->hif_ce_state; 746 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 747 748 spin_lock_bh(&ar_pci->ce_lock); 749 __ath10k_pci_rx_post_pipe(pipe); 750 spin_unlock_bh(&ar_pci->ce_lock); 751 } 752 753 static void ath10k_pci_rx_post(struct ath10k *ar) 754 { 755 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 756 int i; 757 758 spin_lock_bh(&ar_pci->ce_lock); 759 for (i = 0; i < CE_COUNT; i++) 760 __ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]); 761 spin_unlock_bh(&ar_pci->ce_lock); 762 } 763 764 static void ath10k_pci_rx_replenish_retry(unsigned long ptr) 765 { 766 struct ath10k *ar = (void *)ptr; 767 768 ath10k_pci_rx_post(ar); 769 } 770 771 static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr) 772 { 773 u32 val = 0; 774 775 switch (ar->hw_rev) { 776 case ATH10K_HW_QCA988X: 777 case ATH10K_HW_QCA6174: 778 val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 779 CORE_CTRL_ADDRESS) & 780 0x7ff) << 21; 781 break; 782 case ATH10K_HW_QCA99X0: 783 val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS); 784 break; 785 } 786 787 val |= 0x100000 | (addr & 0xfffff); 788 return val; 789 } 790 791 /* 792 * Diagnostic read/write access is provided for startup/config/debug usage. 
793 * Caller must guarantee proper alignment, when applicable, and single user 794 * at any moment. 795 */ 796 static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, 797 int nbytes) 798 { 799 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 800 int ret = 0; 801 u32 buf; 802 unsigned int completed_nbytes, orig_nbytes, remaining_bytes; 803 unsigned int id; 804 unsigned int flags; 805 struct ath10k_ce_pipe *ce_diag; 806 /* Host buffer address in CE space */ 807 u32 ce_data; 808 dma_addr_t ce_data_base = 0; 809 void *data_buf = NULL; 810 int i; 811 812 spin_lock_bh(&ar_pci->ce_lock); 813 814 ce_diag = ar_pci->ce_diag; 815 816 /* 817 * Allocate a temporary bounce buffer to hold caller's data 818 * to be DMA'ed from Target. This guarantees 819 * 1) 4-byte alignment 820 * 2) Buffer in DMA-able space 821 */ 822 orig_nbytes = nbytes; 823 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, 824 orig_nbytes, 825 &ce_data_base, 826 GFP_ATOMIC); 827 828 if (!data_buf) { 829 ret = -ENOMEM; 830 goto done; 831 } 832 memset(data_buf, 0, orig_nbytes); 833 834 remaining_bytes = orig_nbytes; 835 ce_data = ce_data_base; 836 while (remaining_bytes) { 837 nbytes = min_t(unsigned int, remaining_bytes, 838 DIAG_TRANSFER_LIMIT); 839 840 ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data); 841 if (ret != 0) 842 goto done; 843 844 /* Request CE to send from Target(!) address to Host buffer */ 845 /* 846 * The address supplied by the caller is in the 847 * Target CPU virtual address space. 848 * 849 * In order to use this address with the diagnostic CE, 850 * convert it from Target CPU virtual address space 851 * to CE address space 852 */ 853 address = ath10k_pci_targ_cpu_to_ce_addr(ar, address); 854 855 ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0, 856 0); 857 if (ret) 858 goto done; 859 860 i = 0; 861 while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf, 862 &completed_nbytes, 863 &id) != 0) { 864 mdelay(1); 865 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { 866 ret = -EBUSY; 867 goto done; 868 } 869 } 870 871 if (nbytes != completed_nbytes) { 872 ret = -EIO; 873 goto done; 874 } 875 876 if (buf != (u32)address) { 877 ret = -EIO; 878 goto done; 879 } 880 881 i = 0; 882 while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf, 883 &completed_nbytes, 884 &id, &flags) != 0) { 885 mdelay(1); 886 887 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { 888 ret = -EBUSY; 889 goto done; 890 } 891 } 892 893 if (nbytes != completed_nbytes) { 894 ret = -EIO; 895 goto done; 896 } 897 898 if (buf != ce_data) { 899 ret = -EIO; 900 goto done; 901 } 902 903 remaining_bytes -= nbytes; 904 address += nbytes; 905 ce_data += nbytes; 906 } 907 908 done: 909 if (ret == 0) 910 memcpy(data, data_buf, orig_nbytes); 911 else 912 ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n", 913 address, ret); 914 915 if (data_buf) 916 dma_free_coherent(ar->dev, orig_nbytes, data_buf, 917 ce_data_base); 918 919 spin_unlock_bh(&ar_pci->ce_lock); 920 921 return ret; 922 } 923 924 static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value) 925 { 926 __le32 val = 0; 927 int ret; 928 929 ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val)); 930 *value = __le32_to_cpu(val); 931 932 return ret; 933 } 934 935 static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest, 936 u32 src, u32 len) 937 { 938 u32 host_addr, addr; 939 int ret; 940 941 host_addr = host_interest_item_address(src); 942 943 ret = ath10k_pci_diag_read32(ar, host_addr, &addr); 944 if (ret != 0) { 945 
ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n", 946 src, ret); 947 return ret; 948 } 949 950 ret = ath10k_pci_diag_read_mem(ar, addr, dest, len); 951 if (ret != 0) { 952 ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n", 953 addr, len, ret); 954 return ret; 955 } 956 957 return 0; 958 } 959 960 #define ath10k_pci_diag_read_hi(ar, dest, src, len) \ 961 __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len) 962 963 static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, 964 const void *data, int nbytes) 965 { 966 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 967 int ret = 0; 968 u32 buf; 969 unsigned int completed_nbytes, orig_nbytes, remaining_bytes; 970 unsigned int id; 971 unsigned int flags; 972 struct ath10k_ce_pipe *ce_diag; 973 void *data_buf = NULL; 974 u32 ce_data; /* Host buffer address in CE space */ 975 dma_addr_t ce_data_base = 0; 976 int i; 977 978 spin_lock_bh(&ar_pci->ce_lock); 979 980 ce_diag = ar_pci->ce_diag; 981 982 /* 983 * Allocate a temporary bounce buffer to hold caller's data 984 * to be DMA'ed to Target. This guarantees 985 * 1) 4-byte alignment 986 * 2) Buffer in DMA-able space 987 */ 988 orig_nbytes = nbytes; 989 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, 990 orig_nbytes, 991 &ce_data_base, 992 GFP_ATOMIC); 993 if (!data_buf) { 994 ret = -ENOMEM; 995 goto done; 996 } 997 998 /* Copy caller's data to allocated DMA buf */ 999 memcpy(data_buf, data, orig_nbytes); 1000 1001 /* 1002 * The address supplied by the caller is in the 1003 * Target CPU virtual address space. 1004 * 1005 * In order to use this address with the diagnostic CE, 1006 * convert it from 1007 * Target CPU virtual address space 1008 * to 1009 * CE address space 1010 */ 1011 address = ath10k_pci_targ_cpu_to_ce_addr(ar, address); 1012 1013 remaining_bytes = orig_nbytes; 1014 ce_data = ce_data_base; 1015 while (remaining_bytes) { 1016 /* FIXME: check cast */ 1017 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT); 1018 1019 /* Set up to receive directly into Target(!) address */ 1020 ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address); 1021 if (ret != 0) 1022 goto done; 1023 1024 /* 1025 * Request CE to send caller-supplied data that 1026 * was copied to bounce buffer to Target(!) address. 
1027 */ 1028 ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data, 1029 nbytes, 0, 0); 1030 if (ret != 0) 1031 goto done; 1032 1033 i = 0; 1034 while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf, 1035 &completed_nbytes, 1036 &id) != 0) { 1037 mdelay(1); 1038 1039 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { 1040 ret = -EBUSY; 1041 goto done; 1042 } 1043 } 1044 1045 if (nbytes != completed_nbytes) { 1046 ret = -EIO; 1047 goto done; 1048 } 1049 1050 if (buf != ce_data) { 1051 ret = -EIO; 1052 goto done; 1053 } 1054 1055 i = 0; 1056 while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf, 1057 &completed_nbytes, 1058 &id, &flags) != 0) { 1059 mdelay(1); 1060 1061 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) { 1062 ret = -EBUSY; 1063 goto done; 1064 } 1065 } 1066 1067 if (nbytes != completed_nbytes) { 1068 ret = -EIO; 1069 goto done; 1070 } 1071 1072 if (buf != address) { 1073 ret = -EIO; 1074 goto done; 1075 } 1076 1077 remaining_bytes -= nbytes; 1078 address += nbytes; 1079 ce_data += nbytes; 1080 } 1081 1082 done: 1083 if (data_buf) { 1084 dma_free_coherent(ar->dev, orig_nbytes, data_buf, 1085 ce_data_base); 1086 } 1087 1088 if (ret != 0) 1089 ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n", 1090 address, ret); 1091 1092 spin_unlock_bh(&ar_pci->ce_lock); 1093 1094 return ret; 1095 } 1096 1097 static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value) 1098 { 1099 __le32 val = __cpu_to_le32(value); 1100 1101 return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val)); 1102 } 1103 1104 /* Called by lower (CE) layer when a send to Target completes. */ 1105 static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state) 1106 { 1107 struct ath10k *ar = ce_state->ar; 1108 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1109 struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current; 1110 struct sk_buff_head list; 1111 struct sk_buff *skb; 1112 u32 ce_data; 1113 unsigned int nbytes; 1114 unsigned int transfer_id; 1115 1116 __skb_queue_head_init(&list); 1117 while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data, 1118 &nbytes, &transfer_id) == 0) { 1119 /* no need to call tx completion for NULL pointers */ 1120 if (skb == NULL) 1121 continue; 1122 1123 __skb_queue_tail(&list, skb); 1124 } 1125 1126 while ((skb = __skb_dequeue(&list))) 1127 cb->tx_completion(ar, skb); 1128 } 1129 1130 /* Called by lower (CE) layer when data is received from the Target. 
*/ 1131 static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state) 1132 { 1133 struct ath10k *ar = ce_state->ar; 1134 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1135 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id]; 1136 struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current; 1137 struct sk_buff *skb; 1138 struct sk_buff_head list; 1139 void *transfer_context; 1140 u32 ce_data; 1141 unsigned int nbytes, max_nbytes; 1142 unsigned int transfer_id; 1143 unsigned int flags; 1144 1145 __skb_queue_head_init(&list); 1146 while (ath10k_ce_completed_recv_next(ce_state, &transfer_context, 1147 &ce_data, &nbytes, &transfer_id, 1148 &flags) == 0) { 1149 skb = transfer_context; 1150 max_nbytes = skb->len + skb_tailroom(skb); 1151 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, 1152 max_nbytes, DMA_FROM_DEVICE); 1153 1154 if (unlikely(max_nbytes < nbytes)) { 1155 ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)", 1156 nbytes, max_nbytes); 1157 dev_kfree_skb_any(skb); 1158 continue; 1159 } 1160 1161 skb_put(skb, nbytes); 1162 __skb_queue_tail(&list, skb); 1163 } 1164 1165 while ((skb = __skb_dequeue(&list))) { 1166 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n", 1167 ce_state->id, skb->len); 1168 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ", 1169 skb->data, skb->len); 1170 1171 cb->rx_completion(ar, skb); 1172 } 1173 1174 ath10k_pci_rx_post_pipe(pipe_info); 1175 } 1176 1177 static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, 1178 struct ath10k_hif_sg_item *items, int n_items) 1179 { 1180 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1181 struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id]; 1182 struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl; 1183 struct ath10k_ce_ring *src_ring = ce_pipe->src_ring; 1184 unsigned int nentries_mask; 1185 unsigned int sw_index; 1186 unsigned int write_index; 1187 int err, i = 0; 1188 1189 spin_lock_bh(&ar_pci->ce_lock); 1190 1191 nentries_mask = src_ring->nentries_mask; 1192 sw_index = src_ring->sw_index; 1193 write_index = src_ring->write_index; 1194 1195 if (unlikely(CE_RING_DELTA(nentries_mask, 1196 write_index, sw_index - 1) < n_items)) { 1197 err = -ENOBUFS; 1198 goto err; 1199 } 1200 1201 for (i = 0; i < n_items - 1; i++) { 1202 ath10k_dbg(ar, ATH10K_DBG_PCI, 1203 "pci tx item %d paddr 0x%08x len %d n_items %d\n", 1204 i, items[i].paddr, items[i].len, n_items); 1205 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ", 1206 items[i].vaddr, items[i].len); 1207 1208 err = ath10k_ce_send_nolock(ce_pipe, 1209 items[i].transfer_context, 1210 items[i].paddr, 1211 items[i].len, 1212 items[i].transfer_id, 1213 CE_SEND_FLAG_GATHER); 1214 if (err) 1215 goto err; 1216 } 1217 1218 /* `i` is equal to `n_items -1` after for() */ 1219 1220 ath10k_dbg(ar, ATH10K_DBG_PCI, 1221 "pci tx item %d paddr 0x%08x len %d n_items %d\n", 1222 i, items[i].paddr, items[i].len, n_items); 1223 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ", 1224 items[i].vaddr, items[i].len); 1225 1226 err = ath10k_ce_send_nolock(ce_pipe, 1227 items[i].transfer_context, 1228 items[i].paddr, 1229 items[i].len, 1230 items[i].transfer_id, 1231 0); 1232 if (err) 1233 goto err; 1234 1235 spin_unlock_bh(&ar_pci->ce_lock); 1236 return 0; 1237 1238 err: 1239 for (; i > 0; i--) 1240 __ath10k_ce_send_revert(ce_pipe); 1241 1242 spin_unlock_bh(&ar_pci->ce_lock); 1243 return err; 1244 } 1245 1246 static int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf, 1247 size_t 
buf_len) 1248 { 1249 return ath10k_pci_diag_read_mem(ar, address, buf, buf_len); 1250 } 1251 1252 static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) 1253 { 1254 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1255 1256 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n"); 1257 1258 return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl); 1259 } 1260 1261 static void ath10k_pci_dump_registers(struct ath10k *ar, 1262 struct ath10k_fw_crash_data *crash_data) 1263 { 1264 __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {}; 1265 int i, ret; 1266 1267 lockdep_assert_held(&ar->data_lock); 1268 1269 ret = ath10k_pci_diag_read_hi(ar, ®_dump_values[0], 1270 hi_failure_state, 1271 REG_DUMP_COUNT_QCA988X * sizeof(__le32)); 1272 if (ret) { 1273 ath10k_err(ar, "failed to read firmware dump area: %d\n", ret); 1274 return; 1275 } 1276 1277 BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4); 1278 1279 ath10k_err(ar, "firmware register dump:\n"); 1280 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4) 1281 ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n", 1282 i, 1283 __le32_to_cpu(reg_dump_values[i]), 1284 __le32_to_cpu(reg_dump_values[i + 1]), 1285 __le32_to_cpu(reg_dump_values[i + 2]), 1286 __le32_to_cpu(reg_dump_values[i + 3])); 1287 1288 if (!crash_data) 1289 return; 1290 1291 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++) 1292 crash_data->registers[i] = reg_dump_values[i]; 1293 } 1294 1295 static void ath10k_pci_fw_crashed_dump(struct ath10k *ar) 1296 { 1297 struct ath10k_fw_crash_data *crash_data; 1298 char uuid[50]; 1299 1300 spin_lock_bh(&ar->data_lock); 1301 1302 ar->stats.fw_crash_counter++; 1303 1304 crash_data = ath10k_debug_get_new_fw_crash_data(ar); 1305 1306 if (crash_data) 1307 scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid); 1308 else 1309 scnprintf(uuid, sizeof(uuid), "n/a"); 1310 1311 ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid); 1312 ath10k_print_driver_info(ar); 1313 ath10k_pci_dump_registers(ar, crash_data); 1314 1315 spin_unlock_bh(&ar->data_lock); 1316 1317 queue_work(ar->workqueue, &ar->restart_work); 1318 } 1319 1320 static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, 1321 int force) 1322 { 1323 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n"); 1324 1325 if (!force) { 1326 int resources; 1327 /* 1328 * Decide whether to actually poll for completions, or just 1329 * wait for a later chance. 1330 * If there seem to be plenty of resources left, then just wait 1331 * since checking involves reading a CE register, which is a 1332 * relatively expensive operation. 1333 */ 1334 resources = ath10k_pci_hif_get_free_queue_number(ar, pipe); 1335 1336 /* 1337 * If at least 50% of the total resources are still available, 1338 * don't bother checking again yet. 
1339 */ 1340 if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1)) 1341 return; 1342 } 1343 ath10k_ce_per_engine_service(ar, pipe); 1344 } 1345 1346 static void ath10k_pci_hif_set_callbacks(struct ath10k *ar, 1347 struct ath10k_hif_cb *callbacks) 1348 { 1349 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1350 1351 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif set callbacks\n"); 1352 1353 memcpy(&ar_pci->msg_callbacks_current, callbacks, 1354 sizeof(ar_pci->msg_callbacks_current)); 1355 } 1356 1357 static void ath10k_pci_kill_tasklet(struct ath10k *ar) 1358 { 1359 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1360 int i; 1361 1362 tasklet_kill(&ar_pci->intr_tq); 1363 tasklet_kill(&ar_pci->msi_fw_err); 1364 1365 for (i = 0; i < CE_COUNT; i++) 1366 tasklet_kill(&ar_pci->pipe_info[i].intr); 1367 1368 del_timer_sync(&ar_pci->rx_post_retry); 1369 } 1370 1371 static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, 1372 u16 service_id, u8 *ul_pipe, 1373 u8 *dl_pipe, int *ul_is_polled, 1374 int *dl_is_polled) 1375 { 1376 const struct service_to_pipe *entry; 1377 bool ul_set = false, dl_set = false; 1378 int i; 1379 1380 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n"); 1381 1382 /* polling for received messages not supported */ 1383 *dl_is_polled = 0; 1384 1385 for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) { 1386 entry = &target_service_to_ce_map_wlan[i]; 1387 1388 if (__le32_to_cpu(entry->service_id) != service_id) 1389 continue; 1390 1391 switch (__le32_to_cpu(entry->pipedir)) { 1392 case PIPEDIR_NONE: 1393 break; 1394 case PIPEDIR_IN: 1395 WARN_ON(dl_set); 1396 *dl_pipe = __le32_to_cpu(entry->pipenum); 1397 dl_set = true; 1398 break; 1399 case PIPEDIR_OUT: 1400 WARN_ON(ul_set); 1401 *ul_pipe = __le32_to_cpu(entry->pipenum); 1402 ul_set = true; 1403 break; 1404 case PIPEDIR_INOUT: 1405 WARN_ON(dl_set); 1406 WARN_ON(ul_set); 1407 *dl_pipe = __le32_to_cpu(entry->pipenum); 1408 *ul_pipe = __le32_to_cpu(entry->pipenum); 1409 dl_set = true; 1410 ul_set = true; 1411 break; 1412 } 1413 } 1414 1415 if (WARN_ON(!ul_set || !dl_set)) 1416 return -ENOENT; 1417 1418 *ul_is_polled = 1419 (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0; 1420 1421 return 0; 1422 } 1423 1424 static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, 1425 u8 *ul_pipe, u8 *dl_pipe) 1426 { 1427 int ul_is_polled, dl_is_polled; 1428 1429 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n"); 1430 1431 (void)ath10k_pci_hif_map_service_to_pipe(ar, 1432 ATH10K_HTC_SVC_ID_RSVD_CTRL, 1433 ul_pipe, 1434 dl_pipe, 1435 &ul_is_polled, 1436 &dl_is_polled); 1437 } 1438 1439 static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar) 1440 { 1441 u32 val; 1442 1443 switch (ar->hw_rev) { 1444 case ATH10K_HW_QCA988X: 1445 case ATH10K_HW_QCA6174: 1446 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 1447 CORE_CTRL_ADDRESS); 1448 val &= ~CORE_CTRL_PCIE_REG_31_MASK; 1449 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + 1450 CORE_CTRL_ADDRESS, val); 1451 break; 1452 case ATH10K_HW_QCA99X0: 1453 /* TODO: Find appropriate register configuration for QCA99X0 1454 * to mask irq/MSI. 
1455 */ 1456 break; 1457 } 1458 } 1459 1460 static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar) 1461 { 1462 u32 val; 1463 1464 switch (ar->hw_rev) { 1465 case ATH10K_HW_QCA988X: 1466 case ATH10K_HW_QCA6174: 1467 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 1468 CORE_CTRL_ADDRESS); 1469 val |= CORE_CTRL_PCIE_REG_31_MASK; 1470 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + 1471 CORE_CTRL_ADDRESS, val); 1472 break; 1473 case ATH10K_HW_QCA99X0: 1474 /* TODO: Find appropriate register configuration for QCA99X0 1475 * to unmask irq/MSI. 1476 */ 1477 break; 1478 } 1479 } 1480 1481 static void ath10k_pci_irq_disable(struct ath10k *ar) 1482 { 1483 ath10k_ce_disable_interrupts(ar); 1484 ath10k_pci_disable_and_clear_legacy_irq(ar); 1485 ath10k_pci_irq_msi_fw_mask(ar); 1486 } 1487 1488 static void ath10k_pci_irq_sync(struct ath10k *ar) 1489 { 1490 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1491 int i; 1492 1493 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++) 1494 synchronize_irq(ar_pci->pdev->irq + i); 1495 } 1496 1497 static void ath10k_pci_irq_enable(struct ath10k *ar) 1498 { 1499 ath10k_ce_enable_interrupts(ar); 1500 ath10k_pci_enable_legacy_irq(ar); 1501 ath10k_pci_irq_msi_fw_unmask(ar); 1502 } 1503 1504 static int ath10k_pci_hif_start(struct ath10k *ar) 1505 { 1506 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1507 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n"); 1508 1509 ath10k_pci_irq_enable(ar); 1510 ath10k_pci_rx_post(ar); 1511 1512 pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL, 1513 ar_pci->link_ctl); 1514 1515 return 0; 1516 } 1517 1518 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe) 1519 { 1520 struct ath10k *ar; 1521 struct ath10k_ce_pipe *ce_pipe; 1522 struct ath10k_ce_ring *ce_ring; 1523 struct sk_buff *skb; 1524 int i; 1525 1526 ar = pci_pipe->hif_ce_state; 1527 ce_pipe = pci_pipe->ce_hdl; 1528 ce_ring = ce_pipe->dest_ring; 1529 1530 if (!ce_ring) 1531 return; 1532 1533 if (!pci_pipe->buf_sz) 1534 return; 1535 1536 for (i = 0; i < ce_ring->nentries; i++) { 1537 skb = ce_ring->per_transfer_context[i]; 1538 if (!skb) 1539 continue; 1540 1541 ce_ring->per_transfer_context[i] = NULL; 1542 1543 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, 1544 skb->len + skb_tailroom(skb), 1545 DMA_FROM_DEVICE); 1546 dev_kfree_skb_any(skb); 1547 } 1548 } 1549 1550 static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe) 1551 { 1552 struct ath10k *ar; 1553 struct ath10k_pci *ar_pci; 1554 struct ath10k_ce_pipe *ce_pipe; 1555 struct ath10k_ce_ring *ce_ring; 1556 struct ce_desc *ce_desc; 1557 struct sk_buff *skb; 1558 int i; 1559 1560 ar = pci_pipe->hif_ce_state; 1561 ar_pci = ath10k_pci_priv(ar); 1562 ce_pipe = pci_pipe->ce_hdl; 1563 ce_ring = ce_pipe->src_ring; 1564 1565 if (!ce_ring) 1566 return; 1567 1568 if (!pci_pipe->buf_sz) 1569 return; 1570 1571 ce_desc = ce_ring->shadow_base; 1572 if (WARN_ON(!ce_desc)) 1573 return; 1574 1575 for (i = 0; i < ce_ring->nentries; i++) { 1576 skb = ce_ring->per_transfer_context[i]; 1577 if (!skb) 1578 continue; 1579 1580 ce_ring->per_transfer_context[i] = NULL; 1581 1582 ar_pci->msg_callbacks_current.tx_completion(ar, skb); 1583 } 1584 } 1585 1586 /* 1587 * Cleanup residual buffers for device shutdown: 1588 * buffers that were enqueued for receive 1589 * buffers that were to be sent 1590 * Note: Buffers that had completed but which were 1591 * not yet processed are on a completion queue. They 1592 * are handled when the completion thread shuts down. 
1593 */ 1594 static void ath10k_pci_buffer_cleanup(struct ath10k *ar) 1595 { 1596 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1597 int pipe_num; 1598 1599 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { 1600 struct ath10k_pci_pipe *pipe_info; 1601 1602 pipe_info = &ar_pci->pipe_info[pipe_num]; 1603 ath10k_pci_rx_pipe_cleanup(pipe_info); 1604 ath10k_pci_tx_pipe_cleanup(pipe_info); 1605 } 1606 } 1607 1608 static void ath10k_pci_ce_deinit(struct ath10k *ar) 1609 { 1610 int i; 1611 1612 for (i = 0; i < CE_COUNT; i++) 1613 ath10k_ce_deinit_pipe(ar, i); 1614 } 1615 1616 static void ath10k_pci_flush(struct ath10k *ar) 1617 { 1618 ath10k_pci_kill_tasklet(ar); 1619 ath10k_pci_buffer_cleanup(ar); 1620 } 1621 1622 static void ath10k_pci_hif_stop(struct ath10k *ar) 1623 { 1624 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1625 unsigned long flags; 1626 1627 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n"); 1628 1629 /* Most likely the device has HTT Rx ring configured. The only way to 1630 * prevent the device from accessing (and possible corrupting) host 1631 * memory is to reset the chip now. 1632 * 1633 * There's also no known way of masking MSI interrupts on the device. 1634 * For ranged MSI the CE-related interrupts can be masked. However 1635 * regardless how many MSI interrupts are assigned the first one 1636 * is always used for firmware indications (crashes) and cannot be 1637 * masked. To prevent the device from asserting the interrupt reset it 1638 * before proceeding with cleanup. 1639 */ 1640 ath10k_pci_safe_chip_reset(ar); 1641 1642 ath10k_pci_irq_disable(ar); 1643 ath10k_pci_irq_sync(ar); 1644 ath10k_pci_flush(ar); 1645 1646 spin_lock_irqsave(&ar_pci->ps_lock, flags); 1647 WARN_ON(ar_pci->ps_wake_refcount > 0); 1648 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); 1649 } 1650 1651 static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, 1652 void *req, u32 req_len, 1653 void *resp, u32 *resp_len) 1654 { 1655 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1656 struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG]; 1657 struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST]; 1658 struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl; 1659 struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl; 1660 dma_addr_t req_paddr = 0; 1661 dma_addr_t resp_paddr = 0; 1662 struct bmi_xfer xfer = {}; 1663 void *treq, *tresp = NULL; 1664 int ret = 0; 1665 1666 might_sleep(); 1667 1668 if (resp && !resp_len) 1669 return -EINVAL; 1670 1671 if (resp && resp_len && *resp_len == 0) 1672 return -EINVAL; 1673 1674 treq = kmemdup(req, req_len, GFP_KERNEL); 1675 if (!treq) 1676 return -ENOMEM; 1677 1678 req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE); 1679 ret = dma_mapping_error(ar->dev, req_paddr); 1680 if (ret) { 1681 ret = -EIO; 1682 goto err_dma; 1683 } 1684 1685 if (resp && resp_len) { 1686 tresp = kzalloc(*resp_len, GFP_KERNEL); 1687 if (!tresp) { 1688 ret = -ENOMEM; 1689 goto err_req; 1690 } 1691 1692 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len, 1693 DMA_FROM_DEVICE); 1694 ret = dma_mapping_error(ar->dev, resp_paddr); 1695 if (ret) { 1696 ret = EIO; 1697 goto err_req; 1698 } 1699 1700 xfer.wait_for_resp = true; 1701 xfer.resp_len = 0; 1702 1703 ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr); 1704 } 1705 1706 ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0); 1707 if (ret) 1708 goto err_resp; 1709 1710 ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer); 1711 if (ret) { 1712 u32 unused_buffer; 1713 unsigned int unused_nbytes; 
1714 unsigned int unused_id; 1715 1716 ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer, 1717 &unused_nbytes, &unused_id); 1718 } else { 1719 /* non-zero means we did not time out */ 1720 ret = 0; 1721 } 1722 1723 err_resp: 1724 if (resp) { 1725 u32 unused_buffer; 1726 1727 ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer); 1728 dma_unmap_single(ar->dev, resp_paddr, 1729 *resp_len, DMA_FROM_DEVICE); 1730 } 1731 err_req: 1732 dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE); 1733 1734 if (ret == 0 && resp_len) { 1735 *resp_len = min(*resp_len, xfer.resp_len); 1736 memcpy(resp, tresp, xfer.resp_len); 1737 } 1738 err_dma: 1739 kfree(treq); 1740 kfree(tresp); 1741 1742 return ret; 1743 } 1744 1745 static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state) 1746 { 1747 struct bmi_xfer *xfer; 1748 u32 ce_data; 1749 unsigned int nbytes; 1750 unsigned int transfer_id; 1751 1752 if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data, 1753 &nbytes, &transfer_id)) 1754 return; 1755 1756 xfer->tx_done = true; 1757 } 1758 1759 static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state) 1760 { 1761 struct ath10k *ar = ce_state->ar; 1762 struct bmi_xfer *xfer; 1763 u32 ce_data; 1764 unsigned int nbytes; 1765 unsigned int transfer_id; 1766 unsigned int flags; 1767 1768 if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data, 1769 &nbytes, &transfer_id, &flags)) 1770 return; 1771 1772 if (WARN_ON_ONCE(!xfer)) 1773 return; 1774 1775 if (!xfer->wait_for_resp) { 1776 ath10k_warn(ar, "unexpected: BMI data received; ignoring\n"); 1777 return; 1778 } 1779 1780 xfer->resp_len = nbytes; 1781 xfer->rx_done = true; 1782 } 1783 1784 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe, 1785 struct ath10k_ce_pipe *rx_pipe, 1786 struct bmi_xfer *xfer) 1787 { 1788 unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ; 1789 1790 while (time_before_eq(jiffies, timeout)) { 1791 ath10k_pci_bmi_send_done(tx_pipe); 1792 ath10k_pci_bmi_recv_data(rx_pipe); 1793 1794 if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) 1795 return 0; 1796 1797 schedule(); 1798 } 1799 1800 return -ETIMEDOUT; 1801 } 1802 1803 /* 1804 * Send an interrupt to the device to wake up the Target CPU 1805 * so it has an opportunity to notice any changed state. 
1806 */ 1807 static int ath10k_pci_wake_target_cpu(struct ath10k *ar) 1808 { 1809 u32 addr, val; 1810 1811 addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS; 1812 val = ath10k_pci_read32(ar, addr); 1813 val |= CORE_CTRL_CPU_INTR_MASK; 1814 ath10k_pci_write32(ar, addr, val); 1815 1816 return 0; 1817 } 1818 1819 static int ath10k_pci_get_num_banks(struct ath10k *ar) 1820 { 1821 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1822 1823 switch (ar_pci->pdev->device) { 1824 case QCA988X_2_0_DEVICE_ID: 1825 case QCA99X0_2_0_DEVICE_ID: 1826 return 1; 1827 case QCA6164_2_1_DEVICE_ID: 1828 case QCA6174_2_1_DEVICE_ID: 1829 switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) { 1830 case QCA6174_HW_1_0_CHIP_ID_REV: 1831 case QCA6174_HW_1_1_CHIP_ID_REV: 1832 case QCA6174_HW_2_1_CHIP_ID_REV: 1833 case QCA6174_HW_2_2_CHIP_ID_REV: 1834 return 3; 1835 case QCA6174_HW_1_3_CHIP_ID_REV: 1836 return 2; 1837 case QCA6174_HW_3_0_CHIP_ID_REV: 1838 case QCA6174_HW_3_1_CHIP_ID_REV: 1839 case QCA6174_HW_3_2_CHIP_ID_REV: 1840 return 9; 1841 } 1842 break; 1843 } 1844 1845 ath10k_warn(ar, "unknown number of banks, assuming 1\n"); 1846 return 1; 1847 } 1848 1849 static int ath10k_pci_init_config(struct ath10k *ar) 1850 { 1851 u32 interconnect_targ_addr; 1852 u32 pcie_state_targ_addr = 0; 1853 u32 pipe_cfg_targ_addr = 0; 1854 u32 svc_to_pipe_map = 0; 1855 u32 pcie_config_flags = 0; 1856 u32 ealloc_value; 1857 u32 ealloc_targ_addr; 1858 u32 flag2_value; 1859 u32 flag2_targ_addr; 1860 int ret = 0; 1861 1862 /* Download to Target the CE Config and the service-to-CE map */ 1863 interconnect_targ_addr = 1864 host_interest_item_address(HI_ITEM(hi_interconnect_state)); 1865 1866 /* Supply Target-side CE configuration */ 1867 ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr, 1868 &pcie_state_targ_addr); 1869 if (ret != 0) { 1870 ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret); 1871 return ret; 1872 } 1873 1874 if (pcie_state_targ_addr == 0) { 1875 ret = -EIO; 1876 ath10k_err(ar, "Invalid pcie state addr\n"); 1877 return ret; 1878 } 1879 1880 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr + 1881 offsetof(struct pcie_state, 1882 pipe_cfg_addr)), 1883 &pipe_cfg_targ_addr); 1884 if (ret != 0) { 1885 ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret); 1886 return ret; 1887 } 1888 1889 if (pipe_cfg_targ_addr == 0) { 1890 ret = -EIO; 1891 ath10k_err(ar, "Invalid pipe cfg addr\n"); 1892 return ret; 1893 } 1894 1895 ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr, 1896 target_ce_config_wlan, 1897 sizeof(struct ce_pipe_config) * 1898 NUM_TARGET_CE_CONFIG_WLAN); 1899 1900 if (ret != 0) { 1901 ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret); 1902 return ret; 1903 } 1904 1905 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr + 1906 offsetof(struct pcie_state, 1907 svc_to_pipe_map)), 1908 &svc_to_pipe_map); 1909 if (ret != 0) { 1910 ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret); 1911 return ret; 1912 } 1913 1914 if (svc_to_pipe_map == 0) { 1915 ret = -EIO; 1916 ath10k_err(ar, "Invalid svc_to_pipe map\n"); 1917 return ret; 1918 } 1919 1920 ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map, 1921 target_service_to_ce_map_wlan, 1922 sizeof(target_service_to_ce_map_wlan)); 1923 if (ret != 0) { 1924 ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret); 1925 return ret; 1926 } 1927 1928 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr + 1929 offsetof(struct pcie_state, 1930 config_flags)), 1931 &pcie_config_flags); 1932 if (ret != 0) { 1933 ath10k_err(ar, "Failed to get pcie 
config_flags: %d\n", ret); 1934 return ret; 1935 } 1936 1937 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1; 1938 1939 ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr + 1940 offsetof(struct pcie_state, 1941 config_flags)), 1942 pcie_config_flags); 1943 if (ret != 0) { 1944 ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret); 1945 return ret; 1946 } 1947 1948 /* configure early allocation */ 1949 ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc)); 1950 1951 ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value); 1952 if (ret != 0) { 1953 ath10k_err(ar, "Faile to get early alloc val: %d\n", ret); 1954 return ret; 1955 } 1956 1957 /* first bank is switched to IRAM */ 1958 ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) & 1959 HI_EARLY_ALLOC_MAGIC_MASK); 1960 ealloc_value |= ((ath10k_pci_get_num_banks(ar) << 1961 HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) & 1962 HI_EARLY_ALLOC_IRAM_BANKS_MASK); 1963 1964 ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value); 1965 if (ret != 0) { 1966 ath10k_err(ar, "Failed to set early alloc val: %d\n", ret); 1967 return ret; 1968 } 1969 1970 /* Tell Target to proceed with initialization */ 1971 flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2)); 1972 1973 ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value); 1974 if (ret != 0) { 1975 ath10k_err(ar, "Failed to get option val: %d\n", ret); 1976 return ret; 1977 } 1978 1979 flag2_value |= HI_OPTION_EARLY_CFG_DONE; 1980 1981 ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value); 1982 if (ret != 0) { 1983 ath10k_err(ar, "Failed to set option val: %d\n", ret); 1984 return ret; 1985 } 1986 1987 return 0; 1988 } 1989 1990 static int ath10k_pci_alloc_pipes(struct ath10k *ar) 1991 { 1992 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1993 struct ath10k_pci_pipe *pipe; 1994 int i, ret; 1995 1996 for (i = 0; i < CE_COUNT; i++) { 1997 pipe = &ar_pci->pipe_info[i]; 1998 pipe->ce_hdl = &ar_pci->ce_states[i]; 1999 pipe->pipe_num = i; 2000 pipe->hif_ce_state = ar; 2001 2002 ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i], 2003 ath10k_pci_ce_send_done, 2004 ath10k_pci_ce_recv_data); 2005 if (ret) { 2006 ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n", 2007 i, ret); 2008 return ret; 2009 } 2010 2011 /* Last CE is Diagnostic Window */ 2012 if (i == CE_DIAG_PIPE) { 2013 ar_pci->ce_diag = pipe->ce_hdl; 2014 continue; 2015 } 2016 2017 pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max); 2018 } 2019 2020 return 0; 2021 } 2022 2023 static void ath10k_pci_free_pipes(struct ath10k *ar) 2024 { 2025 int i; 2026 2027 for (i = 0; i < CE_COUNT; i++) 2028 ath10k_ce_free_pipe(ar, i); 2029 } 2030 2031 static int ath10k_pci_init_pipes(struct ath10k *ar) 2032 { 2033 int i, ret; 2034 2035 for (i = 0; i < CE_COUNT; i++) { 2036 ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]); 2037 if (ret) { 2038 ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n", 2039 i, ret); 2040 return ret; 2041 } 2042 } 2043 2044 return 0; 2045 } 2046 2047 static bool ath10k_pci_has_fw_crashed(struct ath10k *ar) 2048 { 2049 return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) & 2050 FW_IND_EVENT_PENDING; 2051 } 2052 2053 static void ath10k_pci_fw_crashed_clear(struct ath10k *ar) 2054 { 2055 u32 val; 2056 2057 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); 2058 val &= ~FW_IND_EVENT_PENDING; 2059 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val); 2060 } 2061 2062 /* this function effectively clears target 
memory controller assert line */ 2063 static void ath10k_pci_warm_reset_si0(struct ath10k *ar) 2064 { 2065 u32 val; 2066 2067 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); 2068 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS, 2069 val | SOC_RESET_CONTROL_SI0_RST_MASK); 2070 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); 2071 2072 msleep(10); 2073 2074 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); 2075 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS, 2076 val & ~SOC_RESET_CONTROL_SI0_RST_MASK); 2077 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); 2078 2079 msleep(10); 2080 } 2081 2082 static void ath10k_pci_warm_reset_cpu(struct ath10k *ar) 2083 { 2084 u32 val; 2085 2086 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0); 2087 2088 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + 2089 SOC_RESET_CONTROL_ADDRESS); 2090 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, 2091 val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK); 2092 } 2093 2094 static void ath10k_pci_warm_reset_ce(struct ath10k *ar) 2095 { 2096 u32 val; 2097 2098 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + 2099 SOC_RESET_CONTROL_ADDRESS); 2100 2101 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, 2102 val | SOC_RESET_CONTROL_CE_RST_MASK); 2103 msleep(10); 2104 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, 2105 val & ~SOC_RESET_CONTROL_CE_RST_MASK); 2106 } 2107 2108 static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar) 2109 { 2110 u32 val; 2111 2112 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + 2113 SOC_LF_TIMER_CONTROL0_ADDRESS); 2114 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + 2115 SOC_LF_TIMER_CONTROL0_ADDRESS, 2116 val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK); 2117 } 2118 2119 static int ath10k_pci_warm_reset(struct ath10k *ar) 2120 { 2121 int ret; 2122 2123 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n"); 2124 2125 spin_lock_bh(&ar->data_lock); 2126 ar->stats.fw_warm_reset_counter++; 2127 spin_unlock_bh(&ar->data_lock); 2128 2129 ath10k_pci_irq_disable(ar); 2130 2131 /* Make sure the target CPU is not doing anything dangerous, e.g. if it 2132 * were to access copy engine while host performs copy engine reset 2133 * then it is possible for the device to confuse pci-e controller to 2134 * the point of bringing host system to a complete stop (i.e. hang). 2135 */ 2136 ath10k_pci_warm_reset_si0(ar); 2137 ath10k_pci_warm_reset_cpu(ar); 2138 ath10k_pci_init_pipes(ar); 2139 ath10k_pci_wait_for_target_init(ar); 2140 2141 ath10k_pci_warm_reset_clear_lf(ar); 2142 ath10k_pci_warm_reset_ce(ar); 2143 ath10k_pci_warm_reset_cpu(ar); 2144 ath10k_pci_init_pipes(ar); 2145 2146 ret = ath10k_pci_wait_for_target_init(ar); 2147 if (ret) { 2148 ath10k_warn(ar, "failed to wait for target init: %d\n", ret); 2149 return ret; 2150 } 2151 2152 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n"); 2153 2154 return 0; 2155 } 2156 2157 static int ath10k_pci_safe_chip_reset(struct ath10k *ar) 2158 { 2159 if (QCA_REV_988X(ar) || QCA_REV_6174(ar)) { 2160 return ath10k_pci_warm_reset(ar); 2161 } else if (QCA_REV_99X0(ar)) { 2162 ath10k_pci_irq_disable(ar); 2163 return ath10k_pci_qca99x0_chip_reset(ar); 2164 } else { 2165 return -ENOTSUPP; 2166 } 2167 } 2168 2169 static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar) 2170 { 2171 int i, ret; 2172 u32 val; 2173 2174 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n"); 2175 2176 /* Some hardware revisions (e.g. 
CUS223v2) have issues with cold reset.
	 * It is thus preferred to use warm reset which is safer but may not be
	 * able to recover the device from all possible fail scenarios.
	 *
	 * Warm reset doesn't always work on the first try so attempt it a few
	 * times before giving up.
	 */
	for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
		ret = ath10k_pci_warm_reset(ar);
		if (ret) {
			ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
				    i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
				    ret);
			continue;
		}

		/* FIXME: Sometimes copy engine doesn't recover after warm
		 * reset. In most cases this needs cold reset. In some of these
		 * cases the device is in such a state that a cold reset may
		 * lock up the host.
		 *
		 * Reading any host interest register via copy engine is
		 * sufficient to verify if device is capable of booting
		 * firmware blob.
		 */
		ret = ath10k_pci_init_pipes(ar);
		if (ret) {
			ath10k_warn(ar, "failed to init copy engine: %d\n",
				    ret);
			continue;
		}

		ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
					     &val);
		if (ret) {
			ath10k_warn(ar, "failed to poke copy engine: %d\n",
				    ret);
			continue;
		}

		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
		return 0;
	}

	if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
		ath10k_warn(ar, "refusing cold reset as requested\n");
		return -EPERM;
	}

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");

	return 0;
}

static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");

	/* FIXME: QCA6174 requires cold + warm reset to work.
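	 * The sequence below therefore performs a cold reset, waits for the
	 * target to initialise and only then applies a warm reset on top.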
*/ 2250 2251 ret = ath10k_pci_cold_reset(ar); 2252 if (ret) { 2253 ath10k_warn(ar, "failed to cold reset: %d\n", ret); 2254 return ret; 2255 } 2256 2257 ret = ath10k_pci_wait_for_target_init(ar); 2258 if (ret) { 2259 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n", 2260 ret); 2261 return ret; 2262 } 2263 2264 ret = ath10k_pci_warm_reset(ar); 2265 if (ret) { 2266 ath10k_warn(ar, "failed to warm reset: %d\n", ret); 2267 return ret; 2268 } 2269 2270 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n"); 2271 2272 return 0; 2273 } 2274 2275 static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar) 2276 { 2277 int ret; 2278 2279 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n"); 2280 2281 ret = ath10k_pci_cold_reset(ar); 2282 if (ret) { 2283 ath10k_warn(ar, "failed to cold reset: %d\n", ret); 2284 return ret; 2285 } 2286 2287 ret = ath10k_pci_wait_for_target_init(ar); 2288 if (ret) { 2289 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n", 2290 ret); 2291 return ret; 2292 } 2293 2294 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n"); 2295 2296 return 0; 2297 } 2298 2299 static int ath10k_pci_chip_reset(struct ath10k *ar) 2300 { 2301 if (QCA_REV_988X(ar)) 2302 return ath10k_pci_qca988x_chip_reset(ar); 2303 else if (QCA_REV_6174(ar)) 2304 return ath10k_pci_qca6174_chip_reset(ar); 2305 else if (QCA_REV_99X0(ar)) 2306 return ath10k_pci_qca99x0_chip_reset(ar); 2307 else 2308 return -ENOTSUPP; 2309 } 2310 2311 static int ath10k_pci_hif_power_up(struct ath10k *ar) 2312 { 2313 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2314 int ret; 2315 2316 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n"); 2317 2318 pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL, 2319 &ar_pci->link_ctl); 2320 pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL, 2321 ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC); 2322 2323 /* 2324 * Bring the target up cleanly. 2325 * 2326 * The target may be in an undefined state with an AUX-powered Target 2327 * and a Host in WoW mode. If the Host crashes, loses power, or is 2328 * restarted (without unloading the driver) then the Target is left 2329 * (aux) powered and running. On a subsequent driver load, the Target 2330 * is in an unexpected state. We try to catch that here in order to 2331 * reset the Target and retry the probe. 
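	 * If the chip reset below fails and the firmware indicates a crash,
	 * the crash indication is cleared and dumped before the error is
	 * propagated.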
*/
	ret = ath10k_pci_chip_reset(ar);
	if (ret) {
		if (ath10k_pci_has_fw_crashed(ar)) {
			ath10k_warn(ar, "firmware crashed during chip reset\n");
			ath10k_pci_fw_crashed_clear(ar);
			ath10k_pci_fw_crashed_dump(ar);
		}

		ath10k_err(ar, "failed to reset chip: %d\n", ret);
		goto err_sleep;
	}

	ret = ath10k_pci_init_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
		goto err_sleep;
	}

	ret = ath10k_pci_init_config(ar);
	if (ret) {
		ath10k_err(ar, "failed to setup init config: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_wake_target_cpu(ar);
	if (ret) {
		ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
		goto err_ce;
	}

	return 0;

err_ce:
	ath10k_pci_ce_deinit(ar);

err_sleep:
	return ret;
}

static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");

	/* Currently hif_power_up effectively performs a reset and hif_stop
	 * resets the chip as well, so there's no point in resetting here.
	 */
}

#ifdef CONFIG_PM

static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
	/* The grace timer can still be counting down and ar->ps_awake be true.
	 * It is known that the device may be asleep after resuming regardless
	 * of the SoC powersave state before suspending. Hence make sure the
	 * device is asleep before proceeding.
	 */
	ath10k_pci_sleep_sync(ar);

	return 0;
}

static int ath10k_pci_hif_resume(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;

	/* Suspend/Resume resets the PCI configuration space, so we have to
	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
	 * from interfering with C3 CPU state. pci_restore_state won't help
	 * here since it only restores the first 64 bytes of the pci config
	 * header.
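	 * Only the RETRY_TIMEOUT byte at config offset 0x41 is cleared below,
	 * and only if it is non-zero; the remaining bytes of the dword read
	 * back from offset 0x40 are written back unchanged.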
2405 */ 2406 pci_read_config_dword(pdev, 0x40, &val); 2407 if ((val & 0x0000ff00) != 0) 2408 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); 2409 2410 return 0; 2411 } 2412 #endif 2413 2414 static const struct ath10k_hif_ops ath10k_pci_hif_ops = { 2415 .tx_sg = ath10k_pci_hif_tx_sg, 2416 .diag_read = ath10k_pci_hif_diag_read, 2417 .diag_write = ath10k_pci_diag_write_mem, 2418 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg, 2419 .start = ath10k_pci_hif_start, 2420 .stop = ath10k_pci_hif_stop, 2421 .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe, 2422 .get_default_pipe = ath10k_pci_hif_get_default_pipe, 2423 .send_complete_check = ath10k_pci_hif_send_complete_check, 2424 .set_callbacks = ath10k_pci_hif_set_callbacks, 2425 .get_free_queue_number = ath10k_pci_hif_get_free_queue_number, 2426 .power_up = ath10k_pci_hif_power_up, 2427 .power_down = ath10k_pci_hif_power_down, 2428 .read32 = ath10k_pci_read32, 2429 .write32 = ath10k_pci_write32, 2430 #ifdef CONFIG_PM 2431 .suspend = ath10k_pci_hif_suspend, 2432 .resume = ath10k_pci_hif_resume, 2433 #endif 2434 }; 2435 2436 static void ath10k_pci_ce_tasklet(unsigned long ptr) 2437 { 2438 struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr; 2439 struct ath10k_pci *ar_pci = pipe->ar_pci; 2440 2441 ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num); 2442 } 2443 2444 static void ath10k_msi_err_tasklet(unsigned long data) 2445 { 2446 struct ath10k *ar = (struct ath10k *)data; 2447 2448 if (!ath10k_pci_has_fw_crashed(ar)) { 2449 ath10k_warn(ar, "received unsolicited fw crash interrupt\n"); 2450 return; 2451 } 2452 2453 ath10k_pci_irq_disable(ar); 2454 ath10k_pci_fw_crashed_clear(ar); 2455 ath10k_pci_fw_crashed_dump(ar); 2456 } 2457 2458 /* 2459 * Handler for a per-engine interrupt on a PARTICULAR CE. 2460 * This is used in cases where each CE has a private MSI interrupt. 2461 */ 2462 static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg) 2463 { 2464 struct ath10k *ar = arg; 2465 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2466 int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL; 2467 2468 if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) { 2469 ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq, 2470 ce_id); 2471 return IRQ_HANDLED; 2472 } 2473 2474 /* 2475 * NOTE: We are able to derive ce_id from irq because we 2476 * use a one-to-one mapping for CE's 0..5. 2477 * CE's 6 & 7 do not use interrupts at all. 2478 * 2479 * This mapping must be kept in sync with the mapping 2480 * used by firmware. 2481 */ 2482 tasklet_schedule(&ar_pci->pipe_info[ce_id].intr); 2483 return IRQ_HANDLED; 2484 } 2485 2486 static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg) 2487 { 2488 struct ath10k *ar = arg; 2489 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2490 2491 tasklet_schedule(&ar_pci->msi_fw_err); 2492 return IRQ_HANDLED; 2493 } 2494 2495 /* 2496 * Top-level interrupt handler for all PCI interrupts from a Target. 2497 * When a block of MSI interrupts is allocated, this top-level handler 2498 * is not used; instead, we directly call the correct sub-handler. 
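 * In legacy interrupt mode the handler first checks whether the device
 * actually has an interrupt pending and masks legacy interrupts before
 * scheduling the tasklet; the tasklet re-enables them once the copy
 * engines have been serviced.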
*/
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs == 0) {
		if (!ath10k_pci_irq_pending(ar))
			return IRQ_NONE;

		ath10k_pci_disable_and_clear_legacy_irq(ar);
	}

	tasklet_schedule(&ar_pci->intr_tq);

	return IRQ_HANDLED;
}

static void ath10k_pci_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ath10k_pci_has_fw_crashed(ar)) {
		ath10k_pci_irq_disable(ar);
		ath10k_pci_fw_crashed_clear(ar);
		ath10k_pci_fw_crashed_dump(ar);
		return;
	}

	ath10k_ce_per_engine_service_any(ar);

	/* Re-enable legacy irq that was disabled in the irq handler */
	if (ar_pci->num_msi_intrs == 0)
		ath10k_pci_enable_legacy_irq(ar);
}

static int ath10k_pci_request_irq_msix(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret, i;

	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
			  ath10k_pci_msi_fw_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
			    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
		return ret;
	}

	for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
		ret = request_irq(ar_pci->pdev->irq + i,
				  ath10k_pci_per_engine_handler,
				  IRQF_SHARED, "ath10k_pci", ar);
		if (ret) {
			ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
				    ar_pci->pdev->irq + i, ret);

			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
				free_irq(ar_pci->pdev->irq + i, ar);

			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
			return ret;
		}
	}

	return 0;
}

static int ath10k_pci_request_irq_msi(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_request_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->num_msi_intrs) {
	case 0:
		return ath10k_pci_request_irq_legacy(ar);
	case 1:
		return ath10k_pci_request_irq_msi(ar);
	case MSI_NUM_REQUEST:
		return ath10k_pci_request_irq_msix(ar);
	}

	ath10k_warn(ar, "unknown irq configuration upon request\n");
	return -EINVAL;
}

static void ath10k_pci_free_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	/* There's at least one interrupt regardless of whether it's legacy INTR
	 * or MSI or MSI-X */
	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		free_irq(ar_pci->pdev->irq + i,
ar); 2629 } 2630 2631 static void ath10k_pci_init_irq_tasklets(struct ath10k *ar) 2632 { 2633 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2634 int i; 2635 2636 tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar); 2637 tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet, 2638 (unsigned long)ar); 2639 2640 for (i = 0; i < CE_COUNT; i++) { 2641 ar_pci->pipe_info[i].ar_pci = ar_pci; 2642 tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet, 2643 (unsigned long)&ar_pci->pipe_info[i]); 2644 } 2645 } 2646 2647 static int ath10k_pci_init_irq(struct ath10k *ar) 2648 { 2649 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2650 int ret; 2651 2652 ath10k_pci_init_irq_tasklets(ar); 2653 2654 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO) 2655 ath10k_info(ar, "limiting irq mode to: %d\n", 2656 ath10k_pci_irq_mode); 2657 2658 /* Try MSI-X */ 2659 if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) { 2660 ar_pci->num_msi_intrs = MSI_NUM_REQUEST; 2661 ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs, 2662 ar_pci->num_msi_intrs); 2663 if (ret > 0) 2664 return 0; 2665 2666 /* fall-through */ 2667 } 2668 2669 /* Try MSI */ 2670 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) { 2671 ar_pci->num_msi_intrs = 1; 2672 ret = pci_enable_msi(ar_pci->pdev); 2673 if (ret == 0) 2674 return 0; 2675 2676 /* fall-through */ 2677 } 2678 2679 /* Try legacy irq 2680 * 2681 * A potential race occurs here: The CORE_BASE write 2682 * depends on target correctly decoding AXI address but 2683 * host won't know when target writes BAR to CORE_CTRL. 2684 * This write might get lost if target has NOT written BAR. 2685 * For now, fix the race by repeating the write in below 2686 * synchronization checking. */ 2687 ar_pci->num_msi_intrs = 0; 2688 2689 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, 2690 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); 2691 2692 return 0; 2693 } 2694 2695 static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar) 2696 { 2697 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, 2698 0); 2699 } 2700 2701 static int ath10k_pci_deinit_irq(struct ath10k *ar) 2702 { 2703 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2704 2705 switch (ar_pci->num_msi_intrs) { 2706 case 0: 2707 ath10k_pci_deinit_irq_legacy(ar); 2708 return 0; 2709 case 1: 2710 /* fall-through */ 2711 case MSI_NUM_REQUEST: 2712 pci_disable_msi(ar_pci->pdev); 2713 return 0; 2714 default: 2715 pci_disable_msi(ar_pci->pdev); 2716 } 2717 2718 ath10k_warn(ar, "unknown irq configuration upon deinit\n"); 2719 return -EINVAL; 2720 } 2721 2722 static int ath10k_pci_wait_for_target_init(struct ath10k *ar) 2723 { 2724 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2725 unsigned long timeout; 2726 u32 val; 2727 2728 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n"); 2729 2730 timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT); 2731 2732 do { 2733 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); 2734 2735 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n", 2736 val); 2737 2738 /* target should never return this */ 2739 if (val == 0xffffffff) 2740 continue; 2741 2742 /* the device has crashed so don't bother trying anymore */ 2743 if (val & FW_IND_EVENT_PENDING) 2744 break; 2745 2746 if (val & FW_IND_INITIALIZED) 2747 break; 2748 2749 if (ar_pci->num_msi_intrs == 0) 2750 /* Fix potential race by repeating CORE_BASE writes */ 2751 ath10k_pci_enable_legacy_irq(ar); 2752 2753 mdelay(10); 2754 } while 
(time_before(jiffies, timeout)); 2755 2756 ath10k_pci_disable_and_clear_legacy_irq(ar); 2757 ath10k_pci_irq_msi_fw_mask(ar); 2758 2759 if (val == 0xffffffff) { 2760 ath10k_err(ar, "failed to read device register, device is gone\n"); 2761 return -EIO; 2762 } 2763 2764 if (val & FW_IND_EVENT_PENDING) { 2765 ath10k_warn(ar, "device has crashed during init\n"); 2766 return -ECOMM; 2767 } 2768 2769 if (!(val & FW_IND_INITIALIZED)) { 2770 ath10k_err(ar, "failed to receive initialized event from target: %08x\n", 2771 val); 2772 return -ETIMEDOUT; 2773 } 2774 2775 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n"); 2776 return 0; 2777 } 2778 2779 static int ath10k_pci_cold_reset(struct ath10k *ar) 2780 { 2781 u32 val; 2782 2783 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n"); 2784 2785 spin_lock_bh(&ar->data_lock); 2786 2787 ar->stats.fw_cold_reset_counter++; 2788 2789 spin_unlock_bh(&ar->data_lock); 2790 2791 /* Put Target, including PCIe, into RESET. */ 2792 val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS); 2793 val |= 1; 2794 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val); 2795 2796 /* After writing into SOC_GLOBAL_RESET to put device into 2797 * reset and pulling out of reset pcie may not be stable 2798 * for any immediate pcie register access and cause bus error, 2799 * add delay before any pcie access request to fix this issue. 2800 */ 2801 msleep(20); 2802 2803 /* Pull Target, including PCIe, out of RESET. */ 2804 val &= ~1; 2805 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val); 2806 2807 msleep(20); 2808 2809 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n"); 2810 2811 return 0; 2812 } 2813 2814 static int ath10k_pci_claim(struct ath10k *ar) 2815 { 2816 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2817 struct pci_dev *pdev = ar_pci->pdev; 2818 int ret; 2819 2820 pci_set_drvdata(pdev, ar); 2821 2822 ret = pci_enable_device(pdev); 2823 if (ret) { 2824 ath10k_err(ar, "failed to enable pci device: %d\n", ret); 2825 return ret; 2826 } 2827 2828 ret = pci_request_region(pdev, BAR_NUM, "ath"); 2829 if (ret) { 2830 ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM, 2831 ret); 2832 goto err_device; 2833 } 2834 2835 /* Target expects 32 bit DMA. Enforce it. */ 2836 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2837 if (ret) { 2838 ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret); 2839 goto err_region; 2840 } 2841 2842 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 2843 if (ret) { 2844 ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n", 2845 ret); 2846 goto err_region; 2847 } 2848 2849 pci_set_master(pdev); 2850 2851 /* Arrange for access to Target SoC registers. 
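	 * The BAR selected by BAR_NUM is iomapped below; the mapping and its
	 * length are kept in ar_pci->mem and ar_pci->mem_len for later
	 * register access.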
*/ 2852 ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM); 2853 ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0); 2854 if (!ar_pci->mem) { 2855 ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM); 2856 ret = -EIO; 2857 goto err_master; 2858 } 2859 2860 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem); 2861 return 0; 2862 2863 err_master: 2864 pci_clear_master(pdev); 2865 2866 err_region: 2867 pci_release_region(pdev, BAR_NUM); 2868 2869 err_device: 2870 pci_disable_device(pdev); 2871 2872 return ret; 2873 } 2874 2875 static void ath10k_pci_release(struct ath10k *ar) 2876 { 2877 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2878 struct pci_dev *pdev = ar_pci->pdev; 2879 2880 pci_iounmap(pdev, ar_pci->mem); 2881 pci_release_region(pdev, BAR_NUM); 2882 pci_clear_master(pdev); 2883 pci_disable_device(pdev); 2884 } 2885 2886 static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id) 2887 { 2888 const struct ath10k_pci_supp_chip *supp_chip; 2889 int i; 2890 u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV); 2891 2892 for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) { 2893 supp_chip = &ath10k_pci_supp_chips[i]; 2894 2895 if (supp_chip->dev_id == dev_id && 2896 supp_chip->rev_id == rev_id) 2897 return true; 2898 } 2899 2900 return false; 2901 } 2902 2903 static int ath10k_pci_probe(struct pci_dev *pdev, 2904 const struct pci_device_id *pci_dev) 2905 { 2906 int ret = 0; 2907 struct ath10k *ar; 2908 struct ath10k_pci *ar_pci; 2909 enum ath10k_hw_rev hw_rev; 2910 u32 chip_id; 2911 2912 switch (pci_dev->device) { 2913 case QCA988X_2_0_DEVICE_ID: 2914 hw_rev = ATH10K_HW_QCA988X; 2915 break; 2916 case QCA6164_2_1_DEVICE_ID: 2917 case QCA6174_2_1_DEVICE_ID: 2918 hw_rev = ATH10K_HW_QCA6174; 2919 break; 2920 case QCA99X0_2_0_DEVICE_ID: 2921 hw_rev = ATH10K_HW_QCA99X0; 2922 break; 2923 default: 2924 WARN_ON(1); 2925 return -ENOTSUPP; 2926 } 2927 2928 ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI, 2929 hw_rev, &ath10k_pci_hif_ops); 2930 if (!ar) { 2931 dev_err(&pdev->dev, "failed to allocate core\n"); 2932 return -ENOMEM; 2933 } 2934 2935 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci probe\n"); 2936 2937 ar_pci = ath10k_pci_priv(ar); 2938 ar_pci->pdev = pdev; 2939 ar_pci->dev = &pdev->dev; 2940 ar_pci->ar = ar; 2941 ar->dev_id = pci_dev->device; 2942 2943 if (pdev->subsystem_vendor || pdev->subsystem_device) 2944 scnprintf(ar->spec_board_id, sizeof(ar->spec_board_id), 2945 "%04x:%04x:%04x:%04x", 2946 pdev->vendor, pdev->device, 2947 pdev->subsystem_vendor, pdev->subsystem_device); 2948 2949 spin_lock_init(&ar_pci->ce_lock); 2950 spin_lock_init(&ar_pci->ps_lock); 2951 2952 setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 2953 (unsigned long)ar); 2954 setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer, 2955 (unsigned long)ar); 2956 2957 ret = ath10k_pci_claim(ar); 2958 if (ret) { 2959 ath10k_err(ar, "failed to claim device: %d\n", ret); 2960 goto err_core_destroy; 2961 } 2962 2963 ret = ath10k_pci_alloc_pipes(ar); 2964 if (ret) { 2965 ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", 2966 ret); 2967 goto err_sleep; 2968 } 2969 2970 ath10k_pci_ce_deinit(ar); 2971 ath10k_pci_irq_disable(ar); 2972 2973 ret = ath10k_pci_init_irq(ar); 2974 if (ret) { 2975 ath10k_err(ar, "failed to init irqs: %d\n", ret); 2976 goto err_free_pipes; 2977 } 2978 2979 ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n", 2980 ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs, 2981 ath10k_pci_irq_mode, ath10k_pci_reset_mode); 2982 2983 ret = 
ath10k_pci_request_irq(ar); 2984 if (ret) { 2985 ath10k_warn(ar, "failed to request irqs: %d\n", ret); 2986 goto err_deinit_irq; 2987 } 2988 2989 ret = ath10k_pci_chip_reset(ar); 2990 if (ret) { 2991 ath10k_err(ar, "failed to reset chip: %d\n", ret); 2992 goto err_free_irq; 2993 } 2994 2995 chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS); 2996 if (chip_id == 0xffffffff) { 2997 ath10k_err(ar, "failed to get chip id\n"); 2998 goto err_free_irq; 2999 } 3000 3001 if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) { 3002 ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n", 3003 pdev->device, chip_id); 3004 goto err_free_irq; 3005 } 3006 3007 ret = ath10k_core_register(ar, chip_id); 3008 if (ret) { 3009 ath10k_err(ar, "failed to register driver core: %d\n", ret); 3010 goto err_free_irq; 3011 } 3012 3013 return 0; 3014 3015 err_free_irq: 3016 ath10k_pci_free_irq(ar); 3017 ath10k_pci_kill_tasklet(ar); 3018 3019 err_deinit_irq: 3020 ath10k_pci_deinit_irq(ar); 3021 3022 err_free_pipes: 3023 ath10k_pci_free_pipes(ar); 3024 3025 err_sleep: 3026 ath10k_pci_sleep_sync(ar); 3027 ath10k_pci_release(ar); 3028 3029 err_core_destroy: 3030 ath10k_core_destroy(ar); 3031 3032 return ret; 3033 } 3034 3035 static void ath10k_pci_remove(struct pci_dev *pdev) 3036 { 3037 struct ath10k *ar = pci_get_drvdata(pdev); 3038 struct ath10k_pci *ar_pci; 3039 3040 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n"); 3041 3042 if (!ar) 3043 return; 3044 3045 ar_pci = ath10k_pci_priv(ar); 3046 3047 if (!ar_pci) 3048 return; 3049 3050 ath10k_core_unregister(ar); 3051 ath10k_pci_free_irq(ar); 3052 ath10k_pci_kill_tasklet(ar); 3053 ath10k_pci_deinit_irq(ar); 3054 ath10k_pci_ce_deinit(ar); 3055 ath10k_pci_free_pipes(ar); 3056 ath10k_pci_sleep_sync(ar); 3057 ath10k_pci_release(ar); 3058 ath10k_core_destroy(ar); 3059 } 3060 3061 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table); 3062 3063 static struct pci_driver ath10k_pci_driver = { 3064 .name = "ath10k_pci", 3065 .id_table = ath10k_pci_id_table, 3066 .probe = ath10k_pci_probe, 3067 .remove = ath10k_pci_remove, 3068 }; 3069 3070 static int __init ath10k_pci_init(void) 3071 { 3072 int ret; 3073 3074 ret = pci_register_driver(&ath10k_pci_driver); 3075 if (ret) 3076 printk(KERN_ERR "failed to register ath10k pci driver: %d\n", 3077 ret); 3078 3079 return ret; 3080 } 3081 module_init(ath10k_pci_init); 3082 3083 static void __exit ath10k_pci_exit(void) 3084 { 3085 pci_unregister_driver(&ath10k_pci_driver); 3086 } 3087 3088 module_exit(ath10k_pci_exit); 3089 3090 MODULE_AUTHOR("Qualcomm Atheros"); 3091 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices"); 3092 MODULE_LICENSE("Dual BSD/GPL"); 3093 3094 /* QCA988x 2.0 firmware files */ 3095 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE); 3096 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE); 3097 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE); 3098 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE); 3099 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE); 3100 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE); 3101 3102 /* QCA6174 2.1 firmware files */ 3103 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE); 3104 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE); 3105 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE); 3106 3107 /* QCA6174 3.1 firmware files */ 3108 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE); 3109 
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE); 3110 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE); 3111