/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"
#include "coredump.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_reset_mode {
	ATH10K_PCI_RESET_AUTO = 0,
	ATH10K_PCI_RESET_WARM_ONLY = 1,
};

static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");

/* how long to wait for the target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3

/* Maximum number of bytes that can be handled atomically by
 * diag read and write.
 */
#define ATH10K_DIAG_TRANSFER_LIMIT 0x5000

#define QCA99X0_PCIE_BAR0_START_REG 0x81030
#define QCA99X0_CPU_MEM_ADDR_REG 0x4d00c
#define QCA99X0_CPU_MEM_DATA_REG 0x4d010

static const struct pci_device_id ath10k_pci_id_table[] = {
	/* PCI-E QCA988X V2 (Ubiquiti branded) */
	{ PCI_VDEVICE(UBIQUITI, QCA988X_2_0_DEVICE_ID_UBNT) },

	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
	{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
	{ PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
	{ PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) }, /* PCI-E QCA9888 V2 */
	{ PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */
	{ PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
	{ PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */
	{0}
};

static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
	/* QCA988X pre 2.0 chips are not supported because they need some nasty
	 * hacks. ath10k doesn't have them and these devices crash horribly
	 * because of that.
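	 * Only the device ID / chip revision pairs listed below are treated
	 * as supported.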
83 */ 84 { QCA988X_2_0_DEVICE_ID_UBNT, QCA988X_HW_2_0_CHIP_ID_REV }, 85 { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV }, 86 87 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV }, 88 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV }, 89 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV }, 90 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV }, 91 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV }, 92 93 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV }, 94 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV }, 95 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV }, 96 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV }, 97 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV }, 98 99 { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV }, 100 101 { QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV }, 102 103 { QCA9888_2_0_DEVICE_ID, QCA9888_HW_2_0_CHIP_ID_REV }, 104 105 { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV }, 106 { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV }, 107 108 { QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV }, 109 }; 110 111 static void ath10k_pci_buffer_cleanup(struct ath10k *ar); 112 static int ath10k_pci_cold_reset(struct ath10k *ar); 113 static int ath10k_pci_safe_chip_reset(struct ath10k *ar); 114 static int ath10k_pci_init_irq(struct ath10k *ar); 115 static int ath10k_pci_deinit_irq(struct ath10k *ar); 116 static int ath10k_pci_request_irq(struct ath10k *ar); 117 static void ath10k_pci_free_irq(struct ath10k *ar); 118 static int ath10k_pci_bmi_wait(struct ath10k *ar, 119 struct ath10k_ce_pipe *tx_pipe, 120 struct ath10k_ce_pipe *rx_pipe, 121 struct bmi_xfer *xfer); 122 static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar); 123 static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state); 124 static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state); 125 static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state); 126 static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state); 127 static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state); 128 static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state); 129 130 static struct ce_attr host_ce_config_wlan[] = { 131 /* CE0: host->target HTC control and raw streams */ 132 { 133 .flags = CE_ATTR_FLAGS, 134 .src_nentries = 16, 135 .src_sz_max = 256, 136 .dest_nentries = 0, 137 .send_cb = ath10k_pci_htc_tx_cb, 138 }, 139 140 /* CE1: target->host HTT + HTC control */ 141 { 142 .flags = CE_ATTR_FLAGS, 143 .src_nentries = 0, 144 .src_sz_max = 2048, 145 .dest_nentries = 512, 146 .recv_cb = ath10k_pci_htt_htc_rx_cb, 147 }, 148 149 /* CE2: target->host WMI */ 150 { 151 .flags = CE_ATTR_FLAGS, 152 .src_nentries = 0, 153 .src_sz_max = 2048, 154 .dest_nentries = 128, 155 .recv_cb = ath10k_pci_htc_rx_cb, 156 }, 157 158 /* CE3: host->target WMI */ 159 { 160 .flags = CE_ATTR_FLAGS, 161 .src_nentries = 32, 162 .src_sz_max = 2048, 163 .dest_nentries = 0, 164 .send_cb = ath10k_pci_htc_tx_cb, 165 }, 166 167 /* CE4: host->target HTT */ 168 { 169 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 170 .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES, 171 .src_sz_max = 256, 172 .dest_nentries = 0, 173 .send_cb = ath10k_pci_htt_tx_cb, 174 }, 175 176 /* CE5: target->host HTT (HIF->HTT) */ 177 { 178 .flags = CE_ATTR_FLAGS, 179 .src_nentries = 0, 180 .src_sz_max = 512, 181 .dest_nentries = 512, 182 .recv_cb = ath10k_pci_htt_rx_cb, 183 }, 184 185 /* CE6: target autonomous hif_memcpy */ 186 { 187 .flags = CE_ATTR_FLAGS, 188 .src_nentries = 0, 189 .src_sz_max = 0, 190 
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_POLL,
		.src_nentries = 2,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
		.dest_nentries = 2,
	},

	/* CE8: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
		.recv_cb = ath10k_pci_pktlog_rx_cb,
	},

	/* CE9 target autonomous qcache memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE10: target autonomous hif memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE11: target autonomous hif memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};

/* Target firmware's Copy Engine configuration. */
static struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = __cpu_to_le32(0),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = __cpu_to_le32(1),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = __cpu_to_le32(2),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = __cpu_to_le32(3),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE4: host->target HTT */
	{
		.pipenum = __cpu_to_le32(4),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(256),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* NB: 50% of src nentries, since tx has 2 frags */

	/* CE5: target->host HTT (HIF->HTT) */
	{
		.pipenum = __cpu_to_le32(5),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(512),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = __cpu_to_le32(6),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(4096),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE7 used only by Host */
	{
		.pipenum = __cpu_to_le32(7),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(0),
		.nbytes_max = __cpu_to_le32(0),
		.flags = __cpu_to_le32(0),
		.reserved = __cpu_to_le32(0),
	},

	/* CE8: target->host pktlog */
	{
		.pipenum = __cpu_to_le32(8),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE9 target autonomous qcache memcpy */
	{
		.pipenum = __cpu_to_le32(9),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* It is not necessary to send the target a wlan configuration for
	 * CE10 & CE11 as these CEs are not actively used in the target.
	 */
};

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(4),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(5),
	},

	/* (Additions here) */

	{ /* must be last */
		__cpu_to_le32(0),
		__cpu_to_le32(0),
		__cpu_to_le32(0),
	},
};

static bool ath10k_pci_is_awake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
			   RTC_STATE_ADDRESS);
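	/* The target counts as awake only when the RTC_STATE register's V
	 * field reads RTC_STATE_V_ON.
	 */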
445 446 return RTC_STATE_V_GET(val) == RTC_STATE_V_ON; 447 } 448 449 static void __ath10k_pci_wake(struct ath10k *ar) 450 { 451 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 452 453 lockdep_assert_held(&ar_pci->ps_lock); 454 455 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n", 456 ar_pci->ps_wake_refcount, ar_pci->ps_awake); 457 458 iowrite32(PCIE_SOC_WAKE_V_MASK, 459 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + 460 PCIE_SOC_WAKE_ADDRESS); 461 } 462 463 static void __ath10k_pci_sleep(struct ath10k *ar) 464 { 465 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 466 467 lockdep_assert_held(&ar_pci->ps_lock); 468 469 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n", 470 ar_pci->ps_wake_refcount, ar_pci->ps_awake); 471 472 iowrite32(PCIE_SOC_WAKE_RESET, 473 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + 474 PCIE_SOC_WAKE_ADDRESS); 475 ar_pci->ps_awake = false; 476 } 477 478 static int ath10k_pci_wake_wait(struct ath10k *ar) 479 { 480 int tot_delay = 0; 481 int curr_delay = 5; 482 483 while (tot_delay < PCIE_WAKE_TIMEOUT) { 484 if (ath10k_pci_is_awake(ar)) { 485 if (tot_delay > PCIE_WAKE_LATE_US) 486 ath10k_warn(ar, "device wakeup took %d ms which is unusually long, otherwise it works normally.\n", 487 tot_delay / 1000); 488 return 0; 489 } 490 491 udelay(curr_delay); 492 tot_delay += curr_delay; 493 494 if (curr_delay < 50) 495 curr_delay += 5; 496 } 497 498 return -ETIMEDOUT; 499 } 500 501 static int ath10k_pci_force_wake(struct ath10k *ar) 502 { 503 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 504 unsigned long flags; 505 int ret = 0; 506 507 if (ar_pci->pci_ps) 508 return ret; 509 510 spin_lock_irqsave(&ar_pci->ps_lock, flags); 511 512 if (!ar_pci->ps_awake) { 513 iowrite32(PCIE_SOC_WAKE_V_MASK, 514 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + 515 PCIE_SOC_WAKE_ADDRESS); 516 517 ret = ath10k_pci_wake_wait(ar); 518 if (ret == 0) 519 ar_pci->ps_awake = true; 520 } 521 522 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); 523 524 return ret; 525 } 526 527 static void ath10k_pci_force_sleep(struct ath10k *ar) 528 { 529 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 530 unsigned long flags; 531 532 spin_lock_irqsave(&ar_pci->ps_lock, flags); 533 534 iowrite32(PCIE_SOC_WAKE_RESET, 535 ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + 536 PCIE_SOC_WAKE_ADDRESS); 537 ar_pci->ps_awake = false; 538 539 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); 540 } 541 542 static int ath10k_pci_wake(struct ath10k *ar) 543 { 544 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 545 unsigned long flags; 546 int ret = 0; 547 548 if (ar_pci->pci_ps == 0) 549 return ret; 550 551 spin_lock_irqsave(&ar_pci->ps_lock, flags); 552 553 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n", 554 ar_pci->ps_wake_refcount, ar_pci->ps_awake); 555 556 /* This function can be called very frequently. To avoid excessive 557 * CPU stalls for MMIO reads use a cache var to hold the device state. 
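	 * ath10k_pci_wake() and ath10k_pci_sleep() must stay balanced: the
	 * refcount taken here is dropped in ath10k_pci_sleep(), and the chip
	 * is only put back to sleep from the ps_timer once ps_wake_refcount
	 * has reached zero.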
558 */ 559 if (!ar_pci->ps_awake) { 560 __ath10k_pci_wake(ar); 561 562 ret = ath10k_pci_wake_wait(ar); 563 if (ret == 0) 564 ar_pci->ps_awake = true; 565 } 566 567 if (ret == 0) { 568 ar_pci->ps_wake_refcount++; 569 WARN_ON(ar_pci->ps_wake_refcount == 0); 570 } 571 572 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); 573 574 return ret; 575 } 576 577 static void ath10k_pci_sleep(struct ath10k *ar) 578 { 579 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 580 unsigned long flags; 581 582 if (ar_pci->pci_ps == 0) 583 return; 584 585 spin_lock_irqsave(&ar_pci->ps_lock, flags); 586 587 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n", 588 ar_pci->ps_wake_refcount, ar_pci->ps_awake); 589 590 if (WARN_ON(ar_pci->ps_wake_refcount == 0)) 591 goto skip; 592 593 ar_pci->ps_wake_refcount--; 594 595 mod_timer(&ar_pci->ps_timer, jiffies + 596 msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC)); 597 598 skip: 599 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); 600 } 601 602 static void ath10k_pci_ps_timer(struct timer_list *t) 603 { 604 struct ath10k_pci *ar_pci = from_timer(ar_pci, t, ps_timer); 605 struct ath10k *ar = ar_pci->ar; 606 unsigned long flags; 607 608 spin_lock_irqsave(&ar_pci->ps_lock, flags); 609 610 ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n", 611 ar_pci->ps_wake_refcount, ar_pci->ps_awake); 612 613 if (ar_pci->ps_wake_refcount > 0) 614 goto skip; 615 616 __ath10k_pci_sleep(ar); 617 618 skip: 619 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); 620 } 621 622 static void ath10k_pci_sleep_sync(struct ath10k *ar) 623 { 624 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 625 unsigned long flags; 626 627 if (ar_pci->pci_ps == 0) { 628 ath10k_pci_force_sleep(ar); 629 return; 630 } 631 632 del_timer_sync(&ar_pci->ps_timer); 633 634 spin_lock_irqsave(&ar_pci->ps_lock, flags); 635 WARN_ON(ar_pci->ps_wake_refcount > 0); 636 __ath10k_pci_sleep(ar); 637 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); 638 } 639 640 static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value) 641 { 642 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 643 int ret; 644 645 if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) { 646 ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n", 647 offset, offset + sizeof(value), ar_pci->mem_len); 648 return; 649 } 650 651 ret = ath10k_pci_wake(ar); 652 if (ret) { 653 ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n", 654 value, offset, ret); 655 return; 656 } 657 658 iowrite32(value, ar_pci->mem + offset); 659 ath10k_pci_sleep(ar); 660 } 661 662 static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset) 663 { 664 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 665 u32 val; 666 int ret; 667 668 if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) { 669 ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n", 670 offset, offset + sizeof(val), ar_pci->mem_len); 671 return 0; 672 } 673 674 ret = ath10k_pci_wake(ar); 675 if (ret) { 676 ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n", 677 offset, ret); 678 return 0xffffffff; 679 } 680 681 val = ioread32(ar_pci->mem + offset); 682 ath10k_pci_sleep(ar); 683 684 return val; 685 } 686 687 inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value) 688 { 689 struct ath10k_ce *ce = ath10k_ce_priv(ar); 690 691 ce->bus_ops->write32(ar, offset, value); 692 } 693 694 inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) 695 { 
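	/* Like ath10k_pci_write32() above, reads are routed through
	 * ce->bus_ops so that other bus glue (e.g. the AHB driver) can
	 * provide its own register accessors while sharing this code.
	 */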
696 struct ath10k_ce *ce = ath10k_ce_priv(ar); 697 698 return ce->bus_ops->read32(ar, offset); 699 } 700 701 u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr) 702 { 703 return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr); 704 } 705 706 void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val) 707 { 708 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val); 709 } 710 711 u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr) 712 { 713 return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr); 714 } 715 716 void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val) 717 { 718 ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val); 719 } 720 721 bool ath10k_pci_irq_pending(struct ath10k *ar) 722 { 723 u32 cause; 724 725 /* Check if the shared legacy irq is for us */ 726 cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 727 PCIE_INTR_CAUSE_ADDRESS); 728 if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL)) 729 return true; 730 731 return false; 732 } 733 734 void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar) 735 { 736 /* IMPORTANT: INTR_CLR register has to be set after 737 * INTR_ENABLE is set to 0, otherwise interrupt can not be 738 * really cleared. 739 */ 740 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, 741 0); 742 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS, 743 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); 744 745 /* IMPORTANT: this extra read transaction is required to 746 * flush the posted write buffer. 747 */ 748 (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 749 PCIE_INTR_ENABLE_ADDRESS); 750 } 751 752 void ath10k_pci_enable_legacy_irq(struct ath10k *ar) 753 { 754 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + 755 PCIE_INTR_ENABLE_ADDRESS, 756 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); 757 758 /* IMPORTANT: this extra read transaction is required to 759 * flush the posted write buffer. 
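	 * PCIe memory writes are posted; the dummy read forces the enable
	 * write to actually reach the device before this function returns.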
760 */ 761 (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 762 PCIE_INTR_ENABLE_ADDRESS); 763 } 764 765 static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar) 766 { 767 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 768 769 if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI) 770 return "msi"; 771 772 return "legacy"; 773 } 774 775 static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe) 776 { 777 struct ath10k *ar = pipe->hif_ce_state; 778 struct ath10k_ce *ce = ath10k_ce_priv(ar); 779 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; 780 struct sk_buff *skb; 781 dma_addr_t paddr; 782 int ret; 783 784 skb = dev_alloc_skb(pipe->buf_sz); 785 if (!skb) 786 return -ENOMEM; 787 788 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb"); 789 790 paddr = dma_map_single(ar->dev, skb->data, 791 skb->len + skb_tailroom(skb), 792 DMA_FROM_DEVICE); 793 if (unlikely(dma_mapping_error(ar->dev, paddr))) { 794 ath10k_warn(ar, "failed to dma map pci rx buf\n"); 795 dev_kfree_skb_any(skb); 796 return -EIO; 797 } 798 799 ATH10K_SKB_RXCB(skb)->paddr = paddr; 800 801 spin_lock_bh(&ce->ce_lock); 802 ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr); 803 spin_unlock_bh(&ce->ce_lock); 804 if (ret) { 805 dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb), 806 DMA_FROM_DEVICE); 807 dev_kfree_skb_any(skb); 808 return ret; 809 } 810 811 return 0; 812 } 813 814 static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe) 815 { 816 struct ath10k *ar = pipe->hif_ce_state; 817 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 818 struct ath10k_ce *ce = ath10k_ce_priv(ar); 819 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; 820 int ret, num; 821 822 if (pipe->buf_sz == 0) 823 return; 824 825 if (!ce_pipe->dest_ring) 826 return; 827 828 spin_lock_bh(&ce->ce_lock); 829 num = __ath10k_ce_rx_num_free_bufs(ce_pipe); 830 spin_unlock_bh(&ce->ce_lock); 831 832 while (num >= 0) { 833 ret = __ath10k_pci_rx_post_buf(pipe); 834 if (ret) { 835 if (ret == -ENOSPC) 836 break; 837 ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret); 838 mod_timer(&ar_pci->rx_post_retry, jiffies + 839 ATH10K_PCI_RX_POST_RETRY_MS); 840 break; 841 } 842 num--; 843 } 844 } 845 846 void ath10k_pci_rx_post(struct ath10k *ar) 847 { 848 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 849 int i; 850 851 for (i = 0; i < CE_COUNT; i++) 852 ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]); 853 } 854 855 void ath10k_pci_rx_replenish_retry(struct timer_list *t) 856 { 857 struct ath10k_pci *ar_pci = from_timer(ar_pci, t, rx_post_retry); 858 struct ath10k *ar = ar_pci->ar; 859 860 ath10k_pci_rx_post(ar); 861 } 862 863 static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr) 864 { 865 u32 val = 0, region = addr & 0xfffff; 866 867 val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS) 868 & 0x7ff) << 21; 869 val |= 0x100000 | region; 870 return val; 871 } 872 873 /* Refactor from ath10k_pci_qca988x_targ_cpu_to_ce_addr. 874 * Support to access target space below 1M for qca6174 and qca9377. 875 * If target space is below 1M, the bit[20] of converted CE addr is 0. 876 * Otherwise bit[20] of converted CE addr is 1. 877 */ 878 static u32 ath10k_pci_qca6174_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr) 879 { 880 u32 val = 0, region = addr & 0xfffff; 881 882 val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS) 883 & 0x7ff) << 21; 884 val |= ((addr >= 0x100000) ? 
0x100000 : 0) | region; 885 return val; 886 } 887 888 static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr) 889 { 890 u32 val = 0, region = addr & 0xfffff; 891 892 val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS); 893 val |= 0x100000 | region; 894 return val; 895 } 896 897 static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr) 898 { 899 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 900 901 if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr)) 902 return -ENOTSUPP; 903 904 return ar_pci->targ_cpu_to_ce_addr(ar, addr); 905 } 906 907 /* 908 * Diagnostic read/write access is provided for startup/config/debug usage. 909 * Caller must guarantee proper alignment, when applicable, and single user 910 * at any moment. 911 */ 912 static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, 913 int nbytes) 914 { 915 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 916 struct ath10k_ce *ce = ath10k_ce_priv(ar); 917 int ret = 0; 918 u32 *buf; 919 unsigned int completed_nbytes, alloc_nbytes, remaining_bytes; 920 struct ath10k_ce_pipe *ce_diag; 921 /* Host buffer address in CE space */ 922 u32 ce_data; 923 dma_addr_t ce_data_base = 0; 924 void *data_buf = NULL; 925 int i; 926 927 spin_lock_bh(&ce->ce_lock); 928 929 ce_diag = ar_pci->ce_diag; 930 931 /* 932 * Allocate a temporary bounce buffer to hold caller's data 933 * to be DMA'ed from Target. This guarantees 934 * 1) 4-byte alignment 935 * 2) Buffer in DMA-able space 936 */ 937 alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT); 938 939 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, alloc_nbytes, 940 &ce_data_base, 941 GFP_ATOMIC); 942 943 if (!data_buf) { 944 ret = -ENOMEM; 945 goto done; 946 } 947 948 /* The address supplied by the caller is in the 949 * Target CPU virtual address space. 950 * 951 * In order to use this address with the diagnostic CE, 952 * convert it from Target CPU virtual address space 953 * to CE address space 954 */ 955 address = ath10k_pci_targ_cpu_to_ce_addr(ar, address); 956 957 remaining_bytes = nbytes; 958 ce_data = ce_data_base; 959 while (remaining_bytes) { 960 nbytes = min_t(unsigned int, remaining_bytes, 961 DIAG_TRANSFER_LIMIT); 962 963 ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &ce_data, ce_data); 964 if (ret != 0) 965 goto done; 966 967 /* Request CE to send from Target(!) 
address to Host buffer */ 968 ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0, 969 0); 970 if (ret) 971 goto done; 972 973 i = 0; 974 while (ath10k_ce_completed_send_next_nolock(ce_diag, 975 NULL) != 0) { 976 udelay(DIAG_ACCESS_CE_WAIT_US); 977 i += DIAG_ACCESS_CE_WAIT_US; 978 979 if (i > DIAG_ACCESS_CE_TIMEOUT_US) { 980 ret = -EBUSY; 981 goto done; 982 } 983 } 984 985 i = 0; 986 while (ath10k_ce_completed_recv_next_nolock(ce_diag, 987 (void **)&buf, 988 &completed_nbytes) 989 != 0) { 990 udelay(DIAG_ACCESS_CE_WAIT_US); 991 i += DIAG_ACCESS_CE_WAIT_US; 992 993 if (i > DIAG_ACCESS_CE_TIMEOUT_US) { 994 ret = -EBUSY; 995 goto done; 996 } 997 } 998 999 if (nbytes != completed_nbytes) { 1000 ret = -EIO; 1001 goto done; 1002 } 1003 1004 if (*buf != ce_data) { 1005 ret = -EIO; 1006 goto done; 1007 } 1008 1009 remaining_bytes -= nbytes; 1010 memcpy(data, data_buf, nbytes); 1011 1012 address += nbytes; 1013 data += nbytes; 1014 } 1015 1016 done: 1017 1018 if (data_buf) 1019 dma_free_coherent(ar->dev, alloc_nbytes, data_buf, 1020 ce_data_base); 1021 1022 spin_unlock_bh(&ce->ce_lock); 1023 1024 return ret; 1025 } 1026 1027 static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value) 1028 { 1029 __le32 val = 0; 1030 int ret; 1031 1032 ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val)); 1033 *value = __le32_to_cpu(val); 1034 1035 return ret; 1036 } 1037 1038 static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest, 1039 u32 src, u32 len) 1040 { 1041 u32 host_addr, addr; 1042 int ret; 1043 1044 host_addr = host_interest_item_address(src); 1045 1046 ret = ath10k_pci_diag_read32(ar, host_addr, &addr); 1047 if (ret != 0) { 1048 ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n", 1049 src, ret); 1050 return ret; 1051 } 1052 1053 ret = ath10k_pci_diag_read_mem(ar, addr, dest, len); 1054 if (ret != 0) { 1055 ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n", 1056 addr, len, ret); 1057 return ret; 1058 } 1059 1060 return 0; 1061 } 1062 1063 #define ath10k_pci_diag_read_hi(ar, dest, src, len) \ 1064 __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len) 1065 1066 int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, 1067 const void *data, int nbytes) 1068 { 1069 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1070 struct ath10k_ce *ce = ath10k_ce_priv(ar); 1071 int ret = 0; 1072 u32 *buf; 1073 unsigned int completed_nbytes, alloc_nbytes, remaining_bytes; 1074 struct ath10k_ce_pipe *ce_diag; 1075 void *data_buf = NULL; 1076 dma_addr_t ce_data_base = 0; 1077 int i; 1078 1079 spin_lock_bh(&ce->ce_lock); 1080 1081 ce_diag = ar_pci->ce_diag; 1082 1083 /* 1084 * Allocate a temporary bounce buffer to hold caller's data 1085 * to be DMA'ed to Target. This guarantees 1086 * 1) 4-byte alignment 1087 * 2) Buffer in DMA-able space 1088 */ 1089 alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT); 1090 1091 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, 1092 alloc_nbytes, 1093 &ce_data_base, 1094 GFP_ATOMIC); 1095 if (!data_buf) { 1096 ret = -ENOMEM; 1097 goto done; 1098 } 1099 1100 /* 1101 * The address supplied by the caller is in the 1102 * Target CPU virtual address space. 
1103 * 1104 * In order to use this address with the diagnostic CE, 1105 * convert it from 1106 * Target CPU virtual address space 1107 * to 1108 * CE address space 1109 */ 1110 address = ath10k_pci_targ_cpu_to_ce_addr(ar, address); 1111 1112 remaining_bytes = nbytes; 1113 while (remaining_bytes) { 1114 /* FIXME: check cast */ 1115 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT); 1116 1117 /* Copy caller's data to allocated DMA buf */ 1118 memcpy(data_buf, data, nbytes); 1119 1120 /* Set up to receive directly into Target(!) address */ 1121 ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &address, address); 1122 if (ret != 0) 1123 goto done; 1124 1125 /* 1126 * Request CE to send caller-supplied data that 1127 * was copied to bounce buffer to Target(!) address. 1128 */ 1129 ret = ath10k_ce_send_nolock(ce_diag, NULL, ce_data_base, 1130 nbytes, 0, 0); 1131 if (ret != 0) 1132 goto done; 1133 1134 i = 0; 1135 while (ath10k_ce_completed_send_next_nolock(ce_diag, 1136 NULL) != 0) { 1137 udelay(DIAG_ACCESS_CE_WAIT_US); 1138 i += DIAG_ACCESS_CE_WAIT_US; 1139 1140 if (i > DIAG_ACCESS_CE_TIMEOUT_US) { 1141 ret = -EBUSY; 1142 goto done; 1143 } 1144 } 1145 1146 i = 0; 1147 while (ath10k_ce_completed_recv_next_nolock(ce_diag, 1148 (void **)&buf, 1149 &completed_nbytes) 1150 != 0) { 1151 udelay(DIAG_ACCESS_CE_WAIT_US); 1152 i += DIAG_ACCESS_CE_WAIT_US; 1153 1154 if (i > DIAG_ACCESS_CE_TIMEOUT_US) { 1155 ret = -EBUSY; 1156 goto done; 1157 } 1158 } 1159 1160 if (nbytes != completed_nbytes) { 1161 ret = -EIO; 1162 goto done; 1163 } 1164 1165 if (*buf != address) { 1166 ret = -EIO; 1167 goto done; 1168 } 1169 1170 remaining_bytes -= nbytes; 1171 address += nbytes; 1172 data += nbytes; 1173 } 1174 1175 done: 1176 if (data_buf) { 1177 dma_free_coherent(ar->dev, alloc_nbytes, data_buf, 1178 ce_data_base); 1179 } 1180 1181 if (ret != 0) 1182 ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n", 1183 address, ret); 1184 1185 spin_unlock_bh(&ce->ce_lock); 1186 1187 return ret; 1188 } 1189 1190 static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value) 1191 { 1192 __le32 val = __cpu_to_le32(value); 1193 1194 return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val)); 1195 } 1196 1197 /* Called by lower (CE) layer when a send to Target completes. 
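 * Completed sk_buffs are first collected on a local list and only then
 * passed one by one to ath10k_htc_tx_completion_handler().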
 */
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff_head list;
	struct sk_buff *skb;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (skb == NULL)
			continue;

		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list)))
		ath10k_htc_tx_completion_handler(ar, skb);
}

static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
				     void (*callback)(struct ath10k *ar,
						      struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &nbytes) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
				skb->data, skb->len);

		callback(ar, skb);
	}

	ath10k_pci_rx_post_pipe(pipe_info);
}

static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
					 void (*callback)(struct ath10k *ar,
							  struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes, nentries;
	int orig_len;

	/* No need to acquire ce_lock for CE5, since this is the only place CE5
	 * is processed other than init and deinit. Before releasing CE5
	 * buffers, interrupts are disabled. Thus CE5 access is serialized.
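	 * The buffers are not unmapped here; they are only synced for the
	 * CPU, handed to the callback, then reset and synced back for the
	 * device so the same ring slots can be reused via the write index
	 * update at the end of this function.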
1277 */ 1278 __skb_queue_head_init(&list); 1279 while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context, 1280 &nbytes) == 0) { 1281 skb = transfer_context; 1282 max_nbytes = skb->len + skb_tailroom(skb); 1283 1284 if (unlikely(max_nbytes < nbytes)) { 1285 ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)", 1286 nbytes, max_nbytes); 1287 continue; 1288 } 1289 1290 dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, 1291 max_nbytes, DMA_FROM_DEVICE); 1292 skb_put(skb, nbytes); 1293 __skb_queue_tail(&list, skb); 1294 } 1295 1296 nentries = skb_queue_len(&list); 1297 while ((skb = __skb_dequeue(&list))) { 1298 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n", 1299 ce_state->id, skb->len); 1300 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ", 1301 skb->data, skb->len); 1302 1303 orig_len = skb->len; 1304 callback(ar, skb); 1305 skb_push(skb, orig_len - skb->len); 1306 skb_reset_tail_pointer(skb); 1307 skb_trim(skb, 0); 1308 1309 /*let device gain the buffer again*/ 1310 dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, 1311 skb->len + skb_tailroom(skb), 1312 DMA_FROM_DEVICE); 1313 } 1314 ath10k_ce_rx_update_write_idx(ce_pipe, nentries); 1315 } 1316 1317 /* Called by lower (CE) layer when data is received from the Target. */ 1318 static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state) 1319 { 1320 ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); 1321 } 1322 1323 static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state) 1324 { 1325 /* CE4 polling needs to be done whenever CE pipe which transports 1326 * HTT Rx (target->host) is processed. 1327 */ 1328 ath10k_ce_per_engine_service(ce_state->ar, 4); 1329 1330 ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); 1331 } 1332 1333 /* Called by lower (CE) layer when data is received from the Target. 1334 * Only 10.4 firmware uses separate CE to transfer pktlog data. 1335 */ 1336 static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state) 1337 { 1338 ath10k_pci_process_rx_cb(ce_state, 1339 ath10k_htt_rx_pktlog_completion_handler); 1340 } 1341 1342 /* Called by lower (CE) layer when a send to HTT Target completes. */ 1343 static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state) 1344 { 1345 struct ath10k *ar = ce_state->ar; 1346 struct sk_buff *skb; 1347 1348 while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) { 1349 /* no need to call tx completion for NULL pointers */ 1350 if (!skb) 1351 continue; 1352 1353 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr, 1354 skb->len, DMA_TO_DEVICE); 1355 ath10k_htt_hif_tx_complete(ar, skb); 1356 } 1357 } 1358 1359 static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb) 1360 { 1361 skb_pull(skb, sizeof(struct ath10k_htc_hdr)); 1362 ath10k_htt_t2h_msg_handler(ar, skb); 1363 } 1364 1365 /* Called by lower (CE) layer when HTT data is received from the Target. */ 1366 static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state) 1367 { 1368 /* CE4 polling needs to be done whenever CE pipe which transports 1369 * HTT Rx (target->host) is processed. 
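	 * CE4 has its interrupt disabled (CE_ATTR_DIS_INTR in
	 * host_ce_config_wlan), so its send completions must be reaped by
	 * polling; that is why CE4 is serviced whenever HTT Rx traffic is
	 * handled.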
1370 */ 1371 ath10k_ce_per_engine_service(ce_state->ar, 4); 1372 1373 ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver); 1374 } 1375 1376 int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, 1377 struct ath10k_hif_sg_item *items, int n_items) 1378 { 1379 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1380 struct ath10k_ce *ce = ath10k_ce_priv(ar); 1381 struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id]; 1382 struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl; 1383 struct ath10k_ce_ring *src_ring = ce_pipe->src_ring; 1384 unsigned int nentries_mask; 1385 unsigned int sw_index; 1386 unsigned int write_index; 1387 int err, i = 0; 1388 1389 spin_lock_bh(&ce->ce_lock); 1390 1391 nentries_mask = src_ring->nentries_mask; 1392 sw_index = src_ring->sw_index; 1393 write_index = src_ring->write_index; 1394 1395 if (unlikely(CE_RING_DELTA(nentries_mask, 1396 write_index, sw_index - 1) < n_items)) { 1397 err = -ENOBUFS; 1398 goto err; 1399 } 1400 1401 for (i = 0; i < n_items - 1; i++) { 1402 ath10k_dbg(ar, ATH10K_DBG_PCI, 1403 "pci tx item %d paddr %pad len %d n_items %d\n", 1404 i, &items[i].paddr, items[i].len, n_items); 1405 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ", 1406 items[i].vaddr, items[i].len); 1407 1408 err = ath10k_ce_send_nolock(ce_pipe, 1409 items[i].transfer_context, 1410 items[i].paddr, 1411 items[i].len, 1412 items[i].transfer_id, 1413 CE_SEND_FLAG_GATHER); 1414 if (err) 1415 goto err; 1416 } 1417 1418 /* `i` is equal to `n_items -1` after for() */ 1419 1420 ath10k_dbg(ar, ATH10K_DBG_PCI, 1421 "pci tx item %d paddr %pad len %d n_items %d\n", 1422 i, &items[i].paddr, items[i].len, n_items); 1423 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ", 1424 items[i].vaddr, items[i].len); 1425 1426 err = ath10k_ce_send_nolock(ce_pipe, 1427 items[i].transfer_context, 1428 items[i].paddr, 1429 items[i].len, 1430 items[i].transfer_id, 1431 0); 1432 if (err) 1433 goto err; 1434 1435 spin_unlock_bh(&ce->ce_lock); 1436 return 0; 1437 1438 err: 1439 for (; i > 0; i--) 1440 __ath10k_ce_send_revert(ce_pipe); 1441 1442 spin_unlock_bh(&ce->ce_lock); 1443 return err; 1444 } 1445 1446 int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf, 1447 size_t buf_len) 1448 { 1449 return ath10k_pci_diag_read_mem(ar, address, buf, buf_len); 1450 } 1451 1452 u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) 1453 { 1454 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1455 1456 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n"); 1457 1458 return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl); 1459 } 1460 1461 static void ath10k_pci_dump_registers(struct ath10k *ar, 1462 struct ath10k_fw_crash_data *crash_data) 1463 { 1464 __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {}; 1465 int i, ret; 1466 1467 lockdep_assert_held(&ar->data_lock); 1468 1469 ret = ath10k_pci_diag_read_hi(ar, ®_dump_values[0], 1470 hi_failure_state, 1471 REG_DUMP_COUNT_QCA988X * sizeof(__le32)); 1472 if (ret) { 1473 ath10k_err(ar, "failed to read firmware dump area: %d\n", ret); 1474 return; 1475 } 1476 1477 BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4); 1478 1479 ath10k_err(ar, "firmware register dump:\n"); 1480 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4) 1481 ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n", 1482 i, 1483 __le32_to_cpu(reg_dump_values[i]), 1484 __le32_to_cpu(reg_dump_values[i + 1]), 1485 __le32_to_cpu(reg_dump_values[i + 2]), 1486 __le32_to_cpu(reg_dump_values[i + 3])); 1487 1488 if 
(!crash_data) 1489 return; 1490 1491 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++) 1492 crash_data->registers[i] = reg_dump_values[i]; 1493 } 1494 1495 static int ath10k_pci_dump_memory_section(struct ath10k *ar, 1496 const struct ath10k_mem_region *mem_region, 1497 u8 *buf, size_t buf_len) 1498 { 1499 const struct ath10k_mem_section *cur_section, *next_section; 1500 unsigned int count, section_size, skip_size; 1501 int ret, i, j; 1502 1503 if (!mem_region || !buf) 1504 return 0; 1505 1506 cur_section = &mem_region->section_table.sections[0]; 1507 1508 if (mem_region->start > cur_section->start) { 1509 ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n", 1510 mem_region->start, cur_section->start); 1511 return 0; 1512 } 1513 1514 skip_size = cur_section->start - mem_region->start; 1515 1516 /* fill the gap between the first register section and register 1517 * start address 1518 */ 1519 for (i = 0; i < skip_size; i++) { 1520 *buf = ATH10K_MAGIC_NOT_COPIED; 1521 buf++; 1522 } 1523 1524 count = 0; 1525 1526 for (i = 0; cur_section != NULL; i++) { 1527 section_size = cur_section->end - cur_section->start; 1528 1529 if (section_size <= 0) { 1530 ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n", 1531 cur_section->start, 1532 cur_section->end); 1533 break; 1534 } 1535 1536 if ((i + 1) == mem_region->section_table.size) { 1537 /* last section */ 1538 next_section = NULL; 1539 skip_size = 0; 1540 } else { 1541 next_section = cur_section + 1; 1542 1543 if (cur_section->end > next_section->start) { 1544 ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n", 1545 next_section->start, 1546 cur_section->end); 1547 break; 1548 } 1549 1550 skip_size = next_section->start - cur_section->end; 1551 } 1552 1553 if (buf_len < (skip_size + section_size)) { 1554 ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len); 1555 break; 1556 } 1557 1558 buf_len -= skip_size + section_size; 1559 1560 /* read section to dest memory */ 1561 ret = ath10k_pci_diag_read_mem(ar, cur_section->start, 1562 buf, section_size); 1563 if (ret) { 1564 ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n", 1565 cur_section->start, ret); 1566 break; 1567 } 1568 1569 buf += section_size; 1570 count += section_size; 1571 1572 /* fill in the gap between this section and the next */ 1573 for (j = 0; j < skip_size; j++) { 1574 *buf = ATH10K_MAGIC_NOT_COPIED; 1575 buf++; 1576 } 1577 1578 count += skip_size; 1579 1580 if (!next_section) 1581 /* this was the last section */ 1582 break; 1583 1584 cur_section = next_section; 1585 } 1586 1587 return count; 1588 } 1589 1590 static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config) 1591 { 1592 u32 val; 1593 1594 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + 1595 FW_RAM_CONFIG_ADDRESS, config); 1596 1597 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 1598 FW_RAM_CONFIG_ADDRESS); 1599 if (val != config) { 1600 ath10k_warn(ar, "failed to set RAM config from 0x%x to 0x%x\n", 1601 val, config); 1602 return -EIO; 1603 } 1604 1605 return 0; 1606 } 1607 1608 /* if an error happened returns < 0, otherwise the length */ 1609 static int ath10k_pci_dump_memory_sram(struct ath10k *ar, 1610 const struct ath10k_mem_region *region, 1611 u8 *buf) 1612 { 1613 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1614 u32 base_addr, i; 1615 1616 base_addr = ioread32(ar_pci->mem + QCA99X0_PCIE_BAR0_START_REG); 1617 base_addr += region->start; 1618 1619 for (i = 0; i < region->len; i += 
4) {
		iowrite32(base_addr + i, ar_pci->mem + QCA99X0_CPU_MEM_ADDR_REG);
		*(u32 *)(buf + i) = ioread32(ar_pci->mem + QCA99X0_CPU_MEM_DATA_REG);
	}

	return region->len;
}

/* if an error happened returns < 0, otherwise the length */
static int ath10k_pci_dump_memory_reg(struct ath10k *ar,
				      const struct ath10k_mem_region *region,
				      u8 *buf)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 i;

	for (i = 0; i < region->len; i += 4)
		*(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i);

	return region->len;
}

/* if an error happened returns < 0, otherwise the length */
static int ath10k_pci_dump_memory_generic(struct ath10k *ar,
					  const struct ath10k_mem_region *current_region,
					  u8 *buf)
{
	int ret;

	if (current_region->section_table.size > 0)
		/* Copy each section individually. */
		return ath10k_pci_dump_memory_section(ar,
						      current_region,
						      buf,
						      current_region->len);

	/* No individual memory sections defined so we can
	 * copy the entire memory region.
	 */
	ret = ath10k_pci_diag_read_mem(ar,
				       current_region->start,
				       buf,
				       current_region->len);
	if (ret) {
		ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
			    current_region->name, ret);
		return ret;
	}

	return current_region->len;
}

static void ath10k_pci_dump_memory(struct ath10k *ar,
				   struct ath10k_fw_crash_data *crash_data)
{
	const struct ath10k_hw_mem_layout *mem_layout;
	const struct ath10k_mem_region *current_region;
	struct ath10k_dump_ram_data_hdr *hdr;
	u32 count, shift;
	size_t buf_len;
	int ret, i;
	u8 *buf;

	lockdep_assert_held(&ar->data_lock);

	if (!crash_data)
		return;

	mem_layout = ath10k_coredump_get_mem_layout(ar);
	if (!mem_layout)
		return;

	current_region = &mem_layout->region_table.regions[0];

	buf = crash_data->ramdump_buf;
	buf_len = crash_data->ramdump_buf_len;

	memset(buf, 0, buf_len);

	for (i = 0; i < mem_layout->region_table.size; i++) {
		count = 0;

		if (current_region->len > buf_len) {
			ath10k_warn(ar, "memory region %s size %d is larger than the remaining ramdump buffer size %zu\n",
				    current_region->name,
				    current_region->len,
				    buf_len);
			break;
		}

		/* To get IRAM dump, the host driver needs to switch target
		 * ram config from DRAM to IRAM.
		 */
		if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 ||
		    current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) {
			shift = current_region->start >> 20;

			ret = ath10k_pci_set_ram_config(ar, shift);
			if (ret) {
				ath10k_warn(ar, "failed to switch ram config to IRAM for section %s: %d\n",
					    current_region->name, ret);
				break;
			}
		}

		/* Reserve space for the header.
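		 * Each region in the dump is preceded by an
		 * ath10k_dump_ram_data_hdr recording its type, start address
		 * and length so the resulting blob can be parsed afterwards.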
*/ 1725 hdr = (void *)buf; 1726 buf += sizeof(*hdr); 1727 buf_len -= sizeof(*hdr); 1728 1729 switch (current_region->type) { 1730 case ATH10K_MEM_REGION_TYPE_IOSRAM: 1731 count = ath10k_pci_dump_memory_sram(ar, current_region, buf); 1732 break; 1733 case ATH10K_MEM_REGION_TYPE_IOREG: 1734 count = ath10k_pci_dump_memory_reg(ar, current_region, buf); 1735 break; 1736 default: 1737 ret = ath10k_pci_dump_memory_generic(ar, current_region, buf); 1738 if (ret < 0) 1739 break; 1740 1741 count = ret; 1742 break; 1743 } 1744 1745 hdr->region_type = cpu_to_le32(current_region->type); 1746 hdr->start = cpu_to_le32(current_region->start); 1747 hdr->length = cpu_to_le32(count); 1748 1749 if (count == 0) 1750 /* Note: the header remains, just with zero length. */ 1751 break; 1752 1753 buf += count; 1754 buf_len -= count; 1755 1756 current_region++; 1757 } 1758 } 1759 1760 static void ath10k_pci_fw_crashed_dump(struct ath10k *ar) 1761 { 1762 struct ath10k_fw_crash_data *crash_data; 1763 char guid[UUID_STRING_LEN + 1]; 1764 1765 spin_lock_bh(&ar->data_lock); 1766 1767 ar->stats.fw_crash_counter++; 1768 1769 crash_data = ath10k_coredump_new(ar); 1770 1771 if (crash_data) 1772 scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid); 1773 else 1774 scnprintf(guid, sizeof(guid), "n/a"); 1775 1776 ath10k_err(ar, "firmware crashed! (guid %s)\n", guid); 1777 ath10k_print_driver_info(ar); 1778 ath10k_pci_dump_registers(ar, crash_data); 1779 ath10k_ce_dump_registers(ar, crash_data); 1780 ath10k_pci_dump_memory(ar, crash_data); 1781 1782 spin_unlock_bh(&ar->data_lock); 1783 1784 queue_work(ar->workqueue, &ar->restart_work); 1785 } 1786 1787 void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, 1788 int force) 1789 { 1790 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n"); 1791 1792 if (!force) { 1793 int resources; 1794 /* 1795 * Decide whether to actually poll for completions, or just 1796 * wait for a later chance. 1797 * If there seem to be plenty of resources left, then just wait 1798 * since checking involves reading a CE register, which is a 1799 * relatively expensive operation. 1800 */ 1801 resources = ath10k_pci_hif_get_free_queue_number(ar, pipe); 1802 1803 /* 1804 * If at least 50% of the total resources are still available, 1805 * don't bother checking again yet. 
1806 */ 1807 if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1)) 1808 return; 1809 } 1810 ath10k_ce_per_engine_service(ar, pipe); 1811 } 1812 1813 static void ath10k_pci_rx_retry_sync(struct ath10k *ar) 1814 { 1815 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1816 1817 del_timer_sync(&ar_pci->rx_post_retry); 1818 } 1819 1820 int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id, 1821 u8 *ul_pipe, u8 *dl_pipe) 1822 { 1823 const struct service_to_pipe *entry; 1824 bool ul_set = false, dl_set = false; 1825 int i; 1826 1827 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n"); 1828 1829 for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) { 1830 entry = &target_service_to_ce_map_wlan[i]; 1831 1832 if (__le32_to_cpu(entry->service_id) != service_id) 1833 continue; 1834 1835 switch (__le32_to_cpu(entry->pipedir)) { 1836 case PIPEDIR_NONE: 1837 break; 1838 case PIPEDIR_IN: 1839 WARN_ON(dl_set); 1840 *dl_pipe = __le32_to_cpu(entry->pipenum); 1841 dl_set = true; 1842 break; 1843 case PIPEDIR_OUT: 1844 WARN_ON(ul_set); 1845 *ul_pipe = __le32_to_cpu(entry->pipenum); 1846 ul_set = true; 1847 break; 1848 case PIPEDIR_INOUT: 1849 WARN_ON(dl_set); 1850 WARN_ON(ul_set); 1851 *dl_pipe = __le32_to_cpu(entry->pipenum); 1852 *ul_pipe = __le32_to_cpu(entry->pipenum); 1853 dl_set = true; 1854 ul_set = true; 1855 break; 1856 } 1857 } 1858 1859 if (!ul_set || !dl_set) 1860 return -ENOENT; 1861 1862 return 0; 1863 } 1864 1865 void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, 1866 u8 *ul_pipe, u8 *dl_pipe) 1867 { 1868 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n"); 1869 1870 (void)ath10k_pci_hif_map_service_to_pipe(ar, 1871 ATH10K_HTC_SVC_ID_RSVD_CTRL, 1872 ul_pipe, dl_pipe); 1873 } 1874 1875 void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar) 1876 { 1877 u32 val; 1878 1879 switch (ar->hw_rev) { 1880 case ATH10K_HW_QCA988X: 1881 case ATH10K_HW_QCA9887: 1882 case ATH10K_HW_QCA6174: 1883 case ATH10K_HW_QCA9377: 1884 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 1885 CORE_CTRL_ADDRESS); 1886 val &= ~CORE_CTRL_PCIE_REG_31_MASK; 1887 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + 1888 CORE_CTRL_ADDRESS, val); 1889 break; 1890 case ATH10K_HW_QCA99X0: 1891 case ATH10K_HW_QCA9984: 1892 case ATH10K_HW_QCA9888: 1893 case ATH10K_HW_QCA4019: 1894 /* TODO: Find appropriate register configuration for QCA99X0 1895 * to mask irq/MSI. 1896 */ 1897 break; 1898 case ATH10K_HW_WCN3990: 1899 break; 1900 } 1901 } 1902 1903 static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar) 1904 { 1905 u32 val; 1906 1907 switch (ar->hw_rev) { 1908 case ATH10K_HW_QCA988X: 1909 case ATH10K_HW_QCA9887: 1910 case ATH10K_HW_QCA6174: 1911 case ATH10K_HW_QCA9377: 1912 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + 1913 CORE_CTRL_ADDRESS); 1914 val |= CORE_CTRL_PCIE_REG_31_MASK; 1915 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + 1916 CORE_CTRL_ADDRESS, val); 1917 break; 1918 case ATH10K_HW_QCA99X0: 1919 case ATH10K_HW_QCA9984: 1920 case ATH10K_HW_QCA9888: 1921 case ATH10K_HW_QCA4019: 1922 /* TODO: Find appropriate register configuration for QCA99X0 1923 * to unmask irq/MSI. 
1924 */ 1925 break; 1926 case ATH10K_HW_WCN3990: 1927 break; 1928 } 1929 } 1930 1931 static void ath10k_pci_irq_disable(struct ath10k *ar) 1932 { 1933 ath10k_ce_disable_interrupts(ar); 1934 ath10k_pci_disable_and_clear_legacy_irq(ar); 1935 ath10k_pci_irq_msi_fw_mask(ar); 1936 } 1937 1938 static void ath10k_pci_irq_sync(struct ath10k *ar) 1939 { 1940 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1941 1942 synchronize_irq(ar_pci->pdev->irq); 1943 } 1944 1945 static void ath10k_pci_irq_enable(struct ath10k *ar) 1946 { 1947 ath10k_ce_enable_interrupts(ar); 1948 ath10k_pci_enable_legacy_irq(ar); 1949 ath10k_pci_irq_msi_fw_unmask(ar); 1950 } 1951 1952 static int ath10k_pci_hif_start(struct ath10k *ar) 1953 { 1954 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1955 1956 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n"); 1957 1958 napi_enable(&ar->napi); 1959 1960 ath10k_pci_irq_enable(ar); 1961 ath10k_pci_rx_post(ar); 1962 1963 pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL, 1964 ar_pci->link_ctl); 1965 1966 return 0; 1967 } 1968 1969 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe) 1970 { 1971 struct ath10k *ar; 1972 struct ath10k_ce_pipe *ce_pipe; 1973 struct ath10k_ce_ring *ce_ring; 1974 struct sk_buff *skb; 1975 int i; 1976 1977 ar = pci_pipe->hif_ce_state; 1978 ce_pipe = pci_pipe->ce_hdl; 1979 ce_ring = ce_pipe->dest_ring; 1980 1981 if (!ce_ring) 1982 return; 1983 1984 if (!pci_pipe->buf_sz) 1985 return; 1986 1987 for (i = 0; i < ce_ring->nentries; i++) { 1988 skb = ce_ring->per_transfer_context[i]; 1989 if (!skb) 1990 continue; 1991 1992 ce_ring->per_transfer_context[i] = NULL; 1993 1994 dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr, 1995 skb->len + skb_tailroom(skb), 1996 DMA_FROM_DEVICE); 1997 dev_kfree_skb_any(skb); 1998 } 1999 } 2000 2001 static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe) 2002 { 2003 struct ath10k *ar; 2004 struct ath10k_ce_pipe *ce_pipe; 2005 struct ath10k_ce_ring *ce_ring; 2006 struct sk_buff *skb; 2007 int i; 2008 2009 ar = pci_pipe->hif_ce_state; 2010 ce_pipe = pci_pipe->ce_hdl; 2011 ce_ring = ce_pipe->src_ring; 2012 2013 if (!ce_ring) 2014 return; 2015 2016 if (!pci_pipe->buf_sz) 2017 return; 2018 2019 for (i = 0; i < ce_ring->nentries; i++) { 2020 skb = ce_ring->per_transfer_context[i]; 2021 if (!skb) 2022 continue; 2023 2024 ce_ring->per_transfer_context[i] = NULL; 2025 2026 ath10k_htc_tx_completion_handler(ar, skb); 2027 } 2028 } 2029 2030 /* 2031 * Cleanup residual buffers for device shutdown: 2032 * buffers that were enqueued for receive 2033 * buffers that were to be sent 2034 * Note: Buffers that had completed but which were 2035 * not yet processed are on a completion queue. They 2036 * are handled when the completion thread shuts down. 
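 * Rx buffers are unmapped and freed here; Tx buffers are handed back to
 * the HTC layer so their completion handlers still run.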
2037 */ 2038 static void ath10k_pci_buffer_cleanup(struct ath10k *ar) 2039 { 2040 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2041 int pipe_num; 2042 2043 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) { 2044 struct ath10k_pci_pipe *pipe_info; 2045 2046 pipe_info = &ar_pci->pipe_info[pipe_num]; 2047 ath10k_pci_rx_pipe_cleanup(pipe_info); 2048 ath10k_pci_tx_pipe_cleanup(pipe_info); 2049 } 2050 } 2051 2052 void ath10k_pci_ce_deinit(struct ath10k *ar) 2053 { 2054 int i; 2055 2056 for (i = 0; i < CE_COUNT; i++) 2057 ath10k_ce_deinit_pipe(ar, i); 2058 } 2059 2060 void ath10k_pci_flush(struct ath10k *ar) 2061 { 2062 ath10k_pci_rx_retry_sync(ar); 2063 ath10k_pci_buffer_cleanup(ar); 2064 } 2065 2066 static void ath10k_pci_hif_stop(struct ath10k *ar) 2067 { 2068 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2069 unsigned long flags; 2070 2071 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n"); 2072 2073 /* Most likely the device has HTT Rx ring configured. The only way to 2074 * prevent the device from accessing (and possible corrupting) host 2075 * memory is to reset the chip now. 2076 * 2077 * There's also no known way of masking MSI interrupts on the device. 2078 * For ranged MSI the CE-related interrupts can be masked. However 2079 * regardless how many MSI interrupts are assigned the first one 2080 * is always used for firmware indications (crashes) and cannot be 2081 * masked. To prevent the device from asserting the interrupt reset it 2082 * before proceeding with cleanup. 2083 */ 2084 ath10k_pci_safe_chip_reset(ar); 2085 2086 ath10k_pci_irq_disable(ar); 2087 ath10k_pci_irq_sync(ar); 2088 napi_synchronize(&ar->napi); 2089 napi_disable(&ar->napi); 2090 ath10k_pci_flush(ar); 2091 2092 spin_lock_irqsave(&ar_pci->ps_lock, flags); 2093 WARN_ON(ar_pci->ps_wake_refcount > 0); 2094 spin_unlock_irqrestore(&ar_pci->ps_lock, flags); 2095 } 2096 2097 int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, 2098 void *req, u32 req_len, 2099 void *resp, u32 *resp_len) 2100 { 2101 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2102 struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG]; 2103 struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST]; 2104 struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl; 2105 struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl; 2106 dma_addr_t req_paddr = 0; 2107 dma_addr_t resp_paddr = 0; 2108 struct bmi_xfer xfer = {}; 2109 void *treq, *tresp = NULL; 2110 int ret = 0; 2111 2112 might_sleep(); 2113 2114 if (resp && !resp_len) 2115 return -EINVAL; 2116 2117 if (resp && resp_len && *resp_len == 0) 2118 return -EINVAL; 2119 2120 treq = kmemdup(req, req_len, GFP_KERNEL); 2121 if (!treq) 2122 return -ENOMEM; 2123 2124 req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE); 2125 ret = dma_mapping_error(ar->dev, req_paddr); 2126 if (ret) { 2127 ret = -EIO; 2128 goto err_dma; 2129 } 2130 2131 if (resp && resp_len) { 2132 tresp = kzalloc(*resp_len, GFP_KERNEL); 2133 if (!tresp) { 2134 ret = -ENOMEM; 2135 goto err_req; 2136 } 2137 2138 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len, 2139 DMA_FROM_DEVICE); 2140 ret = dma_mapping_error(ar->dev, resp_paddr); 2141 if (ret) { 2142 ret = -EIO; 2143 goto err_req; 2144 } 2145 2146 xfer.wait_for_resp = true; 2147 xfer.resp_len = 0; 2148 2149 ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr); 2150 } 2151 2152 ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0); 2153 if (ret) 2154 goto err_resp; 2155 2156 ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer); 2157 if (ret) { 2158 
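		/*
		 * The BMI wait above timed out; cancel the pending send so
		 * the copy engine source ring entry is reclaimed before the
		 * DMA mappings are torn down below.
		 */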
dma_addr_t unused_buffer; 2159 unsigned int unused_nbytes; 2160 unsigned int unused_id; 2161 2162 ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer, 2163 &unused_nbytes, &unused_id); 2164 } else { 2165 /* non-zero means we did not time out */ 2166 ret = 0; 2167 } 2168 2169 err_resp: 2170 if (resp) { 2171 dma_addr_t unused_buffer; 2172 2173 ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer); 2174 dma_unmap_single(ar->dev, resp_paddr, 2175 *resp_len, DMA_FROM_DEVICE); 2176 } 2177 err_req: 2178 dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE); 2179 2180 if (ret == 0 && resp_len) { 2181 *resp_len = min(*resp_len, xfer.resp_len); 2182 memcpy(resp, tresp, xfer.resp_len); 2183 } 2184 err_dma: 2185 kfree(treq); 2186 kfree(tresp); 2187 2188 return ret; 2189 } 2190 2191 static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state) 2192 { 2193 struct bmi_xfer *xfer; 2194 2195 if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer)) 2196 return; 2197 2198 xfer->tx_done = true; 2199 } 2200 2201 static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state) 2202 { 2203 struct ath10k *ar = ce_state->ar; 2204 struct bmi_xfer *xfer; 2205 unsigned int nbytes; 2206 2207 if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, 2208 &nbytes)) 2209 return; 2210 2211 if (WARN_ON_ONCE(!xfer)) 2212 return; 2213 2214 if (!xfer->wait_for_resp) { 2215 ath10k_warn(ar, "unexpected: BMI data received; ignoring\n"); 2216 return; 2217 } 2218 2219 xfer->resp_len = nbytes; 2220 xfer->rx_done = true; 2221 } 2222 2223 static int ath10k_pci_bmi_wait(struct ath10k *ar, 2224 struct ath10k_ce_pipe *tx_pipe, 2225 struct ath10k_ce_pipe *rx_pipe, 2226 struct bmi_xfer *xfer) 2227 { 2228 unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ; 2229 unsigned long started = jiffies; 2230 unsigned long dur; 2231 int ret; 2232 2233 while (time_before_eq(jiffies, timeout)) { 2234 ath10k_pci_bmi_send_done(tx_pipe); 2235 ath10k_pci_bmi_recv_data(rx_pipe); 2236 2237 if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) { 2238 ret = 0; 2239 goto out; 2240 } 2241 2242 schedule(); 2243 } 2244 2245 ret = -ETIMEDOUT; 2246 2247 out: 2248 dur = jiffies - started; 2249 if (dur > HZ) 2250 ath10k_dbg(ar, ATH10K_DBG_BMI, 2251 "bmi cmd took %lu jiffies hz %d ret %d\n", 2252 dur, HZ, ret); 2253 return ret; 2254 } 2255 2256 /* 2257 * Send an interrupt to the device to wake up the Target CPU 2258 * so it has an opportunity to notice any changed state. 
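 * This is done by setting CORE_CTRL_CPU_INTR_MASK in the CORE_CTRL
 * register (SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS).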
2259 */ 2260 static int ath10k_pci_wake_target_cpu(struct ath10k *ar) 2261 { 2262 u32 addr, val; 2263 2264 addr = SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS; 2265 val = ath10k_pci_read32(ar, addr); 2266 val |= CORE_CTRL_CPU_INTR_MASK; 2267 ath10k_pci_write32(ar, addr, val); 2268 2269 return 0; 2270 } 2271 2272 static int ath10k_pci_get_num_banks(struct ath10k *ar) 2273 { 2274 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2275 2276 switch (ar_pci->pdev->device) { 2277 case QCA988X_2_0_DEVICE_ID_UBNT: 2278 case QCA988X_2_0_DEVICE_ID: 2279 case QCA99X0_2_0_DEVICE_ID: 2280 case QCA9888_2_0_DEVICE_ID: 2281 case QCA9984_1_0_DEVICE_ID: 2282 case QCA9887_1_0_DEVICE_ID: 2283 return 1; 2284 case QCA6164_2_1_DEVICE_ID: 2285 case QCA6174_2_1_DEVICE_ID: 2286 switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) { 2287 case QCA6174_HW_1_0_CHIP_ID_REV: 2288 case QCA6174_HW_1_1_CHIP_ID_REV: 2289 case QCA6174_HW_2_1_CHIP_ID_REV: 2290 case QCA6174_HW_2_2_CHIP_ID_REV: 2291 return 3; 2292 case QCA6174_HW_1_3_CHIP_ID_REV: 2293 return 2; 2294 case QCA6174_HW_3_0_CHIP_ID_REV: 2295 case QCA6174_HW_3_1_CHIP_ID_REV: 2296 case QCA6174_HW_3_2_CHIP_ID_REV: 2297 return 9; 2298 } 2299 break; 2300 case QCA9377_1_0_DEVICE_ID: 2301 return 9; 2302 } 2303 2304 ath10k_warn(ar, "unknown number of banks, assuming 1\n"); 2305 return 1; 2306 } 2307 2308 static int ath10k_bus_get_num_banks(struct ath10k *ar) 2309 { 2310 struct ath10k_ce *ce = ath10k_ce_priv(ar); 2311 2312 return ce->bus_ops->get_num_banks(ar); 2313 } 2314 2315 int ath10k_pci_init_config(struct ath10k *ar) 2316 { 2317 u32 interconnect_targ_addr; 2318 u32 pcie_state_targ_addr = 0; 2319 u32 pipe_cfg_targ_addr = 0; 2320 u32 svc_to_pipe_map = 0; 2321 u32 pcie_config_flags = 0; 2322 u32 ealloc_value; 2323 u32 ealloc_targ_addr; 2324 u32 flag2_value; 2325 u32 flag2_targ_addr; 2326 int ret = 0; 2327 2328 /* Download to Target the CE Config and the service-to-CE map */ 2329 interconnect_targ_addr = 2330 host_interest_item_address(HI_ITEM(hi_interconnect_state)); 2331 2332 /* Supply Target-side CE configuration */ 2333 ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr, 2334 &pcie_state_targ_addr); 2335 if (ret != 0) { 2336 ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret); 2337 return ret; 2338 } 2339 2340 if (pcie_state_targ_addr == 0) { 2341 ret = -EIO; 2342 ath10k_err(ar, "Invalid pcie state addr\n"); 2343 return ret; 2344 } 2345 2346 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr + 2347 offsetof(struct pcie_state, 2348 pipe_cfg_addr)), 2349 &pipe_cfg_targ_addr); 2350 if (ret != 0) { 2351 ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret); 2352 return ret; 2353 } 2354 2355 if (pipe_cfg_targ_addr == 0) { 2356 ret = -EIO; 2357 ath10k_err(ar, "Invalid pipe cfg addr\n"); 2358 return ret; 2359 } 2360 2361 ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr, 2362 target_ce_config_wlan, 2363 sizeof(struct ce_pipe_config) * 2364 NUM_TARGET_CE_CONFIG_WLAN); 2365 2366 if (ret != 0) { 2367 ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret); 2368 return ret; 2369 } 2370 2371 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr + 2372 offsetof(struct pcie_state, 2373 svc_to_pipe_map)), 2374 &svc_to_pipe_map); 2375 if (ret != 0) { 2376 ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret); 2377 return ret; 2378 } 2379 2380 if (svc_to_pipe_map == 0) { 2381 ret = -EIO; 2382 ath10k_err(ar, "Invalid svc_to_pipe map\n"); 2383 return ret; 2384 } 2385 2386 ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map, 2387 target_service_to_ce_map_wlan, 2388 
					sizeof(target_service_to_ce_map_wlan));
	if (ret != 0) {
		ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
				      offsetof(struct pcie_state,
					       config_flags)),
				      &pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
		return ret;
	}

	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

	ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
					   offsetof(struct pcie_state,
						    config_flags)),
					   pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
		return ret;
	}

	/* configure early allocation */
	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

	ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
		return ret;
	}

	/* first bank is switched to IRAM */
	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
			 HI_EARLY_ALLOC_MAGIC_MASK);
	ealloc_value |= ((ath10k_bus_get_num_banks(ar) <<
			  HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
		return ret;
	}

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));

	ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get option val: %d\n", ret);
		return ret;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;

	ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to set option val: %d\n", ret);
		return ret;
	}

	return 0;
}

static void ath10k_pci_override_ce_config(struct ath10k *ar)
{
	struct ce_attr *attr;
	struct ce_pipe_config *config;

	/* For QCA6174 we're overriding the Copy Engine 5 configuration,
	 * since it is currently used for another feature.
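	 * The override below disables CE5 as a host rx pipe (src_sz_max and
	 * dest_nentries are zeroed), turns the target-side CE5 into a
	 * host->target pipe with a 2048 byte transfer limit, and redirects
	 * entry 15 of target_service_to_ce_map_wlan to pipe 1. The same
	 * override is applied for QCA9377 (see ath10k_pci_setup_resource()).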
2463 */ 2464 2465 /* Override Host's Copy Engine 5 configuration */ 2466 attr = &host_ce_config_wlan[5]; 2467 attr->src_sz_max = 0; 2468 attr->dest_nentries = 0; 2469 2470 /* Override Target firmware's Copy Engine configuration */ 2471 config = &target_ce_config_wlan[5]; 2472 config->pipedir = __cpu_to_le32(PIPEDIR_OUT); 2473 config->nbytes_max = __cpu_to_le32(2048); 2474 2475 /* Map from service/endpoint to Copy Engine */ 2476 target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1); 2477 } 2478 2479 int ath10k_pci_alloc_pipes(struct ath10k *ar) 2480 { 2481 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2482 struct ath10k_pci_pipe *pipe; 2483 struct ath10k_ce *ce = ath10k_ce_priv(ar); 2484 int i, ret; 2485 2486 for (i = 0; i < CE_COUNT; i++) { 2487 pipe = &ar_pci->pipe_info[i]; 2488 pipe->ce_hdl = &ce->ce_states[i]; 2489 pipe->pipe_num = i; 2490 pipe->hif_ce_state = ar; 2491 2492 ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]); 2493 if (ret) { 2494 ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n", 2495 i, ret); 2496 return ret; 2497 } 2498 2499 /* Last CE is Diagnostic Window */ 2500 if (i == CE_DIAG_PIPE) { 2501 ar_pci->ce_diag = pipe->ce_hdl; 2502 continue; 2503 } 2504 2505 pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max); 2506 } 2507 2508 return 0; 2509 } 2510 2511 void ath10k_pci_free_pipes(struct ath10k *ar) 2512 { 2513 int i; 2514 2515 for (i = 0; i < CE_COUNT; i++) 2516 ath10k_ce_free_pipe(ar, i); 2517 } 2518 2519 int ath10k_pci_init_pipes(struct ath10k *ar) 2520 { 2521 int i, ret; 2522 2523 for (i = 0; i < CE_COUNT; i++) { 2524 ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]); 2525 if (ret) { 2526 ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n", 2527 i, ret); 2528 return ret; 2529 } 2530 } 2531 2532 return 0; 2533 } 2534 2535 static bool ath10k_pci_has_fw_crashed(struct ath10k *ar) 2536 { 2537 return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) & 2538 FW_IND_EVENT_PENDING; 2539 } 2540 2541 static void ath10k_pci_fw_crashed_clear(struct ath10k *ar) 2542 { 2543 u32 val; 2544 2545 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); 2546 val &= ~FW_IND_EVENT_PENDING; 2547 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val); 2548 } 2549 2550 static bool ath10k_pci_has_device_gone(struct ath10k *ar) 2551 { 2552 u32 val; 2553 2554 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); 2555 return (val == 0xffffffff); 2556 } 2557 2558 /* this function effectively clears target memory controller assert line */ 2559 static void ath10k_pci_warm_reset_si0(struct ath10k *ar) 2560 { 2561 u32 val; 2562 2563 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); 2564 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS, 2565 val | SOC_RESET_CONTROL_SI0_RST_MASK); 2566 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); 2567 2568 msleep(10); 2569 2570 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); 2571 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS, 2572 val & ~SOC_RESET_CONTROL_SI0_RST_MASK); 2573 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS); 2574 2575 msleep(10); 2576 } 2577 2578 static void ath10k_pci_warm_reset_cpu(struct ath10k *ar) 2579 { 2580 u32 val; 2581 2582 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0); 2583 2584 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + 2585 SOC_RESET_CONTROL_ADDRESS); 2586 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, 2587 val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK); 2588 } 2589 2590 static void ath10k_pci_warm_reset_ce(struct 
ath10k *ar) 2591 { 2592 u32 val; 2593 2594 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + 2595 SOC_RESET_CONTROL_ADDRESS); 2596 2597 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, 2598 val | SOC_RESET_CONTROL_CE_RST_MASK); 2599 msleep(10); 2600 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS, 2601 val & ~SOC_RESET_CONTROL_CE_RST_MASK); 2602 } 2603 2604 static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar) 2605 { 2606 u32 val; 2607 2608 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + 2609 SOC_LF_TIMER_CONTROL0_ADDRESS); 2610 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + 2611 SOC_LF_TIMER_CONTROL0_ADDRESS, 2612 val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK); 2613 } 2614 2615 static int ath10k_pci_warm_reset(struct ath10k *ar) 2616 { 2617 int ret; 2618 2619 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n"); 2620 2621 spin_lock_bh(&ar->data_lock); 2622 ar->stats.fw_warm_reset_counter++; 2623 spin_unlock_bh(&ar->data_lock); 2624 2625 ath10k_pci_irq_disable(ar); 2626 2627 /* Make sure the target CPU is not doing anything dangerous, e.g. if it 2628 * were to access copy engine while host performs copy engine reset 2629 * then it is possible for the device to confuse pci-e controller to 2630 * the point of bringing host system to a complete stop (i.e. hang). 2631 */ 2632 ath10k_pci_warm_reset_si0(ar); 2633 ath10k_pci_warm_reset_cpu(ar); 2634 ath10k_pci_init_pipes(ar); 2635 ath10k_pci_wait_for_target_init(ar); 2636 2637 ath10k_pci_warm_reset_clear_lf(ar); 2638 ath10k_pci_warm_reset_ce(ar); 2639 ath10k_pci_warm_reset_cpu(ar); 2640 ath10k_pci_init_pipes(ar); 2641 2642 ret = ath10k_pci_wait_for_target_init(ar); 2643 if (ret) { 2644 ath10k_warn(ar, "failed to wait for target init: %d\n", ret); 2645 return ret; 2646 } 2647 2648 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n"); 2649 2650 return 0; 2651 } 2652 2653 static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar) 2654 { 2655 ath10k_pci_irq_disable(ar); 2656 return ath10k_pci_qca99x0_chip_reset(ar); 2657 } 2658 2659 static int ath10k_pci_safe_chip_reset(struct ath10k *ar) 2660 { 2661 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2662 2663 if (!ar_pci->pci_soft_reset) 2664 return -ENOTSUPP; 2665 2666 return ar_pci->pci_soft_reset(ar); 2667 } 2668 2669 static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar) 2670 { 2671 int i, ret; 2672 u32 val; 2673 2674 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n"); 2675 2676 /* Some hardware revisions (e.g. CUS223v2) has issues with cold reset. 2677 * It is thus preferred to use warm reset which is safer but may not be 2678 * able to recover the device from all possible fail scenarios. 2679 * 2680 * Warm reset doesn't always work on first try so attempt it a few 2681 * times before giving up. 2682 */ 2683 for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) { 2684 ret = ath10k_pci_warm_reset(ar); 2685 if (ret) { 2686 ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n", 2687 i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, 2688 ret); 2689 continue; 2690 } 2691 2692 /* FIXME: Sometimes copy engine doesn't recover after warm 2693 * reset. In most cases this needs cold reset. In some of these 2694 * cases the device is in such a state that a cold reset may 2695 * lock up the host. 2696 * 2697 * Reading any host interest register via copy engine is 2698 * sufficient to verify if device is capable of booting 2699 * firmware blob. 
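 * The check below reads QCA988X_HOST_INTEREST_ADDRESS through the
 * diagnostic copy engine; if either the CE init or the read fails, the
 * warm reset is retried and a cold reset is attempted only as a last
 * resort (unless reset_mode restricts us to warm resets).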
2700 */ 2701 ret = ath10k_pci_init_pipes(ar); 2702 if (ret) { 2703 ath10k_warn(ar, "failed to init copy engine: %d\n", 2704 ret); 2705 continue; 2706 } 2707 2708 ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS, 2709 &val); 2710 if (ret) { 2711 ath10k_warn(ar, "failed to poke copy engine: %d\n", 2712 ret); 2713 continue; 2714 } 2715 2716 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n"); 2717 return 0; 2718 } 2719 2720 if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) { 2721 ath10k_warn(ar, "refusing cold reset as requested\n"); 2722 return -EPERM; 2723 } 2724 2725 ret = ath10k_pci_cold_reset(ar); 2726 if (ret) { 2727 ath10k_warn(ar, "failed to cold reset: %d\n", ret); 2728 return ret; 2729 } 2730 2731 ret = ath10k_pci_wait_for_target_init(ar); 2732 if (ret) { 2733 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n", 2734 ret); 2735 return ret; 2736 } 2737 2738 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n"); 2739 2740 return 0; 2741 } 2742 2743 static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar) 2744 { 2745 int ret; 2746 2747 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n"); 2748 2749 /* FIXME: QCA6174 requires cold + warm reset to work. */ 2750 2751 ret = ath10k_pci_cold_reset(ar); 2752 if (ret) { 2753 ath10k_warn(ar, "failed to cold reset: %d\n", ret); 2754 return ret; 2755 } 2756 2757 ret = ath10k_pci_wait_for_target_init(ar); 2758 if (ret) { 2759 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n", 2760 ret); 2761 return ret; 2762 } 2763 2764 ret = ath10k_pci_warm_reset(ar); 2765 if (ret) { 2766 ath10k_warn(ar, "failed to warm reset: %d\n", ret); 2767 return ret; 2768 } 2769 2770 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n"); 2771 2772 return 0; 2773 } 2774 2775 static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar) 2776 { 2777 int ret; 2778 2779 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n"); 2780 2781 ret = ath10k_pci_cold_reset(ar); 2782 if (ret) { 2783 ath10k_warn(ar, "failed to cold reset: %d\n", ret); 2784 return ret; 2785 } 2786 2787 ret = ath10k_pci_wait_for_target_init(ar); 2788 if (ret) { 2789 ath10k_warn(ar, "failed to wait for target after cold reset: %d\n", 2790 ret); 2791 return ret; 2792 } 2793 2794 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n"); 2795 2796 return 0; 2797 } 2798 2799 static int ath10k_pci_chip_reset(struct ath10k *ar) 2800 { 2801 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2802 2803 if (WARN_ON(!ar_pci->pci_hard_reset)) 2804 return -ENOTSUPP; 2805 2806 return ar_pci->pci_hard_reset(ar); 2807 } 2808 2809 static int ath10k_pci_hif_power_up(struct ath10k *ar) 2810 { 2811 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2812 int ret; 2813 2814 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n"); 2815 2816 pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL, 2817 &ar_pci->link_ctl); 2818 pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL, 2819 ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC); 2820 2821 /* 2822 * Bring the target up cleanly. 2823 * 2824 * The target may be in an undefined state with an AUX-powered Target 2825 * and a Host in WoW mode. If the Host crashes, loses power, or is 2826 * restarted (without unloading the driver) then the Target is left 2827 * (aux) powered and running. On a subsequent driver load, the Target 2828 * is in an unexpected state. We try to catch that here in order to 2829 * reset the Target and retry the probe. 
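 * The chip reset below (the per-chip pci_hard_reset callback) is what
 * brings the device back to a known state before the copy engines are
 * initialised and the target configuration is downloaded.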
2830 */ 2831 ret = ath10k_pci_chip_reset(ar); 2832 if (ret) { 2833 if (ath10k_pci_has_fw_crashed(ar)) { 2834 ath10k_warn(ar, "firmware crashed during chip reset\n"); 2835 ath10k_pci_fw_crashed_clear(ar); 2836 ath10k_pci_fw_crashed_dump(ar); 2837 } 2838 2839 ath10k_err(ar, "failed to reset chip: %d\n", ret); 2840 goto err_sleep; 2841 } 2842 2843 ret = ath10k_pci_init_pipes(ar); 2844 if (ret) { 2845 ath10k_err(ar, "failed to initialize CE: %d\n", ret); 2846 goto err_sleep; 2847 } 2848 2849 ret = ath10k_pci_init_config(ar); 2850 if (ret) { 2851 ath10k_err(ar, "failed to setup init config: %d\n", ret); 2852 goto err_ce; 2853 } 2854 2855 ret = ath10k_pci_wake_target_cpu(ar); 2856 if (ret) { 2857 ath10k_err(ar, "could not wake up target CPU: %d\n", ret); 2858 goto err_ce; 2859 } 2860 2861 return 0; 2862 2863 err_ce: 2864 ath10k_pci_ce_deinit(ar); 2865 2866 err_sleep: 2867 return ret; 2868 } 2869 2870 void ath10k_pci_hif_power_down(struct ath10k *ar) 2871 { 2872 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n"); 2873 2874 /* Currently hif_power_up performs effectively a reset and hif_stop 2875 * resets the chip as well so there's no point in resetting here. 2876 */ 2877 } 2878 2879 static int ath10k_pci_hif_suspend(struct ath10k *ar) 2880 { 2881 /* Nothing to do; the important stuff is in the driver suspend. */ 2882 return 0; 2883 } 2884 2885 static int ath10k_pci_suspend(struct ath10k *ar) 2886 { 2887 /* The grace timer can still be counting down and ar->ps_awake be true. 2888 * It is known that the device may be asleep after resuming regardless 2889 * of the SoC powersave state before suspending. Hence make sure the 2890 * device is asleep before proceeding. 2891 */ 2892 ath10k_pci_sleep_sync(ar); 2893 2894 return 0; 2895 } 2896 2897 static int ath10k_pci_hif_resume(struct ath10k *ar) 2898 { 2899 /* Nothing to do; the important stuff is in the driver resume. */ 2900 return 0; 2901 } 2902 2903 static int ath10k_pci_resume(struct ath10k *ar) 2904 { 2905 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 2906 struct pci_dev *pdev = ar_pci->pdev; 2907 u32 val; 2908 int ret = 0; 2909 2910 ret = ath10k_pci_force_wake(ar); 2911 if (ret) { 2912 ath10k_err(ar, "failed to wake up target: %d\n", ret); 2913 return ret; 2914 } 2915 2916 /* Suspend/Resume resets the PCI configuration space, so we have to 2917 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries 2918 * from interfering with C3 CPU state. pci_restore_state won't help 2919 * here since it only restores the first 64 bytes pci config header. 
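 * The fixup below therefore reads the config dword at offset 0x40 and
 * clears bits 15:8 (the RETRY_TIMEOUT byte at offset 0x41) by hand.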
2920 */ 2921 pci_read_config_dword(pdev, 0x40, &val); 2922 if ((val & 0x0000ff00) != 0) 2923 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); 2924 2925 return ret; 2926 } 2927 2928 static bool ath10k_pci_validate_cal(void *data, size_t size) 2929 { 2930 __le16 *cal_words = data; 2931 u16 checksum = 0; 2932 size_t i; 2933 2934 if (size % 2 != 0) 2935 return false; 2936 2937 for (i = 0; i < size / 2; i++) 2938 checksum ^= le16_to_cpu(cal_words[i]); 2939 2940 return checksum == 0xffff; 2941 } 2942 2943 static void ath10k_pci_enable_eeprom(struct ath10k *ar) 2944 { 2945 /* Enable SI clock */ 2946 ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0); 2947 2948 /* Configure GPIOs for I2C operation */ 2949 ath10k_pci_write32(ar, 2950 GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET + 2951 4 * QCA9887_1_0_I2C_SDA_GPIO_PIN, 2952 SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG, 2953 GPIO_PIN0_CONFIG) | 2954 SM(1, GPIO_PIN0_PAD_PULL)); 2955 2956 ath10k_pci_write32(ar, 2957 GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET + 2958 4 * QCA9887_1_0_SI_CLK_GPIO_PIN, 2959 SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) | 2960 SM(1, GPIO_PIN0_PAD_PULL)); 2961 2962 ath10k_pci_write32(ar, 2963 GPIO_BASE_ADDRESS + 2964 QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS, 2965 1u << QCA9887_1_0_SI_CLK_GPIO_PIN); 2966 2967 /* In Swift ASIC - EEPROM clock will be (110MHz/512) = 214KHz */ 2968 ath10k_pci_write32(ar, 2969 SI_BASE_ADDRESS + SI_CONFIG_OFFSET, 2970 SM(1, SI_CONFIG_ERR_INT) | 2971 SM(1, SI_CONFIG_BIDIR_OD_DATA) | 2972 SM(1, SI_CONFIG_I2C) | 2973 SM(1, SI_CONFIG_POS_SAMPLE) | 2974 SM(1, SI_CONFIG_INACTIVE_DATA) | 2975 SM(1, SI_CONFIG_INACTIVE_CLK) | 2976 SM(8, SI_CONFIG_DIVIDER)); 2977 } 2978 2979 static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out) 2980 { 2981 u32 reg; 2982 int wait_limit; 2983 2984 /* set device select byte and for the read operation */ 2985 reg = QCA9887_EEPROM_SELECT_READ | 2986 SM(addr, QCA9887_EEPROM_ADDR_LO) | 2987 SM(addr >> 8, QCA9887_EEPROM_ADDR_HI); 2988 ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg); 2989 2990 /* write transmit data, transfer length, and START bit */ 2991 ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, 2992 SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) | 2993 SM(4, SI_CS_TX_CNT)); 2994 2995 /* wait max 1 sec */ 2996 wait_limit = 100000; 2997 2998 /* wait for SI_CS_DONE_INT */ 2999 do { 3000 reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET); 3001 if (MS(reg, SI_CS_DONE_INT)) 3002 break; 3003 3004 wait_limit--; 3005 udelay(10); 3006 } while (wait_limit > 0); 3007 3008 if (!MS(reg, SI_CS_DONE_INT)) { 3009 ath10k_err(ar, "timeout while reading device EEPROM at %04x\n", 3010 addr); 3011 return -ETIMEDOUT; 3012 } 3013 3014 /* clear SI_CS_DONE_INT */ 3015 ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg); 3016 3017 if (MS(reg, SI_CS_DONE_ERR)) { 3018 ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr); 3019 return -EIO; 3020 } 3021 3022 /* extract receive data */ 3023 reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET); 3024 *out = reg; 3025 3026 return 0; 3027 } 3028 3029 static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data, 3030 size_t *data_len) 3031 { 3032 u8 *caldata = NULL; 3033 size_t calsize, i; 3034 int ret; 3035 3036 if (!QCA_REV_9887(ar)) 3037 return -EOPNOTSUPP; 3038 3039 calsize = ar->hw_params.cal_data_len; 3040 caldata = kmalloc(calsize, GFP_KERNEL); 3041 if (!caldata) 3042 return -ENOMEM; 3043 3044 ath10k_pci_enable_eeprom(ar); 3045 3046 for (i = 0; i < calsize; i++) { 3047 ret = 
			ath10k_pci_read_eeprom(ar, i, &caldata[i]);
		if (ret)
			goto err_free;
	}

	if (!ath10k_pci_validate_cal(caldata, calsize))
		goto err_free;

	*data = caldata;
	*data_len = calsize;

	return 0;

err_free:
	kfree(caldata);

	return -EINVAL;
}

static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
	.tx_sg = ath10k_pci_hif_tx_sg,
	.diag_read = ath10k_pci_hif_diag_read,
	.diag_write = ath10k_pci_diag_write_mem,
	.exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
	.start = ath10k_pci_hif_start,
	.stop = ath10k_pci_hif_stop,
	.map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
	.get_default_pipe = ath10k_pci_hif_get_default_pipe,
	.send_complete_check = ath10k_pci_hif_send_complete_check,
	.get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
	.power_up = ath10k_pci_hif_power_up,
	.power_down = ath10k_pci_hif_power_down,
	.read32 = ath10k_pci_read32,
	.write32 = ath10k_pci_write32,
	.suspend = ath10k_pci_hif_suspend,
	.resume = ath10k_pci_hif_resume,
	.fetch_cal_eeprom = ath10k_pci_hif_fetch_cal_eeprom,
};

/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	if (ath10k_pci_has_device_gone(ar))
		return IRQ_NONE;

	ret = ath10k_pci_force_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
		return IRQ_NONE;
	}

	if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) &&
	    !ath10k_pci_irq_pending(ar))
		return IRQ_NONE;

	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);
	napi_schedule(&ar->napi);

	return IRQ_HANDLED;
}

static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
{
	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
	int done = 0;

	if (ath10k_pci_has_fw_crashed(ar)) {
		ath10k_pci_fw_crashed_clear(ar);
		ath10k_pci_fw_crashed_dump(ar);
		napi_complete(ctx);
		return done;
	}

	ath10k_ce_per_engine_service_any(ar);

	done = ath10k_htt_txrx_compl_task(ar, budget);

	if (done < budget) {
		napi_complete_done(ctx, done);
		/* In case of MSI, it is possible that interrupts are received
		 * while NAPI poll is in progress. Pending interrupts that are
		 * received after processing all copy engine pipes by NAPI poll
		 * would then not be handled again; this can cause the boot
		 * sequence to fail on x86 platforms. So before enabling
		 * interrupts it is safer to check for pending interrupts and
		 * service them immediately.
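		 * Hence the ath10k_ce_interrupt_summary() check below: if any
		 * copy engine still has work pending, NAPI is rescheduled
		 * instead of re-enabling the legacy interrupt and unmasking
		 * firmware MSIs.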
3142 */ 3143 if (ath10k_ce_interrupt_summary(ar)) { 3144 napi_reschedule(ctx); 3145 goto out; 3146 } 3147 ath10k_pci_enable_legacy_irq(ar); 3148 ath10k_pci_irq_msi_fw_unmask(ar); 3149 } 3150 3151 out: 3152 return done; 3153 } 3154 3155 static int ath10k_pci_request_irq_msi(struct ath10k *ar) 3156 { 3157 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 3158 int ret; 3159 3160 ret = request_irq(ar_pci->pdev->irq, 3161 ath10k_pci_interrupt_handler, 3162 IRQF_SHARED, "ath10k_pci", ar); 3163 if (ret) { 3164 ath10k_warn(ar, "failed to request MSI irq %d: %d\n", 3165 ar_pci->pdev->irq, ret); 3166 return ret; 3167 } 3168 3169 return 0; 3170 } 3171 3172 static int ath10k_pci_request_irq_legacy(struct ath10k *ar) 3173 { 3174 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 3175 int ret; 3176 3177 ret = request_irq(ar_pci->pdev->irq, 3178 ath10k_pci_interrupt_handler, 3179 IRQF_SHARED, "ath10k_pci", ar); 3180 if (ret) { 3181 ath10k_warn(ar, "failed to request legacy irq %d: %d\n", 3182 ar_pci->pdev->irq, ret); 3183 return ret; 3184 } 3185 3186 return 0; 3187 } 3188 3189 static int ath10k_pci_request_irq(struct ath10k *ar) 3190 { 3191 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 3192 3193 switch (ar_pci->oper_irq_mode) { 3194 case ATH10K_PCI_IRQ_LEGACY: 3195 return ath10k_pci_request_irq_legacy(ar); 3196 case ATH10K_PCI_IRQ_MSI: 3197 return ath10k_pci_request_irq_msi(ar); 3198 default: 3199 return -EINVAL; 3200 } 3201 } 3202 3203 static void ath10k_pci_free_irq(struct ath10k *ar) 3204 { 3205 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 3206 3207 free_irq(ar_pci->pdev->irq, ar); 3208 } 3209 3210 void ath10k_pci_init_napi(struct ath10k *ar) 3211 { 3212 netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll, 3213 ATH10K_NAPI_BUDGET); 3214 } 3215 3216 static int ath10k_pci_init_irq(struct ath10k *ar) 3217 { 3218 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 3219 int ret; 3220 3221 ath10k_pci_init_napi(ar); 3222 3223 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO) 3224 ath10k_info(ar, "limiting irq mode to: %d\n", 3225 ath10k_pci_irq_mode); 3226 3227 /* Try MSI */ 3228 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) { 3229 ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI; 3230 ret = pci_enable_msi(ar_pci->pdev); 3231 if (ret == 0) 3232 return 0; 3233 3234 /* fall-through */ 3235 } 3236 3237 /* Try legacy irq 3238 * 3239 * A potential race occurs here: The CORE_BASE write 3240 * depends on target correctly decoding AXI address but 3241 * host won't know when target writes BAR to CORE_CTRL. 3242 * This write might get lost if target has NOT written BAR. 3243 * For now, fix the race by repeating the write in below 3244 * synchronization checking. 
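 * The repeated write happens in ath10k_pci_wait_for_target_init(),
 * which re-runs ath10k_pci_enable_legacy_irq() on every polling
 * iteration until FW_IND_INITIALIZED is observed.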
3245 */ 3246 ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY; 3247 3248 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, 3249 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL); 3250 3251 return 0; 3252 } 3253 3254 static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar) 3255 { 3256 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, 3257 0); 3258 } 3259 3260 static int ath10k_pci_deinit_irq(struct ath10k *ar) 3261 { 3262 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 3263 3264 switch (ar_pci->oper_irq_mode) { 3265 case ATH10K_PCI_IRQ_LEGACY: 3266 ath10k_pci_deinit_irq_legacy(ar); 3267 break; 3268 default: 3269 pci_disable_msi(ar_pci->pdev); 3270 break; 3271 } 3272 3273 return 0; 3274 } 3275 3276 int ath10k_pci_wait_for_target_init(struct ath10k *ar) 3277 { 3278 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 3279 unsigned long timeout; 3280 u32 val; 3281 3282 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n"); 3283 3284 timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT); 3285 3286 do { 3287 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); 3288 3289 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n", 3290 val); 3291 3292 /* target should never return this */ 3293 if (val == 0xffffffff) 3294 continue; 3295 3296 /* the device has crashed so don't bother trying anymore */ 3297 if (val & FW_IND_EVENT_PENDING) 3298 break; 3299 3300 if (val & FW_IND_INITIALIZED) 3301 break; 3302 3303 if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) 3304 /* Fix potential race by repeating CORE_BASE writes */ 3305 ath10k_pci_enable_legacy_irq(ar); 3306 3307 mdelay(10); 3308 } while (time_before(jiffies, timeout)); 3309 3310 ath10k_pci_disable_and_clear_legacy_irq(ar); 3311 ath10k_pci_irq_msi_fw_mask(ar); 3312 3313 if (val == 0xffffffff) { 3314 ath10k_err(ar, "failed to read device register, device is gone\n"); 3315 return -EIO; 3316 } 3317 3318 if (val & FW_IND_EVENT_PENDING) { 3319 ath10k_warn(ar, "device has crashed during init\n"); 3320 return -ECOMM; 3321 } 3322 3323 if (!(val & FW_IND_INITIALIZED)) { 3324 ath10k_err(ar, "failed to receive initialized event from target: %08x\n", 3325 val); 3326 return -ETIMEDOUT; 3327 } 3328 3329 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n"); 3330 return 0; 3331 } 3332 3333 static int ath10k_pci_cold_reset(struct ath10k *ar) 3334 { 3335 u32 val; 3336 3337 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n"); 3338 3339 spin_lock_bh(&ar->data_lock); 3340 3341 ar->stats.fw_cold_reset_counter++; 3342 3343 spin_unlock_bh(&ar->data_lock); 3344 3345 /* Put Target, including PCIe, into RESET. */ 3346 val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS); 3347 val |= 1; 3348 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val); 3349 3350 /* After writing into SOC_GLOBAL_RESET to put device into 3351 * reset and pulling out of reset pcie may not be stable 3352 * for any immediate pcie register access and cause bus error, 3353 * add delay before any pcie access request to fix this issue. 3354 */ 3355 msleep(20); 3356 3357 /* Pull Target, including PCIe, out of RESET. 
*/ 3358 val &= ~1; 3359 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val); 3360 3361 msleep(20); 3362 3363 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n"); 3364 3365 return 0; 3366 } 3367 3368 static int ath10k_pci_claim(struct ath10k *ar) 3369 { 3370 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 3371 struct pci_dev *pdev = ar_pci->pdev; 3372 int ret; 3373 3374 pci_set_drvdata(pdev, ar); 3375 3376 ret = pci_enable_device(pdev); 3377 if (ret) { 3378 ath10k_err(ar, "failed to enable pci device: %d\n", ret); 3379 return ret; 3380 } 3381 3382 ret = pci_request_region(pdev, BAR_NUM, "ath"); 3383 if (ret) { 3384 ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM, 3385 ret); 3386 goto err_device; 3387 } 3388 3389 /* Target expects 32 bit DMA. Enforce it. */ 3390 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3391 if (ret) { 3392 ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret); 3393 goto err_region; 3394 } 3395 3396 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 3397 if (ret) { 3398 ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n", 3399 ret); 3400 goto err_region; 3401 } 3402 3403 pci_set_master(pdev); 3404 3405 /* Arrange for access to Target SoC registers. */ 3406 ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM); 3407 ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0); 3408 if (!ar_pci->mem) { 3409 ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM); 3410 ret = -EIO; 3411 goto err_master; 3412 } 3413 3414 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%pK\n", ar_pci->mem); 3415 return 0; 3416 3417 err_master: 3418 pci_clear_master(pdev); 3419 3420 err_region: 3421 pci_release_region(pdev, BAR_NUM); 3422 3423 err_device: 3424 pci_disable_device(pdev); 3425 3426 return ret; 3427 } 3428 3429 static void ath10k_pci_release(struct ath10k *ar) 3430 { 3431 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 3432 struct pci_dev *pdev = ar_pci->pdev; 3433 3434 pci_iounmap(pdev, ar_pci->mem); 3435 pci_release_region(pdev, BAR_NUM); 3436 pci_clear_master(pdev); 3437 pci_disable_device(pdev); 3438 } 3439 3440 static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id) 3441 { 3442 const struct ath10k_pci_supp_chip *supp_chip; 3443 int i; 3444 u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV); 3445 3446 for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) { 3447 supp_chip = &ath10k_pci_supp_chips[i]; 3448 3449 if (supp_chip->dev_id == dev_id && 3450 supp_chip->rev_id == rev_id) 3451 return true; 3452 } 3453 3454 return false; 3455 } 3456 3457 int ath10k_pci_setup_resource(struct ath10k *ar) 3458 { 3459 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 3460 struct ath10k_ce *ce = ath10k_ce_priv(ar); 3461 int ret; 3462 3463 spin_lock_init(&ce->ce_lock); 3464 spin_lock_init(&ar_pci->ps_lock); 3465 3466 timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0); 3467 3468 if (QCA_REV_6174(ar) || QCA_REV_9377(ar)) 3469 ath10k_pci_override_ce_config(ar); 3470 3471 ret = ath10k_pci_alloc_pipes(ar); 3472 if (ret) { 3473 ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", 3474 ret); 3475 return ret; 3476 } 3477 3478 return 0; 3479 } 3480 3481 void ath10k_pci_release_resource(struct ath10k *ar) 3482 { 3483 ath10k_pci_rx_retry_sync(ar); 3484 netif_napi_del(&ar->napi); 3485 ath10k_pci_ce_deinit(ar); 3486 ath10k_pci_free_pipes(ar); 3487 } 3488 3489 static const struct ath10k_bus_ops ath10k_pci_bus_ops = { 3490 .read32 = ath10k_bus_pci_read32, 3491 .write32 = ath10k_bus_pci_write32, 3492 .get_num_banks = ath10k_pci_get_num_banks, 
3493 }; 3494 3495 static int ath10k_pci_probe(struct pci_dev *pdev, 3496 const struct pci_device_id *pci_dev) 3497 { 3498 int ret = 0; 3499 struct ath10k *ar; 3500 struct ath10k_pci *ar_pci; 3501 enum ath10k_hw_rev hw_rev; 3502 struct ath10k_bus_params bus_params; 3503 bool pci_ps; 3504 int (*pci_soft_reset)(struct ath10k *ar); 3505 int (*pci_hard_reset)(struct ath10k *ar); 3506 u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr); 3507 3508 switch (pci_dev->device) { 3509 case QCA988X_2_0_DEVICE_ID_UBNT: 3510 case QCA988X_2_0_DEVICE_ID: 3511 hw_rev = ATH10K_HW_QCA988X; 3512 pci_ps = false; 3513 pci_soft_reset = ath10k_pci_warm_reset; 3514 pci_hard_reset = ath10k_pci_qca988x_chip_reset; 3515 targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr; 3516 break; 3517 case QCA9887_1_0_DEVICE_ID: 3518 hw_rev = ATH10K_HW_QCA9887; 3519 pci_ps = false; 3520 pci_soft_reset = ath10k_pci_warm_reset; 3521 pci_hard_reset = ath10k_pci_qca988x_chip_reset; 3522 targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr; 3523 break; 3524 case QCA6164_2_1_DEVICE_ID: 3525 case QCA6174_2_1_DEVICE_ID: 3526 hw_rev = ATH10K_HW_QCA6174; 3527 pci_ps = true; 3528 pci_soft_reset = ath10k_pci_warm_reset; 3529 pci_hard_reset = ath10k_pci_qca6174_chip_reset; 3530 targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr; 3531 break; 3532 case QCA99X0_2_0_DEVICE_ID: 3533 hw_rev = ATH10K_HW_QCA99X0; 3534 pci_ps = false; 3535 pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset; 3536 pci_hard_reset = ath10k_pci_qca99x0_chip_reset; 3537 targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr; 3538 break; 3539 case QCA9984_1_0_DEVICE_ID: 3540 hw_rev = ATH10K_HW_QCA9984; 3541 pci_ps = false; 3542 pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset; 3543 pci_hard_reset = ath10k_pci_qca99x0_chip_reset; 3544 targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr; 3545 break; 3546 case QCA9888_2_0_DEVICE_ID: 3547 hw_rev = ATH10K_HW_QCA9888; 3548 pci_ps = false; 3549 pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset; 3550 pci_hard_reset = ath10k_pci_qca99x0_chip_reset; 3551 targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr; 3552 break; 3553 case QCA9377_1_0_DEVICE_ID: 3554 hw_rev = ATH10K_HW_QCA9377; 3555 pci_ps = true; 3556 pci_soft_reset = NULL; 3557 pci_hard_reset = ath10k_pci_qca6174_chip_reset; 3558 targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr; 3559 break; 3560 default: 3561 WARN_ON(1); 3562 return -ENOTSUPP; 3563 } 3564 3565 ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI, 3566 hw_rev, &ath10k_pci_hif_ops); 3567 if (!ar) { 3568 dev_err(&pdev->dev, "failed to allocate core\n"); 3569 return -ENOMEM; 3570 } 3571 3572 ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n", 3573 pdev->vendor, pdev->device, 3574 pdev->subsystem_vendor, pdev->subsystem_device); 3575 3576 ar_pci = ath10k_pci_priv(ar); 3577 ar_pci->pdev = pdev; 3578 ar_pci->dev = &pdev->dev; 3579 ar_pci->ar = ar; 3580 ar->dev_id = pci_dev->device; 3581 ar_pci->pci_ps = pci_ps; 3582 ar_pci->ce.bus_ops = &ath10k_pci_bus_ops; 3583 ar_pci->pci_soft_reset = pci_soft_reset; 3584 ar_pci->pci_hard_reset = pci_hard_reset; 3585 ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr; 3586 ar->ce_priv = &ar_pci->ce; 3587 3588 ar->id.vendor = pdev->vendor; 3589 ar->id.device = pdev->device; 3590 ar->id.subsystem_vendor = pdev->subsystem_vendor; 3591 ar->id.subsystem_device = pdev->subsystem_device; 3592 3593 timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0); 3594 3595 ret = 
ath10k_pci_setup_resource(ar); 3596 if (ret) { 3597 ath10k_err(ar, "failed to setup resource: %d\n", ret); 3598 goto err_core_destroy; 3599 } 3600 3601 ret = ath10k_pci_claim(ar); 3602 if (ret) { 3603 ath10k_err(ar, "failed to claim device: %d\n", ret); 3604 goto err_free_pipes; 3605 } 3606 3607 ret = ath10k_pci_force_wake(ar); 3608 if (ret) { 3609 ath10k_warn(ar, "failed to wake up device : %d\n", ret); 3610 goto err_sleep; 3611 } 3612 3613 ath10k_pci_ce_deinit(ar); 3614 ath10k_pci_irq_disable(ar); 3615 3616 ret = ath10k_pci_init_irq(ar); 3617 if (ret) { 3618 ath10k_err(ar, "failed to init irqs: %d\n", ret); 3619 goto err_sleep; 3620 } 3621 3622 ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n", 3623 ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode, 3624 ath10k_pci_irq_mode, ath10k_pci_reset_mode); 3625 3626 ret = ath10k_pci_request_irq(ar); 3627 if (ret) { 3628 ath10k_warn(ar, "failed to request irqs: %d\n", ret); 3629 goto err_deinit_irq; 3630 } 3631 3632 ret = ath10k_pci_chip_reset(ar); 3633 if (ret) { 3634 ath10k_err(ar, "failed to reset chip: %d\n", ret); 3635 goto err_free_irq; 3636 } 3637 3638 bus_params.dev_type = ATH10K_DEV_TYPE_LL; 3639 bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS); 3640 if (bus_params.chip_id == 0xffffffff) { 3641 ath10k_err(ar, "failed to get chip id\n"); 3642 goto err_free_irq; 3643 } 3644 3645 if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) { 3646 ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n", 3647 pdev->device, bus_params.chip_id); 3648 goto err_free_irq; 3649 } 3650 3651 ret = ath10k_core_register(ar, &bus_params); 3652 if (ret) { 3653 ath10k_err(ar, "failed to register driver core: %d\n", ret); 3654 goto err_free_irq; 3655 } 3656 3657 return 0; 3658 3659 err_free_irq: 3660 ath10k_pci_free_irq(ar); 3661 ath10k_pci_rx_retry_sync(ar); 3662 3663 err_deinit_irq: 3664 ath10k_pci_deinit_irq(ar); 3665 3666 err_sleep: 3667 ath10k_pci_sleep_sync(ar); 3668 ath10k_pci_release(ar); 3669 3670 err_free_pipes: 3671 ath10k_pci_free_pipes(ar); 3672 3673 err_core_destroy: 3674 ath10k_core_destroy(ar); 3675 3676 return ret; 3677 } 3678 3679 static void ath10k_pci_remove(struct pci_dev *pdev) 3680 { 3681 struct ath10k *ar = pci_get_drvdata(pdev); 3682 struct ath10k_pci *ar_pci; 3683 3684 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n"); 3685 3686 if (!ar) 3687 return; 3688 3689 ar_pci = ath10k_pci_priv(ar); 3690 3691 if (!ar_pci) 3692 return; 3693 3694 ath10k_core_unregister(ar); 3695 ath10k_pci_free_irq(ar); 3696 ath10k_pci_deinit_irq(ar); 3697 ath10k_pci_release_resource(ar); 3698 ath10k_pci_sleep_sync(ar); 3699 ath10k_pci_release(ar); 3700 ath10k_core_destroy(ar); 3701 } 3702 3703 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table); 3704 3705 static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev) 3706 { 3707 struct ath10k *ar = dev_get_drvdata(dev); 3708 int ret; 3709 3710 ret = ath10k_pci_suspend(ar); 3711 if (ret) 3712 ath10k_warn(ar, "failed to suspend hif: %d\n", ret); 3713 3714 return ret; 3715 } 3716 3717 static __maybe_unused int ath10k_pci_pm_resume(struct device *dev) 3718 { 3719 struct ath10k *ar = dev_get_drvdata(dev); 3720 int ret; 3721 3722 ret = ath10k_pci_resume(ar); 3723 if (ret) 3724 ath10k_warn(ar, "failed to resume hif: %d\n", ret); 3725 3726 return ret; 3727 } 3728 3729 static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops, 3730 ath10k_pci_pm_suspend, 3731 ath10k_pci_pm_resume); 3732 3733 static struct pci_driver ath10k_pci_driver = { 3734 .name = "ath10k_pci", 3735 
.id_table = ath10k_pci_id_table, 3736 .probe = ath10k_pci_probe, 3737 .remove = ath10k_pci_remove, 3738 #ifdef CONFIG_PM 3739 .driver.pm = &ath10k_pci_pm_ops, 3740 #endif 3741 }; 3742 3743 static int __init ath10k_pci_init(void) 3744 { 3745 int ret; 3746 3747 ret = pci_register_driver(&ath10k_pci_driver); 3748 if (ret) 3749 printk(KERN_ERR "failed to register ath10k pci driver: %d\n", 3750 ret); 3751 3752 ret = ath10k_ahb_init(); 3753 if (ret) 3754 printk(KERN_ERR "ahb init failed: %d\n", ret); 3755 3756 return ret; 3757 } 3758 module_init(ath10k_pci_init); 3759 3760 static void __exit ath10k_pci_exit(void) 3761 { 3762 pci_unregister_driver(&ath10k_pci_driver); 3763 ath10k_ahb_exit(); 3764 } 3765 3766 module_exit(ath10k_pci_exit); 3767 3768 MODULE_AUTHOR("Qualcomm Atheros"); 3769 MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices"); 3770 MODULE_LICENSE("Dual BSD/GPL"); 3771 3772 /* QCA988x 2.0 firmware files */ 3773 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE); 3774 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE); 3775 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE); 3776 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE); 3777 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE); 3778 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); 3779 3780 /* QCA9887 1.0 firmware files */ 3781 MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE); 3782 MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" QCA9887_HW_1_0_BOARD_DATA_FILE); 3783 MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); 3784 3785 /* QCA6174 2.1 firmware files */ 3786 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE); 3787 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE); 3788 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE); 3789 MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE); 3790 3791 /* QCA6174 3.1 firmware files */ 3792 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE); 3793 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE); 3794 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API6_FILE); 3795 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE); 3796 MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE); 3797 3798 /* QCA9377 1.0 firmware files */ 3799 MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API6_FILE); 3800 MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE); 3801 MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE); 3802