/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

enum ath10k_pci_reset_mode {
	ATH10K_PCI_RESET_AUTO = 0,
	ATH10K_PCI_RESET_WARM_ONLY = 1,
};

static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");

/* how long to wait for the target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3

static const struct pci_device_id ath10k_pci_id_table[] = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
	{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
	{ PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
	{ PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) }, /* PCI-E QCA9888 V2 */
	{ PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */
	{ PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
	{ PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */
	{0}
};

static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
	/* QCA988X pre 2.0 chips are not supported because they need some nasty
	 * hacks. ath10k doesn't have them and these devices crash horribly
	 * because of that.
	 */
70 */ 71 { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV }, 72 73 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV }, 74 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV }, 75 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV }, 76 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV }, 77 { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV }, 78 79 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV }, 80 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV }, 81 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV }, 82 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV }, 83 { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV }, 84 85 { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV }, 86 87 { QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV }, 88 89 { QCA9888_2_0_DEVICE_ID, QCA9888_HW_2_0_CHIP_ID_REV }, 90 91 { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV }, 92 { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV }, 93 94 { QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV }, 95 }; 96 97 static void ath10k_pci_buffer_cleanup(struct ath10k *ar); 98 static int ath10k_pci_cold_reset(struct ath10k *ar); 99 static int ath10k_pci_safe_chip_reset(struct ath10k *ar); 100 static int ath10k_pci_init_irq(struct ath10k *ar); 101 static int ath10k_pci_deinit_irq(struct ath10k *ar); 102 static int ath10k_pci_request_irq(struct ath10k *ar); 103 static void ath10k_pci_free_irq(struct ath10k *ar); 104 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe, 105 struct ath10k_ce_pipe *rx_pipe, 106 struct bmi_xfer *xfer); 107 static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar); 108 static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state); 109 static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state); 110 static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state); 111 static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state); 112 static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state); 113 static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state); 114 115 static struct ce_attr host_ce_config_wlan[] = { 116 /* CE0: host->target HTC control and raw streams */ 117 { 118 .flags = CE_ATTR_FLAGS, 119 .src_nentries = 16, 120 .src_sz_max = 256, 121 .dest_nentries = 0, 122 .send_cb = ath10k_pci_htc_tx_cb, 123 }, 124 125 /* CE1: target->host HTT + HTC control */ 126 { 127 .flags = CE_ATTR_FLAGS, 128 .src_nentries = 0, 129 .src_sz_max = 2048, 130 .dest_nentries = 512, 131 .recv_cb = ath10k_pci_htt_htc_rx_cb, 132 }, 133 134 /* CE2: target->host WMI */ 135 { 136 .flags = CE_ATTR_FLAGS, 137 .src_nentries = 0, 138 .src_sz_max = 2048, 139 .dest_nentries = 128, 140 .recv_cb = ath10k_pci_htc_rx_cb, 141 }, 142 143 /* CE3: host->target WMI */ 144 { 145 .flags = CE_ATTR_FLAGS, 146 .src_nentries = 32, 147 .src_sz_max = 2048, 148 .dest_nentries = 0, 149 .send_cb = ath10k_pci_htc_tx_cb, 150 }, 151 152 /* CE4: host->target HTT */ 153 { 154 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 155 .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES, 156 .src_sz_max = 256, 157 .dest_nentries = 0, 158 .send_cb = ath10k_pci_htt_tx_cb, 159 }, 160 161 /* CE5: target->host HTT (HIF->HTT) */ 162 { 163 .flags = CE_ATTR_FLAGS, 164 .src_nentries = 0, 165 .src_sz_max = 512, 166 .dest_nentries = 512, 167 .recv_cb = ath10k_pci_htt_rx_cb, 168 }, 169 170 /* CE6: target autonomous hif_memcpy */ 171 { 172 .flags = CE_ATTR_FLAGS, 173 .src_nentries = 0, 174 .src_sz_max = 0, 175 .dest_nentries = 0, 176 }, 177 178 /* CE7: ce_diag, the Diagnostic Window */ 179 { 180 .flags = 
	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
		.dest_nentries = 2,
	},

	/* CE8: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
		.recv_cb = ath10k_pci_pktlog_rx_cb,
	},

	/* CE9: target autonomous qcache memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE10: target autonomous hif memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE11: target autonomous hif memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};

/* Target firmware's Copy Engine configuration. */
static struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = __cpu_to_le32(0),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = __cpu_to_le32(1),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = __cpu_to_le32(2),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = __cpu_to_le32(3),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE4: host->target HTT */
	{
		.pipenum = __cpu_to_le32(4),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(256),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* NB: 50% of src nentries, since tx has 2 frags */

	/* CE5: target->host HTT (HIF->HTT) */
	{
		.pipenum = __cpu_to_le32(5),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(512),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = __cpu_to_le32(6),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(4096),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE7: used only by the host */
	{
		.pipenum = __cpu_to_le32(7),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(0),
		.nbytes_max = __cpu_to_le32(0),
		.flags = __cpu_to_le32(0),
		.reserved = __cpu_to_le32(0),
	},

	/* CE8: target->host pktlog */
	{
		.pipenum = __cpu_to_le32(8),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},
	/* CE9: target autonomous qcache memcpy */
	{
		.pipenum = __cpu_to_le32(9),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* It is not necessary to send the target a wlan configuration for
	 * CE10 and CE11 as these CEs are not actively used by the target.
	 */
};

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(4),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(5),
	},

	/* (Additions here) */

	{ /* must be last */
		__cpu_to_le32(0),
		__cpu_to_le32(0),
		__cpu_to_le32(0),
	},
};

static bool ath10k_pci_is_awake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
			   RTC_STATE_ADDRESS);

	return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
}
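
/* Power-save handling below follows a simple protocol: MMIO accessors call
 * ath10k_pci_wake() before touching registers and ath10k_pci_sleep() when
 * done. Wakes are refcounted in ps_wake_refcount under ps_lock, and the
 * last sleeper arms ps_timer instead of sleeping immediately, so the SoC
 * stays awake for a short grace period in case another access follows.
 */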
static void __ath10k_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	lockdep_assert_held(&ar_pci->ps_lock);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	iowrite32(PCIE_SOC_WAKE_V_MASK,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
}

static void __ath10k_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	lockdep_assert_held(&ar_pci->ps_lock);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
	ar_pci->ps_awake = false;
}

static int ath10k_pci_wake_wait(struct ath10k *ar)
{
	int tot_delay = 0;
	int curr_delay = 5;

	while (tot_delay < PCIE_WAKE_TIMEOUT) {
		if (ath10k_pci_is_awake(ar)) {
			if (tot_delay > PCIE_WAKE_LATE_US)
				ath10k_warn(ar, "device wakeup took %d ms which is unusually long, otherwise it works normally.\n",
					    tot_delay / 1000);
			return 0;
		}

		udelay(curr_delay);
		tot_delay += curr_delay;

		if (curr_delay < 50)
			curr_delay += 5;
	}

	return -ETIMEDOUT;
}

static int ath10k_pci_force_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;
	int ret = 0;

	if (ar_pci->pci_ps)
		return ret;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	if (!ar_pci->ps_awake) {
		iowrite32(PCIE_SOC_WAKE_V_MASK,
			  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);

		ret = ath10k_pci_wake_wait(ar);
		if (ret == 0)
			ar_pci->ps_awake = true;
	}

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

	return ret;
}

static void ath10k_pci_force_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
	ar_pci->ps_awake = false;

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
static int ath10k_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;
	int ret = 0;

	if (ar_pci->pci_ps == 0)
		return ret;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	/* This function can be called very frequently. To avoid excessive
	 * CPU stalls for MMIO reads use a cache var to hold the device state.
	 */
	if (!ar_pci->ps_awake) {
		__ath10k_pci_wake(ar);

		ret = ath10k_pci_wake_wait(ar);
		if (ret == 0)
			ar_pci->ps_awake = true;
	}

	if (ret == 0) {
		ar_pci->ps_wake_refcount++;
		WARN_ON(ar_pci->ps_wake_refcount == 0);
	}

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

	return ret;
}

static void ath10k_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	if (ar_pci->pci_ps == 0)
		return;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	if (WARN_ON(ar_pci->ps_wake_refcount == 0))
		goto skip;

	ar_pci->ps_wake_refcount--;

	mod_timer(&ar_pci->ps_timer, jiffies +
		  msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));

skip:
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static void ath10k_pci_ps_timer(unsigned long ptr)
{
	struct ath10k *ar = (void *)ptr;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	if (ar_pci->ps_wake_refcount > 0)
		goto skip;

	__ath10k_pci_sleep(ar);

skip:
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static void ath10k_pci_sleep_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	if (ar_pci->pci_ps == 0) {
		ath10k_pci_force_sleep(ar);
		return;
	}

	del_timer_sync(&ar_pci->ps_timer);

	spin_lock_irqsave(&ar_pci->ps_lock, flags);
	WARN_ON(ar_pci->ps_wake_refcount > 0);
	__ath10k_pci_sleep(ar);
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}

static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
		ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
			    offset, offset + sizeof(value), ar_pci->mem_len);
		return;
	}

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
			    value, offset, ret);
		return;
	}

	iowrite32(value, ar_pci->mem + offset);
	ath10k_pci_sleep(ar);
}

static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 val;
	int ret;

	if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
		ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
			    offset, offset + sizeof(val), ar_pci->mem_len);
		return 0;
	}

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
			    offset, ret);
		return 0xffffffff;
	}

	val = ioread32(ar_pci->mem + offset);
	ath10k_pci_sleep(ar);

	return val;
}
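
/* The wrappers below dispatch through ar_pci->bus_ops so the same HIF code
 * can serve more than one bus; for PCI the ops point at the
 * ath10k_bus_pci_* accessors above (AHB targets such as QCA4019 are
 * presumably wired up elsewhere).
 */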
inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ar_pci->bus_ops->write32(ar, offset, value);
}

inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	return ar_pci->bus_ops->read32(ar, offset);
}

u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
{
	return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
}

void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
{
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
}

u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
	return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
}

void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
	ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
}

bool ath10k_pci_irq_pending(struct ath10k *ar)
{
	u32 cause;

	/* Check if the shared legacy irq is for us */
	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				  PCIE_INTR_CAUSE_ADDRESS);
	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
		return true;

	return false;
}

void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
	/* IMPORTANT: the INTR_CLR register has to be set after INTR_ENABLE
	 * is set to 0, otherwise the interrupt cannot really be cleared.
	 */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer.
	 */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}
void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer.
	 */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}

static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI)
		return "msi";

	return "legacy";
}

static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	skb = dev_alloc_skb(pipe->buf_sz);
	if (!skb)
		return -ENOMEM;

	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	paddr = dma_map_single(ar->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
		ath10k_warn(ar, "failed to dma map pci rx buf\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	ATH10K_SKB_RXCB(skb)->paddr = paddr;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
	spin_unlock_bh(&ar_pci->ce_lock);
	if (ret) {
		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	int ret, num;

	if (pipe->buf_sz == 0)
		return;

	if (!ce_pipe->dest_ring)
		return;

	spin_lock_bh(&ar_pci->ce_lock);
	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
	spin_unlock_bh(&ar_pci->ce_lock);

	while (num >= 0) {
		ret = __ath10k_pci_rx_post_buf(pipe);
		if (ret) {
			if (ret == -ENOSPC)
				break;
			ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
			mod_timer(&ar_pci->rx_post_retry, jiffies +
				  ATH10K_PCI_RX_POST_RETRY_MS);
			break;
		}
		num--;
	}
}

void ath10k_pci_rx_post(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
}

void ath10k_pci_rx_replenish_retry(unsigned long ptr)
{
	struct ath10k *ar = (void *)ptr;

	ath10k_pci_rx_post(ar);
}

static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
	u32 val = 0, region = addr & 0xfffff;

	val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
	       & 0x7ff) << 21;
	val |= 0x100000 | region;
	return val;
}

static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
	u32 val = 0, region = addr & 0xfffff;

	val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
	val |= 0x100000 | region;
	return val;
}

static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
		return -ENOTSUPP;

	return ar_pci->targ_cpu_to_ce_addr(ar, addr);
}

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
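/* The diagnostic CE is driven as a ping-pong: for a read the host posts a
 * bounce buffer on the dest ring, asks the CE to "send" from the (converted)
 * target address, then busy-polls for the send and recv completions, moving
 * at most DIAG_TRANSFER_LIMIT bytes per iteration.
 */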
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 *buf;
	unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
	struct ath10k_ce_pipe *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	spin_lock_bh(&ar_pci->ce_lock);

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);

	data_buf = (unsigned char *)dma_zalloc_coherent(ar->dev,
							alloc_nbytes,
							&ce_data_base,
							GFP_ATOMIC);

	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	remaining_bytes = nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = __ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer */
		/*
		 * The address supplied by the caller is in the
		 * Target CPU virtual address space.
		 *
		 * In order to use this address with the diagnostic CE,
		 * convert it from Target CPU virtual address space
		 * to CE address space
		 */
		address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);

		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
					    0);
		if (ret)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next_nolock(ce_diag,
							    NULL) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		i = 0;
		while (ath10k_ce_completed_recv_next_nolock(ce_diag,
							    (void **)&buf,
							    &completed_nbytes)
		       != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (*buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;

		if (ret) {
			ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
				    address, ret);
			break;
		}
		memcpy(data, data_buf, nbytes);

		address += nbytes;
		data += nbytes;
	}

done:
	if (data_buf)
		dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
				  ce_data_base);

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
{
	__le32 val = 0;
	int ret;

	ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
	*value = __le32_to_cpu(val);

	return ret;
}
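
/* Reading a host_interest ("hi") item is a double hop: first resolve the
 * host_interest member address with host_interest_item_address() and read
 * the pointer stored there, then copy len bytes from wherever that pointer
 * refers to. ath10k_pci_dump_registers() uses this to fetch
 * hi_failure_state.
 */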
static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
				     u32 src, u32 len)
{
	u32 host_addr, addr;
	int ret;

	host_addr = host_interest_item_address(src);

	ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
	if (ret != 0) {
		ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
			    src, ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
	if (ret != 0) {
		ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
			    addr, len, ret);
		return ret;
	}

	return 0;
}

#define ath10k_pci_diag_read_hi(ar, dest, src, len)	\
	__ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)

int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
			      const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 *buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	struct ath10k_ce_pipe *ce_diag;
	void *data_buf = NULL;
	u32 ce_data;	/* Host buffer address in CE space */
	dma_addr_t ce_data_base = 0;
	int i;

	spin_lock_bh(&ar_pci->ce_lock);

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
						       orig_nbytes,
						       &ce_data_base,
						       GFP_ATOMIC);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/* Copy caller's data to allocated DMA buf */
	memcpy(data_buf, data, orig_nbytes);

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from
	 *    Target CPU virtual address space
	 * to
	 *    CE address space
	 */
	address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Set up to receive directly into Target(!) address */
		ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
					    nbytes, 0, 0);
		if (ret != 0)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next_nolock(ce_diag,
							    NULL) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		i = 0;
		while (ath10k_ce_completed_recv_next_nolock(ce_diag,
							    (void **)&buf,
							    &completed_nbytes)
		       != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (*buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (data_buf) {
		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
				  ce_data_base);
	}

	if (ret != 0)
		ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
			    address, ret);

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
{
	__le32 val = __cpu_to_le32(value);

	return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
}
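
/* Example of the write side (see ath10k_pci_init_config() below): 32-bit
 * host_interest fields are patched with ath10k_pci_diag_write32(), e.g. to
 * set HI_OPTION_EARLY_CFG_DONE in hi_option_flag2 and tell the target to
 * proceed with initialization.
 */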
/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff_head list;
	struct sk_buff *skb;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (skb == NULL)
			continue;

		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list)))
		ath10k_htc_tx_completion_handler(ar, skb);
}

static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
				     void (*callback)(struct ath10k *ar,
						      struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &nbytes) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
				skb->data, skb->len);

		callback(ar, skb);
	}

	ath10k_pci_rx_post_pipe(pipe_info);
}

static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
					 void (*callback)(struct ath10k *ar,
							  struct sk_buff *skb))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	unsigned int nbytes, max_nbytes, nentries;
	int orig_len;

	/* No need to acquire ce_lock for CE5, since this is the only place CE5
	 * is processed other than init and deinit. Before releasing CE5
	 * buffers, interrupts are disabled. Thus CE5 access is serialized.
	 */
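	/* Note the buffer recycling below: instead of unmapping and freeing
	 * each skb, delivered buffers are trimmed back to zero length,
	 * handed back to the device with dma_sync_single_for_device(), and
	 * the dest ring write index is bumped once for all nentries at the
	 * end.
	 */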
	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
						    &nbytes) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);

		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			continue;
		}

		dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
					max_nbytes, DMA_FROM_DEVICE);
		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	nentries = skb_queue_len(&list);
	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
				skb->data, skb->len);

		orig_len = skb->len;
		callback(ar, skb);
		skb_push(skb, orig_len - skb->len);
		skb_reset_tail_pointer(skb);
		skb_trim(skb, 0);

		/* let device gain the buffer again */
		dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
					   skb->len + skb_tailroom(skb),
					   DMA_FROM_DEVICE);
	}
	ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, 4);

	ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
}

/* Called by lower (CE) layer when data is received from the Target.
 * Only 10.4 firmware uses separate CE to transfer pktlog data.
 */
static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	ath10k_pci_process_rx_cb(ce_state,
				 ath10k_htt_rx_pktlog_completion_handler);
}

/* Called by lower (CE) layer when a send to HTT Target completes. */
static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct sk_buff *skb;

	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (!skb)
			continue;

		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len, DMA_TO_DEVICE);
		ath10k_htt_hif_tx_complete(ar, skb);
	}
}

static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
{
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	ath10k_htt_t2h_msg_handler(ar, skb);
}
1337 */ 1338 ath10k_ce_per_engine_service(ce_state->ar, 4); 1339 1340 ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver); 1341 } 1342 1343 int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, 1344 struct ath10k_hif_sg_item *items, int n_items) 1345 { 1346 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1347 struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id]; 1348 struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl; 1349 struct ath10k_ce_ring *src_ring = ce_pipe->src_ring; 1350 unsigned int nentries_mask; 1351 unsigned int sw_index; 1352 unsigned int write_index; 1353 int err, i = 0; 1354 1355 spin_lock_bh(&ar_pci->ce_lock); 1356 1357 nentries_mask = src_ring->nentries_mask; 1358 sw_index = src_ring->sw_index; 1359 write_index = src_ring->write_index; 1360 1361 if (unlikely(CE_RING_DELTA(nentries_mask, 1362 write_index, sw_index - 1) < n_items)) { 1363 err = -ENOBUFS; 1364 goto err; 1365 } 1366 1367 for (i = 0; i < n_items - 1; i++) { 1368 ath10k_dbg(ar, ATH10K_DBG_PCI, 1369 "pci tx item %d paddr 0x%08x len %d n_items %d\n", 1370 i, items[i].paddr, items[i].len, n_items); 1371 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ", 1372 items[i].vaddr, items[i].len); 1373 1374 err = ath10k_ce_send_nolock(ce_pipe, 1375 items[i].transfer_context, 1376 items[i].paddr, 1377 items[i].len, 1378 items[i].transfer_id, 1379 CE_SEND_FLAG_GATHER); 1380 if (err) 1381 goto err; 1382 } 1383 1384 /* `i` is equal to `n_items -1` after for() */ 1385 1386 ath10k_dbg(ar, ATH10K_DBG_PCI, 1387 "pci tx item %d paddr 0x%08x len %d n_items %d\n", 1388 i, items[i].paddr, items[i].len, n_items); 1389 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ", 1390 items[i].vaddr, items[i].len); 1391 1392 err = ath10k_ce_send_nolock(ce_pipe, 1393 items[i].transfer_context, 1394 items[i].paddr, 1395 items[i].len, 1396 items[i].transfer_id, 1397 0); 1398 if (err) 1399 goto err; 1400 1401 spin_unlock_bh(&ar_pci->ce_lock); 1402 return 0; 1403 1404 err: 1405 for (; i > 0; i--) 1406 __ath10k_ce_send_revert(ce_pipe); 1407 1408 spin_unlock_bh(&ar_pci->ce_lock); 1409 return err; 1410 } 1411 1412 int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf, 1413 size_t buf_len) 1414 { 1415 return ath10k_pci_diag_read_mem(ar, address, buf, buf_len); 1416 } 1417 1418 u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) 1419 { 1420 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 1421 1422 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n"); 1423 1424 return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl); 1425 } 1426 1427 static void ath10k_pci_dump_registers(struct ath10k *ar, 1428 struct ath10k_fw_crash_data *crash_data) 1429 { 1430 __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {}; 1431 int i, ret; 1432 1433 lockdep_assert_held(&ar->data_lock); 1434 1435 ret = ath10k_pci_diag_read_hi(ar, ®_dump_values[0], 1436 hi_failure_state, 1437 REG_DUMP_COUNT_QCA988X * sizeof(__le32)); 1438 if (ret) { 1439 ath10k_err(ar, "failed to read firmware dump area: %d\n", ret); 1440 return; 1441 } 1442 1443 BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4); 1444 1445 ath10k_err(ar, "firmware register dump:\n"); 1446 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4) 1447 ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n", 1448 i, 1449 __le32_to_cpu(reg_dump_values[i]), 1450 __le32_to_cpu(reg_dump_values[i + 1]), 1451 __le32_to_cpu(reg_dump_values[i + 2]), 1452 __le32_to_cpu(reg_dump_values[i + 3])); 1453 1454 if (!crash_data) 1455 return; 1456 1457 for (i = 
int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
			     size_t buf_len)
{
	return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
}

u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");

	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}

static void ath10k_pci_dump_registers(struct ath10k *ar,
				      struct ath10k_fw_crash_data *crash_data)
{
	__le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	int i, ret;

	lockdep_assert_held(&ar->data_lock);

	ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
				      hi_failure_state,
				      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
	if (ret) {
		ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
		return;
	}

	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err(ar, "firmware register dump:\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   __le32_to_cpu(reg_dump_values[i]),
			   __le32_to_cpu(reg_dump_values[i + 1]),
			   __le32_to_cpu(reg_dump_values[i + 2]),
			   __le32_to_cpu(reg_dump_values[i + 3]));

	if (!crash_data)
		return;

	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
		crash_data->registers[i] = reg_dump_values[i];
}

static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
{
	struct ath10k_fw_crash_data *crash_data;
	char uuid[50];

	spin_lock_bh(&ar->data_lock);

	ar->stats.fw_crash_counter++;

	crash_data = ath10k_debug_get_new_fw_crash_data(ar);

	if (crash_data)
		scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
	else
		scnprintf(uuid, sizeof(uuid), "n/a");

	ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
	ath10k_print_driver_info(ar);
	ath10k_pci_dump_registers(ar, crash_data);
	ath10k_ce_dump_registers(ar, crash_data);

	spin_unlock_bh(&ar->data_lock);

	queue_work(ar->workqueue, &ar->restart_work);
}

void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					int force)
{
	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");

	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance.
		 * If there seem to be plenty of resources left, then just wait
		 * since checking involves reading a CE register, which is a
		 * relatively expensive operation.
		 */
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}

static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	del_timer_sync(&ar_pci->rx_post_retry);
}

int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
				       u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");

	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
		entry = &target_service_to_ce_map_wlan[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (WARN_ON(!ul_set || !dl_set))
		return -ENOENT;

	return 0;
}

void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
				     u8 *ul_pipe, u8 *dl_pipe)
{
	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");

	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe, dl_pipe);
}
void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
{
	u32 val;

	switch (ar->hw_rev) {
	case ATH10K_HW_QCA988X:
	case ATH10K_HW_QCA9887:
	case ATH10K_HW_QCA6174:
	case ATH10K_HW_QCA9377:
		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
					CORE_CTRL_ADDRESS);
		val &= ~CORE_CTRL_PCIE_REG_31_MASK;
		ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
				   CORE_CTRL_ADDRESS, val);
		break;
	case ATH10K_HW_QCA99X0:
	case ATH10K_HW_QCA9984:
	case ATH10K_HW_QCA9888:
	case ATH10K_HW_QCA4019:
		/* TODO: Find appropriate register configuration for QCA99X0
		 * to mask irq/MSI.
		 */
		break;
	}
}

static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
{
	u32 val;

	switch (ar->hw_rev) {
	case ATH10K_HW_QCA988X:
	case ATH10K_HW_QCA9887:
	case ATH10K_HW_QCA6174:
	case ATH10K_HW_QCA9377:
		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
					CORE_CTRL_ADDRESS);
		val |= CORE_CTRL_PCIE_REG_31_MASK;
		ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
				   CORE_CTRL_ADDRESS, val);
		break;
	case ATH10K_HW_QCA99X0:
	case ATH10K_HW_QCA9984:
	case ATH10K_HW_QCA9888:
	case ATH10K_HW_QCA4019:
		/* TODO: Find appropriate register configuration for QCA99X0
		 * to unmask irq/MSI.
		 */
		break;
	}
}

static void ath10k_pci_irq_disable(struct ath10k *ar)
{
	ath10k_ce_disable_interrupts(ar);
	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);
}

static void ath10k_pci_irq_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	synchronize_irq(ar_pci->pdev->irq);
}

static void ath10k_pci_irq_enable(struct ath10k *ar)
{
	ath10k_ce_enable_interrupts(ar);
	ath10k_pci_enable_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_unmask(ar);
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

	napi_enable(&ar->napi);

	ath10k_pci_irq_enable(ar);
	ath10k_pci_rx_post(ar);

	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				   ar_pci->link_ctl);

	return 0;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
	struct ath10k *ar;
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	int i;

	ar = pci_pipe->hif_ce_state;
	ce_pipe = pci_pipe->ce_hdl;
	ce_ring = ce_pipe->dest_ring;

	if (!ce_ring)
		return;

	if (!pci_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
	struct ath10k *ar;
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	int i;

	ar = pci_pipe->hif_ce_state;
	ce_pipe = pci_pipe->ce_hdl;
	ce_ring = ce_pipe->src_ring;

	if (!ce_ring)
		return;

	if (!pci_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		ath10k_htc_tx_completion_handler(ar, skb);
	}
}
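
/* Note the asymmetry above: rx cleanup unmaps and frees buffers directly,
 * while tx cleanup hands leftover skbs to ath10k_htc_tx_completion_handler()
 * so they are released through the normal completion path.
 */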
/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		struct ath10k_pci_pipe *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}

void ath10k_pci_ce_deinit(struct ath10k *ar)
{
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_deinit_pipe(ar, i);
}

void ath10k_pci_flush(struct ath10k *ar)
{
	ath10k_pci_rx_retry_sync(ar);
	ath10k_pci_buffer_cleanup(ar);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");

	/* Most likely the device has HTT Rx ring configured. The only way to
	 * prevent the device from accessing (and possibly corrupting) host
	 * memory is to reset the chip now.
	 *
	 * There's also no known way of masking MSI interrupts on the device.
	 * For ranged MSI the CE-related interrupts can be masked. However
	 * regardless how many MSI interrupts are assigned the first one
	 * is always used for firmware indications (crashes) and cannot be
	 * masked. To prevent the device from asserting the interrupt reset it
	 * before proceeding with cleanup.
	 */
	ath10k_pci_safe_chip_reset(ar);

	ath10k_pci_irq_disable(ar);
	ath10k_pci_irq_sync(ar);
	ath10k_pci_flush(ar);
	napi_synchronize(&ar->napi);
	napi_disable(&ar->napi);

	spin_lock_irqsave(&ar_pci->ps_lock, flags);
	WARN_ON(ar_pci->ps_wake_refcount > 0);
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
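
/* BMI exchange: the request is bounced through a DMA-mapped copy, an
 * optional response buffer is posted on the BMI rx pipe before the send is
 * queued, and ath10k_pci_bmi_wait() then polls both pipes for completion.
 */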
int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
				    void *req, u32 req_len,
				    void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	might_sleep();

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret) {
		ret = -EIO;
		goto err_dma;
	}

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret) {
			ret = -EIO;
			goto err_req;
		}

		xfer.wait_for_resp = true;
		xfer.resp_len = 0;

		ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
	}

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
	if (ret) {
		u32 unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		/* non-zero means we did not time out */
		ret = 0;
	}

err_resp:
	if (resp) {
		u32 unused_buffer;

		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		memcpy(resp, tresp, xfer.resp_len);
	}
err_dma:
	kfree(treq);
	kfree(tresp);

	return ret;
}

static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct bmi_xfer *xfer;

	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
		return;

	xfer->tx_done = true;
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct bmi_xfer *xfer;
	unsigned int nbytes;

	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
					  &nbytes))
		return;

	if (WARN_ON_ONCE(!xfer))
		return;

	if (!xfer->wait_for_resp) {
		ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
		return;
	}

	xfer->resp_len = nbytes;
	xfer->rx_done = true;
}
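
/* BMI completions are polled rather than interrupt driven: the wait loop
 * below keeps kicking both completion handlers and yields with schedule()
 * until the transfer finishes or BMI_COMMUNICATION_TIMEOUT_HZ elapses.
 */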
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer)
{
	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;

	while (time_before_eq(jiffies, timeout)) {
		ath10k_pci_bmi_send_done(tx_pipe);
		ath10k_pci_bmi_recv_data(rx_pipe);

		if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
			return 0;

		schedule();
	}

	return -ETIMEDOUT;
}

/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
	u32 addr, val;

	addr = SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS;
	val = ath10k_pci_read32(ar, addr);
	val |= CORE_CTRL_CPU_INTR_MASK;
	ath10k_pci_write32(ar, addr, val);

	return 0;
}

static int ath10k_pci_get_num_banks(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->pdev->device) {
	case QCA988X_2_0_DEVICE_ID:
	case QCA99X0_2_0_DEVICE_ID:
	case QCA9888_2_0_DEVICE_ID:
	case QCA9984_1_0_DEVICE_ID:
	case QCA9887_1_0_DEVICE_ID:
		return 1;
	case QCA6164_2_1_DEVICE_ID:
	case QCA6174_2_1_DEVICE_ID:
		switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
		case QCA6174_HW_1_0_CHIP_ID_REV:
		case QCA6174_HW_1_1_CHIP_ID_REV:
		case QCA6174_HW_2_1_CHIP_ID_REV:
		case QCA6174_HW_2_2_CHIP_ID_REV:
			return 3;
		case QCA6174_HW_1_3_CHIP_ID_REV:
			return 2;
		case QCA6174_HW_3_0_CHIP_ID_REV:
		case QCA6174_HW_3_1_CHIP_ID_REV:
		case QCA6174_HW_3_2_CHIP_ID_REV:
			return 9;
		}
		break;
	case QCA9377_1_0_DEVICE_ID:
		return 4;
	}

	ath10k_warn(ar, "unknown number of banks, assuming 1\n");
	return 1;
}

static int ath10k_bus_get_num_banks(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	return ar_pci->bus_ops->get_num_banks(ar);
}
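
/* ath10k_pci_init_config() pushes the tables defined near the top of this
 * file to the target through the diagnostic window: it resolves
 * hi_interconnect_state to find struct pcie_state, writes
 * target_ce_config_wlan and target_service_to_ce_map_wlan, clears the PCIe
 * L1 config flag, programs early IRAM bank allocation and finally sets
 * HI_OPTION_EARLY_CFG_DONE.
 */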
int ath10k_pci_init_config(struct ath10k *ar)
{
	u32 interconnect_targ_addr;
	u32 pcie_state_targ_addr = 0;
	u32 pipe_cfg_targ_addr = 0;
	u32 svc_to_pipe_map = 0;
	u32 pcie_config_flags = 0;
	u32 ealloc_value;
	u32 ealloc_targ_addr;
	u32 flag2_value;
	u32 flag2_targ_addr;
	int ret = 0;

	/* Download to Target the CE Config and the service-to-CE map */
	interconnect_targ_addr =
		host_interest_item_address(HI_ITEM(hi_interconnect_state));

	/* Supply Target-side CE configuration */
	ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
				     &pcie_state_targ_addr);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
		return ret;
	}

	if (pcie_state_targ_addr == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid pcie state addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   pipe_cfg_addr)),
				     &pipe_cfg_targ_addr);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
		return ret;
	}

	if (pipe_cfg_targ_addr == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid pipe cfg addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
					target_ce_config_wlan,
					sizeof(struct ce_pipe_config) *
					NUM_TARGET_CE_CONFIG_WLAN);

	if (ret != 0) {
		ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   svc_to_pipe_map)),
				     &svc_to_pipe_map);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
		return ret;
	}

	if (svc_to_pipe_map == 0) {
		ret = -EIO;
		ath10k_err(ar, "Invalid svc_to_pipe map\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
					target_service_to_ce_map_wlan,
					sizeof(target_service_to_ce_map_wlan));
	if (ret != 0) {
		ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   config_flags)),
				     &pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
		return ret;
	}

	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

	ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
					   offsetof(struct pcie_state,
						    config_flags)),
				      pcie_config_flags);
	if (ret != 0) {
		ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
		return ret;
	}

	/* configure early allocation */
	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

	ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
		return ret;
	}

	/* first bank is switched to IRAM */
	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
			 HI_EARLY_ALLOC_MAGIC_MASK);
	ealloc_value |= ((ath10k_bus_get_num_banks(ar) <<
			  HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
		return ret;
	}

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));

	ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to get option val: %d\n", ret);
		return ret;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;

	ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
	if (ret != 0) {
		ath10k_err(ar, "Failed to set option val: %d\n", ret);
		return ret;
	}

	return 0;
}
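/*
 * Note: this mutates the global host/target CE configuration tables,
 * so it must run before ath10k_pci_alloc_pipes() consumes them; see
 * ath10k_pci_setup_resource() below.
 */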
static void ath10k_pci_override_ce_config(struct ath10k *ar)
{
	struct ce_attr *attr;
	struct ce_pipe_config *config;

	/* For QCA6174 we're overriding the Copy Engine 5 configuration,
	 * since it is currently used for another feature.
	 */

	/* Override Host's Copy Engine 5 configuration */
	attr = &host_ce_config_wlan[5];
	attr->src_sz_max = 0;
	attr->dest_nentries = 0;

	/* Override Target firmware's Copy Engine configuration */
	config = &target_ce_config_wlan[5];
	config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
	config->nbytes_max = __cpu_to_le32(2048);

	/* Map from service/endpoint to Copy Engine */
	target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1);
}

int ath10k_pci_alloc_pipes(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe;
	int i, ret;

	for (i = 0; i < CE_COUNT; i++) {
		pipe = &ar_pci->pipe_info[i];
		pipe->ce_hdl = &ar_pci->ce_states[i];
		pipe->pipe_num = i;
		pipe->hif_ce_state = ar;

		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
		if (ret) {
			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}

		/* Last CE is Diagnostic Window */
		if (i == CE_DIAG_PIPE) {
			ar_pci->ce_diag = pipe->ce_hdl;
			continue;
		}

		pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
	}

	return 0;
}

void ath10k_pci_free_pipes(struct ath10k *ar)
{
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_free_pipe(ar, i);
}

int ath10k_pci_init_pipes(struct ath10k *ar)
{
	int i, ret;

	for (i = 0; i < CE_COUNT; i++) {
		ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
		if (ret) {
			ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}
	}

	return 0;
}

static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
{
	return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
	       FW_IND_EVENT_PENDING;
}

static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
	val &= ~FW_IND_EVENT_PENDING;
	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
}

static bool ath10k_pci_has_device_gone(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
	return (val == 0xffffffff);
}

/* this function effectively clears target memory controller assert line */
static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val | SOC_RESET_CONTROL_SI0_RST_MASK);
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

	msleep(10);

	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

	msleep(10);
}

static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
{
	u32 val;

	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
}

static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);

	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val | SOC_RESET_CONTROL_CE_RST_MASK);
	msleep(10);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val & ~SOC_RESET_CONTROL_CE_RST_MASK);
}

static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_LF_TIMER_CONTROL0_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
			   SOC_LF_TIMER_CONTROL0_ADDRESS,
			   val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
}
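/*
 * The warm reset below deliberately resets the target CPU twice: once
 * to quiesce it before the copy engine reset, and once afterwards,
 * re-initialising the host CE pipes and re-checking the firmware
 * indicator register after each pass.
 */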
static int ath10k_pci_warm_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");

	spin_lock_bh(&ar->data_lock);
	ar->stats.fw_warm_reset_counter++;
	spin_unlock_bh(&ar->data_lock);

	ath10k_pci_irq_disable(ar);

	/* Make sure the target CPU is not doing anything dangerous, e.g. if
	 * it were to access the copy engine while the host performs a copy
	 * engine reset it could confuse the PCIe controller to the point of
	 * bringing the host system to a complete stop (i.e. hang).
	 */
	ath10k_pci_warm_reset_si0(ar);
	ath10k_pci_warm_reset_cpu(ar);
	ath10k_pci_init_pipes(ar);
	ath10k_pci_wait_for_target_init(ar);

	ath10k_pci_warm_reset_clear_lf(ar);
	ath10k_pci_warm_reset_ce(ar);
	ath10k_pci_warm_reset_cpu(ar);
	ath10k_pci_init_pipes(ar);

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");

	return 0;
}

static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar)
{
	ath10k_pci_irq_disable(ar);
	return ath10k_pci_qca99x0_chip_reset(ar);
}

static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->pci_soft_reset)
		return -ENOTSUPP;

	return ar_pci->pci_soft_reset(ar);
}
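/*
 * QCA988x reset strategy: attempt a warm reset a few times, verifying
 * recovery by poking a host interest register through the copy engine,
 * and fall back to cold reset only if warm reset keeps failing (and
 * the reset_mode module parameter allows cold resets).
 */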
static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
{
	int i, ret;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");

	/* Some hardware revisions (e.g. CUS223v2) have issues with cold
	 * reset. It is thus preferred to use warm reset, which is safer but
	 * may not be able to recover the device from all possible failure
	 * scenarios.
	 *
	 * Warm reset doesn't always work on first try so attempt it a few
	 * times before giving up.
	 */
	for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
		ret = ath10k_pci_warm_reset(ar);
		if (ret) {
			ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
				    i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
				    ret);
			continue;
		}

		/* FIXME: Sometimes copy engine doesn't recover after warm
		 * reset. In most cases this needs cold reset. In some of these
		 * cases the device is in such a state that a cold reset may
		 * lock up the host.
		 *
		 * Reading any host interest register via copy engine is
		 * sufficient to verify if device is capable of booting
		 * firmware blob.
		 */
		ret = ath10k_pci_init_pipes(ar);
		if (ret) {
			ath10k_warn(ar, "failed to init copy engine: %d\n",
				    ret);
			continue;
		}

		ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
					     &val);
		if (ret) {
			ath10k_warn(ar, "failed to poke copy engine: %d\n",
				    ret);
			continue;
		}

		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
		return 0;
	}

	if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
		ath10k_warn(ar, "refusing cold reset as requested\n");
		return -EPERM;
	}

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");

	return 0;
}

static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");

	/* FIXME: QCA6174 requires cold + warm reset to work. */

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ret = ath10k_pci_warm_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to warm reset: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");

	return 0;
}

static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");

	return 0;
}

static int ath10k_pci_chip_reset(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (WARN_ON(!ar_pci->pci_hard_reset))
		return -ENOTSUPP;

	return ar_pci->pci_hard_reset(ar);
}
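/*
 * Power-up order matters here: ASPM is disabled first, the chip is
 * reset into a known state, host CE pipes are initialised, the
 * target-side configuration is pushed, and only then is the target
 * CPU woken.
 */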
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");

	pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				  &ar_pci->link_ctl);
	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				   ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);

	/*
	 * Bring the target up cleanly.
	 *
	 * The target may be in an undefined state with an AUX-powered Target
	 * and a Host in WoW mode. If the Host crashes, loses power, or is
	 * restarted (without unloading the driver) then the Target is left
	 * (aux) powered and running. On a subsequent driver load, the Target
	 * is in an unexpected state. We try to catch that here in order to
	 * reset the Target and retry the probe.
	 */
	ret = ath10k_pci_chip_reset(ar);
	if (ret) {
		if (ath10k_pci_has_fw_crashed(ar)) {
			ath10k_warn(ar, "firmware crashed during chip reset\n");
			ath10k_pci_fw_crashed_clear(ar);
			ath10k_pci_fw_crashed_dump(ar);
		}

		ath10k_err(ar, "failed to reset chip: %d\n", ret);
		goto err_sleep;
	}

	ret = ath10k_pci_init_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
		goto err_sleep;
	}

	ret = ath10k_pci_init_config(ar);
	if (ret) {
		ath10k_err(ar, "failed to setup init config: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_wake_target_cpu(ar);
	if (ret) {
		ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
		goto err_ce;
	}

	return 0;

err_ce:
	ath10k_pci_ce_deinit(ar);

err_sleep:
	return ret;
}

void ath10k_pci_hif_power_down(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");

	/* Currently hif_power_up performs effectively a reset and hif_stop
	 * resets the chip as well so there's no point in resetting here.
	 */
}

#ifdef CONFIG_PM

static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
	/* The grace timer can still be counting down and ar->ps_awake be true.
	 * It is known that the device may be asleep after resuming regardless
	 * of the SoC powersave state before suspending. Hence make sure the
	 * device is asleep before proceeding.
	 */
	ath10k_pci_sleep_sync(ar);

	return 0;
}

static int ath10k_pci_hif_resume(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;
	int ret = 0;

	ret = ath10k_pci_force_wake(ar);
	if (ret) {
		ath10k_err(ar, "failed to wake up target: %d\n", ret);
		return ret;
	}

	/* Suspend/Resume resets the PCI configuration space, so we have to
	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
	 * from interfering with C3 CPU state. pci_restore_state won't help
	 * here since it only restores the first 64 bytes of the PCI config
	 * header.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	return ret;
}
#endif
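/*
 * Calibration data is validated with a 16-bit XOR checksum: XOR-ing
 * every little-endian 16-bit word of a valid image, checksum word
 * included, must yield 0xffff. For example, a (hypothetical) two-word
 * image { 0x1234, 0xedcb } is valid since 0x1234 ^ 0xedcb == 0xffff.
 */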
static bool ath10k_pci_validate_cal(void *data, size_t size)
{
	__le16 *cal_words = data;
	u16 checksum = 0;
	size_t i;

	if (size % 2 != 0)
		return false;

	for (i = 0; i < size / 2; i++)
		checksum ^= le16_to_cpu(cal_words[i]);

	return checksum == 0xffff;
}

static void ath10k_pci_enable_eeprom(struct ath10k *ar)
{
	/* Enable SI clock */
	ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0);

	/* Configure GPIOs for I2C operation */
	ath10k_pci_write32(ar,
			   GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
			   4 * QCA9887_1_0_I2C_SDA_GPIO_PIN,
			   SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG,
			      GPIO_PIN0_CONFIG) |
			   SM(1, GPIO_PIN0_PAD_PULL));

	ath10k_pci_write32(ar,
			   GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
			   4 * QCA9887_1_0_SI_CLK_GPIO_PIN,
			   SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) |
			   SM(1, GPIO_PIN0_PAD_PULL));

	ath10k_pci_write32(ar,
			   GPIO_BASE_ADDRESS +
			   QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS,
			   1u << QCA9887_1_0_SI_CLK_GPIO_PIN);

	/* In Swift ASIC - EEPROM clock will be (110MHz/512) = 214KHz */
	ath10k_pci_write32(ar,
			   SI_BASE_ADDRESS + SI_CONFIG_OFFSET,
			   SM(1, SI_CONFIG_ERR_INT) |
			   SM(1, SI_CONFIG_BIDIR_OD_DATA) |
			   SM(1, SI_CONFIG_I2C) |
			   SM(1, SI_CONFIG_POS_SAMPLE) |
			   SM(1, SI_CONFIG_INACTIVE_DATA) |
			   SM(1, SI_CONFIG_INACTIVE_CLK) |
			   SM(8, SI_CONFIG_DIVIDER));
}
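/*
 * Read a single EEPROM byte over the SI (serial interface) block:
 * queue the device-select byte and 16-bit address, start a 4-byte TX /
 * 1-byte RX transaction and poll SI_CS_DONE_INT. The poll budget is
 * 100000 iterations of udelay(10), i.e. roughly one second.
 */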
static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out)
{
	u32 reg;
	int wait_limit;

	/* set the device select byte for the read operation */
	reg = QCA9887_EEPROM_SELECT_READ |
	      SM(addr, QCA9887_EEPROM_ADDR_LO) |
	      SM(addr >> 8, QCA9887_EEPROM_ADDR_HI);
	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg);

	/* write transmit data, transfer length, and START bit */
	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET,
			   SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) |
			   SM(4, SI_CS_TX_CNT));

	/* wait max 1 sec */
	wait_limit = 100000;

	/* wait for SI_CS_DONE_INT */
	do {
		reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET);
		if (MS(reg, SI_CS_DONE_INT))
			break;

		wait_limit--;
		udelay(10);
	} while (wait_limit > 0);

	if (!MS(reg, SI_CS_DONE_INT)) {
		ath10k_err(ar, "timeout while reading device EEPROM at %04x\n",
			   addr);
		return -ETIMEDOUT;
	}

	/* clear SI_CS_DONE_INT */
	ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg);

	if (MS(reg, SI_CS_DONE_ERR)) {
		ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr);
		return -EIO;
	}

	/* extract receive data */
	reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET);
	*out = reg;

	return 0;
}

static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data,
					   size_t *data_len)
{
	u8 *caldata = NULL;
	size_t calsize, i;
	int ret;

	if (!QCA_REV_9887(ar))
		return -EOPNOTSUPP;

	calsize = ar->hw_params.cal_data_len;
	caldata = kmalloc(calsize, GFP_KERNEL);
	if (!caldata)
		return -ENOMEM;

	ath10k_pci_enable_eeprom(ar);

	for (i = 0; i < calsize; i++) {
		ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]);
		if (ret)
			goto err_free;
	}

	if (!ath10k_pci_validate_cal(caldata, calsize))
		goto err_free;

	*data = caldata;
	*data_len = calsize;

	return 0;

err_free:
	kfree(caldata);

	return -EINVAL;
}

static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
	.tx_sg = ath10k_pci_hif_tx_sg,
	.diag_read = ath10k_pci_hif_diag_read,
	.diag_write = ath10k_pci_diag_write_mem,
	.exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
	.start = ath10k_pci_hif_start,
	.stop = ath10k_pci_hif_stop,
	.map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
	.get_default_pipe = ath10k_pci_hif_get_default_pipe,
	.send_complete_check = ath10k_pci_hif_send_complete_check,
	.get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
	.power_up = ath10k_pci_hif_power_up,
	.power_down = ath10k_pci_hif_power_down,
	.read32 = ath10k_pci_read32,
	.write32 = ath10k_pci_write32,
#ifdef CONFIG_PM
	.suspend = ath10k_pci_hif_suspend,
	.resume = ath10k_pci_hif_resume,
#endif
	.fetch_cal_eeprom = ath10k_pci_hif_fetch_cal_eeprom,
};

/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	if (ath10k_pci_has_device_gone(ar))
		return IRQ_NONE;

	ret = ath10k_pci_force_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
		return IRQ_NONE;
	}

	if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) &&
	    !ath10k_pci_irq_pending(ar))
		return IRQ_NONE;

	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);
	napi_schedule(&ar->napi);

	return IRQ_HANDLED;
}
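/*
 * NAPI poll callback: service pending copy engine completions and HTT
 * tx/rx work and return the amount of work done. Returning less than
 * the budget completes the NAPI cycle and re-enables interrupts, so
 * the CE interrupt summary is re-checked first (see the comment in the
 * body) to avoid losing events under MSI.
 */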
static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
{
	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
	int done = 0;

	if (ath10k_pci_has_fw_crashed(ar)) {
		ath10k_pci_fw_crashed_clear(ar);
		ath10k_pci_fw_crashed_dump(ar);
		napi_complete(ctx);
		return done;
	}

	ath10k_ce_per_engine_service_any(ar);

	done = ath10k_htt_txrx_compl_task(ar, budget);

	if (done < budget) {
		napi_complete_done(ctx, done);
		/* With MSI, interrupts can arrive while the NAPI poll is in
		 * progress; any raised after all copy engine pipes have been
		 * processed would otherwise never be handled again, which was
		 * seen to stall the boot sequence on x86 platforms. So before
		 * re-enabling interrupts it is safer to check for pending
		 * interrupts and service them immediately.
		 */
		if (CE_INTERRUPT_SUMMARY(ar)) {
			napi_reschedule(ctx);
			goto out;
		}
		ath10k_pci_enable_legacy_irq(ar);
		ath10k_pci_irq_msi_fw_unmask(ar);
	}

out:
	return done;
}

static int ath10k_pci_request_irq_msi(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_request_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->oper_irq_mode) {
	case ATH10K_PCI_IRQ_LEGACY:
		return ath10k_pci_request_irq_legacy(ar);
	case ATH10K_PCI_IRQ_MSI:
		return ath10k_pci_request_irq_msi(ar);
	default:
		return -EINVAL;
	}
}

static void ath10k_pci_free_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	free_irq(ar_pci->pdev->irq, ar);
}

void ath10k_pci_init_napi(struct ath10k *ar)
{
	netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll,
		       ATH10K_NAPI_BUDGET);
}
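/*
 * Select the operational interrupt mode: MSI is preferred and tried
 * first unless the irq_mode module parameter forces legacy interrupts;
 * legacy (INTx) is the fallback.
 */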
static int ath10k_pci_init_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_pci_init_napi(ar);

	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
		ath10k_info(ar, "limiting irq mode to: %d\n",
			    ath10k_pci_irq_mode);

	/* Try MSI */
	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
		ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
		ret = pci_enable_msi(ar_pci->pdev);
		if (ret == 0)
			return 0;

		/* fall-through */
	}

	/* Try legacy irq
	 *
	 * A potential race occurs here: The CORE_BASE write
	 * depends on target correctly decoding AXI address but
	 * host won't know when target writes BAR to CORE_CTRL.
	 * This write might get lost if target has NOT written BAR.
	 * For now, fix the race by repeating the write in below
	 * synchronization checking.
	 */
	ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	return 0;
}

static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
}

static int ath10k_pci_deinit_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->oper_irq_mode) {
	case ATH10K_PCI_IRQ_LEGACY:
		ath10k_pci_deinit_irq_legacy(ar);
		break;
	default:
		pci_disable_msi(ar_pci->pdev);
		break;
	}

	return 0;
}

int ath10k_pci_wait_for_target_init(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long timeout;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");

	timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);

	do {
		val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);

		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
			   val);

		/* target should never return this */
		if (val == 0xffffffff)
			continue;

		/* the device has crashed so don't bother trying anymore */
		if (val & FW_IND_EVENT_PENDING)
			break;

		if (val & FW_IND_INITIALIZED)
			break;

		if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
			/* Fix potential race by repeating CORE_BASE writes */
			ath10k_pci_enable_legacy_irq(ar);

		mdelay(10);
	} while (time_before(jiffies, timeout));

	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);

	if (val == 0xffffffff) {
		ath10k_err(ar, "failed to read device register, device is gone\n");
		return -EIO;
	}

	if (val & FW_IND_EVENT_PENDING) {
		ath10k_warn(ar, "device has crashed during init\n");
		return -ECOMM;
	}

	if (!(val & FW_IND_INITIALIZED)) {
		ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
			   val);
		return -ETIMEDOUT;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
	return 0;
}

static int ath10k_pci_cold_reset(struct ath10k *ar)
{
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");

	spin_lock_bh(&ar->data_lock);

	ar->stats.fw_cold_reset_counter++;

	spin_unlock_bh(&ar->data_lock);

	/* Put Target, including PCIe, into RESET. */
	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	/* After writing into SOC_GLOBAL_RESET to put device into
	 * reset and pulling out of reset pcie may not be stable
	 * for any immediate pcie register access and cause bus error,
	 * add delay before any pcie access request to fix this issue.
	 */
	msleep(20);

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	msleep(20);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");

	return 0;
}
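/*
 * Claim PCI resources: enable the device, reserve the register BAR
 * (BAR_NUM), force 32-bit DMA masks (the target only does 32-bit DMA),
 * enable bus mastering and iomap the register space.
 * ath10k_pci_release() undoes these steps in reverse order.
 */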
static int ath10k_pci_claim(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	int ret;

	pci_set_drvdata(pdev, ar);

	ret = pci_enable_device(pdev);
	if (ret) {
		ath10k_err(ar, "failed to enable pci device: %d\n", ret);
		return ret;
	}

	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
			   ret);
		goto err_device;
	}

	/* Target expects 32 bit DMA. Enforce it. */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
		goto err_region;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
			   ret);
		goto err_region;
	}

	pci_set_master(pdev);

	/* Arrange for access to Target SoC registers. */
	ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
	ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!ar_pci->mem) {
		ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
		ret = -EIO;
		goto err_master;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%pK\n", ar_pci->mem);
	return 0;

err_master:
	pci_clear_master(pdev);

err_region:
	pci_release_region(pdev, BAR_NUM);

err_device:
	pci_disable_device(pdev);

	return ret;
}

static void ath10k_pci_release(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;

	pci_iounmap(pdev, ar_pci->mem);
	pci_release_region(pdev, BAR_NUM);
	pci_clear_master(pdev);
	pci_disable_device(pdev);
}

static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
{
	const struct ath10k_pci_supp_chip *supp_chip;
	int i;
	u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);

	for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
		supp_chip = &ath10k_pci_supp_chips[i];

		if (supp_chip->dev_id == dev_id &&
		    supp_chip->rev_id == rev_id)
			return true;
	}

	return false;
}

int ath10k_pci_setup_resource(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_init(&ar_pci->ce_lock);
	spin_lock_init(&ar_pci->ps_lock);

	setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
		    (unsigned long)ar);

	if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
		ath10k_pci_override_ce_config(ar);

	ret = ath10k_pci_alloc_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
			   ret);
		return ret;
	}

	return 0;
}

void ath10k_pci_release_resource(struct ath10k *ar)
{
	ath10k_pci_rx_retry_sync(ar);
	netif_napi_del(&ar->napi);
	ath10k_pci_ce_deinit(ar);
	ath10k_pci_free_pipes(ar);
}

static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
	.read32 = ath10k_bus_pci_read32,
	.write32 = ath10k_bus_pci_write32,
	.get_num_banks = ath10k_pci_get_num_banks,
};
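/*
 * Per-device reset handlers and target-CPU-to-CE address translation
 * are selected from the PCI device ID below; the exact chip revision
 * is only known after the first chip reset, when SOC_CHIP_ID_ADDRESS
 * becomes readable and is checked against ath10k_pci_supp_chips[].
 */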
static int ath10k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
{
	int ret = 0;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	enum ath10k_hw_rev hw_rev;
	u32 chip_id;
	bool pci_ps;
	int (*pci_soft_reset)(struct ath10k *ar);
	int (*pci_hard_reset)(struct ath10k *ar);
	u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);

	switch (pci_dev->device) {
	case QCA988X_2_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA988X;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_warm_reset;
		pci_hard_reset = ath10k_pci_qca988x_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
		break;
	case QCA9887_1_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA9887;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_warm_reset;
		pci_hard_reset = ath10k_pci_qca988x_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
		break;
	case QCA6164_2_1_DEVICE_ID:
	case QCA6174_2_1_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA6174;
		pci_ps = true;
		pci_soft_reset = ath10k_pci_warm_reset;
		pci_hard_reset = ath10k_pci_qca6174_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
		break;
	case QCA99X0_2_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA99X0;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
		break;
	case QCA9984_1_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA9984;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
		break;
	case QCA9888_2_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA9888;
		pci_ps = false;
		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
		break;
	case QCA9377_1_0_DEVICE_ID:
		hw_rev = ATH10K_HW_QCA9377;
		pci_ps = true;
		pci_soft_reset = NULL;
		pci_hard_reset = ath10k_pci_qca6174_chip_reset;
		targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
		break;
	default:
		WARN_ON(1);
		return -ENOTSUPP;
	}

	ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
				hw_rev, &ath10k_pci_hif_ops);
	if (!ar) {
		dev_err(&pdev->dev, "failed to allocate core\n");
		return -ENOMEM;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
		   pdev->vendor, pdev->device,
		   pdev->subsystem_vendor, pdev->subsystem_device);

	ar_pci = ath10k_pci_priv(ar);
	ar_pci->pdev = pdev;
	ar_pci->dev = &pdev->dev;
	ar_pci->ar = ar;
	ar->dev_id = pci_dev->device;
	ar_pci->pci_ps = pci_ps;
	ar_pci->bus_ops = &ath10k_pci_bus_ops;
	ar_pci->pci_soft_reset = pci_soft_reset;
	ar_pci->pci_hard_reset = pci_hard_reset;
	ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;

	ar->id.vendor = pdev->vendor;
	ar->id.device = pdev->device;
	ar->id.subsystem_vendor = pdev->subsystem_vendor;
	ar->id.subsystem_device = pdev->subsystem_device;

	setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer,
		    (unsigned long)ar);

	ret = ath10k_pci_setup_resource(ar);
	if (ret) {
		ath10k_err(ar, "failed to setup resource: %d\n", ret);
		goto err_core_destroy;
	}

	ret = ath10k_pci_claim(ar);
	if (ret) {
		ath10k_err(ar, "failed to claim device: %d\n", ret);
		goto err_free_pipes;
	}

	ret = ath10k_pci_force_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake up device: %d\n", ret);
		goto err_sleep;
	}

	ath10k_pci_ce_deinit(ar);
	ath10k_pci_irq_disable(ar);

	ret = ath10k_pci_init_irq(ar);
	if (ret) {
		ath10k_err(ar, "failed to init irqs: %d\n", ret);
		goto err_sleep;
	}

	ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n",
		    ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode,
		    ath10k_pci_irq_mode, ath10k_pci_reset_mode);

	ret = ath10k_pci_request_irq(ar);
	if (ret) {
		ath10k_warn(ar, "failed to request irqs: %d\n", ret);
		goto err_deinit_irq;
	}

	ret = ath10k_pci_chip_reset(ar);
	if (ret) {
		ath10k_err(ar, "failed to reset chip: %d\n", ret);
		goto err_free_irq;
	}

	chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
	if (chip_id == 0xffffffff) {
		ath10k_err(ar, "failed to get chip id\n");
		ret = -ENODEV;
		goto err_free_irq;
	}

	if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
		ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
			   pdev->device, chip_id);
		ret = -ENODEV;
		goto err_free_irq;
	}

	ret = ath10k_core_register(ar, chip_id);
	if (ret) {
		ath10k_err(ar, "failed to register driver core: %d\n", ret);
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	ath10k_pci_free_irq(ar);
	ath10k_pci_rx_retry_sync(ar);

err_deinit_irq:
	ath10k_pci_deinit_irq(ar);

err_sleep:
	ath10k_pci_sleep_sync(ar);
	ath10k_pci_release(ar);

err_free_pipes:
	ath10k_pci_free_pipes(ar);

err_core_destroy:
	ath10k_core_destroy(ar);

	return ret;
}
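/*
 * Teardown mirrors probe in reverse: unregister the core first so no
 * new work is scheduled, then free and disable interrupts before
 * releasing pipes, iomem and the PCI device itself.
 */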
static void ath10k_pci_remove(struct pci_dev *pdev)
{
	struct ath10k *ar = pci_get_drvdata(pdev);
	struct ath10k_pci *ar_pci;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");

	if (!ar)
		return;

	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci)
		return;

	ath10k_core_unregister(ar);
	ath10k_pci_free_irq(ar);
	ath10k_pci_deinit_irq(ar);
	ath10k_pci_release_resource(ar);
	ath10k_pci_sleep_sync(ar);
	ath10k_pci_release(ar);
	ath10k_core_destroy(ar);
}

MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static struct pci_driver ath10k_pci_driver = {
	.name = "ath10k_pci",
	.id_table = ath10k_pci_id_table,
	.probe = ath10k_pci_probe,
	.remove = ath10k_pci_remove,
};

static int __init ath10k_pci_init(void)
{
	int ret;

	ret = pci_register_driver(&ath10k_pci_driver);
	if (ret)
		printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
		       ret);

	ret = ath10k_ahb_init();
	if (ret)
		printk(KERN_ERR "ahb init failed: %d\n", ret);

	return ret;
}
module_init(ath10k_pci_init);

static void __exit ath10k_pci_exit(void)
{
	pci_unregister_driver(&ath10k_pci_driver);
	ath10k_ahb_exit();
}

module_exit(ath10k_pci_exit);

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices");
MODULE_LICENSE("Dual BSD/GPL");
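/*
 * The MODULE_FIRMWARE() entries below don't load anything themselves;
 * they advertise the firmware and board files this driver may request
 * at runtime so tools like modinfo and initramfs generators can pick
 * them up.
 */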
/* QCA988x 2.0 firmware files */
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA9887 1.0 firmware files */
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" QCA9887_HW_1_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA6174 2.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA6174 3.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);

/* QCA9377 1.0 firmware files */
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE);