// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/dma-mapping.h>
#include <linux/of_address.h>
#include <linux/iommu.h>
#include "ahb.h"
#include "debug.h"
#include "hif.h"
#include <linux/remoteproc.h>
#include "pcic.h"
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>

static const struct of_device_id ath11k_ahb_of_match[] = {
	/* TODO: Should we change the compatible string to something similar
	 * to one that ath10k uses?
	 */
	{ .compatible = "qcom,ipq8074-wifi",
	  .data = (void *)ATH11K_HW_IPQ8074,
	},
	{ .compatible = "qcom,ipq6018-wifi",
	  .data = (void *)ATH11K_HW_IPQ6018_HW10,
	},
	{ .compatible = "qcom,wcn6750-wifi",
	  .data = (void *)ATH11K_HW_WCN6750_HW10,
	},
	{ .compatible = "qcom,ipq5018-wifi",
	  .data = (void *)ATH11K_HW_IPQ5018_HW10,
	},
	{ }
};

MODULE_DEVICE_TABLE(of, ath11k_ahb_of_match);

#define ATH11K_IRQ_CE0_OFFSET 4

static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
	"misc-pulse1",
	"misc-latch",
	"sw-exception",
	"watchdog",
	"ce0",
	"ce1",
	"ce2",
	"ce3",
	"ce4",
	"ce5",
	"ce6",
	"ce7",
	"ce8",
	"ce9",
	"ce10",
	"ce11",
	"host2wbm-desc-feed",
	"host2reo-re-injection",
	"host2reo-command",
	"host2rxdma-monitor-ring3",
	"host2rxdma-monitor-ring2",
	"host2rxdma-monitor-ring1",
	"reo2ost-exception",
	"wbm2host-rx-release",
	"reo2host-status",
	"reo2host-destination-ring4",
	"reo2host-destination-ring3",
	"reo2host-destination-ring2",
	"reo2host-destination-ring1",
	"rxdma2host-monitor-destination-mac3",
	"rxdma2host-monitor-destination-mac2",
	"rxdma2host-monitor-destination-mac1",
	"ppdu-end-interrupts-mac3",
	"ppdu-end-interrupts-mac2",
	"ppdu-end-interrupts-mac1",
	"rxdma2host-monitor-status-ring-mac3",
	"rxdma2host-monitor-status-ring-mac2",
	"rxdma2host-monitor-status-ring-mac1",
	"host2rxdma-host-buf-ring-mac3",
	"host2rxdma-host-buf-ring-mac2",
	"host2rxdma-host-buf-ring-mac1",
	"rxdma2host-destination-ring-mac3",
	"rxdma2host-destination-ring-mac2",
	"rxdma2host-destination-ring-mac1",
	"host2tcl-input-ring4",
	"host2tcl-input-ring3",
	"host2tcl-input-ring2",
	"host2tcl-input-ring1",
	"wbm2host-tx-completions-ring3",
	"wbm2host-tx-completions-ring2",
	"wbm2host-tx-completions-ring1",
	"tcl2host-status-ring",
};

/* enum ext_irq_num - irq numbers that can be used by external modules
 * like datapath
 */
enum ext_irq_num {
	host2wbm_desc_feed = 16,
	host2reo_re_injection,
	host2reo_command,
	host2rxdma_monitor_ring3,
	host2rxdma_monitor_ring2,
	host2rxdma_monitor_ring1,
	reo2host_exception,
	wbm2host_rx_release,
	reo2host_status,
	reo2host_destination_ring4,
	reo2host_destination_ring3,
	reo2host_destination_ring2,
	reo2host_destination_ring1,
	rxdma2host_monitor_destination_mac3,
	rxdma2host_monitor_destination_mac2,
	rxdma2host_monitor_destination_mac1,
	ppdu_end_interrupts_mac3,
	ppdu_end_interrupts_mac2,
	ppdu_end_interrupts_mac1,
	rxdma2host_monitor_status_ring_mac3,
	rxdma2host_monitor_status_ring_mac2,
	rxdma2host_monitor_status_ring_mac1,
	host2rxdma_host_buf_ring_mac3,
	host2rxdma_host_buf_ring_mac2,
	host2rxdma_host_buf_ring_mac1,
	rxdma2host_destination_ring_mac3,
	rxdma2host_destination_ring_mac2,
	rxdma2host_destination_ring_mac1,
	host2tcl_input_ring4,
	host2tcl_input_ring3,
	host2tcl_input_ring2,
	host2tcl_input_ring1,
	wbm2host_tx_completions_ring3,
	wbm2host_tx_completions_ring2,
	wbm2host_tx_completions_ring1,
	tcl2host_status_ring,
};

static int
ath11k_ahb_get_msi_irq_wcn6750(struct ath11k_base *ab, unsigned int vector)
{
	return ab->pci.msi.irqs[vector];
}

static inline u32
ath11k_ahb_get_window_start_wcn6750(struct ath11k_base *ab, u32 offset)
{
	u32 window_start = 0;

	/* If offset lies within DP register range, use 1st window */
	if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
		window_start = ATH11K_PCI_WINDOW_START;
	/* If offset lies within CE register range, use 2nd window */
	else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
		 ATH11K_PCI_WINDOW_RANGE_MASK)
		window_start = 2 * ATH11K_PCI_WINDOW_START;

	return window_start;
}

static void
ath11k_ahb_window_write32_wcn6750(struct ath11k_base *ab, u32 offset, u32 value)
{
	u32 window_start;

	/* WCN6750 uses static window based register access */
	window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);

	iowrite32(value, ab->mem + window_start +
		  (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
}

static u32 ath11k_ahb_window_read32_wcn6750(struct ath11k_base *ab, u32 offset)
{
	u32 window_start;
	u32 val;

	/* WCN6750 uses static window based register access */
	window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);

	val = ioread32(ab->mem + window_start +
		       (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
	return val;
}

static const struct ath11k_pci_ops ath11k_ahb_pci_ops_wcn6750 = {
	.wakeup = NULL,
	.release = NULL,
	.get_msi_irq = ath11k_ahb_get_msi_irq_wcn6750,
	.window_write32 = ath11k_ahb_window_write32_wcn6750,
	.window_read32 = ath11k_ahb_window_read32_wcn6750,
};

static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset)
{
	return ioread32(ab->mem + offset);
}

static inline void ath11k_ahb_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
	iowrite32(value, ab->mem + offset);
}

static void ath11k_ahb_kill_tasklets(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];

		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		tasklet_kill(&ce_pipe->intr_tq);
	}
}

static void ath11k_ahb_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
{
	int i;

	for (i = 0; i < irq_grp->num_irq; i++)
		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}

static void __ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		ath11k_ahb_ext_grp_disable(irq_grp);

		if (irq_grp->napi_enabled) {
			napi_synchronize(&irq_grp->napi);
			napi_disable(&irq_grp->napi);
			irq_grp->napi_enabled = false;
		}
	}
}

static void ath11k_ahb_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
{
	int i;

	for (i = 0; i < irq_grp->num_irq; i++)
		enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}

static void ath11k_ahb_setbit32(struct ath11k_base *ab, u8 bit, u32 offset)
{
	u32 val;

	val = ath11k_ahb_read32(ab, offset);
	ath11k_ahb_write32(ab, offset, val | BIT(bit));
}

static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset)
{
	u32 val;

	val = ath11k_ahb_read32(ab, offset);
	ath11k_ahb_write32(ab, offset, val & ~BIT(bit));
}

static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
{
	const struct ce_attr *ce_attr;
	const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
	u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;

	ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
	ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
	ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);

	ce_attr = &ab->hw_params.host_ce_config[ce_id];
	if (ce_attr->src_nentries)
		ath11k_ahb_setbit32(ab, ce_id, ie1_reg_addr);

	if (ce_attr->dest_nentries) {
		ath11k_ahb_setbit32(ab, ce_id, ie2_reg_addr);
		ath11k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
				    ie3_reg_addr);
	}
}

static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
{
	const struct ce_attr *ce_attr;
	const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
	u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;

	ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
	ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
	ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);

	ce_attr = &ab->hw_params.host_ce_config[ce_id];
	if (ce_attr->src_nentries)
		ath11k_ahb_clearbit32(ab, ce_id, ie1_reg_addr);

	if (ce_attr->dest_nentries) {
		ath11k_ahb_clearbit32(ab, ce_id, ie2_reg_addr);
		ath11k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
				      ie3_reg_addr);
	}
}

static void ath11k_ahb_sync_ce_irqs(struct ath11k_base *ab)
{
	int i;
	int irq_idx;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
		synchronize_irq(ab->irq_num[irq_idx]);
	}
}

static void ath11k_ahb_sync_ext_irqs(struct ath11k_base *ab)
{
	int i, j;
	int irq_idx;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		for (j = 0; j < irq_grp->num_irq; j++) {
			irq_idx = irq_grp->irqs[j];
			synchronize_irq(ab->irq_num[irq_idx]);
		}
	}
}

static void ath11k_ahb_ce_irqs_enable(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		ath11k_ahb_ce_irq_enable(ab, i);
	}
}

static void ath11k_ahb_ce_irqs_disable(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		ath11k_ahb_ce_irq_disable(ab, i);
	}
}

static int ath11k_ahb_start(struct ath11k_base *ab)
{
	ath11k_ahb_ce_irqs_enable(ab);
	ath11k_ce_rx_post_buf(ab);

	return 0;
}

static void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		if (!irq_grp->napi_enabled) {
			dev_set_threaded(&irq_grp->napi_ndev, true);
			napi_enable(&irq_grp->napi);
			irq_grp->napi_enabled = true;
		}
		ath11k_ahb_ext_grp_enable(irq_grp);
	}
}

static void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
{
	__ath11k_ahb_ext_irq_disable(ab);
	ath11k_ahb_sync_ext_irqs(ab);
}

static void ath11k_ahb_stop(struct ath11k_base *ab)
{
	if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		ath11k_ahb_ce_irqs_disable(ab);
	ath11k_ahb_sync_ce_irqs(ab);
	ath11k_ahb_kill_tasklets(ab);
	del_timer_sync(&ab->rx_replenish_retry);
	ath11k_ce_cleanup_pipes(ab);
}

static int ath11k_ahb_power_up(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
	int ret;

	ret = rproc_boot(ab_ahb->tgt_rproc);
	if (ret)
		ath11k_err(ab, "failed to boot the remote processor Q6\n");

	return ret;
}

static void ath11k_ahb_power_down(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);

	rproc_shutdown(ab_ahb->tgt_rproc);
}

static int ath11k_ahb_fwreset_from_cold_boot(struct ath11k_base *ab)
{
	int timeout;

	if (ath11k_cold_boot_cal == 0 || ab->qmi.cal_done ||
	    ab->hw_params.cold_boot_calib == 0 ||
	    ab->hw_params.cbcal_restart_fw == 0)
		return 0;

	ath11k_dbg(ab, ATH11K_DBG_AHB, "wait for cold boot done\n");
	timeout = wait_event_timeout(ab->qmi.cold_boot_waitq,
				     (ab->qmi.cal_done == 1),
				     ATH11K_COLD_BOOT_FW_RESET_DELAY);
	if (timeout <= 0) {
		ath11k_cold_boot_cal = 0;
		ath11k_warn(ab, "cold boot calibration timed out\n");
	}

	/* reset the firmware */
	ath11k_ahb_power_down(ab);
	ath11k_ahb_power_up(ab);

	ath11k_dbg(ab, ATH11K_DBG_AHB, "exited from cold boot mode\n");
	return 0;
}

static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab)
{
	struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;

	cfg->tgt_ce_len = ab->hw_params.target_ce_count;
	cfg->tgt_ce = ab->hw_params.target_ce_config;
	cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
	cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
	ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id;
}

static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab)
{
	int i, j;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		for (j = 0; j < irq_grp->num_irq; j++)
			free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);

		netif_napi_del(&irq_grp->napi);
	}
}

static void ath11k_ahb_free_irq(struct ath11k_base *ab)
{
	int irq_idx;
	int i;

	if (ab->hw_params.hybrid_bus_type)
		return ath11k_pcic_free_irq(ab);

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
		free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
	}

	ath11k_ahb_free_ext_irq(ab);
}

static void ath11k_ahb_ce_tasklet(struct tasklet_struct *t)
{
	struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);

	ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);

	ath11k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
}

static irqreturn_t ath11k_ahb_ce_interrupt_handler(int irq, void *arg)
{
	struct ath11k_ce_pipe *ce_pipe = arg;

	/* last interrupt received for this CE */
	ce_pipe->timestamp = jiffies;

	ath11k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);

	tasklet_schedule(&ce_pipe->intr_tq);

	return IRQ_HANDLED;
}

static int ath11k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget)
{
	struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
						struct ath11k_ext_irq_grp,
						napi);
	struct ath11k_base *ab = irq_grp->ab;
	int work_done;

	work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		ath11k_ahb_ext_grp_enable(irq_grp);
	}

	if (work_done > budget)
		work_done = budget;

	return work_done;
}

static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg)
{
	struct ath11k_ext_irq_grp *irq_grp = arg;

	/* last interrupt received for this group */
	irq_grp->timestamp = jiffies;

	ath11k_ahb_ext_grp_disable(irq_grp);

	napi_schedule(&irq_grp->napi);

	return IRQ_HANDLED;
}

static int ath11k_ahb_config_ext_irq(struct ath11k_base *ab)
{
	struct ath11k_hw_params *hw = &ab->hw_params;
	int i, j;
	int irq;
	int ret;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
		u32 num_irq = 0;

		irq_grp->ab = ab;
		irq_grp->grp_id = i;
		init_dummy_netdev(&irq_grp->napi_ndev);
		netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
			       ath11k_ahb_ext_grp_napi_poll);

		for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) {
			if (ab->hw_params.ring_mask->tx[i] & BIT(j)) {
				irq_grp->irqs[num_irq++] =
					wbm2host_tx_completions_ring1 - j;
			}

			if (ab->hw_params.ring_mask->rx[i] & BIT(j)) {
				irq_grp->irqs[num_irq++] =
					reo2host_destination_ring1 - j;
			}

			if (ab->hw_params.ring_mask->rx_err[i] & BIT(j))
				irq_grp->irqs[num_irq++] = reo2host_exception;

			if (ab->hw_params.ring_mask->rx_wbm_rel[i] & BIT(j))
				irq_grp->irqs[num_irq++] = wbm2host_rx_release;

			if (ab->hw_params.ring_mask->reo_status[i] & BIT(j))
				irq_grp->irqs[num_irq++] = reo2host_status;

			if (j < ab->hw_params.max_radios) {
				if (ab->hw_params.ring_mask->rxdma2host[i] & BIT(j)) {
					irq_grp->irqs[num_irq++] =
						rxdma2host_destination_ring_mac1 -
						ath11k_hw_get_mac_from_pdev_id(hw, j);
				}

				if (ab->hw_params.ring_mask->host2rxdma[i] & BIT(j)) {
					irq_grp->irqs[num_irq++] =
						host2rxdma_host_buf_ring_mac1 -
						ath11k_hw_get_mac_from_pdev_id(hw, j);
				}

				if (ab->hw_params.ring_mask->rx_mon_status[i] & BIT(j)) {
					irq_grp->irqs[num_irq++] =
						ppdu_end_interrupts_mac1 -
						ath11k_hw_get_mac_from_pdev_id(hw, j);
					irq_grp->irqs[num_irq++] =
						rxdma2host_monitor_status_ring_mac1 -
						ath11k_hw_get_mac_from_pdev_id(hw, j);
				}
			}
		}
		irq_grp->num_irq = num_irq;

		for (j = 0; j < irq_grp->num_irq; j++) {
			int irq_idx = irq_grp->irqs[j];

			irq = platform_get_irq_byname(ab->pdev,
						      irq_name[irq_idx]);
			ab->irq_num[irq_idx] = irq;
			irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY);
			ret = request_irq(irq, ath11k_ahb_ext_interrupt_handler,
					  IRQF_TRIGGER_RISING,
					  irq_name[irq_idx], irq_grp);
			if (ret) {
				ath11k_err(ab, "failed request_irq for %d\n",
					   irq);
			}
		}
	}

	return 0;
}

static int ath11k_ahb_config_irq(struct ath11k_base *ab)
{
	int irq, irq_idx, i;
	int ret;

	if (ab->hw_params.hybrid_bus_type)
		return ath11k_pcic_config_irq(ab);

	/* Configure CE irqs */
	for (i = 0; i < ab->hw_params.ce_count; i++) {
		struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];

		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		irq_idx = ATH11K_IRQ_CE0_OFFSET + i;

		tasklet_setup(&ce_pipe->intr_tq, ath11k_ahb_ce_tasklet);
		irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]);
		ret = request_irq(irq, ath11k_ahb_ce_interrupt_handler,
				  IRQF_TRIGGER_RISING, irq_name[irq_idx],
				  ce_pipe);
		if (ret)
			return ret;

		ab->irq_num[irq_idx] = irq;
	}

	/* Configure external interrupts */
	ret = ath11k_ahb_config_ext_irq(ab);

	return ret;
}

static int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
					  u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
		entry = &ab->hw_params.svc_to_ce_map[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (WARN_ON(!ul_set || !dl_set))
		return -ENOENT;

	return 0;
}

static int ath11k_ahb_hif_suspend(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
	u32 wake_irq;
	u32 value = 0;
	int ret;

	if (!device_may_wakeup(ab->dev))
		return -EPERM;

	wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];

	ret = enable_irq_wake(wake_irq);
	if (ret) {
		ath11k_err(ab, "failed to enable wakeup irq: %d\n", ret);
		return ret;
	}

	value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
				ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
	value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_ENTER,
				 ATH11K_AHB_SMP2P_SMEM_MSG);

	ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
					  ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
	if (ret) {
		ath11k_err(ab, "failed to send smp2p power save enter cmd: %d\n", ret);
		return ret;
	}

	ath11k_dbg(ab, ATH11K_DBG_AHB, "device suspended\n");

	return ret;
}

static int ath11k_ahb_hif_resume(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
	u32 wake_irq;
	u32 value = 0;
	int ret;

	if (!device_may_wakeup(ab->dev))
		return -EPERM;

	wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];

	ret = disable_irq_wake(wake_irq);
	if (ret) {
		ath11k_err(ab, "failed to disable wakeup irq: %d\n", ret);
		return ret;
	}

	reinit_completion(&ab->wow.wakeup_completed);

	value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
				ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
	value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_EXIT,
				 ATH11K_AHB_SMP2P_SMEM_MSG);

	ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
					  ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
	if (ret) {
		ath11k_err(ab, "failed to send smp2p power save exit cmd: %d\n", ret);
		return ret;
	}

	ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
	if (ret == 0) {
		ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n");
		return -ETIMEDOUT;
	}

	ath11k_dbg(ab, ATH11K_DBG_AHB, "device resumed\n");

	return 0;
}

static const struct ath11k_hif_ops ath11k_ahb_hif_ops_ipq8074 = {
	.start = ath11k_ahb_start,
	.stop = ath11k_ahb_stop,
	.read32 = ath11k_ahb_read32,
	.write32 = ath11k_ahb_write32,
	.read = NULL,
	.irq_enable = ath11k_ahb_ext_irq_enable,
	.irq_disable = ath11k_ahb_ext_irq_disable,
	.map_service_to_pipe = ath11k_ahb_map_service_to_pipe,
	.power_down = ath11k_ahb_power_down,
	.power_up = ath11k_ahb_power_up,
};

static const struct ath11k_hif_ops ath11k_ahb_hif_ops_wcn6750 = {
	.start = ath11k_pcic_start,
	.stop = ath11k_pcic_stop,
	.read32 = ath11k_pcic_read32,
	.write32 = ath11k_pcic_write32,
	.read = NULL,
	.irq_enable = ath11k_pcic_ext_irq_enable,
	.irq_disable = ath11k_pcic_ext_irq_disable,
	.get_msi_address = ath11k_pcic_get_msi_address,
	.get_user_msi_vector = ath11k_pcic_get_user_msi_assignment,
	.map_service_to_pipe = ath11k_pcic_map_service_to_pipe,
	.power_down = ath11k_ahb_power_down,
	.power_up = ath11k_ahb_power_up,
	.suspend = ath11k_ahb_hif_suspend,
	.resume = ath11k_ahb_hif_resume,
	.ce_irq_enable = ath11k_pci_enable_ce_irqs_except_wake_irq,
	.ce_irq_disable = ath11k_pci_disable_ce_irqs_except_wake_irq,
};

static int ath11k_core_get_rproc(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
	struct device *dev = ab->dev;
	struct rproc *prproc;
	phandle rproc_phandle;

	if (of_property_read_u32(dev->of_node, "qcom,rproc", &rproc_phandle)) {
		ath11k_err(ab, "failed to get q6_rproc handle\n");
		return -ENOENT;
	}

	prproc = rproc_get_by_phandle(rproc_phandle);
	if (!prproc) {
		ath11k_err(ab, "failed to get rproc\n");
		return -EINVAL;
	}
	ab_ahb->tgt_rproc = prproc;

	return 0;
}

static int ath11k_ahb_setup_msi_resources(struct ath11k_base *ab)
{
	struct platform_device *pdev = ab->pdev;
	phys_addr_t msi_addr_pa;
	dma_addr_t msi_addr_iova;
	struct resource *res;
	int int_prop;
	int ret;
	int i;

	ret = ath11k_pcic_init_msi_config(ab);
	if (ret) {
		ath11k_err(ab, "failed to init msi config: %d\n", ret);
		return ret;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ath11k_err(ab, "failed to fetch msi_addr\n");
		return -ENOENT;
	}

	msi_addr_pa = res->start;
	msi_addr_iova = dma_map_resource(ab->dev, msi_addr_pa, PAGE_SIZE,
					 DMA_FROM_DEVICE, 0);
	if (dma_mapping_error(ab->dev, msi_addr_iova))
		return -ENOMEM;

	ab->pci.msi.addr_lo = lower_32_bits(msi_addr_iova);
	ab->pci.msi.addr_hi = upper_32_bits(msi_addr_iova);

	ret = of_property_read_u32_index(ab->dev->of_node, "interrupts", 1, &int_prop);
	if (ret)
		return ret;

	ab->pci.msi.ep_base_data = int_prop + 32;

	for (i = 0; i < ab->pci.msi.config->total_vectors; i++) {
		ret = platform_get_irq(pdev, i);
		if (ret < 0)
			return ret;

		ab->pci.msi.irqs[i] = ret;
	}

	set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);

	return 0;
}

static int ath11k_ahb_setup_smp2p_handle(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);

	if (!ab->hw_params.smp2p_wow_exit)
		return 0;

	ab_ahb->smp2p_info.smem_state = qcom_smem_state_get(ab->dev, "wlan-smp2p-out",
							    &ab_ahb->smp2p_info.smem_bit);
	if (IS_ERR(ab_ahb->smp2p_info.smem_state)) {
		ath11k_err(ab, "failed to fetch smem state: %ld\n",
			   PTR_ERR(ab_ahb->smp2p_info.smem_state));
		return PTR_ERR(ab_ahb->smp2p_info.smem_state);
	}

	return 0;
}

static void ath11k_ahb_release_smp2p_handle(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);

	if (!ab->hw_params.smp2p_wow_exit)
		return;

	qcom_smem_state_put(ab_ahb->smp2p_info.smem_state);
}

static int ath11k_ahb_setup_resources(struct ath11k_base *ab)
{
	struct platform_device *pdev = ab->pdev;
	struct resource *mem_res;
	void __iomem *mem;

	if (ab->hw_params.hybrid_bus_type)
		return ath11k_ahb_setup_msi_resources(ab);

	mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
	if (IS_ERR(mem)) {
		dev_err(&pdev->dev, "ioremap error\n");
		return PTR_ERR(mem);
	}

	ab->mem = mem;
	ab->mem_len = resource_size(mem_res);

	return 0;
}

static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
	struct device *dev = ab->dev;
	struct device_node *node;
	struct resource r;
	int ret;

	node = of_parse_phandle(dev->of_node, "memory-region", 0);
	if (!node)
		return -ENOENT;

	ret = of_address_to_resource(node, 0, &r);
	of_node_put(node);
	if (ret) {
		dev_err(dev, "failed to resolve msa fixed region\n");
		return ret;
	}

	ab_ahb->fw.msa_paddr = r.start;
	ab_ahb->fw.msa_size = resource_size(&r);

	node = of_parse_phandle(dev->of_node, "memory-region", 1);
	if (!node)
		return -ENOENT;

	ret = of_address_to_resource(node, 0, &r);
	of_node_put(node);
	if (ret) {
		dev_err(dev, "failed to resolve ce fixed region\n");
		return ret;
	}

	ab_ahb->fw.ce_paddr = r.start;
	ab_ahb->fw.ce_size = resource_size(&r);

	return 0;
}

static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
	struct device *host_dev = ab->dev;
	struct platform_device_info info = {0};
	struct iommu_domain *iommu_dom;
	struct platform_device *pdev;
	struct device_node *node;
	int ret;

	/* Chipsets not requiring MSA need not initialize
	 * MSA resources, return success in such cases.
	 */
	if (!ab->hw_params.fixed_fw_mem)
		return 0;

	ret = ath11k_ahb_setup_msa_resources(ab);
	if (ret) {
		ath11k_err(ab, "failed to setup msa resources\n");
		return ret;
	}

	node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
	if (!node) {
		ab_ahb->fw.use_tz = true;
		return 0;
	}

	info.fwnode = &node->fwnode;
	info.parent = host_dev;
	info.name = node->name;
	info.dma_mask = DMA_BIT_MASK(32);

	pdev = platform_device_register_full(&info);
	if (IS_ERR(pdev)) {
		of_node_put(node);
		return PTR_ERR(pdev);
	}

	ret = of_dma_configure(&pdev->dev, node, true);
	if (ret) {
		ath11k_err(ab, "dma configure fail: %d\n", ret);
		goto err_unregister;
	}

	ab_ahb->fw.dev = &pdev->dev;

	iommu_dom = iommu_domain_alloc(&platform_bus_type);
	if (!iommu_dom) {
		ath11k_err(ab, "failed to allocate iommu domain\n");
		ret = -ENOMEM;
		goto err_unregister;
	}

	ret = iommu_attach_device(iommu_dom, ab_ahb->fw.dev);
	if (ret) {
		ath11k_err(ab, "could not attach device: %d\n", ret);
		goto err_iommu_free;
	}

	ret = iommu_map(iommu_dom, ab_ahb->fw.msa_paddr,
			ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size,
			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
	if (ret) {
		ath11k_err(ab, "failed to map firmware region: %d\n", ret);
		goto err_iommu_detach;
	}

	ret = iommu_map(iommu_dom, ab_ahb->fw.ce_paddr,
			ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size,
			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
	if (ret) {
		ath11k_err(ab, "failed to map firmware CE region: %d\n", ret);
		goto err_iommu_unmap;
	}

	ab_ahb->fw.use_tz = false;
	ab_ahb->fw.iommu_domain = iommu_dom;
	of_node_put(node);

	return 0;

err_iommu_unmap:
	iommu_unmap(iommu_dom, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);

err_iommu_detach:
	iommu_detach_device(iommu_dom, ab_ahb->fw.dev);

err_iommu_free:
	iommu_domain_free(iommu_dom);

err_unregister:
	platform_device_unregister(pdev);
	of_node_put(node);

	return ret;
}

static int ath11k_ahb_fw_resource_deinit(struct ath11k_base *ab)
{
	struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
	struct iommu_domain *iommu;
	size_t unmapped_size;

	/* Chipsets not requiring MSA would have not initialized
	 * MSA resources, return success in such cases.
	 */
	if (!ab->hw_params.fixed_fw_mem)
		return 0;

	if (ab_ahb->fw.use_tz)
		return 0;

	iommu = ab_ahb->fw.iommu_domain;

	unmapped_size = iommu_unmap(iommu, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);
	if (unmapped_size != ab_ahb->fw.msa_size)
		ath11k_err(ab, "failed to unmap firmware: %zu\n",
			   unmapped_size);

	unmapped_size = iommu_unmap(iommu, ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size);
	if (unmapped_size != ab_ahb->fw.ce_size)
		ath11k_err(ab, "failed to unmap firmware CE memory: %zu\n",
			   unmapped_size);

	iommu_detach_device(iommu, ab_ahb->fw.dev);
	iommu_domain_free(iommu);

	platform_device_unregister(to_platform_device(ab_ahb->fw.dev));

	return 0;
}

static int ath11k_ahb_probe(struct platform_device *pdev)
{
	struct ath11k_base *ab;
	const struct of_device_id *of_id;
	const struct ath11k_hif_ops *hif_ops;
	const struct ath11k_pci_ops *pci_ops;
	enum ath11k_hw_rev hw_rev;
	int ret;

	of_id = of_match_device(ath11k_ahb_of_match, &pdev->dev);
	if (!of_id) {
		dev_err(&pdev->dev, "failed to find matching device tree id\n");
		return -EINVAL;
	}

	hw_rev = (enum ath11k_hw_rev)of_id->data;

	switch (hw_rev) {
	case ATH11K_HW_IPQ8074:
	case ATH11K_HW_IPQ6018_HW10:
	case ATH11K_HW_IPQ5018_HW10:
		hif_ops = &ath11k_ahb_hif_ops_ipq8074;
		pci_ops = NULL;
		break;
	case ATH11K_HW_WCN6750_HW10:
		hif_ops = &ath11k_ahb_hif_ops_wcn6750;
		pci_ops = &ath11k_ahb_pci_ops_wcn6750;
		break;
	default:
		dev_err(&pdev->dev, "unsupported device type %d\n", hw_rev);
		return -EOPNOTSUPP;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "failed to set 32-bit consistent dma\n");
		return ret;
	}

	ab = ath11k_core_alloc(&pdev->dev, sizeof(struct ath11k_ahb),
			       ATH11K_BUS_AHB);
	if (!ab) {
		dev_err(&pdev->dev, "failed to allocate ath11k base\n");
		return -ENOMEM;
	}

	ab->hif.ops = hif_ops;
	ab->pdev = pdev;
	ab->hw_rev = hw_rev;
	ab->fw_mode = ATH11K_FIRMWARE_MODE_NORMAL;
	platform_set_drvdata(pdev, ab);

	ret = ath11k_pcic_register_pci_ops(ab, pci_ops);
	if (ret) {
		ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
		goto err_core_free;
	}

	ret = ath11k_core_pre_init(ab);
	if (ret)
		goto err_core_free;

	ret = ath11k_ahb_setup_resources(ab);
	if (ret)
		goto err_core_free;

	ab->mem_ce = ab->mem;

	if (ab->hw_params.ce_remap) {
		const struct ce_remap *ce_remap = ab->hw_params.ce_remap;
		/* ce register space is moved out of wcss unlike ipq8074 or ipq6018
		 * and the space is not contiguous, hence remapping the CE registers
		 * to a new space for accessing them.
		 */
		ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
		if (!ab->mem_ce) {
			dev_err(&pdev->dev, "ce ioremap error\n");
			ret = -ENOMEM;
			goto err_core_free;
		}
	}

	ret = ath11k_ahb_fw_resources_init(ab);
	if (ret)
		goto err_core_free;

	ret = ath11k_ahb_setup_smp2p_handle(ab);
	if (ret)
		goto err_fw_deinit;

	ret = ath11k_hal_srng_init(ab);
	if (ret)
		goto err_release_smp2p_handle;

	ret = ath11k_ce_alloc_pipes(ab);
	if (ret) {
		ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
		goto err_hal_srng_deinit;
	}

	ath11k_ahb_init_qmi_ce_config(ab);

	ret = ath11k_core_get_rproc(ab);
	if (ret) {
		ath11k_err(ab, "failed to get rproc: %d\n", ret);
		goto err_ce_free;
	}

	ret = ath11k_core_init(ab);
	if (ret) {
		ath11k_err(ab, "failed to init core: %d\n", ret);
		goto err_ce_free;
	}

	ret = ath11k_ahb_config_irq(ab);
	if (ret) {
		ath11k_err(ab, "failed to configure irq: %d\n", ret);
		goto err_ce_free;
	}

	ath11k_ahb_fwreset_from_cold_boot(ab);

	return 0;

err_ce_free:
	ath11k_ce_free_pipes(ab);

err_hal_srng_deinit:
	ath11k_hal_srng_deinit(ab);

err_release_smp2p_handle:
	ath11k_ahb_release_smp2p_handle(ab);

err_fw_deinit:
	ath11k_ahb_fw_resource_deinit(ab);

err_core_free:
	ath11k_core_free(ab);
	platform_set_drvdata(pdev, NULL);

	return ret;
}

static void ath11k_ahb_remove_prepare(struct ath11k_base *ab)
{
	unsigned long left;

	if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) {
		left = wait_for_completion_timeout(&ab->driver_recovery,
						   ATH11K_AHB_RECOVERY_TIMEOUT);
		if (!left)
			ath11k_warn(ab, "failed to receive recovery response completion\n");
	}

	set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
	cancel_work_sync(&ab->restart_work);
	cancel_work_sync(&ab->qmi.event_work);
}

static void ath11k_ahb_free_resources(struct ath11k_base *ab)
{
	struct platform_device *pdev = ab->pdev;

	ath11k_ahb_free_irq(ab);
	ath11k_hal_srng_deinit(ab);
	ath11k_ahb_release_smp2p_handle(ab);
	ath11k_ahb_fw_resource_deinit(ab);
	ath11k_ce_free_pipes(ab);

	if (ab->hw_params.ce_remap)
		iounmap(ab->mem_ce);

	ath11k_core_free(ab);
	platform_set_drvdata(pdev, NULL);
}

static int ath11k_ahb_remove(struct platform_device *pdev)
{
	struct ath11k_base *ab = platform_get_drvdata(pdev);

	if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
		ath11k_ahb_power_down(ab);
		ath11k_debugfs_soc_destroy(ab);
		ath11k_qmi_deinit_service(ab);
		goto qmi_fail;
	}

	ath11k_ahb_remove_prepare(ab);
	ath11k_core_deinit(ab);

qmi_fail:
	ath11k_ahb_free_resources(ab);

	return 0;
}

static void ath11k_ahb_shutdown(struct platform_device *pdev)
{
	struct ath11k_base *ab = platform_get_drvdata(pdev);

	/* platform shutdown() & remove() are mutually exclusive.
	 * remove() is invoked during rmmod & shutdown() during
	 * system reboot/shutdown.
	 */
	ath11k_ahb_remove_prepare(ab);

	if (!(test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)))
		goto free_resources;

	ath11k_core_deinit(ab);

free_resources:
	ath11k_ahb_free_resources(ab);
}

static struct platform_driver ath11k_ahb_driver = {
	.driver = {
		.name = "ath11k",
		.of_match_table = ath11k_ahb_of_match,
	},
	.probe = ath11k_ahb_probe,
	.remove = ath11k_ahb_remove,
	.shutdown = ath11k_ahb_shutdown,
};

static int ath11k_ahb_init(void)
{
	return platform_driver_register(&ath11k_ahb_driver);
}
module_init(ath11k_ahb_init);

static void ath11k_ahb_exit(void)
{
	platform_driver_unregister(&ath11k_ahb_driver);
}
module_exit(ath11k_ahb_exit);

MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN AHB devices");
MODULE_LICENSE("Dual BSD/GPL");