// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/ioport.h>

#include "core.h"
#include "debug.h"
#include "mhi.h"
#include "pci.h"
#include "pcic.h"

#define MHI_TIMEOUT_DEFAULT_MS	20000
#define RDDM_DUMP_SIZE		0x420000

static struct mhi_channel_config ath11k_mhi_channels_qca6390[] = {
	{
		.num = 0,
		.name = "LOOPBACK",
		.num_elements = 32,
		.event_ring = 0,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 1,
		.name = "LOOPBACK",
		.num_elements = 32,
		.event_ring = 0,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 20,
		.name = "IPCR",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 21,
		.name = "IPCR",
		.num_elements = 64,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x4,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = true,
	},
};

static struct mhi_event_config ath11k_mhi_events_qca6390[] = {
	{
		.num_elements = 32,
		.irq_moderation_ms = 0,
		.irq = 1,
		.mode = MHI_DB_BRST_DISABLE,
		.data_type = MHI_ER_CTRL,
		.hardware_event = false,
		.client_managed = false,
		.offload_channel = false,
	},
	{
		.num_elements = 256,
		.irq_moderation_ms = 1,
		.irq = 2,
		.mode = MHI_DB_BRST_DISABLE,
		.priority = 1,
		.hardware_event = false,
		.client_managed = false,
		.offload_channel = false,
	},
};

static struct mhi_controller_config ath11k_mhi_config_qca6390 = {
	.max_channels = 128,
	.timeout_ms = 2000,
	.use_bounce_buf = false,
	.buf_len = 8192,
	.num_channels = ARRAY_SIZE(ath11k_mhi_channels_qca6390),
	.ch_cfg = ath11k_mhi_channels_qca6390,
	.num_events = ARRAY_SIZE(ath11k_mhi_events_qca6390),
	.event_cfg = ath11k_mhi_events_qca6390,
};

static struct mhi_channel_config ath11k_mhi_channels_qcn9074[] = {
	{
		.num = 0,
		.name = "LOOPBACK",
		.num_elements = 32,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x14,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 1,
		.name = "LOOPBACK",
		.num_elements = 32,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x14,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 20,
		.name = "IPCR",
		.num_elements = 32,
		.event_ring = 1,
		.dir = DMA_TO_DEVICE,
		.ee_mask = 0x14,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = false,
	},
	{
		.num = 21,
		.name = "IPCR",
		.num_elements = 32,
		.event_ring = 1,
		.dir = DMA_FROM_DEVICE,
		.ee_mask = 0x14,
		.pollcfg = 0,
		.doorbell = MHI_DB_BRST_DISABLE,
		.lpm_notify = false,
		.offload_channel = false,
		.doorbell_mode_switch = false,
		.auto_queue = true,
	},
};

static struct mhi_event_config ath11k_mhi_events_qcn9074[] = {
	{
		.num_elements = 32,
		.irq_moderation_ms = 0,
		.irq = 1,
		.data_type = MHI_ER_CTRL,
		.mode = MHI_DB_BRST_DISABLE,
		.hardware_event = false,
		.client_managed = false,
		.offload_channel = false,
	},
	{
		.num_elements = 256,
		.irq_moderation_ms = 1,
		.irq = 2,
		.mode = MHI_DB_BRST_DISABLE,
		.priority = 1,
		.hardware_event = false,
		.client_managed = false,
		.offload_channel = false,
	},
};

static struct mhi_controller_config ath11k_mhi_config_qcn9074 = {
	.max_channels = 30,
	.timeout_ms = 10000,
	.use_bounce_buf = false,
	.buf_len = 0,
	.num_channels = ARRAY_SIZE(ath11k_mhi_channels_qcn9074),
	.ch_cfg = ath11k_mhi_channels_qcn9074,
	.num_events = ARRAY_SIZE(ath11k_mhi_events_qcn9074),
	.event_cfg = ath11k_mhi_events_qcn9074,
};

void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab)
{
	u32 val;

	val = ath11k_pcic_read32(ab, MHISTATUS);

	ath11k_dbg(ab, ATH11K_DBG_PCI, "mhistatus 0x%x\n", val);

	/* On QCA6390 it has been observed that after SOC_GLOBAL_RESET the
	 * MHISTATUS register has the SYSERR bit set, so MHICTRL_RESET must
	 * be set to clear SYSERR.
	 */
	ath11k_pcic_write32(ab, MHICTRL, MHICTRL_RESET_MASK);

	mdelay(10);
}

static void ath11k_mhi_reset_txvecdb(struct ath11k_base *ab)
{
	ath11k_pcic_write32(ab, PCIE_TXVECDB, 0);
}

static void ath11k_mhi_reset_txvecstatus(struct ath11k_base *ab)
{
	ath11k_pcic_write32(ab, PCIE_TXVECSTATUS, 0);
}

static void ath11k_mhi_reset_rxvecdb(struct ath11k_base *ab)
{
	ath11k_pcic_write32(ab, PCIE_RXVECDB, 0);
}

static void ath11k_mhi_reset_rxvecstatus(struct ath11k_base *ab)
{
	ath11k_pcic_write32(ab, PCIE_RXVECSTATUS, 0);
}

void ath11k_mhi_clear_vector(struct ath11k_base *ab)
{
	ath11k_mhi_reset_txvecdb(ab);
	ath11k_mhi_reset_txvecstatus(ab);
	ath11k_mhi_reset_rxvecdb(ab);
	ath11k_mhi_reset_rxvecstatus(ab);
}

static int ath11k_mhi_get_msi(struct ath11k_pci *ab_pci)
{
	struct ath11k_base *ab = ab_pci->ab;
	u32 user_base_data, base_vector;
	int ret, num_vectors, i;
	int *irq;
	unsigned int msi_data;

	ret = ath11k_pcic_get_user_msi_assignment(ab, "MHI", &num_vectors,
						  &user_base_data, &base_vector);
	if (ret)
		return ret;

	ath11k_dbg(ab, ATH11K_DBG_PCI, "num_vectors %d base_vector %d\n",
		   num_vectors, base_vector);

	irq = kcalloc(num_vectors, sizeof(int), GFP_KERNEL);
	if (!irq)
		return -ENOMEM;

	for (i = 0; i < num_vectors; i++) {
		msi_data = base_vector;

		if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
			msi_data += i;

		irq[i] = ath11k_pci_get_msi_irq(ab, msi_data);
	}

	ab_pci->mhi_ctrl->irq = irq;
	ab_pci->mhi_ctrl->nr_irqs = num_vectors;

	return 0;
}

static void ath11k_mhi_op_runtime_put(struct mhi_controller *mhi_cntrl)
{
}

static char *ath11k_mhi_op_callback_to_str(enum mhi_callback reason)
{
	switch (reason) {
	case MHI_CB_IDLE:
		return "MHI_CB_IDLE";
	case MHI_CB_PENDING_DATA:
		return "MHI_CB_PENDING_DATA";
	case MHI_CB_LPM_ENTER:
		return "MHI_CB_LPM_ENTER";
	case MHI_CB_LPM_EXIT:
		return "MHI_CB_LPM_EXIT";
	case MHI_CB_EE_RDDM:
		return "MHI_CB_EE_RDDM";
	case MHI_CB_EE_MISSION_MODE:
		return "MHI_CB_EE_MISSION_MODE";
	case MHI_CB_SYS_ERROR:
		return "MHI_CB_SYS_ERROR";
	case MHI_CB_FATAL_ERROR:
		return "MHI_CB_FATAL_ERROR";
	case MHI_CB_BW_REQ:
		return "MHI_CB_BW_REQ";
	default:
		return "UNKNOWN";
	}
}

static void ath11k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl,
				    enum mhi_callback cb)
{
	struct ath11k_base *ab = dev_get_drvdata(mhi_cntrl->cntrl_dev);

	ath11k_dbg(ab, ATH11K_DBG_BOOT, "notify status reason %s\n",
		   ath11k_mhi_op_callback_to_str(cb));

	switch (cb) {
	case MHI_CB_SYS_ERROR:
		ath11k_warn(ab, "firmware crashed: MHI_CB_SYS_ERROR\n");
		break;
	case MHI_CB_EE_RDDM:
		if (!(test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags)))
			queue_work(ab->workqueue_aux, &ab->reset_work);
		break;
	default:
		break;
	}
}

static int ath11k_mhi_op_read_reg(struct mhi_controller *mhi_cntrl,
				  void __iomem *addr,
				  u32 *out)
{
	*out = readl(addr);

	return 0;
}

static void ath11k_mhi_op_write_reg(struct mhi_controller *mhi_cntrl,
				    void __iomem *addr,
				    u32 val)
{
	writel(val, addr);
}

static int ath11k_mhi_read_addr_from_dt(struct mhi_controller *mhi_ctrl)
{
	struct device_node *np;
	struct resource res;
	int ret;

	np = of_find_node_by_type(NULL, "memory");
	if (!np)
		return -ENOENT;

	ret = of_address_to_resource(np, 0, &res);
	of_node_put(np);
	if (ret)
		return ret;

	mhi_ctrl->iova_start = res.start + 0x1000000;
	mhi_ctrl->iova_stop = res.end;

	return 0;
}

int ath11k_mhi_register(struct ath11k_pci *ab_pci)
{
	struct ath11k_base *ab = ab_pci->ab;
	struct mhi_controller *mhi_ctrl;
	struct mhi_controller_config *ath11k_mhi_config;
	int ret;

	mhi_ctrl = mhi_alloc_controller();
	if (!mhi_ctrl)
		return -ENOMEM;

	ath11k_core_create_firmware_path(ab, ATH11K_AMSS_FILE,
					 ab_pci->amss_path,
					 sizeof(ab_pci->amss_path));

	ab_pci->mhi_ctrl = mhi_ctrl;
	mhi_ctrl->cntrl_dev = ab->dev;
	mhi_ctrl->fw_image = ab_pci->amss_path;
	mhi_ctrl->regs = ab->mem;
	mhi_ctrl->reg_len = ab->mem_len;

	ret = ath11k_mhi_get_msi(ab_pci);
	if (ret) {
		ath11k_err(ab, "failed to get msi for mhi\n");
		goto free_controller;
	}

	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
		mhi_ctrl->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;

	if (test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) {
		ret = ath11k_mhi_read_addr_from_dt(mhi_ctrl);
		if (ret < 0)
			goto free_controller;
	} else {
		mhi_ctrl->iova_start = 0;
		mhi_ctrl->iova_stop = 0xFFFFFFFF;
	}

	mhi_ctrl->rddm_size = RDDM_DUMP_SIZE;
	mhi_ctrl->sbl_size = SZ_512K;
	mhi_ctrl->seg_len = SZ_512K;
	mhi_ctrl->fbc_download = true;
	mhi_ctrl->runtime_get = ath11k_mhi_op_runtime_get;
	mhi_ctrl->runtime_put = ath11k_mhi_op_runtime_put;
	mhi_ctrl->status_cb = ath11k_mhi_op_status_cb;
	mhi_ctrl->read_reg = ath11k_mhi_op_read_reg;
	mhi_ctrl->write_reg = ath11k_mhi_op_write_reg;

	switch (ab->hw_rev) {
	case ATH11K_HW_QCN9074_HW10:
		ath11k_mhi_config = &ath11k_mhi_config_qcn9074;
		break;
	case ATH11K_HW_QCA6390_HW20:
	case ATH11K_HW_WCN6855_HW20:
	case ATH11K_HW_WCN6855_HW21:
		ath11k_mhi_config = &ath11k_mhi_config_qca6390;
		break;
	default:
		ath11k_err(ab, "failed to assign mhi_config for unknown hw rev %d\n",
			   ab->hw_rev);
		ret = -EINVAL;
		goto free_controller;
	}

	ret = mhi_register_controller(mhi_ctrl, ath11k_mhi_config);
	if (ret) {
		ath11k_err(ab, "failed to register to mhi bus, err = %d\n", ret);
		goto free_controller;
	}

	return 0;

free_controller:
	mhi_free_controller(mhi_ctrl);
	ab_pci->mhi_ctrl = NULL;
	return ret;
}

void ath11k_mhi_unregister(struct ath11k_pci *ab_pci)
{
	struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;

	mhi_unregister_controller(mhi_ctrl);
	kfree(mhi_ctrl->irq);
	mhi_free_controller(mhi_ctrl);
}

int ath11k_mhi_start(struct ath11k_pci *ab_pci)
{
	struct ath11k_base *ab = ab_pci->ab;
	int ret;

	ab_pci->mhi_ctrl->timeout_ms = MHI_TIMEOUT_DEFAULT_MS;

	ret = mhi_prepare_for_power_up(ab_pci->mhi_ctrl);
	if (ret) {
		ath11k_warn(ab, "failed to prepare mhi: %d\n", ret);
		return ret;
	}

	ret = mhi_sync_power_up(ab_pci->mhi_ctrl);
	if (ret) {
		ath11k_warn(ab, "failed to power up mhi: %d\n", ret);
		return ret;
	}

	return 0;
}

void ath11k_mhi_stop(struct ath11k_pci *ab_pci)
{
	mhi_power_down(ab_pci->mhi_ctrl, true);
	mhi_unprepare_after_power_down(ab_pci->mhi_ctrl);
}

int ath11k_mhi_suspend(struct ath11k_pci *ab_pci)
{
	struct ath11k_base *ab = ab_pci->ab;
	int ret;

	ret = mhi_pm_suspend(ab_pci->mhi_ctrl);
	if (ret) {
		ath11k_warn(ab, "failed to suspend mhi: %d\n", ret);
		return ret;
	}

	return 0;
}

int ath11k_mhi_resume(struct ath11k_pci *ab_pci)
{
	struct ath11k_base *ab = ab_pci->ab;
	int ret;

	/* Force the MHI resume: some devices such as QCA6390 and WCN6855
	 * are not in M3 state but are still functional, so ignore the
	 * current MHI state while resuming.
	 */
	ret = mhi_pm_resume_force(ab_pci->mhi_ctrl);
	if (ret) {
		ath11k_warn(ab, "failed to resume mhi: %d\n", ret);
		return ret;
	}

	return 0;
}