1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Qualcomm self-authenticating modem subsystem remoteproc driver 4 * 5 * Copyright (C) 2016 Linaro Ltd. 6 * Copyright (C) 2014 Sony Mobile Communications AB 7 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. 8 */ 9 10 #include <linux/clk.h> 11 #include <linux/delay.h> 12 #include <linux/dma-mapping.h> 13 #include <linux/interrupt.h> 14 #include <linux/kernel.h> 15 #include <linux/mfd/syscon.h> 16 #include <linux/module.h> 17 #include <linux/of_address.h> 18 #include <linux/of_device.h> 19 #include <linux/platform_device.h> 20 #include <linux/pm_domain.h> 21 #include <linux/pm_runtime.h> 22 #include <linux/regmap.h> 23 #include <linux/regulator/consumer.h> 24 #include <linux/remoteproc.h> 25 #include "linux/remoteproc/qcom_q6v5_ipa_notify.h" 26 #include <linux/reset.h> 27 #include <linux/soc/qcom/mdt_loader.h> 28 #include <linux/iopoll.h> 29 30 #include "remoteproc_internal.h" 31 #include "qcom_common.h" 32 #include "qcom_q6v5.h" 33 34 #include <linux/qcom_scm.h> 35 36 #define MPSS_CRASH_REASON_SMEM 421 37 38 /* RMB Status Register Values */ 39 #define RMB_PBL_SUCCESS 0x1 40 41 #define RMB_MBA_XPU_UNLOCKED 0x1 42 #define RMB_MBA_XPU_UNLOCKED_SCRIBBLED 0x2 43 #define RMB_MBA_META_DATA_AUTH_SUCCESS 0x3 44 #define RMB_MBA_AUTH_COMPLETE 0x4 45 46 /* PBL/MBA interface registers */ 47 #define RMB_MBA_IMAGE_REG 0x00 48 #define RMB_PBL_STATUS_REG 0x04 49 #define RMB_MBA_COMMAND_REG 0x08 50 #define RMB_MBA_STATUS_REG 0x0C 51 #define RMB_PMI_META_DATA_REG 0x10 52 #define RMB_PMI_CODE_START_REG 0x14 53 #define RMB_PMI_CODE_LENGTH_REG 0x18 54 #define RMB_MBA_MSS_STATUS 0x40 55 #define RMB_MBA_ALT_RESET 0x44 56 57 #define RMB_CMD_META_DATA_READY 0x1 58 #define RMB_CMD_LOAD_READY 0x2 59 60 /* QDSP6SS Register Offsets */ 61 #define QDSP6SS_RESET_REG 0x014 62 #define QDSP6SS_GFMUX_CTL_REG 0x020 63 #define QDSP6SS_PWR_CTL_REG 0x030 64 #define QDSP6SS_MEM_PWR_CTL 0x0B0 65 #define QDSP6V6SS_MEM_PWR_CTL 0x034 66 
#define QDSP6SS_STRAP_ACC		0x110

/* AXI Halt Register Offsets */
#define AXI_HALTREQ_REG			0x0
#define AXI_HALTACK_REG			0x4
#define AXI_IDLE_REG			0x8
#define NAV_AXI_HALTREQ_BIT		BIT(0)
#define NAV_AXI_HALTACK_BIT		BIT(1)
#define NAV_AXI_IDLE_BIT		BIT(2)
#define AXI_GATING_VALID_OVERRIDE	BIT(0)

#define HALT_ACK_TIMEOUT_US		100000
#define NAV_HALT_ACK_TIMEOUT_US		200

/* QDSP6SS_RESET */
#define Q6SS_STOP_CORE			BIT(0)
#define Q6SS_CORE_ARES			BIT(1)
#define Q6SS_BUS_ARES_ENABLE		BIT(2)

/* QDSP6SS CBCR */
#define Q6SS_CBCR_CLKEN			BIT(0)
#define Q6SS_CBCR_CLKOFF		BIT(31)
#define Q6SS_CBCR_TIMEOUT_US		200

/* QDSP6SS_GFMUX_CTL */
#define Q6SS_CLK_ENABLE			BIT(1)

/* QDSP6SS_PWR_CTL */
#define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
#define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
#define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
#define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
#define Q6SS_ETB_SLP_NRET_N		BIT(17)
#define Q6SS_L2DATA_STBY_N		BIT(18)
#define Q6SS_SLP_RET_N			BIT(19)
#define Q6SS_CLAMP_IO			BIT(20)
#define QDSS_BHS_ON			BIT(21)
#define QDSS_LDO_BYP			BIT(22)

/* QDSP6v56 parameters */
#define QDSP6v56_LDO_BYP		BIT(25)
#define QDSP6v56_BHS_ON			BIT(24)
#define QDSP6v56_CLAMP_WL		BIT(21)
#define QDSP6v56_CLAMP_QMC_MEM		BIT(22)
#define QDSP6SS_XO_CBCR			0x0038
#define QDSP6SS_ACC_OVERRIDE_VAL	0x20

/* QDSP6v65 parameters */
#define QDSP6SS_CORE_CBCR		0x20
#define QDSP6SS_SLEEP			0x3C
#define QDSP6SS_BOOT_CORE_START		0x400
#define QDSP6SS_BOOT_CMD		0x404
#define QDSP6SS_BOOT_STATUS		0x408
#define BOOT_STATUS_TIMEOUT_US		200
#define BOOT_FSM_TIMEOUT		10000

/* Runtime handle plus requested operating point for one regulator */
struct reg_info {
	struct regulator *reg;
	int uV;		/* requested voltage; 0 or less = leave unchanged */
	int uA;		/* requested load; 0 or less = leave unchanged */
};

/* Static, per-SoC description of one regulator supply to acquire */
struct qcom_mss_reg_res {
	const char *supply;
	int uV;
	int uA;
};

/* Per-SoC match data: resources and quirks for a given MSS version */
struct rproc_hexagon_res {
	const char *hexagon_mba_image;
	struct qcom_mss_reg_res *proxy_supply;
	struct qcom_mss_reg_res *active_supply;
	char **proxy_clk_names;
	char **reset_clk_names;
	char **active_clk_names;
	char **active_pd_names;
	char **proxy_pd_names;
	int version;
	bool need_mem_protection;
	bool has_alt_reset;
	bool has_halt_nav;
};

/* Driver state for one modem subsystem (MSS) remoteproc instance */
struct q6v5 {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *reg_base;		/* QDSP6SS register block */
	void __iomem *rmb_base;		/* PBL/MBA interface (RMB) registers */

	struct regmap *halt_map;
	struct regmap *halt_nav_map;
	struct regmap *conn_map;

	/* Offsets into the halt/conn regmaps for the individual AXI ports */
	u32 halt_q6;
	u32 halt_modem;
	u32 halt_nc;
	u32 halt_nav;
	u32 conn_box;

	struct reset_control *mss_restart;
	struct reset_control *pdc_reset;

	struct qcom_q6v5 q6v5;

	struct clk *active_clks[8];
	struct clk *reset_clks[4];
	struct clk *proxy_clks[4];
	struct device *active_pds[1];
	struct device *proxy_pds[3];
	int active_clk_count;
	int reset_clk_count;
	int proxy_clk_count;
	int active_pd_count;
	int proxy_pd_count;

	struct reg_info active_regs[1];
	struct reg_info proxy_regs[3];
	int active_reg_count;
	int proxy_reg_count;

	bool running;

	/* Coredump bookkeeping: MBA loaded for dump, segments copied so far */
	bool dump_mba_loaded;
	unsigned long dump_segment_mask;
	unsigned long dump_complete_mask;

	/* MBA (modem boot authenticator) carveout */
	phys_addr_t mba_phys;
	void *mba_region;
	size_t mba_size;

	/* MPSS (modem firmware) carveout */
	phys_addr_t mpss_phys;
	phys_addr_t mpss_reloc;
	void *mpss_region;
	size_t mpss_size;

	struct qcom_rproc_glink glink_subdev;
	struct qcom_rproc_subdev smd_subdev;
	struct qcom_rproc_ssr ssr_subdev;
	struct qcom_rproc_ipa_notify ipa_notify_subdev;
	struct qcom_sysmon *sysmon;
	bool need_mem_protection;
	bool has_alt_reset;
	bool has_halt_nav;
	int mpss_perm;		/* current SCM ownership state of mpss region */
	int mba_perm;		/* current SCM ownership state of mba region */
	const char *hexagon_mdt_image;
	int version;
};

/* Values for rproc_hexagon_res::version / q6v5::version */
enum {
	MSS_MSM8916,
	MSS_MSM8974,
	MSS_MSM8996,
	MSS_MSM8998,
	MSS_SC7180,
	MSS_SDM845,
};

/*
 * Populate @regs from the static @reg_res table, looking up each
 * regulator by supply name.  Returns the number of regulators found on
 * success, 0 when @reg_res is NULL, or a negative errno.
 */
static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
const struct qcom_mss_reg_res *reg_res) 227 { 228 int rc; 229 int i; 230 231 if (!reg_res) 232 return 0; 233 234 for (i = 0; reg_res[i].supply; i++) { 235 regs[i].reg = devm_regulator_get(dev, reg_res[i].supply); 236 if (IS_ERR(regs[i].reg)) { 237 rc = PTR_ERR(regs[i].reg); 238 if (rc != -EPROBE_DEFER) 239 dev_err(dev, "Failed to get %s\n regulator", 240 reg_res[i].supply); 241 return rc; 242 } 243 244 regs[i].uV = reg_res[i].uV; 245 regs[i].uA = reg_res[i].uA; 246 } 247 248 return i; 249 } 250 251 static int q6v5_regulator_enable(struct q6v5 *qproc, 252 struct reg_info *regs, int count) 253 { 254 int ret; 255 int i; 256 257 for (i = 0; i < count; i++) { 258 if (regs[i].uV > 0) { 259 ret = regulator_set_voltage(regs[i].reg, 260 regs[i].uV, INT_MAX); 261 if (ret) { 262 dev_err(qproc->dev, 263 "Failed to request voltage for %d.\n", 264 i); 265 goto err; 266 } 267 } 268 269 if (regs[i].uA > 0) { 270 ret = regulator_set_load(regs[i].reg, 271 regs[i].uA); 272 if (ret < 0) { 273 dev_err(qproc->dev, 274 "Failed to set regulator mode\n"); 275 goto err; 276 } 277 } 278 279 ret = regulator_enable(regs[i].reg); 280 if (ret) { 281 dev_err(qproc->dev, "Regulator enable failed\n"); 282 goto err; 283 } 284 } 285 286 return 0; 287 err: 288 for (; i >= 0; i--) { 289 if (regs[i].uV > 0) 290 regulator_set_voltage(regs[i].reg, 0, INT_MAX); 291 292 if (regs[i].uA > 0) 293 regulator_set_load(regs[i].reg, 0); 294 295 regulator_disable(regs[i].reg); 296 } 297 298 return ret; 299 } 300 301 static void q6v5_regulator_disable(struct q6v5 *qproc, 302 struct reg_info *regs, int count) 303 { 304 int i; 305 306 for (i = 0; i < count; i++) { 307 if (regs[i].uV > 0) 308 regulator_set_voltage(regs[i].reg, 0, INT_MAX); 309 310 if (regs[i].uA > 0) 311 regulator_set_load(regs[i].reg, 0); 312 313 regulator_disable(regs[i].reg); 314 } 315 } 316 317 static int q6v5_clk_enable(struct device *dev, 318 struct clk **clks, int count) 319 { 320 int rc; 321 int i; 322 323 for (i = 0; i < count; i++) { 324 rc = 
clk_prepare_enable(clks[i]);
		if (rc) {
			dev_err(dev, "Clock enable failed\n");
			goto err;
		}
	}

	return 0;
err:
	/* Unwind only the clocks that were successfully enabled */
	for (i--; i >= 0; i--)
		clk_disable_unprepare(clks[i]);

	return rc;
}

/* Disable and unprepare @count previously enabled clocks */
static void q6v5_clk_disable(struct device *dev,
			     struct clk **clks, int count)
{
	int i;

	for (i = 0; i < count; i++)
		clk_disable_unprepare(clks[i]);
}

/*
 * Vote for @pd_count power domains at maximum performance state and take
 * a runtime-PM reference on each.  On failure the votes already taken
 * are dropped.  Returns 0 or a negative errno.
 */
static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
			   size_t pd_count)
{
	int ret;
	int i;

	for (i = 0; i < pd_count; i++) {
		dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
		ret = pm_runtime_get_sync(pds[i]);
		if (ret < 0)
			goto unroll_pd_votes;
	}

	return 0;

unroll_pd_votes:
	for (i--; i >= 0; i--) {
		dev_pm_genpd_set_performance_state(pds[i], 0);
		pm_runtime_put(pds[i]);
	}

	return ret;
};

/* Drop the performance-state and runtime-PM votes from q6v5_pds_enable() */
static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
			     size_t pd_count)
{
	int i;

	for (i = 0; i < pd_count; i++) {
		dev_pm_genpd_set_performance_state(pds[i], 0);
		pm_runtime_put(pds[i]);
	}
}

/*
 * Reassign ownership of [addr, addr + size) between the modem VM
 * (@remote_owner true) and HLOS via a secure-monitor call, updating
 * *current_perm to the new owner.  No-op when memory protection is not
 * required or the region is already owned by the requested side.
 */
static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
				   bool remote_owner, phys_addr_t addr,
				   size_t size)
{
	struct qcom_scm_vmperm next;

	if (!qproc->need_mem_protection)
		return 0;
	if (remote_owner && *current_perm == BIT(QCOM_SCM_VMID_MSS_MSA))
		return 0;
	if (!remote_owner && *current_perm == BIT(QCOM_SCM_VMID_HLOS))
		return 0;

	next.vmid = remote_owner ? QCOM_SCM_VMID_MSS_MSA : QCOM_SCM_VMID_HLOS;
	next.perm = remote_owner ?
QCOM_SCM_PERM_RW : QCOM_SCM_PERM_RWX;

	return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
				   current_perm, &next, 1);
}

/* rproc .load op: copy the MBA firmware image into its carveout */
static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
{
	struct q6v5 *qproc = rproc->priv;

	memcpy(qproc->mba_region, fw->data, fw->size);

	return 0;
}

/* Put the modem subsystem into reset using the SoC-specific sequence */
static int q6v5_reset_assert(struct q6v5 *qproc)
{
	int ret;

	if (qproc->has_alt_reset) {
		reset_control_assert(qproc->pdc_reset);
		ret = reset_control_reset(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
	} else if (qproc->has_halt_nav) {
		/*
		 * When the AXI pipeline is being reset with the Q6 modem partly
		 * operational there is possibility of AXI valid signal to
		 * glitch, leading to spurious transactions and Q6 hangs. A work
		 * around is employed by asserting the AXI_GATING_VALID_OVERRIDE
		 * BIT before triggering Q6 MSS reset. Both the HALTREQ and
		 * AXI_GATING_VALID_OVERRIDE are withdrawn post MSS assert
		 * followed by a MSS deassert, while holding the PDC reset.
		 */
		reset_control_assert(qproc->pdc_reset);
		regmap_update_bits(qproc->conn_map, qproc->conn_box,
				   AXI_GATING_VALID_OVERRIDE, 1);
		regmap_update_bits(qproc->halt_nav_map, qproc->halt_nav,
				   NAV_AXI_HALTREQ_BIT, 0);
		reset_control_assert(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
		regmap_update_bits(qproc->conn_map, qproc->conn_box,
				   AXI_GATING_VALID_OVERRIDE, 0);
		ret = reset_control_deassert(qproc->mss_restart);
	} else {
		ret = reset_control_assert(qproc->mss_restart);
	}

	return ret;
}

/* Release the modem subsystem from reset (SoC-specific sequence) */
static int q6v5_reset_deassert(struct q6v5 *qproc)
{
	int ret;

	if (qproc->has_alt_reset) {
		/* Pulse mss_restart with ALT_RESET raised, under PDC reset */
		reset_control_assert(qproc->pdc_reset);
		writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
		ret = reset_control_reset(qproc->mss_restart);
		writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
		reset_control_deassert(qproc->pdc_reset);
	} else if (qproc->has_halt_nav) {
		ret = reset_control_reset(qproc->mss_restart);
	} else {
		ret = reset_control_deassert(qproc->mss_restart);
	}

	return ret;
}

/*
 * Poll the PBL status register until it becomes non-zero or @ms
 * milliseconds elapse.  Returns the status value or -ETIMEDOUT.
 */
static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
{
	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
		if (val)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

/*
 * Poll the MBA status register until it matches @status (or becomes any
 * non-zero value when @status is 0), goes negative (error reported by
 * the MBA), or @ms milliseconds elapse.  Returns the register value or
 * -ETIMEDOUT.
 */
static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
{

	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
		if (val < 0)
			break;

		if (!status && val)
			break;
		else if (status && val == status)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

static int
q6v5proc_reset(struct q6v5 *qproc)
{
	/*
	 * Power up and release the Hexagon core using the version-specific
	 * sequence, then wait for the PBL to report its boot status.
	 */
	u32 val;
	int ret;
	int i;

	if (qproc->version == MSS_SDM845) {
		/* Enable the sleep clock and wait for CLKOFF to clear */
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* De-assert QDSP6 stop core */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
		/* Trigger boot FSM */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		/* FSM completion is reported in RMB_MBA_MSS_STATUS bit 0 */
		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
				val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}

		goto pbl_wait;
	} else if (qproc->version == MSS_SC7180) {
		/* Enable the sleep clock and wait for CLKOFF to clear */
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Turn on the XO clock needed for PLL setup */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS XO clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Configure Q6 core CBCR to auto-enable after reset sequence */
		val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR);

		/* De-assert the Q6 stop core signal */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);

		/* Trigger the boot FSM to start the Q6 out-of-reset sequence */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		/* Poll the QDSP6SS_BOOT_STATUS for FSM completion */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_BOOT_STATUS,
					 val, (val & BIT(0)) != 0, 1,
					 BOOT_STATUS_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}
		goto pbl_wait;
	} else if (qproc->version == MSS_MSM8996 ||
		   qproc->version == MSS_MSM8998) {
		int mem_pwr_ctl;

		/* Override the ACC value if required */
		writel(QDSP6SS_ACC_OVERRIDE_VAL,
		       qproc->reg_base + QDSP6SS_STRAP_ACC);

		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* BHS require xo cbcr to be enabled */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		/* Read CLKOFF bit to go low indicating CLK is enabled */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev,
				"xo cbcr enabling timed out (rc:%d)\n", ret);
			return ret;
		}
		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSP6v56_BHS_ON;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		/* Read back to post the write before the settle delay */
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);

		/* Put LDO in bypass mode */
		val |= QDSP6v56_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert QDSP6 compiler memory clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert memory peripheral sleep and L2 memory standby */
		val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Turn on L1, L2, ETB and JU memories 1 at a time */
		if (qproc->version == MSS_MSM8996) {
			mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL;
			i = 19;
		} else {
			/* MSS_MSM8998 */
			mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL;
			i = 28;
		}
		val = readl(qproc->reg_base + mem_pwr_ctl);
		for (; i >= 0; i--) {
			val |= BIT(i);
			writel(val, qproc->reg_base + mem_pwr_ctl);
			/*
			 * Read back value to ensure the write is done then
			 * wait for 1us for both memory peripheral and data
			 * array to turn on.
			 */
			val |= readl(qproc->reg_base + mem_pwr_ctl);
			udelay(1);
		}
		/* Remove word line clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_WL;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	} else {
		/* Older versions (e.g. MSM8916/MSM8974) */
		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);
		/*
		 * Turn on memories. L2 banks should be done individually
		 * to minimize inrush current.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_2;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_1;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_0;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}
	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

pbl_wait:
	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}

/*
 * Halt one MSS AXI port: request a halt, wait for the ack and warn if
 * the port does not report idle afterwards.
 */
static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
				   struct regmap *halt_map,
				   u32 offset)
{
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (!ret && val)
		return;

	/* Assert halt request */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);

	/* Wait for halt */
	regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val,
				 val, 1000, HALT_ACK_TIMEOUT_US);

	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (ret || !val)
		dev_err(qproc->dev, "port failed halt\n");

	/* Clear halt request (port will remain halted until reset) */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}

/*
 * Halt the NAV AXI port; unlike the other ports its req/ack/idle bits
 * live in a single register at @offset.
 */
static void q6v5proc_halt_nav_axi_port(struct q6v5 *qproc,
				       struct regmap *halt_map,
				       u32 offset)
{
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset, &val);
	if (!ret && (val & NAV_AXI_IDLE_BIT))
		return;

	/* Assert halt request */
	regmap_update_bits(halt_map, offset, NAV_AXI_HALTREQ_BIT,
			   NAV_AXI_HALTREQ_BIT);

	/* Wait for halt ack*/
	regmap_read_poll_timeout(halt_map, offset, val,
				 (val & NAV_AXI_HALTACK_BIT),
				 5, NAV_HALT_ACK_TIMEOUT_US);

	ret = regmap_read(halt_map, offset, &val);
	if (ret || !(val & NAV_AXI_IDLE_BIT))
		dev_err(qproc->dev, "port failed halt\n");
}

/*
 * Copy the MPSS firmware metadata into a DMA buffer, hand it to the MBA
 * for authentication and reclaim the buffer afterwards.
 */
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	void *metadata;
	int mdata_perm;
	int xferop_ret;
	size_t size;
	void *ptr;
	int ret;

	metadata = qcom_mdt_read_metadata(fw, &size);
	if (IS_ERR(metadata))
		return PTR_ERR(metadata);

	ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
	if (!ptr) {
		kfree(metadata);
		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
		return -ENOMEM;
	}

	memcpy(ptr, metadata, size);

	/* Hypervisor mapping to access metadata by modem */
	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, phys, size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to metadata failed: %d\n", ret);
		ret = -EAGAIN;
		goto free_dma_attrs;
	}

	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	/* Metadata authentication done, remove modem access */
	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, phys, size);
	if (xferop_ret)
		dev_warn(qproc->dev,
			 "mdt buffer not reclaimed system may become unstable\n");

free_dma_attrs:
	dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
	kfree(metadata);

	return ret < 0 ? ret : 0;
}

/* True for loadable, non-hash ELF segments that occupy memory */
static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
{
	if (phdr->p_type != PT_LOAD)
		return false;

	if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
		return false;

	if (!phdr->p_memsz)
		return false;

	return true;
}

/*
 * Power up the subsystem, hand the MBA image to the PBL and wait for
 * the MBA to come up.  On success the caller must eventually undo this
 * with q6v5_mba_reclaim(); on failure everything is unwound here.
 */
static int q6v5_mba_load(struct q6v5 *qproc)
{
	int ret;
	int xfermemop_ret;

	qcom_q6v5_prepare(&qproc->q6v5);

	ret = q6v5_pds_enable(qproc, qproc->active_pds, qproc->active_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable active power domains\n");
		goto disable_irqs;
	}

	ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable proxy power domains\n");
		goto disable_active_pds;
	}

	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
				    qproc->proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy supplies\n");
		goto disable_proxy_pds;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
			      qproc->proxy_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy clocks\n");
		goto disable_proxy_reg;
	}

	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
				    qproc->active_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		goto disable_proxy_clk;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
			      qproc->reset_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable reset clocks\n");
		goto disable_vdd;
	}

	ret = q6v5_reset_deassert(qproc);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_reset_clks;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
			      qproc->active_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable clocks\n");
		goto assert_reset;
	}

	/* Assign MBA image access in DDR to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
				      qproc->mba_phys, qproc->mba_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mba memory failed: %d\n", ret);
		goto disable_active_clks;
	}

	/* Tell the PBL where to find the MBA image */
	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto reclaim_mba;

	/* status 0 here means "accept any non-zero MBA status" */
	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	qproc->dump_mba_loaded = true;
	return 0;

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	if (qproc->has_halt_nav)
		q6v5proc_halt_nav_axi_port(qproc, qproc->halt_nav_map,
					   qproc->halt_nav);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);

reclaim_mba:
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
						qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret) {
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");
	}

disable_active_clks:
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
assert_reset:
	q6v5_reset_assert(qproc);
disable_reset_clks:
	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
disable_vdd:
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
disable_proxy_clk:
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
disable_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
disable_proxy_pds:
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
disable_active_pds:
	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
disable_irqs:
	qcom_q6v5_unprepare(&qproc->q6v5);

	return ret;
}

/*
 * Undo q6v5_mba_load(): halt the AXI ports, return memory ownership to
 * HLOS and power the subsystem back down.
 */
static void q6v5_mba_reclaim(struct q6v5 *qproc)
{
	int ret;
	u32 val;

	qproc->dump_mba_loaded = false;

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	if (qproc->has_halt_nav)
		q6v5proc_halt_nav_axi_port(qproc, qproc->halt_nav_map,
					   qproc->halt_nav);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	if (qproc->version == MSS_MSM8996) {
		/*
		 * To avoid high MX current during LPASS/MSS restart.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
			QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}

	/* Return the modem firmware region to HLOS ownership */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
				      false, qproc->mpss_phys,
				      qproc->mpss_size);
	WARN_ON(ret);

	q6v5_reset_assert(qproc);

	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);

	/* In case of failure or coredump scenario where reclaiming MBA memory
	 * could not happen reclaim it here.
	 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
				      qproc->mba_phys,
				      qproc->mba_size);
	WARN_ON(ret);

	ret = qcom_q6v5_unprepare(&qproc->q6v5);
	if (ret) {
		q6v5_pds_disable(qproc, qproc->proxy_pds,
				 qproc->proxy_pd_count);
		q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
				 qproc->proxy_clk_count);
		q6v5_regulator_disable(qproc, qproc->proxy_regs,
				       qproc->proxy_reg_count);
	}
}

/*
 * Load and authenticate the MPSS firmware: send the metadata to the
 * MBA, copy each loadable ELF segment into the carveout (possibly from
 * split .bNN files), then hand the region to the modem and wait for
 * authentication to complete.
 */
static int q6v5_mpss_load(struct q6v5 *qproc)
{
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct firmware *seg_fw;
	const struct firmware *fw;
	struct elf32_hdr *ehdr;
	phys_addr_t mpss_reloc;
	phys_addr_t boot_addr;
	phys_addr_t min_addr = PHYS_ADDR_MAX;
	phys_addr_t max_addr = 0;
	bool relocate = false;
	char *fw_name;
	size_t fw_name_len;
	ssize_t offset;
	size_t size = 0;
	void *ptr;
	int ret;
	int i;

	/* Name must be long enough to hold the "xxx.bNN" rewrite below */
	fw_name_len = strlen(qproc->hexagon_mdt_image);
	if (fw_name_len <= 4)
		return -EINVAL;

	fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL);
	if (!fw_name)
		return -ENOMEM;

	ret = request_firmware(&fw, fw_name, qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load %s\n", fw_name);
		goto out;
	}

	/* Initialize the RMB validator */
	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_mpss_init_image(qproc, fw);
	if (ret)
		goto release_firmware;

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);

	/* First pass: find the firmware's address span and relocatability */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
			relocate = true;

		if (phdr->p_paddr < min_addr)
			min_addr = phdr->p_paddr;

		if (phdr->p_paddr + phdr->p_memsz > max_addr)
			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
	}

	mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
	qproc->mpss_reloc = mpss_reloc;
	/* Load firmware segments */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		offset = phdr->p_paddr - mpss_reloc;
		if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
			dev_err(qproc->dev, "segment outside memory range\n");
			ret = -EINVAL;
			goto release_firmware;
		}

		ptr = qproc->mpss_region + offset;

		if (phdr->p_filesz && phdr->p_offset < fw->size) {
			/* Firmware is large enough to be non-split */
			if (phdr->p_offset + phdr->p_filesz > fw->size) {
				dev_err(qproc->dev,
					"failed to load segment %d from truncated file %s\n",
					i, fw_name);
				ret = -EINVAL;
				goto release_firmware;
			}

			memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
		} else if (phdr->p_filesz) {
			/* Replace "xxx.xxx" with "xxx.bxx" */
			sprintf(fw_name + fw_name_len - 3, "b%02d", i);
			ret = request_firmware(&seg_fw, fw_name, qproc->dev);
			if (ret) {
				dev_err(qproc->dev, "failed to load %s\n", fw_name);
				goto release_firmware;
			}

			memcpy(ptr, seg_fw->data, seg_fw->size);

			release_firmware(seg_fw);
		}

		/* Zero-fill any BSS-like tail of the segment */
		if (phdr->p_memsz > phdr->p_filesz) {
			memset(ptr + phdr->p_filesz, 0,
			       phdr->p_memsz - phdr->p_filesz);
		}
		size += phdr->p_memsz;
	}

	/* Transfer ownership of modem ddr region to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true,
				      qproc->mpss_phys, qproc->mpss_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mpss memory failed: %d\n", ret);
		ret = -EAGAIN;
		goto release_firmware;
	}

	/*
	 * When relocating, the segments were placed starting at mpss_phys
	 * (offset p_paddr - min_addr), so that is the code start address.
	 */
	boot_addr = relocate ? qproc->mpss_phys : min_addr;
	writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
	writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
	writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);

release_firmware:
	release_firmware(fw);
out:
	kfree(fw_name);

	return ret < 0 ? ret : 0;
}

/*
 * Coredump helper: copy one segment into the dump buffer, loading the
 * MBA first if needed and reclaiming it once the last segment (tracked
 * via dump_segment_mask) has been copied.
 */
static void qcom_q6v5_dump_segment(struct rproc *rproc,
				   struct rproc_dump_segment *segment,
				   void *dest)
{
	int ret = 0;
	struct q6v5 *qproc = rproc->priv;
	unsigned long mask = BIT((unsigned long)segment->priv);
	void *ptr = rproc_da_to_va(rproc, segment->da, segment->size);

	/* Unlock mba before copying segments */
	if (!qproc->dump_mba_loaded)
		ret = q6v5_mba_load(qproc);

	/* Fill unreadable segments with 0xff instead of failing the dump */
	if (!ptr || ret)
		memset(dest, 0xff, segment->size);
	else
		memcpy(dest, ptr, segment->size);

	qproc->dump_segment_mask |= mask;

	/* Reclaim mba after copying segments */
	if (qproc->dump_segment_mask == qproc->dump_complete_mask) {
		if (qproc->dump_mba_loaded)
			q6v5_mba_reclaim(qproc);
	}
}

/* rproc .start op: boot the MBA, then load and start the MPSS firmware */
static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int xfermemop_ret;
	int ret;

	ret = q6v5_mba_load(qproc);
	if (ret)
		return ret;

	dev_info(qproc->dev, "MBA booted, loading mpss\n");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto reclaim_mpss;

	ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "start timed out\n");
		goto reclaim_mpss;
	}

	/* Modem is running; the MBA carveout can be returned to HLOS */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
						qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret)
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer system may become unstable\n");

	/* Reset Dump Segment Mask */
	qproc->dump_segment_mask = 0;
	qproc->running = true;

	return 0;

reclaim_mpss:
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						false, qproc->mpss_phys,
						qproc->mpss_size);
	WARN_ON(xfermemop_ret);
	q6v5_mba_reclaim(qproc);

	return ret;
}

static int q6v5_stop(struct rproc
*rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int ret;

	qproc->running = false;

	/* Ask the remote to shut down; best-effort, reclaim regardless */
	ret = qcom_q6v5_request_stop(&qproc->q6v5);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "timed out on wait\n");

	q6v5_mba_reclaim(qproc);

	return 0;
}

/*
 * rproc .da_to_va op: translate a device address within the mpss region
 * into a kernel virtual address, or NULL if out of range.
 */
static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len)
{
	struct q6v5 *qproc = rproc->priv;
	int offset;

	offset = da - qproc->mpss_reloc;
	if (offset < 0 || offset + len > qproc->mpss_size)
		return NULL;

	return qproc->mpss_region + offset;
}

/*
 * rproc .parse_fw op: register one coredump segment per valid program
 * header of the modem metadata image. The header index is stashed in the
 * segment's priv pointer and mirrored in dump_complete_mask so
 * qcom_q6v5_dump_segment() can tell when the last segment was copied.
 */
static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
					    const struct firmware *mba_fw)
{
	const struct firmware *fw;
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct elf32_hdr *ehdr;
	struct q6v5 *qproc = rproc->priv;
	unsigned long i;
	int ret;

	/* The dump layout comes from the mdt image, not the MBA firmware */
	ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load %s\n",
			qproc->hexagon_mdt_image);
		return ret;
	}

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);
	qproc->dump_complete_mask = 0;

	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
							phdr->p_memsz,
							qcom_q6v5_dump_segment,
							(void *)i);
		if (ret)
			break;

		qproc->dump_complete_mask |= BIT(i);
	}

	release_firmware(fw);
	return ret;
}

static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.da_to_va = q6v5_da_to_va,
	.parse_fw = qcom_q6v5_register_dump_segments,
	.load = q6v5_load,
};

/*
 * Called once the modem has taken over its proxy votes (MSA handover):
 * drop the proxy clocks, regulators and power domains held on its behalf.
 */
static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
{
	struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);

	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
}

/*
 * q6v5_init_mem() - map MMIO regions and resolve halt/conn regmaps from DT
 * @qproc: mss remoteproc instance
 * @pdev:  backing platform device
 *
 * Maps the "qdsp6" and "rmb" register regions and parses the
 * "qcom,halt-regs" (and, when has_halt_nav is set, "qcom,halt-nav-regs")
 * phandle+offset descriptors into regmaps and offsets.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
{
	struct of_phandle_args args;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
	qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->reg_base))
		return PTR_ERR(qproc->reg_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
	qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->rmb_base))
		return PTR_ERR(qproc->rmb_base);

	/* "qcom,halt-regs" = <&syscon q6_off modem_off nc_off> */
	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
					       "qcom,halt-regs", 3, 0, &args);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
		return -EINVAL;
	}

	qproc->halt_map = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(qproc->halt_map))
		return PTR_ERR(qproc->halt_map);

	qproc->halt_q6 = args.args[0];
	qproc->halt_modem = args.args[1];
	qproc->halt_nc = args.args[2];

	if (qproc->has_halt_nav) {
		struct platform_device *nav_pdev;

		/* First entry: regmap of the NAV halt register provider */
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,halt-nav-regs",
						       1, 0, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse halt-nav-regs\n");
			return -EINVAL;
		}

		nav_pdev = of_find_device_by_node(args.np);
		of_node_put(args.np);
		if (!nav_pdev) {
			/* Provider not probed yet; retry this probe later */
			dev_err(&pdev->dev, "failed to get mss clock device\n");
			return -EPROBE_DEFER;
		}

		qproc->halt_nav_map = dev_get_regmap(&nav_pdev->dev, NULL);
		if (!qproc->halt_nav_map) {
			dev_err(&pdev->dev, "failed to get map from device\n");
			return -EINVAL;
		}
		qproc->halt_nav = args.args[0];

		/* Second entry: syscon for the conn box register */
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,halt-nav-regs",
						       1, 1, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse halt-nav-regs\n");
			return -EINVAL;
		}

		qproc->conn_map = syscon_node_to_regmap(args.np);
		of_node_put(args.np);
		if (IS_ERR(qproc->conn_map))
			return PTR_ERR(qproc->conn_map);

		qproc->conn_box = args.args[0];
	}

	return 0;
}

/*
 * q6v5_init_clocks() - look up a NULL-terminated list of named clocks
 *
 * Return: number of clocks acquired, 0 if clk_names is NULL, or a negative
 * errno on lookup failure.
 */
static int q6v5_init_clocks(struct device *dev, struct clk **clks,
			    char **clk_names)
{
	int i;

	if (!clk_names)
		return 0;

	for (i = 0; clk_names[i]; i++) {
		clks[i] = devm_clk_get(dev, clk_names[i]);
		if (IS_ERR(clks[i])) {
			int rc = PTR_ERR(clks[i]);

			/* Deferral is expected; don't spam the log for it */
			if (rc != -EPROBE_DEFER)
				dev_err(dev, "Failed to get %s clock\n",
					clk_names[i]);
			return rc;
		}
	}

	return i;
}

/*
 * q6v5_pds_attach() - attach a NULL-terminated list of named power domains
 *
 * Return: number of domains attached, 0 if pd_names is NULL, or a negative
 * errno (already-attached domains are detached on failure).
 */
static int q6v5_pds_attach(struct device *dev, struct device **devs,
			   char **pd_names)
{
	size_t num_pds = 0;
	int ret;
	int i;

	if (!pd_names)
		return 0;

	while (pd_names[num_pds])
		num_pds++;

	for (i = 0; i < num_pds; i++) {
		devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
		if (IS_ERR_OR_NULL(devs[i])) {
			ret = PTR_ERR(devs[i]) ?
: -ENODATA; 1456 goto unroll_attach; 1457 } 1458 } 1459 1460 return num_pds; 1461 1462 unroll_attach: 1463 for (i--; i >= 0; i--) 1464 dev_pm_domain_detach(devs[i], false); 1465 1466 return ret; 1467 }; 1468 1469 static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds, 1470 size_t pd_count) 1471 { 1472 int i; 1473 1474 for (i = 0; i < pd_count; i++) 1475 dev_pm_domain_detach(pds[i], false); 1476 } 1477 1478 static int q6v5_init_reset(struct q6v5 *qproc) 1479 { 1480 qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev, 1481 "mss_restart"); 1482 if (IS_ERR(qproc->mss_restart)) { 1483 dev_err(qproc->dev, "failed to acquire mss restart\n"); 1484 return PTR_ERR(qproc->mss_restart); 1485 } 1486 1487 if (qproc->has_alt_reset || qproc->has_halt_nav) { 1488 qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev, 1489 "pdc_reset"); 1490 if (IS_ERR(qproc->pdc_reset)) { 1491 dev_err(qproc->dev, "failed to acquire pdc reset\n"); 1492 return PTR_ERR(qproc->pdc_reset); 1493 } 1494 } 1495 1496 return 0; 1497 } 1498 1499 static int q6v5_alloc_memory_region(struct q6v5 *qproc) 1500 { 1501 struct device_node *child; 1502 struct device_node *node; 1503 struct resource r; 1504 int ret; 1505 1506 child = of_get_child_by_name(qproc->dev->of_node, "mba"); 1507 node = of_parse_phandle(child, "memory-region", 0); 1508 ret = of_address_to_resource(node, 0, &r); 1509 if (ret) { 1510 dev_err(qproc->dev, "unable to resolve mba region\n"); 1511 return ret; 1512 } 1513 of_node_put(node); 1514 1515 qproc->mba_phys = r.start; 1516 qproc->mba_size = resource_size(&r); 1517 qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size); 1518 if (!qproc->mba_region) { 1519 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", 1520 &r.start, qproc->mba_size); 1521 return -EBUSY; 1522 } 1523 1524 child = of_get_child_by_name(qproc->dev->of_node, "mpss"); 1525 node = of_parse_phandle(child, "memory-region", 0); 1526 ret = 
of_address_to_resource(node, 0, &r); 1527 if (ret) { 1528 dev_err(qproc->dev, "unable to resolve mpss region\n"); 1529 return ret; 1530 } 1531 of_node_put(node); 1532 1533 qproc->mpss_phys = qproc->mpss_reloc = r.start; 1534 qproc->mpss_size = resource_size(&r); 1535 qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size); 1536 if (!qproc->mpss_region) { 1537 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", 1538 &r.start, qproc->mpss_size); 1539 return -EBUSY; 1540 } 1541 1542 return 0; 1543 } 1544 1545 #if IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) 1546 1547 /* Register IPA notification function */ 1548 int qcom_register_ipa_notify(struct rproc *rproc, qcom_ipa_notify_t notify, 1549 void *data) 1550 { 1551 struct qcom_rproc_ipa_notify *ipa_notify; 1552 struct q6v5 *qproc = rproc->priv; 1553 1554 if (!notify) 1555 return -EINVAL; 1556 1557 ipa_notify = &qproc->ipa_notify_subdev; 1558 if (ipa_notify->notify) 1559 return -EBUSY; 1560 1561 ipa_notify->notify = notify; 1562 ipa_notify->data = data; 1563 1564 return 0; 1565 } 1566 EXPORT_SYMBOL_GPL(qcom_register_ipa_notify); 1567 1568 /* Deregister IPA notification function */ 1569 void qcom_deregister_ipa_notify(struct rproc *rproc) 1570 { 1571 struct q6v5 *qproc = rproc->priv; 1572 1573 qproc->ipa_notify_subdev.notify = NULL; 1574 } 1575 EXPORT_SYMBOL_GPL(qcom_deregister_ipa_notify); 1576 #endif /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */ 1577 1578 static int q6v5_probe(struct platform_device *pdev) 1579 { 1580 const struct rproc_hexagon_res *desc; 1581 struct q6v5 *qproc; 1582 struct rproc *rproc; 1583 const char *mba_image; 1584 int ret; 1585 1586 desc = of_device_get_match_data(&pdev->dev); 1587 if (!desc) 1588 return -EINVAL; 1589 1590 if (desc->need_mem_protection && !qcom_scm_is_available()) 1591 return -EPROBE_DEFER; 1592 1593 mba_image = desc->hexagon_mba_image; 1594 ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name", 1595 0, &mba_image); 1596 if 
(ret < 0 && ret != -EINVAL) 1597 return ret; 1598 1599 rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops, 1600 mba_image, sizeof(*qproc)); 1601 if (!rproc) { 1602 dev_err(&pdev->dev, "failed to allocate rproc\n"); 1603 return -ENOMEM; 1604 } 1605 1606 rproc->auto_boot = false; 1607 1608 qproc = (struct q6v5 *)rproc->priv; 1609 qproc->dev = &pdev->dev; 1610 qproc->rproc = rproc; 1611 qproc->hexagon_mdt_image = "modem.mdt"; 1612 ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name", 1613 1, &qproc->hexagon_mdt_image); 1614 if (ret < 0 && ret != -EINVAL) 1615 return ret; 1616 1617 platform_set_drvdata(pdev, qproc); 1618 1619 qproc->has_halt_nav = desc->has_halt_nav; 1620 ret = q6v5_init_mem(qproc, pdev); 1621 if (ret) 1622 goto free_rproc; 1623 1624 ret = q6v5_alloc_memory_region(qproc); 1625 if (ret) 1626 goto free_rproc; 1627 1628 ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks, 1629 desc->proxy_clk_names); 1630 if (ret < 0) { 1631 dev_err(&pdev->dev, "Failed to get proxy clocks.\n"); 1632 goto free_rproc; 1633 } 1634 qproc->proxy_clk_count = ret; 1635 1636 ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks, 1637 desc->reset_clk_names); 1638 if (ret < 0) { 1639 dev_err(&pdev->dev, "Failed to get reset clocks.\n"); 1640 goto free_rproc; 1641 } 1642 qproc->reset_clk_count = ret; 1643 1644 ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks, 1645 desc->active_clk_names); 1646 if (ret < 0) { 1647 dev_err(&pdev->dev, "Failed to get active clocks.\n"); 1648 goto free_rproc; 1649 } 1650 qproc->active_clk_count = ret; 1651 1652 ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs, 1653 desc->proxy_supply); 1654 if (ret < 0) { 1655 dev_err(&pdev->dev, "Failed to get proxy regulators.\n"); 1656 goto free_rproc; 1657 } 1658 qproc->proxy_reg_count = ret; 1659 1660 ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs, 1661 desc->active_supply); 1662 if (ret < 0) { 1663 dev_err(&pdev->dev, "Failed to get active regulators.\n"); 1664 goto 
free_rproc; 1665 } 1666 qproc->active_reg_count = ret; 1667 1668 ret = q6v5_pds_attach(&pdev->dev, qproc->active_pds, 1669 desc->active_pd_names); 1670 if (ret < 0) { 1671 dev_err(&pdev->dev, "Failed to attach active power domains\n"); 1672 goto free_rproc; 1673 } 1674 qproc->active_pd_count = ret; 1675 1676 ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds, 1677 desc->proxy_pd_names); 1678 if (ret < 0) { 1679 dev_err(&pdev->dev, "Failed to init power domains\n"); 1680 goto detach_active_pds; 1681 } 1682 qproc->proxy_pd_count = ret; 1683 1684 qproc->has_alt_reset = desc->has_alt_reset; 1685 ret = q6v5_init_reset(qproc); 1686 if (ret) 1687 goto detach_proxy_pds; 1688 1689 qproc->version = desc->version; 1690 qproc->need_mem_protection = desc->need_mem_protection; 1691 1692 ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM, 1693 qcom_msa_handover); 1694 if (ret) 1695 goto detach_proxy_pds; 1696 1697 qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS); 1698 qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS); 1699 qcom_add_glink_subdev(rproc, &qproc->glink_subdev); 1700 qcom_add_smd_subdev(rproc, &qproc->smd_subdev); 1701 qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss"); 1702 qcom_add_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev); 1703 qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12); 1704 if (IS_ERR(qproc->sysmon)) { 1705 ret = PTR_ERR(qproc->sysmon); 1706 goto detach_proxy_pds; 1707 } 1708 1709 ret = rproc_add(rproc); 1710 if (ret) 1711 goto detach_proxy_pds; 1712 1713 return 0; 1714 1715 detach_proxy_pds: 1716 qcom_remove_ipa_notify_subdev(qproc->rproc, &qproc->ipa_notify_subdev); 1717 q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count); 1718 detach_active_pds: 1719 q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count); 1720 free_rproc: 1721 rproc_free(rproc); 1722 1723 return ret; 1724 } 1725 1726 static int q6v5_remove(struct platform_device *pdev) 1727 { 1728 struct q6v5 *qproc = platform_get_drvdata(pdev); 1729 
	rproc_del(qproc->rproc);

	/* Tear down subdevs in the reverse order of registration in probe */
	qcom_remove_sysmon_subdev(qproc->sysmon);
	qcom_remove_ipa_notify_subdev(qproc->rproc, &qproc->ipa_notify_subdev);
	qcom_remove_glink_subdev(qproc->rproc, &qproc->glink_subdev);
	qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev);
	qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev);

	q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);

	rproc_free(qproc->rproc);

	return 0;
}

/* Per-SoC resource descriptions (clocks, regulators, power domains) */

static const struct rproc_hexagon_res sc7180_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.reset_clk_names = (char*[]){
		"iface",
		"bus",
		"snoc_axi",
		NULL
	},
	.active_clk_names = (char*[]){
		"mnoc_axi",
		"nav",
		"mss_nav",
		"mss_crypto",
		NULL
	},
	.active_pd_names = (char*[]){
		"load_state",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		"mss",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_halt_nav = true,
	.version = MSS_SC7180,
};

static const struct rproc_hexagon_res sdm845_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		"prng",
		NULL
	},
	.reset_clk_names = (char*[]){
		"iface",
		"snoc_axi",
		NULL
	},
	.active_clk_names = (char*[]){
		"bus",
		"mem",
		"gpll0_mss",
		"mnoc_axi",
		NULL
	},
	.active_pd_names = (char*[]){
		"load_state",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		"mss",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = true,
	.has_halt_nav = false,
	.version = MSS_SDM845,
};

static const struct rproc_hexagon_res msm8998_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		"qdss",
		"mem",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"gpll0_mss",
		"mnoc_axi",
		"snoc_axi",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_halt_nav = false,
	.version = MSS_MSM8998,
};

static const struct rproc_hexagon_res msm8996_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		"pnoc",
		"qdss",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		"gpll0_mss",
		"snoc_axi",
		"mnoc_axi",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_halt_nav = false,
	.version = MSS_MSM8996,
};

static const struct rproc_hexagon_res msm8916_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.has_halt_nav = false,
	.version = MSS_MSM8916,
};

static const struct rproc_hexagon_res msm8974_mss = {
	.hexagon_mba_image = "mba.b00",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.active_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mss",
			.uV = 1050000,
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.has_halt_nav = false,
	.version = MSS_MSM8974,
};

static const struct of_device_id q6v5_of_match[] = {
	/* "qcom,q6v5-pil" is the legacy compatible, mapped to msm8916 */
	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
	{ .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
	{ .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
	{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
	{ },
};
MODULE_DEVICE_TABLE(of, q6v5_of_match);

static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-mss",
		.of_match_table = q6v5_of_match,
	},
};
module_platform_driver(q6v5_driver);

MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
MODULE_LICENSE("GPL v2");