1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Qualcomm self-authenticating modem subsystem remoteproc driver 4 * 5 * Copyright (C) 2016 Linaro Ltd. 6 * Copyright (C) 2014 Sony Mobile Communications AB 7 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. 8 */ 9 10 #include <linux/clk.h> 11 #include <linux/delay.h> 12 #include <linux/devcoredump.h> 13 #include <linux/dma-mapping.h> 14 #include <linux/interrupt.h> 15 #include <linux/kernel.h> 16 #include <linux/mfd/syscon.h> 17 #include <linux/module.h> 18 #include <linux/of_address.h> 19 #include <linux/of_device.h> 20 #include <linux/platform_device.h> 21 #include <linux/pm_domain.h> 22 #include <linux/pm_runtime.h> 23 #include <linux/regmap.h> 24 #include <linux/regulator/consumer.h> 25 #include <linux/remoteproc.h> 26 #include "linux/remoteproc/qcom_q6v5_ipa_notify.h" 27 #include <linux/reset.h> 28 #include <linux/soc/qcom/mdt_loader.h> 29 #include <linux/iopoll.h> 30 31 #include "remoteproc_internal.h" 32 #include "qcom_common.h" 33 #include "qcom_pil_info.h" 34 #include "qcom_q6v5.h" 35 36 #include <linux/qcom_scm.h> 37 38 #define MPSS_CRASH_REASON_SMEM 421 39 40 #define MBA_LOG_SIZE SZ_4K 41 42 /* RMB Status Register Values */ 43 #define RMB_PBL_SUCCESS 0x1 44 45 #define RMB_MBA_XPU_UNLOCKED 0x1 46 #define RMB_MBA_XPU_UNLOCKED_SCRIBBLED 0x2 47 #define RMB_MBA_META_DATA_AUTH_SUCCESS 0x3 48 #define RMB_MBA_AUTH_COMPLETE 0x4 49 50 /* PBL/MBA interface registers */ 51 #define RMB_MBA_IMAGE_REG 0x00 52 #define RMB_PBL_STATUS_REG 0x04 53 #define RMB_MBA_COMMAND_REG 0x08 54 #define RMB_MBA_STATUS_REG 0x0C 55 #define RMB_PMI_META_DATA_REG 0x10 56 #define RMB_PMI_CODE_START_REG 0x14 57 #define RMB_PMI_CODE_LENGTH_REG 0x18 58 #define RMB_MBA_MSS_STATUS 0x40 59 #define RMB_MBA_ALT_RESET 0x44 60 61 #define RMB_CMD_META_DATA_READY 0x1 62 #define RMB_CMD_LOAD_READY 0x2 63 64 /* QDSP6SS Register Offsets */ 65 #define QDSP6SS_RESET_REG 0x014 66 #define QDSP6SS_GFMUX_CTL_REG 0x020 67 #define 
QDSP6SS_PWR_CTL_REG	0x030
#define QDSP6SS_MEM_PWR_CTL		0x0B0
#define QDSP6V6SS_MEM_PWR_CTL		0x034
#define QDSP6SS_STRAP_ACC		0x110

/* AXI Halt Register Offsets */
#define AXI_HALTREQ_REG			0x0
#define AXI_HALTACK_REG			0x4
#define AXI_IDLE_REG			0x8
#define AXI_GATING_VALID_OVERRIDE	BIT(0)

#define HALT_ACK_TIMEOUT_US		100000

/* QDSP6SS_RESET */
#define Q6SS_STOP_CORE			BIT(0)
#define Q6SS_CORE_ARES			BIT(1)
#define Q6SS_BUS_ARES_ENABLE		BIT(2)

/* QDSP6SS CBCR */
#define Q6SS_CBCR_CLKEN			BIT(0)
#define Q6SS_CBCR_CLKOFF		BIT(31)
#define Q6SS_CBCR_TIMEOUT_US		200

/* QDSP6SS_GFMUX_CTL */
#define Q6SS_CLK_ENABLE			BIT(1)

/* QDSP6SS_PWR_CTL */
#define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
#define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
#define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
#define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
#define Q6SS_ETB_SLP_NRET_N		BIT(17)
#define Q6SS_L2DATA_STBY_N		BIT(18)
#define Q6SS_SLP_RET_N			BIT(19)
#define Q6SS_CLAMP_IO			BIT(20)
#define QDSS_BHS_ON			BIT(21)
#define QDSS_LDO_BYP			BIT(22)

/* QDSP6v56 parameters */
#define QDSP6v56_LDO_BYP		BIT(25)
#define QDSP6v56_BHS_ON			BIT(24)
#define QDSP6v56_CLAMP_WL		BIT(21)
#define QDSP6v56_CLAMP_QMC_MEM		BIT(22)
#define QDSP6SS_XO_CBCR			0x0038
#define QDSP6SS_ACC_OVERRIDE_VAL	0x20

/* QDSP6v65 parameters */
#define QDSP6SS_CORE_CBCR		0x20
#define QDSP6SS_SLEEP			0x3C
#define QDSP6SS_BOOT_CORE_START		0x400
#define QDSP6SS_BOOT_CMD		0x404
#define BOOT_FSM_TIMEOUT		10000

/* Voltage/load votes actually placed on one regulator supply */
struct reg_info {
	struct regulator *reg;
	int uV;		/* requested voltage; <= 0 means no voltage vote */
	int uA;		/* requested load; <= 0 means no load vote */
};

/* Static description of one regulator supply required by a SoC variant */
struct qcom_mss_reg_res {
	const char *supply;
	int uV;
	int uA;
};

/*
 * Per-SoC match data: names of the MBA firmware, supplies, clocks and
 * power domains this driver must manage for a given Hexagon version.
 */
struct rproc_hexagon_res {
	const char *hexagon_mba_image;
	struct qcom_mss_reg_res *proxy_supply;
	struct qcom_mss_reg_res *active_supply;
	char **proxy_clk_names;
	char **reset_clk_names;
	char **active_clk_names;
	char
**active_pd_names; 140 char **proxy_pd_names; 141 int version; 142 bool need_mem_protection; 143 bool has_alt_reset; 144 bool has_mba_logs; 145 bool has_spare_reg; 146 }; 147 148 struct q6v5 { 149 struct device *dev; 150 struct rproc *rproc; 151 152 void __iomem *reg_base; 153 void __iomem *rmb_base; 154 155 struct regmap *halt_map; 156 struct regmap *conn_map; 157 158 u32 halt_q6; 159 u32 halt_modem; 160 u32 halt_nc; 161 u32 conn_box; 162 163 struct reset_control *mss_restart; 164 struct reset_control *pdc_reset; 165 166 struct qcom_q6v5 q6v5; 167 168 struct clk *active_clks[8]; 169 struct clk *reset_clks[4]; 170 struct clk *proxy_clks[4]; 171 struct device *active_pds[1]; 172 struct device *proxy_pds[3]; 173 int active_clk_count; 174 int reset_clk_count; 175 int proxy_clk_count; 176 int active_pd_count; 177 int proxy_pd_count; 178 179 struct reg_info active_regs[1]; 180 struct reg_info proxy_regs[3]; 181 int active_reg_count; 182 int proxy_reg_count; 183 184 bool running; 185 186 bool dump_mba_loaded; 187 size_t current_dump_size; 188 size_t total_dump_size; 189 190 phys_addr_t mba_phys; 191 void *mba_region; 192 size_t mba_size; 193 194 phys_addr_t mpss_phys; 195 phys_addr_t mpss_reloc; 196 size_t mpss_size; 197 198 struct qcom_rproc_glink glink_subdev; 199 struct qcom_rproc_subdev smd_subdev; 200 struct qcom_rproc_ssr ssr_subdev; 201 struct qcom_rproc_ipa_notify ipa_notify_subdev; 202 struct qcom_sysmon *sysmon; 203 bool need_mem_protection; 204 bool has_alt_reset; 205 bool has_mba_logs; 206 bool has_spare_reg; 207 int mpss_perm; 208 int mba_perm; 209 const char *hexagon_mdt_image; 210 int version; 211 }; 212 213 enum { 214 MSS_MSM8916, 215 MSS_MSM8974, 216 MSS_MSM8996, 217 MSS_MSM8998, 218 MSS_SC7180, 219 MSS_SDM845, 220 }; 221 222 static int q6v5_regulator_init(struct device *dev, struct reg_info *regs, 223 const struct qcom_mss_reg_res *reg_res) 224 { 225 int rc; 226 int i; 227 228 if (!reg_res) 229 return 0; 230 231 for (i = 0; reg_res[i].supply; i++) { 232 
regs[i].reg = devm_regulator_get(dev, reg_res[i].supply); 233 if (IS_ERR(regs[i].reg)) { 234 rc = PTR_ERR(regs[i].reg); 235 if (rc != -EPROBE_DEFER) 236 dev_err(dev, "Failed to get %s\n regulator", 237 reg_res[i].supply); 238 return rc; 239 } 240 241 regs[i].uV = reg_res[i].uV; 242 regs[i].uA = reg_res[i].uA; 243 } 244 245 return i; 246 } 247 248 static int q6v5_regulator_enable(struct q6v5 *qproc, 249 struct reg_info *regs, int count) 250 { 251 int ret; 252 int i; 253 254 for (i = 0; i < count; i++) { 255 if (regs[i].uV > 0) { 256 ret = regulator_set_voltage(regs[i].reg, 257 regs[i].uV, INT_MAX); 258 if (ret) { 259 dev_err(qproc->dev, 260 "Failed to request voltage for %d.\n", 261 i); 262 goto err; 263 } 264 } 265 266 if (regs[i].uA > 0) { 267 ret = regulator_set_load(regs[i].reg, 268 regs[i].uA); 269 if (ret < 0) { 270 dev_err(qproc->dev, 271 "Failed to set regulator mode\n"); 272 goto err; 273 } 274 } 275 276 ret = regulator_enable(regs[i].reg); 277 if (ret) { 278 dev_err(qproc->dev, "Regulator enable failed\n"); 279 goto err; 280 } 281 } 282 283 return 0; 284 err: 285 for (; i >= 0; i--) { 286 if (regs[i].uV > 0) 287 regulator_set_voltage(regs[i].reg, 0, INT_MAX); 288 289 if (regs[i].uA > 0) 290 regulator_set_load(regs[i].reg, 0); 291 292 regulator_disable(regs[i].reg); 293 } 294 295 return ret; 296 } 297 298 static void q6v5_regulator_disable(struct q6v5 *qproc, 299 struct reg_info *regs, int count) 300 { 301 int i; 302 303 for (i = 0; i < count; i++) { 304 if (regs[i].uV > 0) 305 regulator_set_voltage(regs[i].reg, 0, INT_MAX); 306 307 if (regs[i].uA > 0) 308 regulator_set_load(regs[i].reg, 0); 309 310 regulator_disable(regs[i].reg); 311 } 312 } 313 314 static int q6v5_clk_enable(struct device *dev, 315 struct clk **clks, int count) 316 { 317 int rc; 318 int i; 319 320 for (i = 0; i < count; i++) { 321 rc = clk_prepare_enable(clks[i]); 322 if (rc) { 323 dev_err(dev, "Clock enable failed\n"); 324 goto err; 325 } 326 } 327 328 return 0; 329 err: 330 for (i--; i 
>= 0; i--) 331 clk_disable_unprepare(clks[i]); 332 333 return rc; 334 } 335 336 static void q6v5_clk_disable(struct device *dev, 337 struct clk **clks, int count) 338 { 339 int i; 340 341 for (i = 0; i < count; i++) 342 clk_disable_unprepare(clks[i]); 343 } 344 345 static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds, 346 size_t pd_count) 347 { 348 int ret; 349 int i; 350 351 for (i = 0; i < pd_count; i++) { 352 dev_pm_genpd_set_performance_state(pds[i], INT_MAX); 353 ret = pm_runtime_get_sync(pds[i]); 354 if (ret < 0) 355 goto unroll_pd_votes; 356 } 357 358 return 0; 359 360 unroll_pd_votes: 361 for (i--; i >= 0; i--) { 362 dev_pm_genpd_set_performance_state(pds[i], 0); 363 pm_runtime_put(pds[i]); 364 } 365 366 return ret; 367 } 368 369 static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds, 370 size_t pd_count) 371 { 372 int i; 373 374 for (i = 0; i < pd_count; i++) { 375 dev_pm_genpd_set_performance_state(pds[i], 0); 376 pm_runtime_put(pds[i]); 377 } 378 } 379 380 static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm, 381 bool local, bool remote, phys_addr_t addr, 382 size_t size) 383 { 384 struct qcom_scm_vmperm next[2]; 385 int perms = 0; 386 387 if (!qproc->need_mem_protection) 388 return 0; 389 390 if (local == !!(*current_perm & BIT(QCOM_SCM_VMID_HLOS)) && 391 remote == !!(*current_perm & BIT(QCOM_SCM_VMID_MSS_MSA))) 392 return 0; 393 394 if (local) { 395 next[perms].vmid = QCOM_SCM_VMID_HLOS; 396 next[perms].perm = QCOM_SCM_PERM_RWX; 397 perms++; 398 } 399 400 if (remote) { 401 next[perms].vmid = QCOM_SCM_VMID_MSS_MSA; 402 next[perms].perm = QCOM_SCM_PERM_RW; 403 perms++; 404 } 405 406 return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K), 407 current_perm, next, perms); 408 } 409 410 static int q6v5_load(struct rproc *rproc, const struct firmware *fw) 411 { 412 struct q6v5 *qproc = rproc->priv; 413 414 memcpy(qproc->mba_region, fw->data, fw->size); 415 416 return 0; 417 } 418 419 static int 
q6v5_reset_assert(struct q6v5 *qproc)
{
	int ret;

	if (qproc->has_alt_reset) {
		/* Alt-reset SoCs: pulse mss_restart while PDC reset is held */
		reset_control_assert(qproc->pdc_reset);
		ret = reset_control_reset(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
	} else if (qproc->has_spare_reg) {
		/*
		 * When the AXI pipeline is being reset with the Q6 modem partly
		 * operational there is possibility of AXI valid signal to
		 * glitch, leading to spurious transactions and Q6 hangs. A work
		 * around is employed by asserting the AXI_GATING_VALID_OVERRIDE
		 * BIT before triggering Q6 MSS reset. AXI_GATING_VALID_OVERRIDE
		 * is withdrawn post MSS assert followed by a MSS deassert,
		 * while holding the PDC reset.
		 */
		reset_control_assert(qproc->pdc_reset);
		regmap_update_bits(qproc->conn_map, qproc->conn_box,
				   AXI_GATING_VALID_OVERRIDE, 1);
		reset_control_assert(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
		regmap_update_bits(qproc->conn_map, qproc->conn_box,
				   AXI_GATING_VALID_OVERRIDE, 0);
		ret = reset_control_deassert(qproc->mss_restart);
	} else {
		ret = reset_control_assert(qproc->mss_restart);
	}

	return ret;
}

/* Release the MSS out of reset, honoring the SoC-specific reset scheme */
static int q6v5_reset_deassert(struct q6v5 *qproc)
{
	int ret;

	if (qproc->has_alt_reset) {
		/* Signal "alt reset" to the MBA through the RMB register */
		reset_control_assert(qproc->pdc_reset);
		writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
		ret = reset_control_reset(qproc->mss_restart);
		writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
		reset_control_deassert(qproc->pdc_reset);
	} else if (qproc->has_spare_reg) {
		ret = reset_control_reset(qproc->mss_restart);
	} else {
		ret = reset_control_deassert(qproc->mss_restart);
	}

	return ret;
}

/*
 * Poll the PBL status register until it reports a non-zero status or @ms
 * milliseconds elapse.  Returns the raw status value or -ETIMEDOUT.
 */
static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
{
	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
		if (val)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

/*
 * Poll the MBA status register until it matches @status (or, when @status
 * is 0, until any non-zero status appears), an error (negative) status is
 * posted, or @ms milliseconds elapse.  Returns the status or -ETIMEDOUT.
 */
static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
{
	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		/* Negative values are MBA error codes - stop polling */
		val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
		if (val < 0)
			break;

		if (!status && val)
			break;
		else if (status && val == status)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

/*
 * Capture the first MBA_LOG_SIZE bytes of the MBA region as a device
 * coredump.  Ownership of the region is first returned to Linux; the
 * vmalloc'd buffer is handed over to (and freed by) the devcoredump core.
 */
static void q6v5_dump_mba_logs(struct q6v5 *qproc)
{
	struct rproc *rproc = qproc->rproc;
	void *data;

	if (!qproc->has_mba_logs)
		return;

	if (q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, qproc->mba_phys,
				    qproc->mba_size))
		return;

	data = vmalloc(MBA_LOG_SIZE);
	if (!data)
		return;

	memcpy(data, qproc->mba_region, MBA_LOG_SIZE);
	dev_coredumpv(&rproc->dev, data, MBA_LOG_SIZE, GFP_KERNEL);
}

/*
 * Bring the Hexagon core out of reset using the version-specific power-up
 * sequence, then wait for the PBL to report status.
 */
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;
	int i;

	if (qproc->version == MSS_SDM845) {
		/* Enable the QDSP6SS sleep clock */
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* De-assert QDSP6 stop core */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
		/* Trigger boot FSM */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
					 val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in
reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}

		goto pbl_wait;
	} else if (qproc->version == MSS_SC7180) {
		/* Enable the QDSP6SS sleep clock */
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Turn on the XO clock needed for PLL setup */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS XO clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Configure Q6 core CBCR to auto-enable after reset sequence */
		val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR);

		/* De-assert the Q6 stop core signal */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);

		/* Wait for 10 us for any staggering logic to settle */
		usleep_range(10, 20);

		/* Trigger the boot FSM to start the Q6 out-of-reset sequence */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		/* Poll the MSS_STATUS for FSM completion */
		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
					 val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}
		goto pbl_wait;
	} else if (qproc->version == MSS_MSM8996 ||
		   qproc->version == MSS_MSM8998) {
		int mem_pwr_ctl;

		/* Override the ACC value if required */
		writel(QDSP6SS_ACC_OVERRIDE_VAL,
		       qproc->reg_base + QDSP6SS_STRAP_ACC);

		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* BHS require xo cbcr to be enabled */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		/* Read CLKOFF bit to go low indicating CLK is enabled */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev,
				"xo cbcr enabling timed out (rc:%d)\n", ret);
			return ret;
		}
		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSP6v56_BHS_ON;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		/* Read back ensures the write posted before the delay */
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);

		/* Put LDO in bypass mode */
		val |= QDSP6v56_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert QDSP6 compiler memory clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert memory peripheral sleep and L2 memory standby */
		val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Turn on L1, L2, ETB and JU memories 1 at a time */
		if (qproc->version == MSS_MSM8996) {
			mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL;
			i = 19;
		} else {
			/* MSS_MSM8998 */
			mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL;
			i = 28;
		}
		val = readl(qproc->reg_base + mem_pwr_ctl);
		for (; i >= 0; i--) {
			val |= BIT(i);
			writel(val, qproc->reg_base + mem_pwr_ctl);
			/*
			 * Read back value to ensure the write is done then
			 * wait for 1us for both memory peripheral and data
			 * array to turn on.
			 */
			val |= readl(qproc->reg_base + mem_pwr_ctl);
			udelay(1);
		}
		/* Remove word line clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_WL;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	} else {
		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		/* Read back ensures the write posted before the delay */
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);
		/*
		 * Turn on memories. L2 banks should be done individually
		 * to minimize inrush current.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_2;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_1;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_0;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}
	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

pbl_wait:
	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}

/*
 * Halt one AXI port of the modem subsystem: request halt, wait for the
 * acknowledgment, then clear the request (the port stays halted until the
 * next reset).
 */
static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
				   struct regmap *halt_map,
				   u32 offset)
{
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (!ret && val)
		return;

	/* Assert halt request */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);

	/* Wait for halt */
	regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val,
				 val, 1000, HALT_ACK_TIMEOUT_US);

	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (ret || !val)
		dev_err(qproc->dev, "port failed halt\n");

	/* Clear halt request (port will remain halted until reset) */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}

/*
 * Hand the MPSS firmware metadata (ELF headers + hash segment) to the MBA
 * for authentication: copy it into a DMA buffer, assign the buffer to the
 * modem VM, signal the MBA via the RMB registers, then reclaim the buffer
 * once authentication has concluded.
 */
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	void *metadata;
	int mdata_perm;
	int xferop_ret;
	size_t size;
	void *ptr;
	int ret;

	metadata = qcom_mdt_read_metadata(fw, &size);
	if (IS_ERR(metadata))
		return PTR_ERR(metadata);

	ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
	if (!ptr) {
		kfree(metadata);
		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
		return -ENOMEM;
	}

	memcpy(ptr, metadata, size);

	/* Hypervisor mapping to access metadata by modem */
	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
	ret =
q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true,
				      phys, size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to metadata failed: %d\n", ret);
		ret = -EAGAIN;
		goto free_dma_attrs;
	}

	/* Tell the MBA where the metadata lives and start authentication */
	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	/* Metadata authentication done, remove modem access */
	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false,
					     phys, size);
	if (xferop_ret)
		dev_warn(qproc->dev,
			 "mdt buffer not reclaimed system may become unstable\n");

free_dma_attrs:
	dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
	kfree(metadata);

	/* Positive values are MBA status codes, not errors */
	return ret < 0 ? ret : 0;
}

/*
 * A program header describes a loadable MPSS segment only if it is
 * PT_LOAD, is not the (Qualcomm-specific) hash segment and has a
 * non-zero memory footprint.
 */
static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
{
	if (phdr->p_type != PT_LOAD)
		return false;

	if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
		return false;

	if (!phdr->p_memsz)
		return false;

	return true;
}

/*
 * Power up all resources, release the Hexagon from reset and boot the MBA
 * (Modem Boot Authenticator).  On failure the acquired resources are
 * released in reverse order through the goto ladder below.
 */
static int q6v5_mba_load(struct q6v5 *qproc)
{
	int ret;
	int xfermemop_ret;
	bool mba_load_err = false;

	qcom_q6v5_prepare(&qproc->q6v5);

	ret = q6v5_pds_enable(qproc, qproc->active_pds, qproc->active_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable active power domains\n");
		goto disable_irqs;
	}

	ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable proxy power domains\n");
		goto disable_active_pds;
	}

	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
				    qproc->proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy supplies\n");
		goto disable_proxy_pds;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
			      qproc->proxy_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy clocks\n");
		goto disable_proxy_reg;
	}

	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
				    qproc->active_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		goto disable_proxy_clk;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
			      qproc->reset_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable reset clocks\n");
		goto disable_vdd;
	}

	ret = q6v5_reset_deassert(qproc);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_reset_clks;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
			      qproc->active_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable clocks\n");
		goto assert_reset;
}

	/* Assign MBA image access in DDR to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true,
				      qproc->mba_phys, qproc->mba_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mba memory failed: %d\n", ret);
		goto disable_active_clks;
	}

	/* Point the PBL at the MBA image and release the core from reset */
	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto reclaim_mba;

	/* Wait for the MBA to report any status (0 = wait for non-zero) */
	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	qproc->dump_mba_loaded = true;
	return 0;

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	mba_load_err = true;
reclaim_mba:
	/* Return the MBA region to Linux; dump its logs if the boot failed */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						false, qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret) {
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");
	} else if (mba_load_err) {
		q6v5_dump_mba_logs(qproc);
	}

disable_active_clks:
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
assert_reset:
	q6v5_reset_assert(qproc);
disable_reset_clks:
	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
disable_vdd:
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
disable_proxy_clk:
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
disable_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
disable_proxy_pds:
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
disable_active_pds:
	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
disable_irqs:
	qcom_q6v5_unprepare(&qproc->q6v5);

	return ret;
}

/*
 * Tear down the MBA: halt the AXI ports, assert reset and release the
 * active (non-proxy) resources acquired by q6v5_mba_load().
 */
static void q6v5_mba_reclaim(struct q6v5 *qproc)
{
	int ret;
	u32 val;

	qproc->dump_mba_loaded = false;

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	if (qproc->version == MSS_MSM8996) {
		/*
		 * To avoid high MX current during LPASS/MSS restart.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
			QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}

	q6v5_reset_assert(qproc);

	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);

	/* In case of failure or coredump scenario where reclaiming MBA memory
	 * could not happen reclaim it here.
1018 */ 1019 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, 1020 qproc->mba_phys, 1021 qproc->mba_size); 1022 WARN_ON(ret); 1023 1024 ret = qcom_q6v5_unprepare(&qproc->q6v5); 1025 if (ret) { 1026 q6v5_pds_disable(qproc, qproc->proxy_pds, 1027 qproc->proxy_pd_count); 1028 q6v5_clk_disable(qproc->dev, qproc->proxy_clks, 1029 qproc->proxy_clk_count); 1030 q6v5_regulator_disable(qproc, qproc->proxy_regs, 1031 qproc->proxy_reg_count); 1032 } 1033 } 1034 1035 static int q6v5_reload_mba(struct rproc *rproc) 1036 { 1037 struct q6v5 *qproc = rproc->priv; 1038 const struct firmware *fw; 1039 int ret; 1040 1041 ret = request_firmware(&fw, rproc->firmware, qproc->dev); 1042 if (ret < 0) 1043 return ret; 1044 1045 q6v5_load(rproc, fw); 1046 ret = q6v5_mba_load(qproc); 1047 release_firmware(fw); 1048 1049 return ret; 1050 } 1051 1052 static int q6v5_mpss_load(struct q6v5 *qproc) 1053 { 1054 const struct elf32_phdr *phdrs; 1055 const struct elf32_phdr *phdr; 1056 const struct firmware *seg_fw; 1057 const struct firmware *fw; 1058 struct elf32_hdr *ehdr; 1059 phys_addr_t mpss_reloc; 1060 phys_addr_t boot_addr; 1061 phys_addr_t min_addr = PHYS_ADDR_MAX; 1062 phys_addr_t max_addr = 0; 1063 u32 code_length; 1064 bool relocate = false; 1065 char *fw_name; 1066 size_t fw_name_len; 1067 ssize_t offset; 1068 size_t size = 0; 1069 void *ptr; 1070 int ret; 1071 int i; 1072 1073 fw_name_len = strlen(qproc->hexagon_mdt_image); 1074 if (fw_name_len <= 4) 1075 return -EINVAL; 1076 1077 fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL); 1078 if (!fw_name) 1079 return -ENOMEM; 1080 1081 ret = request_firmware(&fw, fw_name, qproc->dev); 1082 if (ret < 0) { 1083 dev_err(qproc->dev, "unable to load %s\n", fw_name); 1084 goto out; 1085 } 1086 1087 /* Initialize the RMB validator */ 1088 writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); 1089 1090 ret = q6v5_mpss_init_image(qproc, fw); 1091 if (ret) 1092 goto release_firmware; 1093 1094 ehdr = (struct elf32_hdr 
*)fw->data; 1095 phdrs = (struct elf32_phdr *)(ehdr + 1); 1096 1097 for (i = 0; i < ehdr->e_phnum; i++) { 1098 phdr = &phdrs[i]; 1099 1100 if (!q6v5_phdr_valid(phdr)) 1101 continue; 1102 1103 if (phdr->p_flags & QCOM_MDT_RELOCATABLE) 1104 relocate = true; 1105 1106 if (phdr->p_paddr < min_addr) 1107 min_addr = phdr->p_paddr; 1108 1109 if (phdr->p_paddr + phdr->p_memsz > max_addr) 1110 max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K); 1111 } 1112 1113 /** 1114 * In case of a modem subsystem restart on secure devices, the modem 1115 * memory can be reclaimed only after MBA is loaded. For modem cold 1116 * boot this will be a nop 1117 */ 1118 q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false, 1119 qproc->mpss_phys, qproc->mpss_size); 1120 1121 /* Share ownership between Linux and MSS, during segment loading */ 1122 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true, 1123 qproc->mpss_phys, qproc->mpss_size); 1124 if (ret) { 1125 dev_err(qproc->dev, 1126 "assigning Q6 access to mpss memory failed: %d\n", ret); 1127 ret = -EAGAIN; 1128 goto release_firmware; 1129 } 1130 1131 mpss_reloc = relocate ? 
min_addr : qproc->mpss_phys; 1132 qproc->mpss_reloc = mpss_reloc; 1133 /* Load firmware segments */ 1134 for (i = 0; i < ehdr->e_phnum; i++) { 1135 phdr = &phdrs[i]; 1136 1137 if (!q6v5_phdr_valid(phdr)) 1138 continue; 1139 1140 offset = phdr->p_paddr - mpss_reloc; 1141 if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) { 1142 dev_err(qproc->dev, "segment outside memory range\n"); 1143 ret = -EINVAL; 1144 goto release_firmware; 1145 } 1146 1147 ptr = ioremap_wc(qproc->mpss_phys + offset, phdr->p_memsz); 1148 if (!ptr) { 1149 dev_err(qproc->dev, 1150 "unable to map memory region: %pa+%zx-%x\n", 1151 &qproc->mpss_phys, offset, phdr->p_memsz); 1152 goto release_firmware; 1153 } 1154 1155 if (phdr->p_filesz && phdr->p_offset < fw->size) { 1156 /* Firmware is large enough to be non-split */ 1157 if (phdr->p_offset + phdr->p_filesz > fw->size) { 1158 dev_err(qproc->dev, 1159 "failed to load segment %d from truncated file %s\n", 1160 i, fw_name); 1161 ret = -EINVAL; 1162 iounmap(ptr); 1163 goto release_firmware; 1164 } 1165 1166 memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz); 1167 } else if (phdr->p_filesz) { 1168 /* Replace "xxx.xxx" with "xxx.bxx" */ 1169 sprintf(fw_name + fw_name_len - 3, "b%02d", i); 1170 ret = request_firmware(&seg_fw, fw_name, qproc->dev); 1171 if (ret) { 1172 dev_err(qproc->dev, "failed to load %s\n", fw_name); 1173 iounmap(ptr); 1174 goto release_firmware; 1175 } 1176 1177 memcpy(ptr, seg_fw->data, seg_fw->size); 1178 1179 release_firmware(seg_fw); 1180 } 1181 1182 if (phdr->p_memsz > phdr->p_filesz) { 1183 memset(ptr + phdr->p_filesz, 0, 1184 phdr->p_memsz - phdr->p_filesz); 1185 } 1186 iounmap(ptr); 1187 size += phdr->p_memsz; 1188 1189 code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); 1190 if (!code_length) { 1191 boot_addr = relocate ? 
qproc->mpss_phys : min_addr; 1192 writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG); 1193 writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG); 1194 } 1195 writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); 1196 1197 ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG); 1198 if (ret < 0) { 1199 dev_err(qproc->dev, "MPSS authentication failed: %d\n", 1200 ret); 1201 goto release_firmware; 1202 } 1203 } 1204 1205 /* Transfer ownership of modem ddr region to q6 */ 1206 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true, 1207 qproc->mpss_phys, qproc->mpss_size); 1208 if (ret) { 1209 dev_err(qproc->dev, 1210 "assigning Q6 access to mpss memory failed: %d\n", ret); 1211 ret = -EAGAIN; 1212 goto release_firmware; 1213 } 1214 1215 ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000); 1216 if (ret == -ETIMEDOUT) 1217 dev_err(qproc->dev, "MPSS authentication timed out\n"); 1218 else if (ret < 0) 1219 dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret); 1220 1221 qcom_pil_info_store("modem", qproc->mpss_phys, qproc->mpss_size); 1222 1223 release_firmware: 1224 release_firmware(fw); 1225 out: 1226 kfree(fw_name); 1227 1228 return ret < 0 ? 
	       ret : 0;
}

/*
 * Coredump segment copy-out callback registered via
 * rproc_coredump_add_custom_segment().  If the MBA is not already loaded
 * (qproc->dump_mba_loaded), it is reloaded and MPSS memory ownership is
 * returned to Linux before mapping and copying the segment; unreadable
 * segments are filled with 0xff.  Once the accumulated copy size reaches
 * total_dump_size, ownership is handed back to Q6 and the MBA is reclaimed.
 */
static void qcom_q6v5_dump_segment(struct rproc *rproc,
				   struct rproc_dump_segment *segment,
				   void *dest, size_t cp_offset, size_t size)
{
	int ret = 0;
	struct q6v5 *qproc = rproc->priv;
	int offset = segment->da - qproc->mpss_reloc;
	void *ptr = NULL;

	/* Unlock mba before copying segments */
	if (!qproc->dump_mba_loaded) {
		ret = q6v5_reload_mba(rproc);
		if (!ret) {
			/* Reset ownership back to Linux to copy segments */
			ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						      true, false,
						      qproc->mpss_phys,
						      qproc->mpss_size);
		}
	}

	if (!ret)
		ptr = ioremap_wc(qproc->mpss_phys + offset + cp_offset, size);

	if (ptr) {
		memcpy(dest, ptr, size);
		iounmap(ptr);
	} else {
		/* Segment unreadable: pad with 0xff rather than abort dump */
		memset(dest, 0xff, size);
	}

	qproc->current_dump_size += size;

	/* Reclaim mba after copying segments */
	if (qproc->current_dump_size == qproc->total_dump_size) {
		if (qproc->dump_mba_loaded) {
			/* Try to reset ownership back to Q6 */
			q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						false, true,
						qproc->mpss_phys,
						qproc->mpss_size);
			q6v5_mba_reclaim(qproc);
		}
	}
}

/*
 * rproc .start operation: boot the MBA, load and authenticate the MPSS
 * firmware, then wait for the modem's start acknowledgement.  On success
 * the MBA buffer is reclaimed from Q6 (failure there is logged but not
 * fatal).  On failure the MBA is torn down and its logs are dumped.
 */
static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int xfermemop_ret;
	int ret;

	ret = q6v5_mba_load(qproc);
	if (ret)
		return ret;

	dev_info(qproc->dev, "MBA booted, loading mpss\n");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto reclaim_mpss;

	ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "start timed out\n");
		goto reclaim_mpss;
	}

	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						false, qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret)
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer system may become unstable\n");

	/* Reset Dump Segment Mask */
	qproc->current_dump_size = 0;
	qproc->running = true;

	return 0;

reclaim_mpss:
	q6v5_mba_reclaim(qproc);
	q6v5_dump_mba_logs(qproc);

	return ret;
}

/*
 * rproc .stop operation: request a graceful modem shutdown (timeout is
 * logged but ignored) and reclaim the MBA resources.  Always returns 0.
 */
static int q6v5_stop(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int ret;

	qproc->running = false;

	ret = qcom_q6v5_request_stop(&qproc->q6v5);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "timed out on wait\n");

	q6v5_mba_reclaim(qproc);

	return 0;
}

/*
 * rproc .parse_fw operation: walk the program headers of the modem MDT
 * image (not the MBA firmware passed in) and register one coredump
 * segment per valid phdr, accumulating total_dump_size for
 * qcom_q6v5_dump_segment() above.
 */
static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
					    const struct firmware *mba_fw)
{
	const struct firmware *fw;
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct elf32_hdr *ehdr;
	struct q6v5 *qproc = rproc->priv;
	unsigned long i;
	int ret;

	ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load %s\n",
			qproc->hexagon_mdt_image);
		return ret;
	}

	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);
	qproc->total_dump_size = 0;

	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
							phdr->p_memsz,
							qcom_q6v5_dump_segment,
							NULL);
		if (ret)
			break;

		qproc->total_dump_size += phdr->p_memsz;
	}

	release_firmware(fw);
	return ret;
}

/* remoteproc operations for the self-authenticating modem */
static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.parse_fw = qcom_q6v5_register_dump_segments,
	.load = q6v5_load,
};

static void
qcom_msa_handover(struct qcom_q6v5 *q6v5) 1387 { 1388 struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5); 1389 1390 q6v5_clk_disable(qproc->dev, qproc->proxy_clks, 1391 qproc->proxy_clk_count); 1392 q6v5_regulator_disable(qproc, qproc->proxy_regs, 1393 qproc->proxy_reg_count); 1394 q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count); 1395 } 1396 1397 static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev) 1398 { 1399 struct of_phandle_args args; 1400 struct resource *res; 1401 int ret; 1402 1403 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6"); 1404 qproc->reg_base = devm_ioremap_resource(&pdev->dev, res); 1405 if (IS_ERR(qproc->reg_base)) 1406 return PTR_ERR(qproc->reg_base); 1407 1408 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb"); 1409 qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res); 1410 if (IS_ERR(qproc->rmb_base)) 1411 return PTR_ERR(qproc->rmb_base); 1412 1413 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, 1414 "qcom,halt-regs", 3, 0, &args); 1415 if (ret < 0) { 1416 dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n"); 1417 return -EINVAL; 1418 } 1419 1420 qproc->halt_map = syscon_node_to_regmap(args.np); 1421 of_node_put(args.np); 1422 if (IS_ERR(qproc->halt_map)) 1423 return PTR_ERR(qproc->halt_map); 1424 1425 qproc->halt_q6 = args.args[0]; 1426 qproc->halt_modem = args.args[1]; 1427 qproc->halt_nc = args.args[2]; 1428 1429 if (qproc->has_spare_reg) { 1430 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, 1431 "qcom,spare-regs", 1432 1, 0, &args); 1433 if (ret < 0) { 1434 dev_err(&pdev->dev, "failed to parse spare-regs\n"); 1435 return -EINVAL; 1436 } 1437 1438 qproc->conn_map = syscon_node_to_regmap(args.np); 1439 of_node_put(args.np); 1440 if (IS_ERR(qproc->conn_map)) 1441 return PTR_ERR(qproc->conn_map); 1442 1443 qproc->conn_box = args.args[0]; 1444 } 1445 1446 return 0; 1447 } 1448 1449 static int q6v5_init_clocks(struct device *dev, struct clk 
**clks, 1450 char **clk_names) 1451 { 1452 int i; 1453 1454 if (!clk_names) 1455 return 0; 1456 1457 for (i = 0; clk_names[i]; i++) { 1458 clks[i] = devm_clk_get(dev, clk_names[i]); 1459 if (IS_ERR(clks[i])) { 1460 int rc = PTR_ERR(clks[i]); 1461 1462 if (rc != -EPROBE_DEFER) 1463 dev_err(dev, "Failed to get %s clock\n", 1464 clk_names[i]); 1465 return rc; 1466 } 1467 } 1468 1469 return i; 1470 } 1471 1472 static int q6v5_pds_attach(struct device *dev, struct device **devs, 1473 char **pd_names) 1474 { 1475 size_t num_pds = 0; 1476 int ret; 1477 int i; 1478 1479 if (!pd_names) 1480 return 0; 1481 1482 while (pd_names[num_pds]) 1483 num_pds++; 1484 1485 for (i = 0; i < num_pds; i++) { 1486 devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]); 1487 if (IS_ERR_OR_NULL(devs[i])) { 1488 ret = PTR_ERR(devs[i]) ? : -ENODATA; 1489 goto unroll_attach; 1490 } 1491 } 1492 1493 return num_pds; 1494 1495 unroll_attach: 1496 for (i--; i >= 0; i--) 1497 dev_pm_domain_detach(devs[i], false); 1498 1499 return ret; 1500 } 1501 1502 static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds, 1503 size_t pd_count) 1504 { 1505 int i; 1506 1507 for (i = 0; i < pd_count; i++) 1508 dev_pm_domain_detach(pds[i], false); 1509 } 1510 1511 static int q6v5_init_reset(struct q6v5 *qproc) 1512 { 1513 qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev, 1514 "mss_restart"); 1515 if (IS_ERR(qproc->mss_restart)) { 1516 dev_err(qproc->dev, "failed to acquire mss restart\n"); 1517 return PTR_ERR(qproc->mss_restart); 1518 } 1519 1520 if (qproc->has_alt_reset || qproc->has_spare_reg) { 1521 qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev, 1522 "pdc_reset"); 1523 if (IS_ERR(qproc->pdc_reset)) { 1524 dev_err(qproc->dev, "failed to acquire pdc reset\n"); 1525 return PTR_ERR(qproc->pdc_reset); 1526 } 1527 } 1528 1529 return 0; 1530 } 1531 1532 static int q6v5_alloc_memory_region(struct q6v5 *qproc) 1533 { 1534 struct device_node *child; 1535 struct device_node *node; 
1536 struct resource r; 1537 int ret; 1538 1539 /* 1540 * In the absence of mba/mpss sub-child, extract the mba and mpss 1541 * reserved memory regions from device's memory-region property. 1542 */ 1543 child = of_get_child_by_name(qproc->dev->of_node, "mba"); 1544 if (!child) 1545 node = of_parse_phandle(qproc->dev->of_node, 1546 "memory-region", 0); 1547 else 1548 node = of_parse_phandle(child, "memory-region", 0); 1549 1550 ret = of_address_to_resource(node, 0, &r); 1551 if (ret) { 1552 dev_err(qproc->dev, "unable to resolve mba region\n"); 1553 return ret; 1554 } 1555 of_node_put(node); 1556 1557 qproc->mba_phys = r.start; 1558 qproc->mba_size = resource_size(&r); 1559 qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size); 1560 if (!qproc->mba_region) { 1561 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", 1562 &r.start, qproc->mba_size); 1563 return -EBUSY; 1564 } 1565 1566 if (!child) { 1567 node = of_parse_phandle(qproc->dev->of_node, 1568 "memory-region", 1); 1569 } else { 1570 child = of_get_child_by_name(qproc->dev->of_node, "mpss"); 1571 node = of_parse_phandle(child, "memory-region", 0); 1572 } 1573 1574 ret = of_address_to_resource(node, 0, &r); 1575 if (ret) { 1576 dev_err(qproc->dev, "unable to resolve mpss region\n"); 1577 return ret; 1578 } 1579 of_node_put(node); 1580 1581 qproc->mpss_phys = qproc->mpss_reloc = r.start; 1582 qproc->mpss_size = resource_size(&r); 1583 1584 return 0; 1585 } 1586 1587 #if IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) 1588 1589 /* Register IPA notification function */ 1590 int qcom_register_ipa_notify(struct rproc *rproc, qcom_ipa_notify_t notify, 1591 void *data) 1592 { 1593 struct qcom_rproc_ipa_notify *ipa_notify; 1594 struct q6v5 *qproc = rproc->priv; 1595 1596 if (!notify) 1597 return -EINVAL; 1598 1599 ipa_notify = &qproc->ipa_notify_subdev; 1600 if (ipa_notify->notify) 1601 return -EBUSY; 1602 1603 ipa_notify->notify = notify; 1604 ipa_notify->data = data; 1605 1606 return 0; 
}
EXPORT_SYMBOL_GPL(qcom_register_ipa_notify);

/* Deregister IPA notification function */
void qcom_deregister_ipa_notify(struct rproc *rproc)
{
	struct q6v5 *qproc = rproc->priv;

	qproc->ipa_notify_subdev.notify = NULL;
}
EXPORT_SYMBOL_GPL(qcom_deregister_ipa_notify);
#endif /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */

/*
 * Platform probe: allocate the rproc, resolve firmware names (optionally
 * overridden by the "firmware-name" DT property: index 0 = MBA image,
 * index 1 = modem MDT), map registers and memory regions, acquire
 * clocks/regulators/power-domains/resets, register subdevices, and add
 * the rproc.  Error paths unwind in reverse acquisition order via gotos.
 */
static int q6v5_probe(struct platform_device *pdev)
{
	const struct rproc_hexagon_res *desc;
	struct q6v5 *qproc;
	struct rproc *rproc;
	const char *mba_image;
	int ret;

	desc = of_device_get_match_data(&pdev->dev);
	if (!desc)
		return -EINVAL;

	/* Secure variants need SCM; defer until it is available */
	if (desc->need_mem_protection && !qcom_scm_is_available())
		return -EPROBE_DEFER;

	mba_image = desc->hexagon_mba_image;
	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
					    0, &mba_image);
	/* -EINVAL means the property is absent: keep the default image */
	if (ret < 0 && ret != -EINVAL)
		return ret;

	rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
			    mba_image, sizeof(*qproc));
	if (!rproc) {
		dev_err(&pdev->dev, "failed to allocate rproc\n");
		return -ENOMEM;
	}

	rproc->auto_boot = false;
	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);

	qproc = (struct q6v5 *)rproc->priv;
	qproc->dev = &pdev->dev;
	qproc->rproc = rproc;
	qproc->hexagon_mdt_image = "modem.mdt";
	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
					    1, &qproc->hexagon_mdt_image);
	if (ret < 0 && ret != -EINVAL)
		goto free_rproc;

	platform_set_drvdata(pdev, qproc);

	/* has_spare_reg must be set before q6v5_init_mem() reads it */
	qproc->has_spare_reg = desc->has_spare_reg;
	ret = q6v5_init_mem(qproc, pdev);
	if (ret)
		goto free_rproc;

	ret = q6v5_alloc_memory_region(qproc);
	if (ret)
		goto free_rproc;

	ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
			       desc->proxy_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
		goto free_rproc;
	}
	qproc->proxy_clk_count = ret;

	ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
			       desc->reset_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get reset clocks.\n");
		goto free_rproc;
	}
	qproc->reset_clk_count = ret;

	ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
			       desc->active_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active clocks.\n");
		goto free_rproc;
	}
	qproc->active_clk_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
				  desc->proxy_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
		goto free_rproc;
	}
	qproc->proxy_reg_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs,
				  desc->active_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active regulators.\n");
		goto free_rproc;
	}
	qproc->active_reg_count = ret;

	ret = q6v5_pds_attach(&pdev->dev, qproc->active_pds,
			      desc->active_pd_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to attach active power domains\n");
		goto free_rproc;
	}
	qproc->active_pd_count = ret;

	ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
			      desc->proxy_pd_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to init power domains\n");
		goto detach_active_pds;
	}
	qproc->proxy_pd_count = ret;

	qproc->has_alt_reset = desc->has_alt_reset;
	ret = q6v5_init_reset(qproc);
	if (ret)
		goto detach_proxy_pds;

	qproc->version = desc->version;
	qproc->need_mem_protection = desc->need_mem_protection;
	qproc->has_mba_logs = desc->has_mba_logs;

	ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
			     qcom_msa_handover);
	if (ret)
		goto detach_proxy_pds;

	/* Both regions start out owned by Linux (HLOS) */
	qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
	qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
	qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss");
	qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
	qcom_add_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev);
	qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
	if (IS_ERR(qproc->sysmon)) {
		ret = PTR_ERR(qproc->sysmon);
		goto remove_subdevs;
	}

	ret = rproc_add(rproc);
	if (ret)
		goto remove_sysmon_subdev;

	return 0;

remove_sysmon_subdev:
	qcom_remove_sysmon_subdev(qproc->sysmon);
remove_subdevs:
	qcom_remove_ipa_notify_subdev(qproc->rproc, &qproc->ipa_notify_subdev);
	qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
	qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
detach_proxy_pds:
	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
detach_active_pds:
	q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
free_rproc:
	rproc_free(rproc);

	return ret;
}

/* Platform remove: tear down in reverse of probe order. */
static int q6v5_remove(struct platform_device *pdev)
{
	struct q6v5 *qproc = platform_get_drvdata(pdev);
	struct rproc *rproc = qproc->rproc;

	rproc_del(rproc);

	qcom_remove_sysmon_subdev(qproc->sysmon);
	qcom_remove_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev);
	qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
	qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);

	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
	q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);

	rproc_free(rproc);

	return 0;
}

/* Per-SoC resource description: SC7180 */
static const struct rproc_hexagon_res sc7180_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.reset_clk_names = (char*[]){
		"iface",
		"bus",
		"snoc_axi",
		NULL
	},
	.active_clk_names = (char*[]){
		"mnoc_axi",
		"nav",
		NULL
	},
	.active_pd_names = (char*[]){
		"load_state",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		"mss",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_mba_logs = true,
	.has_spare_reg = true,
	.version = MSS_SC7180,
};

/* Per-SoC resource description: SDM845 */
static const struct rproc_hexagon_res sdm845_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		"prng",
		NULL
	},
	.reset_clk_names = (char*[]){
		"iface",
		"snoc_axi",
		NULL
	},
	.active_clk_names = (char*[]){
		"bus",
		"mem",
		"gpll0_mss",
		"mnoc_axi",
		NULL
	},
	.active_pd_names = (char*[]){
		"load_state",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		"mss",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = true,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.version = MSS_SDM845,
};

/* Per-SoC resource description: MSM8998 */
static const struct rproc_hexagon_res msm8998_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		"qdss",
		"mem",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"gpll0_mss",
		"mnoc_axi",
		"snoc_axi",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.version = MSS_MSM8998,
};

/* Per-SoC resource description: MSM8996 */
static const struct rproc_hexagon_res msm8996_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		"pnoc",
		"qdss",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		"gpll0_mss",
		"snoc_axi",
		"mnoc_axi",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.version = MSS_MSM8996,
};

/* Per-SoC resource description: MSM8916 (no SCM memory protection) */
static const struct rproc_hexagon_res msm8916_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.version = MSS_MSM8916,
};

/* Per-SoC resource description: MSM8974 (split-file MBA image) */
static const struct rproc_hexagon_res msm8974_mss = {
	.hexagon_mba_image = "mba.b00",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.active_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mss",
			.uV = 1050000,
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.version = MSS_MSM8974,
};

/* "qcom,q6v5-pil" is the legacy compatible, mapped to the MSM8916 data */
static const struct of_device_id q6v5_of_match[] = {
	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
	{ .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
	{ .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
	{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
	{ },
};
MODULE_DEVICE_TABLE(of, q6v5_of_match);

static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-mss",
		.of_match_table = q6v5_of_match,
	},
};
module_platform_driver(q6v5_driver);

MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
MODULE_LICENSE("GPL v2");