// SPDX-License-Identifier: GPL-2.0-only
/*
 * Qualcomm self-authenticating modem subsystem remoteproc driver
 *
 * Copyright (C) 2016 Linaro Ltd.
 * Copyright (C) 2014 Sony Mobile Communications AB
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/devcoredump.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/iopoll.h>
#include <linux/slab.h>

#include "remoteproc_internal.h"
#include "qcom_common.h"
#include "qcom_pil_info.h"
#include "qcom_q6v5.h"

#include <linux/qcom_scm.h>

#define MPSS_CRASH_REASON_SMEM		421

#define MBA_LOG_SIZE			SZ_4K

/* RMB Status Register Values */
#define RMB_PBL_SUCCESS			0x1

#define RMB_MBA_XPU_UNLOCKED		0x1
#define RMB_MBA_XPU_UNLOCKED_SCRIBBLED	0x2
#define RMB_MBA_META_DATA_AUTH_SUCCESS	0x3
#define RMB_MBA_AUTH_COMPLETE		0x4

/* PBL/MBA interface registers */
#define RMB_MBA_IMAGE_REG		0x00
#define RMB_PBL_STATUS_REG		0x04
#define RMB_MBA_COMMAND_REG		0x08
#define RMB_MBA_STATUS_REG		0x0C
#define RMB_PMI_META_DATA_REG		0x10
#define RMB_PMI_CODE_START_REG		0x14
#define RMB_PMI_CODE_LENGTH_REG		0x18
#define RMB_MBA_MSS_STATUS		0x40
#define RMB_MBA_ALT_RESET		0x44

#define RMB_CMD_META_DATA_READY		0x1
#define RMB_CMD_LOAD_READY		0x2

/* QDSP6SS Register Offsets */
#define QDSP6SS_RESET_REG		0x014
#define QDSP6SS_GFMUX_CTL_REG		0x020
#define QDSP6SS_PWR_CTL_REG		0x030
#define QDSP6SS_MEM_PWR_CTL		0x0B0
#define QDSP6V6SS_MEM_PWR_CTL		0x034
#define QDSP6SS_STRAP_ACC		0x110

/* AXI Halt Register Offsets */
#define AXI_HALTREQ_REG			0x0
#define AXI_HALTACK_REG			0x4
#define AXI_IDLE_REG			0x8
#define AXI_GATING_VALID_OVERRIDE	BIT(0)

#define HALT_ACK_TIMEOUT_US		100000

/* QDSP6SS_RESET */
#define Q6SS_STOP_CORE			BIT(0)
#define Q6SS_CORE_ARES			BIT(1)
#define Q6SS_BUS_ARES_ENABLE		BIT(2)

/* QDSP6SS CBCR */
#define Q6SS_CBCR_CLKEN			BIT(0)
#define Q6SS_CBCR_CLKOFF		BIT(31)
#define Q6SS_CBCR_TIMEOUT_US		200

/* QDSP6SS_GFMUX_CTL */
#define Q6SS_CLK_ENABLE			BIT(1)

/* QDSP6SS_PWR_CTL */
#define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
#define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
#define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
#define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
#define Q6SS_ETB_SLP_NRET_N		BIT(17)
#define Q6SS_L2DATA_STBY_N		BIT(18)
#define Q6SS_SLP_RET_N			BIT(19)
#define Q6SS_CLAMP_IO			BIT(20)
#define QDSS_BHS_ON			BIT(21)
#define QDSS_LDO_BYP			BIT(22)

/* QDSP6v56 parameters */
#define QDSP6v56_LDO_BYP		BIT(25)
#define QDSP6v56_BHS_ON			BIT(24)
#define QDSP6v56_CLAMP_WL		BIT(21)
#define QDSP6v56_CLAMP_QMC_MEM		BIT(22)
#define QDSP6SS_XO_CBCR			0x0038
#define QDSP6SS_ACC_OVERRIDE_VAL	0x20

/* QDSP6v65 parameters */
#define QDSP6SS_CORE_CBCR		0x20
#define QDSP6SS_SLEEP			0x3C
#define QDSP6SS_BOOT_CORE_START		0x400
#define QDSP6SS_BOOT_CMD		0x404
#define BOOT_FSM_TIMEOUT		10000

struct reg_info {
	struct regulator *reg;
	int uV;
	int uA;
};

struct qcom_mss_reg_res {
	const char *supply;
	int uV;
	int uA;
};

struct rproc_hexagon_res {
	const char *hexagon_mba_image;
	struct qcom_mss_reg_res *proxy_supply;
	struct qcom_mss_reg_res *fallback_proxy_supply;
	struct qcom_mss_reg_res *active_supply;
	char **proxy_clk_names;
	char **reset_clk_names;
	char **active_clk_names;
	char **active_pd_names;
	char **proxy_pd_names;
	int version;
	bool need_mem_protection;
	bool has_alt_reset;
	bool has_mba_logs;
	bool has_spare_reg;
};

struct q6v5 {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *reg_base;
	void __iomem *rmb_base;

	struct regmap *halt_map;
	struct regmap *conn_map;

	u32 halt_q6;
	u32 halt_modem;
	u32 halt_nc;
	u32 conn_box;

	struct reset_control *mss_restart;
	struct reset_control *pdc_reset;

	struct qcom_q6v5 q6v5;

	struct clk *active_clks[8];
	struct clk *reset_clks[4];
	struct clk *proxy_clks[4];
	struct device *active_pds[1];
	struct device *proxy_pds[3];
	int active_clk_count;
	int reset_clk_count;
	int proxy_clk_count;
	int active_pd_count;
	int proxy_pd_count;

	struct reg_info active_regs[1];
	struct reg_info proxy_regs[1];
	struct reg_info fallback_proxy_regs[2];
	int active_reg_count;
	int proxy_reg_count;
	int fallback_proxy_reg_count;

	bool dump_mba_loaded;
	size_t current_dump_size;
	size_t total_dump_size;

	phys_addr_t mba_phys;
	size_t mba_size;
	size_t dp_size;

	phys_addr_t mpss_phys;
	phys_addr_t mpss_reloc;
	size_t mpss_size;

	struct qcom_rproc_glink glink_subdev;
	struct qcom_rproc_subdev smd_subdev;
	struct qcom_rproc_ssr ssr_subdev;
	struct qcom_sysmon *sysmon;
	bool need_mem_protection;
	bool has_alt_reset;
	bool has_mba_logs;
	bool has_spare_reg;
	int mpss_perm;
	int mba_perm;
	const char *hexagon_mdt_image;
	int version;
};

enum {
	MSS_MSM8916,
	MSS_MSM8974,
	MSS_MSM8996,
	MSS_MSM8998,
	MSS_SC7180,
	MSS_SDM845,
};

static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
			       const struct qcom_mss_reg_res *reg_res)
{
	int rc;
	int i;

	if (!reg_res)
		return 0;

	for (i = 0; reg_res[i].supply; i++) {
		regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
		if (IS_ERR(regs[i].reg)) {
			rc = PTR_ERR(regs[i].reg);
			if (rc != -EPROBE_DEFER)
				dev_err(dev, "Failed to get %s regulator\n",
					reg_res[i].supply);
			return rc;
		}

		regs[i].uV = reg_res[i].uV;
		regs[i].uA = reg_res[i].uA;
	}

	return i;
}

static int q6v5_regulator_enable(struct q6v5 *qproc,
				 struct reg_info *regs, int count)
{
	int ret;
	int i;

	for (i = 0; i < count; i++) {
		if (regs[i].uV > 0) {
			ret = regulator_set_voltage(regs[i].reg,
						    regs[i].uV, INT_MAX);
			if (ret) {
				dev_err(qproc->dev,
					"Failed to request voltage for %d.\n",
					i);
				goto err;
			}
		}

		if (regs[i].uA > 0) {
			ret = regulator_set_load(regs[i].reg,
						 regs[i].uA);
			if (ret < 0) {
				dev_err(qproc->dev,
					"Failed to set regulator mode\n");
				goto err;
			}
		}

		ret = regulator_enable(regs[i].reg);
		if (ret) {
			dev_err(qproc->dev, "Regulator enable failed\n");
			goto err;
		}
	}

	return 0;
err:
	for (; i >= 0; i--) {
		if (regs[i].uV > 0)
			regulator_set_voltage(regs[i].reg, 0, INT_MAX);

		if (regs[i].uA > 0)
			regulator_set_load(regs[i].reg, 0);

		regulator_disable(regs[i].reg);
	}

	return ret;
}

static void q6v5_regulator_disable(struct q6v5 *qproc,
				   struct reg_info *regs, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (regs[i].uV > 0)
			regulator_set_voltage(regs[i].reg, 0, INT_MAX);

		if (regs[i].uA > 0)
			regulator_set_load(regs[i].reg, 0);

		regulator_disable(regs[i].reg);
	}
}

static int q6v5_clk_enable(struct device *dev,
			   struct clk **clks, int count)
{
	int rc;
	int i;

	for (i = 0; i < count; i++) {
		rc = clk_prepare_enable(clks[i]);
		if (rc) {
			dev_err(dev, "Clock enable failed\n");
			goto err;
		}
	}

	return 0;
err:
	for (i--; i >= 0; i--)
		clk_disable_unprepare(clks[i]);

	return rc;
}

static void q6v5_clk_disable(struct device *dev,
			     struct clk **clks, int count)
{
	int i;

	for (i = 0; i < count; i++)
		clk_disable_unprepare(clks[i]);
}

static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
			   size_t pd_count)
{
	int ret;
	int i;

	for (i = 0; i < pd_count; i++) {
		dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
		ret = pm_runtime_get_sync(pds[i]);
		if (ret < 0) {
			pm_runtime_put_noidle(pds[i]);
			dev_pm_genpd_set_performance_state(pds[i], 0);
			goto unroll_pd_votes;
		}
	}

	return 0;

unroll_pd_votes:
	for (i--; i >= 0; i--) {
		dev_pm_genpd_set_performance_state(pds[i], 0);
		pm_runtime_put(pds[i]);
	}

	return ret;
}

static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
			     size_t pd_count)
{
	int i;

	for (i = 0; i < pd_count; i++) {
		dev_pm_genpd_set_performance_state(pds[i], 0);
		pm_runtime_put(pds[i]);
	}
}

static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
				   bool local, bool remote, phys_addr_t addr,
				   size_t size)
{
	struct qcom_scm_vmperm next[2];
	int perms = 0;

	if (!qproc->need_mem_protection)
		return 0;

	if (local == !!(*current_perm & BIT(QCOM_SCM_VMID_HLOS)) &&
	    remote == !!(*current_perm & BIT(QCOM_SCM_VMID_MSS_MSA)))
		return 0;

	if (local) {
		next[perms].vmid = QCOM_SCM_VMID_HLOS;
		next[perms].perm = QCOM_SCM_PERM_RWX;
		perms++;
	}

	if (remote) {
		next[perms].vmid = QCOM_SCM_VMID_MSS_MSA;
		next[perms].perm = QCOM_SCM_PERM_RW;
		perms++;
	}

	return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
				   current_perm, next, perms);
}

static void q6v5_debug_policy_load(struct q6v5 *qproc, void *mba_region)
{
	const struct firmware *dp_fw;

	if (request_firmware_direct(&dp_fw, "msadp", qproc->dev))
		return;

	if (SZ_1M + dp_fw->size <= qproc->mba_size) {
		memcpy(mba_region + SZ_1M, dp_fw->data, dp_fw->size);
		qproc->dp_size = dp_fw->size;
	}

	release_firmware(dp_fw);
}

static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
{
	struct q6v5 *qproc = rproc->priv;
	void *mba_region;

	/* MBA is restricted to a maximum size of 1M */
	if (fw->size > qproc->mba_size || fw->size > SZ_1M) {
		dev_err(qproc->dev, "MBA firmware load failed\n");
		return -EINVAL;
	}

	mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC);
	if (!mba_region) {
		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
			&qproc->mba_phys, qproc->mba_size);
		return -EBUSY;
	}

	memcpy(mba_region, fw->data, fw->size);
	q6v5_debug_policy_load(qproc, mba_region);
	memunmap(mba_region);

	return 0;
}

static int q6v5_reset_assert(struct q6v5 *qproc)
{
	int ret;

	if (qproc->has_alt_reset) {
		reset_control_assert(qproc->pdc_reset);
		ret = reset_control_reset(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
	} else if (qproc->has_spare_reg) {
		/*
		 * When the AXI pipeline is being reset with the Q6 modem partly
		 * operational there is a possibility of the AXI valid signal
		 * glitching, leading to spurious transactions and Q6 hangs. A
		 * workaround is employed by asserting the
		 * AXI_GATING_VALID_OVERRIDE BIT before triggering the Q6 MSS
		 * reset. AXI_GATING_VALID_OVERRIDE is withdrawn post MSS
		 * assert, followed by an MSS deassert, while holding the PDC
		 * reset.
		 */
		reset_control_assert(qproc->pdc_reset);
		regmap_update_bits(qproc->conn_map, qproc->conn_box,
				   AXI_GATING_VALID_OVERRIDE, 1);
		reset_control_assert(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
		regmap_update_bits(qproc->conn_map, qproc->conn_box,
				   AXI_GATING_VALID_OVERRIDE, 0);
		ret = reset_control_deassert(qproc->mss_restart);
	} else {
		ret = reset_control_assert(qproc->mss_restart);
	}

	return ret;
}

static int q6v5_reset_deassert(struct q6v5 *qproc)
{
	int ret;

	if (qproc->has_alt_reset) {
		reset_control_assert(qproc->pdc_reset);
		writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
		ret = reset_control_reset(qproc->mss_restart);
		writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
		reset_control_deassert(qproc->pdc_reset);
	} else if (qproc->has_spare_reg) {
		ret = reset_control_reset(qproc->mss_restart);
	} else {
		ret = reset_control_deassert(qproc->mss_restart);
	}

	return ret;
}

static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
{
	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
		if (val)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
{
	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
		if (val < 0)
			break;

		if (!status && val)
			break;
		else if (status && val == status)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

static void q6v5_dump_mba_logs(struct q6v5 *qproc)
{
	struct rproc *rproc = qproc->rproc;
	void *data;
	void *mba_region;

	if (!qproc->has_mba_logs)
		return;

	if (q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, qproc->mba_phys,
				    qproc->mba_size))
		return;

	mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC);
	if (!mba_region)
		return;

	data = vmalloc(MBA_LOG_SIZE);
	if (data) {
		memcpy(data, mba_region, MBA_LOG_SIZE);
		dev_coredumpv(&rproc->dev, data, MBA_LOG_SIZE, GFP_KERNEL);
	}
	memunmap(mba_region);
}

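/*
 * Bring the Q6 core out of reset. The exact sequence depends on the SoC
 * generation: SDM845 and SC7180 use the boot FSM, MSM8996/MSM8998 power the
 * core up through the BHS and per-bank memory controls, and older parts use
 * the legacy QDSS/BHS path. All variants end by waiting for the PBL to report
 * its status through the RMB registers.
 */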
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;
	int i;

	if (qproc->version == MSS_SDM845) {
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* De-assert QDSP6 stop core */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
		/* Trigger boot FSM */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
					 val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}

		goto pbl_wait;
	} else if (qproc->version == MSS_SC7180) {
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Turn on the XO clock needed for PLL setup */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS XO clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Configure Q6 core CBCR to auto-enable after reset sequence */
		val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR);

		/* De-assert the Q6 stop core signal */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);

		/* Wait for 10 us for any staggering logic to settle */
		usleep_range(10, 20);

		/* Trigger the boot FSM to start the Q6 out-of-reset sequence */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		/* Poll the MSS_STATUS for FSM completion */
		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
					 val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}
		goto pbl_wait;
	} else if (qproc->version == MSS_MSM8996 ||
		   qproc->version == MSS_MSM8998) {
		int mem_pwr_ctl;

		/* Override the ACC value if required */
		writel(QDSP6SS_ACC_OVERRIDE_VAL,
		       qproc->reg_base + QDSP6SS_STRAP_ACC);

		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* BHS requires xo cbcr to be enabled */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		/* Read CLKOFF bit to go low indicating CLK is enabled */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev,
				"xo cbcr enabling timed out (rc:%d)\n", ret);
			return ret;
		}
		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSP6v56_BHS_ON;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);

		/* Put LDO in bypass mode */
		val |= QDSP6v56_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert QDSP6 compiler memory clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert memory peripheral sleep and L2 memory standby */
		val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Turn on L1, L2, ETB and JU memories 1 at a time */
		if (qproc->version == MSS_MSM8996) {
			mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL;
			i = 19;
		} else {
			/* MSS_MSM8998 */
			mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL;
			i = 28;
		}
		val = readl(qproc->reg_base + mem_pwr_ctl);
		for (; i >= 0; i--) {
			val |= BIT(i);
			writel(val, qproc->reg_base + mem_pwr_ctl);
			/*
			 * Read back value to ensure the write is done then
			 * wait for 1us for both memory peripheral and data
			 * array to turn on.
			 */
			val |= readl(qproc->reg_base + mem_pwr_ctl);
			udelay(1);
		}
		/* Remove word line clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_WL;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	} else {
		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);
		/*
		 * Turn on memories. L2 banks should be done individually
		 * to minimize inrush current.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_2;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_1;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_0;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}
	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

pbl_wait:
	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}

static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
				   struct regmap *halt_map,
				   u32 offset)
{
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (!ret && val)
		return;

	/* Assert halt request */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);

	/* Wait for halt */
	regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val,
				 val, 1000, HALT_ACK_TIMEOUT_US);

	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (ret || !val)
		dev_err(qproc->dev, "port failed halt\n");

	/* Clear halt request (port will remain halted until reset) */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}

static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	void *metadata;
	int mdata_perm;
	int xferop_ret;
	size_t size;
	void *ptr;
	int ret;

	metadata = qcom_mdt_read_metadata(fw, &size);
	if (IS_ERR(metadata))
		return PTR_ERR(metadata);

	ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
	if (!ptr) {
		kfree(metadata);
		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
		return -ENOMEM;
	}

	memcpy(ptr, metadata, size);

	/* Hypervisor mapping to access metadata by modem */
	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true,
				      phys, size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to metadata failed: %d\n", ret);
		ret = -EAGAIN;
		goto free_dma_attrs;
	}

	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	/* Metadata authentication done, remove modem access */
	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false,
					     phys, size);
	if (xferop_ret)
		dev_warn(qproc->dev,
			 "mdt buffer not reclaimed system may become unstable\n");

free_dma_attrs:
	dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
	kfree(metadata);

	return ret < 0 ? ret : 0;
}

static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
{
	if (phdr->p_type != PT_LOAD)
		return false;

	if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
		return false;

	if (!phdr->p_memsz)
		return false;

	return true;
}

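/*
 * Power up the resources needed by the MBA, hand it the MBA region and wait
 * for it to report XPU unlock through the RMB status register. On failure the
 * AXI ports are halted and the MBA region is reclaimed from the Q6 before the
 * clocks, regulators and power domains are unwound.
 */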
static int q6v5_mba_load(struct q6v5 *qproc)
{
	int ret;
	int xfermemop_ret;
	bool mba_load_err = false;

	qcom_q6v5_prepare(&qproc->q6v5);

	ret = q6v5_pds_enable(qproc, qproc->active_pds, qproc->active_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable active power domains\n");
		goto disable_irqs;
	}

	ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable proxy power domains\n");
		goto disable_active_pds;
	}

	ret = q6v5_regulator_enable(qproc, qproc->fallback_proxy_regs,
				    qproc->fallback_proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable fallback proxy supplies\n");
		goto disable_proxy_pds;
	}

	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
				    qproc->proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy supplies\n");
		goto disable_fallback_proxy_reg;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
			      qproc->proxy_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy clocks\n");
		goto disable_proxy_reg;
	}

	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
				    qproc->active_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		goto disable_proxy_clk;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
			      qproc->reset_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable reset clocks\n");
		goto disable_vdd;
	}

	ret = q6v5_reset_deassert(qproc);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_reset_clks;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
			      qproc->active_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable clocks\n");
		goto assert_reset;
	}

	/*
	 * Some versions of the MBA firmware will upon boot wipe the MPSS
	 * region as well, so provide the Q6 access to this region.
	 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
				      qproc->mpss_phys, qproc->mpss_size);
	if (ret) {
		dev_err(qproc->dev, "assigning Q6 access to mpss memory failed: %d\n", ret);
		goto disable_active_clks;
	}

	/* Assign MBA image access in DDR to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true,
				      qproc->mba_phys, qproc->mba_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mba memory failed: %d\n", ret);
		goto disable_active_clks;
	}

	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
	if (qproc->dp_size) {
		writel(qproc->mba_phys + SZ_1M, qproc->rmb_base + RMB_PMI_CODE_START_REG);
		writel(qproc->dp_size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
	}

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto reclaim_mba;

	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	qproc->dump_mba_loaded = true;
	return 0;

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	mba_load_err = true;
reclaim_mba:
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						false, qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret) {
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");
	} else if (mba_load_err) {
		q6v5_dump_mba_logs(qproc);
	}

disable_active_clks:
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
assert_reset:
	q6v5_reset_assert(qproc);
disable_reset_clks:
	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
disable_vdd:
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
disable_proxy_clk:
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
disable_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
disable_fallback_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
			       qproc->fallback_proxy_reg_count);
disable_proxy_pds:
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
disable_active_pds:
	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
disable_irqs:
	qcom_q6v5_unprepare(&qproc->q6v5);

	return ret;
}

static void q6v5_mba_reclaim(struct q6v5 *qproc)
{
	int ret;
	u32 val;

	qproc->dump_mba_loaded = false;
	qproc->dp_size = 0;

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	if (qproc->version == MSS_MSM8996) {
		/*
		 * To avoid high MX current during LPASS/MSS restart.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
			QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}

	q6v5_reset_assert(qproc);

	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);

	/* In case of a failure or coredump scenario where reclaiming MBA
	 * memory could not happen, reclaim it here.
	 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false,
				      qproc->mba_phys,
				      qproc->mba_size);
	WARN_ON(ret);

	ret = qcom_q6v5_unprepare(&qproc->q6v5);
	if (ret) {
		q6v5_pds_disable(qproc, qproc->proxy_pds,
				 qproc->proxy_pd_count);
		q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
				 qproc->proxy_clk_count);
		q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
				       qproc->fallback_proxy_reg_count);
		q6v5_regulator_disable(qproc, qproc->proxy_regs,
				       qproc->proxy_reg_count);
	}
}

static int q6v5_reload_mba(struct rproc *rproc)
{
	struct q6v5 *qproc = rproc->priv;
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, rproc->firmware, qproc->dev);
	if (ret < 0)
		return ret;

	q6v5_load(rproc, fw);
	ret = q6v5_mba_load(qproc);
	release_firmware(fw);

	return ret;
}

static int q6v5_mpss_load(struct q6v5 *qproc)
{
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct firmware *seg_fw;
	const struct firmware *fw;
	struct elf32_hdr *ehdr;
	phys_addr_t mpss_reloc;
	phys_addr_t boot_addr;
	phys_addr_t min_addr = PHYS_ADDR_MAX;
	phys_addr_t max_addr = 0;
	u32 code_length;
	bool relocate = false;
	char *fw_name;
	size_t fw_name_len;
	ssize_t offset;
	size_t size = 0;
	void *ptr;
	int ret;
	int i;

	fw_name_len = strlen(qproc->hexagon_mdt_image);
	if (fw_name_len <= 4)
		return -EINVAL;

	fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL);
	if (!fw_name)
		return -ENOMEM;

	ret = request_firmware(&fw, fw_name, qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load %s\n", fw_name);
		goto out;
	}

	/* Initialize the RMB validator */
	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_mpss_init_image(qproc, fw);
	if (ret)
		goto release_firmware;

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);

	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
			relocate = true;

		if (phdr->p_paddr < min_addr)
			min_addr = phdr->p_paddr;

		if (phdr->p_paddr + phdr->p_memsz > max_addr)
			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
	}

	/*
	 * In case of a modem subsystem restart on secure devices, the modem
	 * memory can be reclaimed only after MBA is loaded.
	 */
	q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false,
				qproc->mpss_phys, qproc->mpss_size);

	/* Share ownership between Linux and MSS, during segment loading */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true,
				      qproc->mpss_phys, qproc->mpss_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mpss memory failed: %d\n", ret);
		ret = -EAGAIN;
		goto release_firmware;
	}

	mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
	qproc->mpss_reloc = mpss_reloc;
	/* Load firmware segments */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		offset = phdr->p_paddr - mpss_reloc;
		if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
			dev_err(qproc->dev, "segment outside memory range\n");
			ret = -EINVAL;
			goto release_firmware;
		}

		if (phdr->p_filesz > phdr->p_memsz) {
			dev_err(qproc->dev,
				"refusing to load segment %d with p_filesz > p_memsz\n",
				i);
			ret = -EINVAL;
			goto release_firmware;
		}

		ptr = memremap(qproc->mpss_phys + offset, phdr->p_memsz, MEMREMAP_WC);
		if (!ptr) {
			dev_err(qproc->dev,
				"unable to map memory region: %pa+%zx-%x\n",
				&qproc->mpss_phys, offset, phdr->p_memsz);
			goto release_firmware;
		}

		if (phdr->p_filesz && phdr->p_offset < fw->size) {
			/* Firmware is large enough to be non-split */
			if (phdr->p_offset + phdr->p_filesz > fw->size) {
				dev_err(qproc->dev,
					"failed to load segment %d from truncated file %s\n",
					i, fw_name);
				ret = -EINVAL;
				memunmap(ptr);
				goto release_firmware;
			}

			memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
		} else if (phdr->p_filesz) {
			/* Replace "xxx.xxx" with "xxx.bxx" */
			sprintf(fw_name + fw_name_len - 3, "b%02d", i);
			ret = request_firmware_into_buf(&seg_fw, fw_name, qproc->dev,
							ptr, phdr->p_filesz);
			if (ret) {
				dev_err(qproc->dev, "failed to load %s\n", fw_name);
				memunmap(ptr);
				goto release_firmware;
			}

			if (seg_fw->size != phdr->p_filesz) {
				dev_err(qproc->dev,
					"failed to load segment %d from truncated file %s\n",
					i, fw_name);
				ret = -EINVAL;
				release_firmware(seg_fw);
				memunmap(ptr);
				goto release_firmware;
			}

			release_firmware(seg_fw);
		}

		if (phdr->p_memsz > phdr->p_filesz) {
			memset(ptr + phdr->p_filesz, 0,
			       phdr->p_memsz - phdr->p_filesz);
		}
		memunmap(ptr);
		size += phdr->p_memsz;

		code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
		if (!code_length) {
			boot_addr = relocate ? qproc->mpss_phys : min_addr;
			writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
			writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
		}
		writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

		ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
		if (ret < 0) {
			dev_err(qproc->dev, "MPSS authentication failed: %d\n",
				ret);
			goto release_firmware;
		}
	}

	/* Transfer ownership of modem ddr region to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
				      qproc->mpss_phys, qproc->mpss_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mpss memory failed: %d\n", ret);
		ret = -EAGAIN;
		goto release_firmware;
	}

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);

	qcom_pil_info_store("modem", qproc->mpss_phys, qproc->mpss_size);

release_firmware:
	release_firmware(fw);
out:
	kfree(fw_name);

	return ret < 0 ? ret : 0;
}

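/*
 * Coredump segment callback: copies one MPSS segment into the dump buffer.
 * If the MBA is not loaded it is reloaded first so the MPSS region can be
 * reclaimed for the copy; once the last segment has been copied, ownership is
 * handed back to the Q6 and the MBA resources are released.
 */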
static void qcom_q6v5_dump_segment(struct rproc *rproc,
				   struct rproc_dump_segment *segment,
				   void *dest, size_t cp_offset, size_t size)
{
	int ret = 0;
	struct q6v5 *qproc = rproc->priv;
	int offset = segment->da - qproc->mpss_reloc;
	void *ptr = NULL;

	/* Unlock mba before copying segments */
	if (!qproc->dump_mba_loaded) {
		ret = q6v5_reload_mba(rproc);
		if (!ret) {
			/* Reset ownership back to Linux to copy segments */
			ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						      true, false,
						      qproc->mpss_phys,
						      qproc->mpss_size);
		}
	}

	if (!ret)
		ptr = memremap(qproc->mpss_phys + offset + cp_offset, size, MEMREMAP_WC);

	if (ptr) {
		memcpy(dest, ptr, size);
		memunmap(ptr);
	} else {
		memset(dest, 0xff, size);
	}

	qproc->current_dump_size += size;

	/* Reclaim mba after copying segments */
	if (qproc->current_dump_size == qproc->total_dump_size) {
		if (qproc->dump_mba_loaded) {
			/* Try to reset ownership back to Q6 */
			q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						false, true,
						qproc->mpss_phys,
						qproc->mpss_size);
			q6v5_mba_reclaim(qproc);
		}
	}
}

static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int xfermemop_ret;
	int ret;

	ret = q6v5_mba_load(qproc);
	if (ret)
		return ret;

	dev_info(qproc->dev, "MBA booted with%s debug policy, loading mpss\n",
		 qproc->dp_size ? "" : "out");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto reclaim_mpss;

	ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "start timed out\n");
		goto reclaim_mpss;
	}

	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						false, qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret)
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer system may become unstable\n");

	/* Reset Dump Segment Mask */
	qproc->current_dump_size = 0;

	return 0;

reclaim_mpss:
	q6v5_mba_reclaim(qproc);
	q6v5_dump_mba_logs(qproc);

	return ret;
}

static int q6v5_stop(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int ret;

	ret = qcom_q6v5_request_stop(&qproc->q6v5, qproc->sysmon);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "timed out on wait\n");

	q6v5_mba_reclaim(qproc);

	return 0;
}

static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
					    const struct firmware *mba_fw)
{
	const struct firmware *fw;
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct elf32_hdr *ehdr;
	struct q6v5 *qproc = rproc->priv;
	unsigned long i;
	int ret;

	ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load %s\n",
			qproc->hexagon_mdt_image);
		return ret;
	}

	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);
	qproc->total_dump_size = 0;

	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
							phdr->p_memsz,
							qcom_q6v5_dump_segment,
							NULL);
		if (ret)
			break;

		qproc->total_dump_size += phdr->p_memsz;
	}

	release_firmware(fw);
	return ret;
}

static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.parse_fw = qcom_q6v5_register_dump_segments,
	.load = q6v5_load,
};

static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
{
	struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);

	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
	q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
			       qproc->fallback_proxy_reg_count);
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
}

static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
{
	struct of_phandle_args args;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
	qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->reg_base))
		return PTR_ERR(qproc->reg_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
	qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->rmb_base))
		return PTR_ERR(qproc->rmb_base);

	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
					       "qcom,halt-regs", 3, 0, &args);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
		return -EINVAL;
	}

	qproc->halt_map = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(qproc->halt_map))
		return PTR_ERR(qproc->halt_map);

	qproc->halt_q6 = args.args[0];
	qproc->halt_modem = args.args[1];
	qproc->halt_nc = args.args[2];

	if (qproc->has_spare_reg) {
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,spare-regs",
						       1, 0, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse spare-regs\n");
			return -EINVAL;
		}

		qproc->conn_map = syscon_node_to_regmap(args.np);
		of_node_put(args.np);
		if (IS_ERR(qproc->conn_map))
			return PTR_ERR(qproc->conn_map);

		qproc->conn_box = args.args[0];
	}

	return 0;
}

static int q6v5_init_clocks(struct device *dev, struct clk **clks,
			    char **clk_names)
{
	int i;

	if (!clk_names)
		return 0;

	for (i = 0; clk_names[i]; i++) {
		clks[i] = devm_clk_get(dev, clk_names[i]);
		if (IS_ERR(clks[i])) {
			int rc = PTR_ERR(clks[i]);

			if (rc != -EPROBE_DEFER)
				dev_err(dev, "Failed to get %s clock\n",
					clk_names[i]);
			return rc;
		}
	}

	return i;
}

static int q6v5_pds_attach(struct device *dev, struct device **devs,
			   char **pd_names)
{
	size_t num_pds = 0;
	int ret;
	int i;

	if (!pd_names)
		return 0;

	while (pd_names[num_pds])
		num_pds++;

	for (i = 0; i < num_pds; i++) {
		devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
		if (IS_ERR_OR_NULL(devs[i])) {
			ret = PTR_ERR(devs[i]) ? : -ENODATA;
			goto unroll_attach;
		}
	}

	return num_pds;

unroll_attach:
	for (i--; i >= 0; i--)
		dev_pm_domain_detach(devs[i], false);

	return ret;
}

static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
			    size_t pd_count)
{
	int i;

	for (i = 0; i < pd_count; i++)
		dev_pm_domain_detach(pds[i], false);
}

static int q6v5_init_reset(struct q6v5 *qproc)
{
	qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
							      "mss_restart");
	if (IS_ERR(qproc->mss_restart)) {
		dev_err(qproc->dev, "failed to acquire mss restart\n");
		return PTR_ERR(qproc->mss_restart);
	}

	if (qproc->has_alt_reset || qproc->has_spare_reg) {
		qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
								    "pdc_reset");
		if (IS_ERR(qproc->pdc_reset)) {
			dev_err(qproc->dev, "failed to acquire pdc reset\n");
			return PTR_ERR(qproc->pdc_reset);
		}
	}

	return 0;
}

static int q6v5_alloc_memory_region(struct q6v5 *qproc)
{
	struct device_node *child;
	struct device_node *node;
	struct resource r;
	int ret;

	/*
	 * In the absence of mba/mpss sub-child, extract the mba and mpss
	 * reserved memory regions from device's memory-region property.
	 */
	child = of_get_child_by_name(qproc->dev->of_node, "mba");
	if (!child)
		node = of_parse_phandle(qproc->dev->of_node,
					"memory-region", 0);
	else
		node = of_parse_phandle(child, "memory-region", 0);

	ret = of_address_to_resource(node, 0, &r);
	if (ret) {
		dev_err(qproc->dev, "unable to resolve mba region\n");
		return ret;
	}
	of_node_put(node);

	qproc->mba_phys = r.start;
	qproc->mba_size = resource_size(&r);

	if (!child) {
		node = of_parse_phandle(qproc->dev->of_node,
					"memory-region", 1);
	} else {
		child = of_get_child_by_name(qproc->dev->of_node, "mpss");
		node = of_parse_phandle(child, "memory-region", 0);
	}

	ret = of_address_to_resource(node, 0, &r);
	if (ret) {
		dev_err(qproc->dev, "unable to resolve mpss region\n");
		return ret;
	}
	of_node_put(node);

	qproc->mpss_phys = qproc->mpss_reloc = r.start;
	qproc->mpss_size = resource_size(&r);

	return 0;
}

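/*
 * Probe: look up the match data and firmware names, map the MMIO regions,
 * acquire clocks, regulators, power domains and resets, then register the
 * remoteproc together with its glink, smd, ssr and sysmon subdevices.
 */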
static int q6v5_probe(struct platform_device *pdev)
{
	const struct rproc_hexagon_res *desc;
	struct q6v5 *qproc;
	struct rproc *rproc;
	const char *mba_image;
	int ret;

	desc = of_device_get_match_data(&pdev->dev);
	if (!desc)
		return -EINVAL;

	if (desc->need_mem_protection && !qcom_scm_is_available())
		return -EPROBE_DEFER;

	mba_image = desc->hexagon_mba_image;
	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
					    0, &mba_image);
	if (ret < 0 && ret != -EINVAL) {
		dev_err(&pdev->dev, "unable to read mba firmware-name\n");
		return ret;
	}

	rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
			    mba_image, sizeof(*qproc));
	if (!rproc) {
		dev_err(&pdev->dev, "failed to allocate rproc\n");
		return -ENOMEM;
	}

	rproc->auto_boot = false;
	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);

	qproc = (struct q6v5 *)rproc->priv;
	qproc->dev = &pdev->dev;
	qproc->rproc = rproc;
	qproc->hexagon_mdt_image = "modem.mdt";
	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
					    1, &qproc->hexagon_mdt_image);
	if (ret < 0 && ret != -EINVAL) {
		dev_err(&pdev->dev, "unable to read mpss firmware-name\n");
		goto free_rproc;
	}

	platform_set_drvdata(pdev, qproc);

	qproc->has_spare_reg = desc->has_spare_reg;
	ret = q6v5_init_mem(qproc, pdev);
	if (ret)
		goto free_rproc;

	ret = q6v5_alloc_memory_region(qproc);
	if (ret)
		goto free_rproc;

	ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
			       desc->proxy_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
		goto free_rproc;
	}
	qproc->proxy_clk_count = ret;

	ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
			       desc->reset_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get reset clocks.\n");
		goto free_rproc;
	}
	qproc->reset_clk_count = ret;

	ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
			       desc->active_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active clocks.\n");
		goto free_rproc;
	}
	qproc->active_clk_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
				  desc->proxy_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
		goto free_rproc;
	}
	qproc->proxy_reg_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs,
				  desc->active_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active regulators.\n");
		goto free_rproc;
	}
	qproc->active_reg_count = ret;

	ret = q6v5_pds_attach(&pdev->dev, qproc->active_pds,
			      desc->active_pd_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to attach active power domains\n");
		goto free_rproc;
	}
	qproc->active_pd_count = ret;

	ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
			      desc->proxy_pd_names);
	/* Fallback to regulators for old device trees */
	if (ret == -ENODATA && desc->fallback_proxy_supply) {
		ret = q6v5_regulator_init(&pdev->dev,
					  qproc->fallback_proxy_regs,
					  desc->fallback_proxy_supply);
		if (ret < 0) {
			dev_err(&pdev->dev, "Failed to get fallback proxy regulators.\n");
			goto detach_active_pds;
		}
		qproc->fallback_proxy_reg_count = ret;
	} else if (ret < 0) {
		dev_err(&pdev->dev, "Failed to init power domains\n");
		goto detach_active_pds;
	} else {
		qproc->proxy_pd_count = ret;
	}

	qproc->has_alt_reset = desc->has_alt_reset;
	ret = q6v5_init_reset(qproc);
	if (ret)
		goto detach_proxy_pds;

	qproc->version = desc->version;
	qproc->need_mem_protection = desc->need_mem_protection;
	qproc->has_mba_logs = desc->has_mba_logs;

	ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
			     qcom_msa_handover);
	if (ret)
		goto detach_proxy_pds;

	qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
	qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
	qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss");
	qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
	qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
	if (IS_ERR(qproc->sysmon)) {
		ret = PTR_ERR(qproc->sysmon);
		goto remove_subdevs;
	}

	ret = rproc_add(rproc);
	if (ret)
		goto remove_sysmon_subdev;

	return 0;

remove_sysmon_subdev:
	qcom_remove_sysmon_subdev(qproc->sysmon);
remove_subdevs:
	qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
	qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
detach_proxy_pds:
	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
detach_active_pds:
	q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
free_rproc:
	rproc_free(rproc);

	return ret;
}

static int q6v5_remove(struct platform_device *pdev)
{
	struct q6v5 *qproc = platform_get_drvdata(pdev);
	struct rproc *rproc = qproc->rproc;

	rproc_del(rproc);

	qcom_remove_sysmon_subdev(qproc->sysmon);
	qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
	qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);

	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
	q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);

	rproc_free(rproc);

	return 0;
}

static const struct rproc_hexagon_res sc7180_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.reset_clk_names = (char*[]){
		"iface",
		"bus",
		"snoc_axi",
		NULL
	},
	.active_clk_names = (char*[]){
		"mnoc_axi",
		"nav",
		NULL
	},
	.active_pd_names = (char*[]){
		"load_state",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		"mss",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_mba_logs = true,
	.has_spare_reg = true,
	.version = MSS_SC7180,
};

static const struct rproc_hexagon_res sdm845_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		"prng",
		NULL
	},
	.reset_clk_names = (char*[]){
		"iface",
		"snoc_axi",
		NULL
	},
	.active_clk_names = (char*[]){
		"bus",
		"mem",
		"gpll0_mss",
		"mnoc_axi",
		NULL
	},
	.active_pd_names = (char*[]){
		"load_state",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		"mss",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = true,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.version = MSS_SDM845,
};

static const struct rproc_hexagon_res msm8998_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		"qdss",
		"mem",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"gpll0_mss",
		"mnoc_axi",
		"snoc_axi",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.version = MSS_MSM8998,
};

static const struct rproc_hexagon_res msm8996_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		"pnoc",
		"qdss",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		"gpll0_mss",
		"snoc_axi",
		"mnoc_axi",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.version = MSS_MSM8996,
};

static const struct rproc_hexagon_res msm8916_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"mx",
		"cx",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.version = MSS_MSM8916,
};

static const struct rproc_hexagon_res msm8974_mss = {
	.hexagon_mba_image = "mba.b00",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{}
	},
	.active_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mss",
			.uV = 1050000,
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"mx",
		"cx",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.version = MSS_MSM8974,
};

static const struct of_device_id q6v5_of_match[] = {
	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
	{ .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
	{ .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
	{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
	{ },
};
MODULE_DEVICE_TABLE(of, q6v5_of_match);

static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-mss",
		.of_match_table = q6v5_of_match,
	},
};
module_platform_driver(q6v5_driver);

MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
MODULE_LICENSE("GPL v2");