1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Qualcomm self-authenticating modem subsystem remoteproc driver 4 * 5 * Copyright (C) 2016 Linaro Ltd. 6 * Copyright (C) 2014 Sony Mobile Communications AB 7 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. 8 */ 9 10 #include <linux/clk.h> 11 #include <linux/delay.h> 12 #include <linux/devcoredump.h> 13 #include <linux/dma-mapping.h> 14 #include <linux/interrupt.h> 15 #include <linux/kernel.h> 16 #include <linux/mfd/syscon.h> 17 #include <linux/module.h> 18 #include <linux/of_address.h> 19 #include <linux/of_device.h> 20 #include <linux/platform_device.h> 21 #include <linux/pm_domain.h> 22 #include <linux/pm_runtime.h> 23 #include <linux/regmap.h> 24 #include <linux/regulator/consumer.h> 25 #include <linux/remoteproc.h> 26 #include "linux/remoteproc/qcom_q6v5_ipa_notify.h" 27 #include <linux/reset.h> 28 #include <linux/soc/qcom/mdt_loader.h> 29 #include <linux/iopoll.h> 30 31 #include "remoteproc_internal.h" 32 #include "qcom_common.h" 33 #include "qcom_pil_info.h" 34 #include "qcom_q6v5.h" 35 36 #include <linux/qcom_scm.h> 37 38 #define MPSS_CRASH_REASON_SMEM 421 39 40 #define MBA_LOG_SIZE SZ_4K 41 42 /* RMB Status Register Values */ 43 #define RMB_PBL_SUCCESS 0x1 44 45 #define RMB_MBA_XPU_UNLOCKED 0x1 46 #define RMB_MBA_XPU_UNLOCKED_SCRIBBLED 0x2 47 #define RMB_MBA_META_DATA_AUTH_SUCCESS 0x3 48 #define RMB_MBA_AUTH_COMPLETE 0x4 49 50 /* PBL/MBA interface registers */ 51 #define RMB_MBA_IMAGE_REG 0x00 52 #define RMB_PBL_STATUS_REG 0x04 53 #define RMB_MBA_COMMAND_REG 0x08 54 #define RMB_MBA_STATUS_REG 0x0C 55 #define RMB_PMI_META_DATA_REG 0x10 56 #define RMB_PMI_CODE_START_REG 0x14 57 #define RMB_PMI_CODE_LENGTH_REG 0x18 58 #define RMB_MBA_MSS_STATUS 0x40 59 #define RMB_MBA_ALT_RESET 0x44 60 61 #define RMB_CMD_META_DATA_READY 0x1 62 #define RMB_CMD_LOAD_READY 0x2 63 64 /* QDSP6SS Register Offsets */ 65 #define QDSP6SS_RESET_REG 0x014 66 #define QDSP6SS_GFMUX_CTL_REG 0x020 67 #define 
QDSP6SS_PWR_CTL_REG 0x030 68 #define QDSP6SS_MEM_PWR_CTL 0x0B0 69 #define QDSP6V6SS_MEM_PWR_CTL 0x034 70 #define QDSP6SS_STRAP_ACC 0x110 71 72 /* AXI Halt Register Offsets */ 73 #define AXI_HALTREQ_REG 0x0 74 #define AXI_HALTACK_REG 0x4 75 #define AXI_IDLE_REG 0x8 76 #define AXI_GATING_VALID_OVERRIDE BIT(0) 77 78 #define HALT_ACK_TIMEOUT_US 100000 79 80 /* QDSP6SS_RESET */ 81 #define Q6SS_STOP_CORE BIT(0) 82 #define Q6SS_CORE_ARES BIT(1) 83 #define Q6SS_BUS_ARES_ENABLE BIT(2) 84 85 /* QDSP6SS CBCR */ 86 #define Q6SS_CBCR_CLKEN BIT(0) 87 #define Q6SS_CBCR_CLKOFF BIT(31) 88 #define Q6SS_CBCR_TIMEOUT_US 200 89 90 /* QDSP6SS_GFMUX_CTL */ 91 #define Q6SS_CLK_ENABLE BIT(1) 92 93 /* QDSP6SS_PWR_CTL */ 94 #define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0) 95 #define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1) 96 #define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2) 97 #define Q6SS_L2TAG_SLP_NRET_N BIT(16) 98 #define Q6SS_ETB_SLP_NRET_N BIT(17) 99 #define Q6SS_L2DATA_STBY_N BIT(18) 100 #define Q6SS_SLP_RET_N BIT(19) 101 #define Q6SS_CLAMP_IO BIT(20) 102 #define QDSS_BHS_ON BIT(21) 103 #define QDSS_LDO_BYP BIT(22) 104 105 /* QDSP6v56 parameters */ 106 #define QDSP6v56_LDO_BYP BIT(25) 107 #define QDSP6v56_BHS_ON BIT(24) 108 #define QDSP6v56_CLAMP_WL BIT(21) 109 #define QDSP6v56_CLAMP_QMC_MEM BIT(22) 110 #define QDSP6SS_XO_CBCR 0x0038 111 #define QDSP6SS_ACC_OVERRIDE_VAL 0x20 112 113 /* QDSP6v65 parameters */ 114 #define QDSP6SS_CORE_CBCR 0x20 115 #define QDSP6SS_SLEEP 0x3C 116 #define QDSP6SS_BOOT_CORE_START 0x400 117 #define QDSP6SS_BOOT_CMD 0x404 118 #define BOOT_FSM_TIMEOUT 10000 119 120 struct reg_info { 121 struct regulator *reg; 122 int uV; 123 int uA; 124 }; 125 126 struct qcom_mss_reg_res { 127 const char *supply; 128 int uV; 129 int uA; 130 }; 131 132 struct rproc_hexagon_res { 133 const char *hexagon_mba_image; 134 struct qcom_mss_reg_res *proxy_supply; 135 struct qcom_mss_reg_res *active_supply; 136 char **proxy_clk_names; 137 char **reset_clk_names; 138 char **active_clk_names; 139 char 
**active_pd_names; 140 char **proxy_pd_names; 141 int version; 142 bool need_mem_protection; 143 bool has_alt_reset; 144 bool has_mba_logs; 145 bool has_spare_reg; 146 }; 147 148 struct q6v5 { 149 struct device *dev; 150 struct rproc *rproc; 151 152 void __iomem *reg_base; 153 void __iomem *rmb_base; 154 155 struct regmap *halt_map; 156 struct regmap *conn_map; 157 158 u32 halt_q6; 159 u32 halt_modem; 160 u32 halt_nc; 161 u32 conn_box; 162 163 struct reset_control *mss_restart; 164 struct reset_control *pdc_reset; 165 166 struct qcom_q6v5 q6v5; 167 168 struct clk *active_clks[8]; 169 struct clk *reset_clks[4]; 170 struct clk *proxy_clks[4]; 171 struct device *active_pds[1]; 172 struct device *proxy_pds[3]; 173 int active_clk_count; 174 int reset_clk_count; 175 int proxy_clk_count; 176 int active_pd_count; 177 int proxy_pd_count; 178 179 struct reg_info active_regs[1]; 180 struct reg_info proxy_regs[3]; 181 int active_reg_count; 182 int proxy_reg_count; 183 184 bool running; 185 186 bool dump_mba_loaded; 187 size_t current_dump_size; 188 size_t total_dump_size; 189 190 phys_addr_t mba_phys; 191 void *mba_region; 192 size_t mba_size; 193 size_t dp_size; 194 195 phys_addr_t mpss_phys; 196 phys_addr_t mpss_reloc; 197 size_t mpss_size; 198 199 struct qcom_rproc_glink glink_subdev; 200 struct qcom_rproc_subdev smd_subdev; 201 struct qcom_rproc_ssr ssr_subdev; 202 struct qcom_rproc_ipa_notify ipa_notify_subdev; 203 struct qcom_sysmon *sysmon; 204 bool need_mem_protection; 205 bool has_alt_reset; 206 bool has_mba_logs; 207 bool has_spare_reg; 208 int mpss_perm; 209 int mba_perm; 210 const char *hexagon_mdt_image; 211 int version; 212 }; 213 214 enum { 215 MSS_MSM8916, 216 MSS_MSM8974, 217 MSS_MSM8996, 218 MSS_MSM8998, 219 MSS_SC7180, 220 MSS_SDM845, 221 }; 222 223 static int q6v5_regulator_init(struct device *dev, struct reg_info *regs, 224 const struct qcom_mss_reg_res *reg_res) 225 { 226 int rc; 227 int i; 228 229 if (!reg_res) 230 return 0; 231 232 for (i = 0; 
reg_res[i].supply; i++) { 233 regs[i].reg = devm_regulator_get(dev, reg_res[i].supply); 234 if (IS_ERR(regs[i].reg)) { 235 rc = PTR_ERR(regs[i].reg); 236 if (rc != -EPROBE_DEFER) 237 dev_err(dev, "Failed to get %s\n regulator", 238 reg_res[i].supply); 239 return rc; 240 } 241 242 regs[i].uV = reg_res[i].uV; 243 regs[i].uA = reg_res[i].uA; 244 } 245 246 return i; 247 } 248 249 static int q6v5_regulator_enable(struct q6v5 *qproc, 250 struct reg_info *regs, int count) 251 { 252 int ret; 253 int i; 254 255 for (i = 0; i < count; i++) { 256 if (regs[i].uV > 0) { 257 ret = regulator_set_voltage(regs[i].reg, 258 regs[i].uV, INT_MAX); 259 if (ret) { 260 dev_err(qproc->dev, 261 "Failed to request voltage for %d.\n", 262 i); 263 goto err; 264 } 265 } 266 267 if (regs[i].uA > 0) { 268 ret = regulator_set_load(regs[i].reg, 269 regs[i].uA); 270 if (ret < 0) { 271 dev_err(qproc->dev, 272 "Failed to set regulator mode\n"); 273 goto err; 274 } 275 } 276 277 ret = regulator_enable(regs[i].reg); 278 if (ret) { 279 dev_err(qproc->dev, "Regulator enable failed\n"); 280 goto err; 281 } 282 } 283 284 return 0; 285 err: 286 for (; i >= 0; i--) { 287 if (regs[i].uV > 0) 288 regulator_set_voltage(regs[i].reg, 0, INT_MAX); 289 290 if (regs[i].uA > 0) 291 regulator_set_load(regs[i].reg, 0); 292 293 regulator_disable(regs[i].reg); 294 } 295 296 return ret; 297 } 298 299 static void q6v5_regulator_disable(struct q6v5 *qproc, 300 struct reg_info *regs, int count) 301 { 302 int i; 303 304 for (i = 0; i < count; i++) { 305 if (regs[i].uV > 0) 306 regulator_set_voltage(regs[i].reg, 0, INT_MAX); 307 308 if (regs[i].uA > 0) 309 regulator_set_load(regs[i].reg, 0); 310 311 regulator_disable(regs[i].reg); 312 } 313 } 314 315 static int q6v5_clk_enable(struct device *dev, 316 struct clk **clks, int count) 317 { 318 int rc; 319 int i; 320 321 for (i = 0; i < count; i++) { 322 rc = clk_prepare_enable(clks[i]); 323 if (rc) { 324 dev_err(dev, "Clock enable failed\n"); 325 goto err; 326 } 327 } 328 329 
return 0; 330 err: 331 for (i--; i >= 0; i--) 332 clk_disable_unprepare(clks[i]); 333 334 return rc; 335 } 336 337 static void q6v5_clk_disable(struct device *dev, 338 struct clk **clks, int count) 339 { 340 int i; 341 342 for (i = 0; i < count; i++) 343 clk_disable_unprepare(clks[i]); 344 } 345 346 static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds, 347 size_t pd_count) 348 { 349 int ret; 350 int i; 351 352 for (i = 0; i < pd_count; i++) { 353 dev_pm_genpd_set_performance_state(pds[i], INT_MAX); 354 ret = pm_runtime_get_sync(pds[i]); 355 if (ret < 0) 356 goto unroll_pd_votes; 357 } 358 359 return 0; 360 361 unroll_pd_votes: 362 for (i--; i >= 0; i--) { 363 dev_pm_genpd_set_performance_state(pds[i], 0); 364 pm_runtime_put(pds[i]); 365 } 366 367 return ret; 368 } 369 370 static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds, 371 size_t pd_count) 372 { 373 int i; 374 375 for (i = 0; i < pd_count; i++) { 376 dev_pm_genpd_set_performance_state(pds[i], 0); 377 pm_runtime_put(pds[i]); 378 } 379 } 380 381 static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm, 382 bool local, bool remote, phys_addr_t addr, 383 size_t size) 384 { 385 struct qcom_scm_vmperm next[2]; 386 int perms = 0; 387 388 if (!qproc->need_mem_protection) 389 return 0; 390 391 if (local == !!(*current_perm & BIT(QCOM_SCM_VMID_HLOS)) && 392 remote == !!(*current_perm & BIT(QCOM_SCM_VMID_MSS_MSA))) 393 return 0; 394 395 if (local) { 396 next[perms].vmid = QCOM_SCM_VMID_HLOS; 397 next[perms].perm = QCOM_SCM_PERM_RWX; 398 perms++; 399 } 400 401 if (remote) { 402 next[perms].vmid = QCOM_SCM_VMID_MSS_MSA; 403 next[perms].perm = QCOM_SCM_PERM_RW; 404 perms++; 405 } 406 407 return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K), 408 current_perm, next, perms); 409 } 410 411 static void q6v5_debug_policy_load(struct q6v5 *qproc) 412 { 413 const struct firmware *dp_fw; 414 415 if (request_firmware_direct(&dp_fw, "msadp", qproc->dev)) 416 return; 417 418 if (SZ_1M + 
dp_fw->size <= qproc->mba_size) { 419 memcpy(qproc->mba_region + SZ_1M, dp_fw->data, dp_fw->size); 420 qproc->dp_size = dp_fw->size; 421 } 422 423 release_firmware(dp_fw); 424 } 425 426 static int q6v5_load(struct rproc *rproc, const struct firmware *fw) 427 { 428 struct q6v5 *qproc = rproc->priv; 429 430 /* MBA is restricted to a maximum size of 1M */ 431 if (fw->size > qproc->mba_size || fw->size > SZ_1M) { 432 dev_err(qproc->dev, "MBA firmware load failed\n"); 433 return -EINVAL; 434 } 435 436 memcpy(qproc->mba_region, fw->data, fw->size); 437 q6v5_debug_policy_load(qproc); 438 439 return 0; 440 } 441 442 static int q6v5_reset_assert(struct q6v5 *qproc) 443 { 444 int ret; 445 446 if (qproc->has_alt_reset) { 447 reset_control_assert(qproc->pdc_reset); 448 ret = reset_control_reset(qproc->mss_restart); 449 reset_control_deassert(qproc->pdc_reset); 450 } else if (qproc->has_spare_reg) { 451 /* 452 * When the AXI pipeline is being reset with the Q6 modem partly 453 * operational there is possibility of AXI valid signal to 454 * glitch, leading to spurious transactions and Q6 hangs. A work 455 * around is employed by asserting the AXI_GATING_VALID_OVERRIDE 456 * BIT before triggering Q6 MSS reset. AXI_GATING_VALID_OVERRIDE 457 * is withdrawn post MSS assert followed by a MSS deassert, 458 * while holding the PDC reset. 
459 */ 460 reset_control_assert(qproc->pdc_reset); 461 regmap_update_bits(qproc->conn_map, qproc->conn_box, 462 AXI_GATING_VALID_OVERRIDE, 1); 463 reset_control_assert(qproc->mss_restart); 464 reset_control_deassert(qproc->pdc_reset); 465 regmap_update_bits(qproc->conn_map, qproc->conn_box, 466 AXI_GATING_VALID_OVERRIDE, 0); 467 ret = reset_control_deassert(qproc->mss_restart); 468 } else { 469 ret = reset_control_assert(qproc->mss_restart); 470 } 471 472 return ret; 473 } 474 475 static int q6v5_reset_deassert(struct q6v5 *qproc) 476 { 477 int ret; 478 479 if (qproc->has_alt_reset) { 480 reset_control_assert(qproc->pdc_reset); 481 writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET); 482 ret = reset_control_reset(qproc->mss_restart); 483 writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET); 484 reset_control_deassert(qproc->pdc_reset); 485 } else if (qproc->has_spare_reg) { 486 ret = reset_control_reset(qproc->mss_restart); 487 } else { 488 ret = reset_control_deassert(qproc->mss_restart); 489 } 490 491 return ret; 492 } 493 494 static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms) 495 { 496 unsigned long timeout; 497 s32 val; 498 499 timeout = jiffies + msecs_to_jiffies(ms); 500 for (;;) { 501 val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG); 502 if (val) 503 break; 504 505 if (time_after(jiffies, timeout)) 506 return -ETIMEDOUT; 507 508 msleep(1); 509 } 510 511 return val; 512 } 513 514 static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms) 515 { 516 517 unsigned long timeout; 518 s32 val; 519 520 timeout = jiffies + msecs_to_jiffies(ms); 521 for (;;) { 522 val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG); 523 if (val < 0) 524 break; 525 526 if (!status && val) 527 break; 528 else if (status && val == status) 529 break; 530 531 if (time_after(jiffies, timeout)) 532 return -ETIMEDOUT; 533 534 msleep(1); 535 } 536 537 return val; 538 } 539 540 static void q6v5_dump_mba_logs(struct q6v5 *qproc) 541 { 542 struct rproc *rproc = qproc->rproc; 543 void *data; 
544 545 if (!qproc->has_mba_logs) 546 return; 547 548 if (q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, qproc->mba_phys, 549 qproc->mba_size)) 550 return; 551 552 data = vmalloc(MBA_LOG_SIZE); 553 if (!data) 554 return; 555 556 memcpy(data, qproc->mba_region, MBA_LOG_SIZE); 557 dev_coredumpv(&rproc->dev, data, MBA_LOG_SIZE, GFP_KERNEL); 558 } 559 560 static int q6v5proc_reset(struct q6v5 *qproc) 561 { 562 u32 val; 563 int ret; 564 int i; 565 566 if (qproc->version == MSS_SDM845) { 567 val = readl(qproc->reg_base + QDSP6SS_SLEEP); 568 val |= Q6SS_CBCR_CLKEN; 569 writel(val, qproc->reg_base + QDSP6SS_SLEEP); 570 571 ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP, 572 val, !(val & Q6SS_CBCR_CLKOFF), 1, 573 Q6SS_CBCR_TIMEOUT_US); 574 if (ret) { 575 dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n"); 576 return -ETIMEDOUT; 577 } 578 579 /* De-assert QDSP6 stop core */ 580 writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START); 581 /* Trigger boot FSM */ 582 writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD); 583 584 ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS, 585 val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT); 586 if (ret) { 587 dev_err(qproc->dev, "Boot FSM failed to complete.\n"); 588 /* Reset the modem so that boot FSM is in reset state */ 589 q6v5_reset_deassert(qproc); 590 return ret; 591 } 592 593 goto pbl_wait; 594 } else if (qproc->version == MSS_SC7180) { 595 val = readl(qproc->reg_base + QDSP6SS_SLEEP); 596 val |= Q6SS_CBCR_CLKEN; 597 writel(val, qproc->reg_base + QDSP6SS_SLEEP); 598 599 ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP, 600 val, !(val & Q6SS_CBCR_CLKOFF), 1, 601 Q6SS_CBCR_TIMEOUT_US); 602 if (ret) { 603 dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n"); 604 return -ETIMEDOUT; 605 } 606 607 /* Turn on the XO clock needed for PLL setup */ 608 val = readl(qproc->reg_base + QDSP6SS_XO_CBCR); 609 val |= Q6SS_CBCR_CLKEN; 610 writel(val, qproc->reg_base + QDSP6SS_XO_CBCR); 611 612 ret = 
readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR, 613 val, !(val & Q6SS_CBCR_CLKOFF), 1, 614 Q6SS_CBCR_TIMEOUT_US); 615 if (ret) { 616 dev_err(qproc->dev, "QDSP6SS XO clock timed out\n"); 617 return -ETIMEDOUT; 618 } 619 620 /* Configure Q6 core CBCR to auto-enable after reset sequence */ 621 val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR); 622 val |= Q6SS_CBCR_CLKEN; 623 writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR); 624 625 /* De-assert the Q6 stop core signal */ 626 writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START); 627 628 /* Wait for 10 us for any staggering logic to settle */ 629 usleep_range(10, 20); 630 631 /* Trigger the boot FSM to start the Q6 out-of-reset sequence */ 632 writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD); 633 634 /* Poll the MSS_STATUS for FSM completion */ 635 ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS, 636 val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT); 637 if (ret) { 638 dev_err(qproc->dev, "Boot FSM failed to complete.\n"); 639 /* Reset the modem so that boot FSM is in reset state */ 640 q6v5_reset_deassert(qproc); 641 return ret; 642 } 643 goto pbl_wait; 644 } else if (qproc->version == MSS_MSM8996 || 645 qproc->version == MSS_MSM8998) { 646 int mem_pwr_ctl; 647 648 /* Override the ACC value if required */ 649 writel(QDSP6SS_ACC_OVERRIDE_VAL, 650 qproc->reg_base + QDSP6SS_STRAP_ACC); 651 652 /* Assert resets, stop core */ 653 val = readl(qproc->reg_base + QDSP6SS_RESET_REG); 654 val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE; 655 writel(val, qproc->reg_base + QDSP6SS_RESET_REG); 656 657 /* BHS require xo cbcr to be enabled */ 658 val = readl(qproc->reg_base + QDSP6SS_XO_CBCR); 659 val |= Q6SS_CBCR_CLKEN; 660 writel(val, qproc->reg_base + QDSP6SS_XO_CBCR); 661 662 /* Read CLKOFF bit to go low indicating CLK is enabled */ 663 ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR, 664 val, !(val & Q6SS_CBCR_CLKOFF), 1, 665 Q6SS_CBCR_TIMEOUT_US); 666 if (ret) { 667 dev_err(qproc->dev, 668 
"xo cbcr enabling timed out (rc:%d)\n", ret); 669 return ret; 670 } 671 /* Enable power block headswitch and wait for it to stabilize */ 672 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 673 val |= QDSP6v56_BHS_ON; 674 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 675 val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 676 udelay(1); 677 678 /* Put LDO in bypass mode */ 679 val |= QDSP6v56_LDO_BYP; 680 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 681 682 /* Deassert QDSP6 compiler memory clamp */ 683 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 684 val &= ~QDSP6v56_CLAMP_QMC_MEM; 685 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 686 687 /* Deassert memory peripheral sleep and L2 memory standby */ 688 val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N; 689 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 690 691 /* Turn on L1, L2, ETB and JU memories 1 at a time */ 692 if (qproc->version == MSS_MSM8996) { 693 mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL; 694 i = 19; 695 } else { 696 /* MSS_MSM8998 */ 697 mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL; 698 i = 28; 699 } 700 val = readl(qproc->reg_base + mem_pwr_ctl); 701 for (; i >= 0; i--) { 702 val |= BIT(i); 703 writel(val, qproc->reg_base + mem_pwr_ctl); 704 /* 705 * Read back value to ensure the write is done then 706 * wait for 1us for both memory peripheral and data 707 * array to turn on. 
708 */ 709 val |= readl(qproc->reg_base + mem_pwr_ctl); 710 udelay(1); 711 } 712 /* Remove word line clamp */ 713 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 714 val &= ~QDSP6v56_CLAMP_WL; 715 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 716 } else { 717 /* Assert resets, stop core */ 718 val = readl(qproc->reg_base + QDSP6SS_RESET_REG); 719 val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE; 720 writel(val, qproc->reg_base + QDSP6SS_RESET_REG); 721 722 /* Enable power block headswitch and wait for it to stabilize */ 723 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 724 val |= QDSS_BHS_ON | QDSS_LDO_BYP; 725 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 726 val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 727 udelay(1); 728 /* 729 * Turn on memories. L2 banks should be done individually 730 * to minimize inrush current. 731 */ 732 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 733 val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N | 734 Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N; 735 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 736 val |= Q6SS_L2DATA_SLP_NRET_N_2; 737 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 738 val |= Q6SS_L2DATA_SLP_NRET_N_1; 739 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 740 val |= Q6SS_L2DATA_SLP_NRET_N_0; 741 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 742 } 743 /* Remove IO clamp */ 744 val &= ~Q6SS_CLAMP_IO; 745 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 746 747 /* Bring core out of reset */ 748 val = readl(qproc->reg_base + QDSP6SS_RESET_REG); 749 val &= ~Q6SS_CORE_ARES; 750 writel(val, qproc->reg_base + QDSP6SS_RESET_REG); 751 752 /* Turn on core clock */ 753 val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG); 754 val |= Q6SS_CLK_ENABLE; 755 writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG); 756 757 /* Start core execution */ 758 val = readl(qproc->reg_base + QDSP6SS_RESET_REG); 759 val &= ~Q6SS_STOP_CORE; 760 writel(val, qproc->reg_base + 
QDSP6SS_RESET_REG); 761 762 pbl_wait: 763 /* Wait for PBL status */ 764 ret = q6v5_rmb_pbl_wait(qproc, 1000); 765 if (ret == -ETIMEDOUT) { 766 dev_err(qproc->dev, "PBL boot timed out\n"); 767 } else if (ret != RMB_PBL_SUCCESS) { 768 dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret); 769 ret = -EINVAL; 770 } else { 771 ret = 0; 772 } 773 774 return ret; 775 } 776 777 static void q6v5proc_halt_axi_port(struct q6v5 *qproc, 778 struct regmap *halt_map, 779 u32 offset) 780 { 781 unsigned int val; 782 int ret; 783 784 /* Check if we're already idle */ 785 ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val); 786 if (!ret && val) 787 return; 788 789 /* Assert halt request */ 790 regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1); 791 792 /* Wait for halt */ 793 regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val, 794 val, 1000, HALT_ACK_TIMEOUT_US); 795 796 ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val); 797 if (ret || !val) 798 dev_err(qproc->dev, "port failed halt\n"); 799 800 /* Clear halt request (port will remain halted until reset) */ 801 regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0); 802 } 803 804 static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw) 805 { 806 unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS; 807 dma_addr_t phys; 808 void *metadata; 809 int mdata_perm; 810 int xferop_ret; 811 size_t size; 812 void *ptr; 813 int ret; 814 815 metadata = qcom_mdt_read_metadata(fw, &size); 816 if (IS_ERR(metadata)) 817 return PTR_ERR(metadata); 818 819 ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs); 820 if (!ptr) { 821 kfree(metadata); 822 dev_err(qproc->dev, "failed to allocate mdt buffer\n"); 823 return -ENOMEM; 824 } 825 826 memcpy(ptr, metadata, size); 827 828 /* Hypervisor mapping to access metadata by modem */ 829 mdata_perm = BIT(QCOM_SCM_VMID_HLOS); 830 ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true, 831 phys, size); 832 if (ret) { 833 
dev_err(qproc->dev, 834 "assigning Q6 access to metadata failed: %d\n", ret); 835 ret = -EAGAIN; 836 goto free_dma_attrs; 837 } 838 839 writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG); 840 writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG); 841 842 ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000); 843 if (ret == -ETIMEDOUT) 844 dev_err(qproc->dev, "MPSS header authentication timed out\n"); 845 else if (ret < 0) 846 dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret); 847 848 /* Metadata authentication done, remove modem access */ 849 xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false, 850 phys, size); 851 if (xferop_ret) 852 dev_warn(qproc->dev, 853 "mdt buffer not reclaimed system may become unstable\n"); 854 855 free_dma_attrs: 856 dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs); 857 kfree(metadata); 858 859 return ret < 0 ? ret : 0; 860 } 861 862 static bool q6v5_phdr_valid(const struct elf32_phdr *phdr) 863 { 864 if (phdr->p_type != PT_LOAD) 865 return false; 866 867 if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH) 868 return false; 869 870 if (!phdr->p_memsz) 871 return false; 872 873 return true; 874 } 875 876 static int q6v5_mba_load(struct q6v5 *qproc) 877 { 878 int ret; 879 int xfermemop_ret; 880 bool mba_load_err = false; 881 882 qcom_q6v5_prepare(&qproc->q6v5); 883 884 ret = q6v5_pds_enable(qproc, qproc->active_pds, qproc->active_pd_count); 885 if (ret < 0) { 886 dev_err(qproc->dev, "failed to enable active power domains\n"); 887 goto disable_irqs; 888 } 889 890 ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count); 891 if (ret < 0) { 892 dev_err(qproc->dev, "failed to enable proxy power domains\n"); 893 goto disable_active_pds; 894 } 895 896 ret = q6v5_regulator_enable(qproc, qproc->proxy_regs, 897 qproc->proxy_reg_count); 898 if (ret) { 899 dev_err(qproc->dev, "failed to enable proxy supplies\n"); 900 goto disable_proxy_pds; 901 } 902 
903 ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks, 904 qproc->proxy_clk_count); 905 if (ret) { 906 dev_err(qproc->dev, "failed to enable proxy clocks\n"); 907 goto disable_proxy_reg; 908 } 909 910 ret = q6v5_regulator_enable(qproc, qproc->active_regs, 911 qproc->active_reg_count); 912 if (ret) { 913 dev_err(qproc->dev, "failed to enable supplies\n"); 914 goto disable_proxy_clk; 915 } 916 917 ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks, 918 qproc->reset_clk_count); 919 if (ret) { 920 dev_err(qproc->dev, "failed to enable reset clocks\n"); 921 goto disable_vdd; 922 } 923 924 ret = q6v5_reset_deassert(qproc); 925 if (ret) { 926 dev_err(qproc->dev, "failed to deassert mss restart\n"); 927 goto disable_reset_clks; 928 } 929 930 ret = q6v5_clk_enable(qproc->dev, qproc->active_clks, 931 qproc->active_clk_count); 932 if (ret) { 933 dev_err(qproc->dev, "failed to enable clocks\n"); 934 goto assert_reset; 935 } 936 937 /* Assign MBA image access in DDR to q6 */ 938 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true, 939 qproc->mba_phys, qproc->mba_size); 940 if (ret) { 941 dev_err(qproc->dev, 942 "assigning Q6 access to mba memory failed: %d\n", ret); 943 goto disable_active_clks; 944 } 945 946 writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG); 947 if (qproc->dp_size) { 948 writel(qproc->mba_phys + SZ_1M, qproc->rmb_base + RMB_PMI_CODE_START_REG); 949 writel(qproc->dp_size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); 950 } 951 952 ret = q6v5proc_reset(qproc); 953 if (ret) 954 goto reclaim_mba; 955 956 ret = q6v5_rmb_mba_wait(qproc, 0, 5000); 957 if (ret == -ETIMEDOUT) { 958 dev_err(qproc->dev, "MBA boot timed out\n"); 959 goto halt_axi_ports; 960 } else if (ret != RMB_MBA_XPU_UNLOCKED && 961 ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) { 962 dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret); 963 ret = -EINVAL; 964 goto halt_axi_ports; 965 } 966 967 qproc->dump_mba_loaded = true; 968 return 0; 969 970 halt_axi_ports: 971 
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6); 972 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem); 973 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc); 974 mba_load_err = true; 975 reclaim_mba: 976 xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, 977 false, qproc->mba_phys, 978 qproc->mba_size); 979 if (xfermemop_ret) { 980 dev_err(qproc->dev, 981 "Failed to reclaim mba buffer, system may become unstable\n"); 982 } else if (mba_load_err) { 983 q6v5_dump_mba_logs(qproc); 984 } 985 986 disable_active_clks: 987 q6v5_clk_disable(qproc->dev, qproc->active_clks, 988 qproc->active_clk_count); 989 assert_reset: 990 q6v5_reset_assert(qproc); 991 disable_reset_clks: 992 q6v5_clk_disable(qproc->dev, qproc->reset_clks, 993 qproc->reset_clk_count); 994 disable_vdd: 995 q6v5_regulator_disable(qproc, qproc->active_regs, 996 qproc->active_reg_count); 997 disable_proxy_clk: 998 q6v5_clk_disable(qproc->dev, qproc->proxy_clks, 999 qproc->proxy_clk_count); 1000 disable_proxy_reg: 1001 q6v5_regulator_disable(qproc, qproc->proxy_regs, 1002 qproc->proxy_reg_count); 1003 disable_proxy_pds: 1004 q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count); 1005 disable_active_pds: 1006 q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count); 1007 disable_irqs: 1008 qcom_q6v5_unprepare(&qproc->q6v5); 1009 1010 return ret; 1011 } 1012 1013 static void q6v5_mba_reclaim(struct q6v5 *qproc) 1014 { 1015 int ret; 1016 u32 val; 1017 1018 qproc->dump_mba_loaded = false; 1019 qproc->dp_size = 0; 1020 1021 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6); 1022 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem); 1023 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc); 1024 if (qproc->version == MSS_MSM8996) { 1025 /* 1026 * To avoid high MX current during LPASS/MSS restart. 
1027 */ 1028 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 1029 val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL | 1030 QDSP6v56_CLAMP_QMC_MEM; 1031 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 1032 } 1033 1034 q6v5_reset_assert(qproc); 1035 1036 q6v5_clk_disable(qproc->dev, qproc->reset_clks, 1037 qproc->reset_clk_count); 1038 q6v5_clk_disable(qproc->dev, qproc->active_clks, 1039 qproc->active_clk_count); 1040 q6v5_regulator_disable(qproc, qproc->active_regs, 1041 qproc->active_reg_count); 1042 q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count); 1043 1044 /* In case of failure or coredump scenario where reclaiming MBA memory 1045 * could not happen reclaim it here. 1046 */ 1047 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, 1048 qproc->mba_phys, 1049 qproc->mba_size); 1050 WARN_ON(ret); 1051 1052 ret = qcom_q6v5_unprepare(&qproc->q6v5); 1053 if (ret) { 1054 q6v5_pds_disable(qproc, qproc->proxy_pds, 1055 qproc->proxy_pd_count); 1056 q6v5_clk_disable(qproc->dev, qproc->proxy_clks, 1057 qproc->proxy_clk_count); 1058 q6v5_regulator_disable(qproc, qproc->proxy_regs, 1059 qproc->proxy_reg_count); 1060 } 1061 } 1062 1063 static int q6v5_reload_mba(struct rproc *rproc) 1064 { 1065 struct q6v5 *qproc = rproc->priv; 1066 const struct firmware *fw; 1067 int ret; 1068 1069 ret = request_firmware(&fw, rproc->firmware, qproc->dev); 1070 if (ret < 0) 1071 return ret; 1072 1073 q6v5_load(rproc, fw); 1074 ret = q6v5_mba_load(qproc); 1075 release_firmware(fw); 1076 1077 return ret; 1078 } 1079 1080 static int q6v5_mpss_load(struct q6v5 *qproc) 1081 { 1082 const struct elf32_phdr *phdrs; 1083 const struct elf32_phdr *phdr; 1084 const struct firmware *seg_fw; 1085 const struct firmware *fw; 1086 struct elf32_hdr *ehdr; 1087 phys_addr_t mpss_reloc; 1088 phys_addr_t boot_addr; 1089 phys_addr_t min_addr = PHYS_ADDR_MAX; 1090 phys_addr_t max_addr = 0; 1091 u32 code_length; 1092 bool relocate = false; 1093 char *fw_name; 1094 size_t fw_name_len; 
1095 ssize_t offset; 1096 size_t size = 0; 1097 void *ptr; 1098 int ret; 1099 int i; 1100 1101 fw_name_len = strlen(qproc->hexagon_mdt_image); 1102 if (fw_name_len <= 4) 1103 return -EINVAL; 1104 1105 fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL); 1106 if (!fw_name) 1107 return -ENOMEM; 1108 1109 ret = request_firmware(&fw, fw_name, qproc->dev); 1110 if (ret < 0) { 1111 dev_err(qproc->dev, "unable to load %s\n", fw_name); 1112 goto out; 1113 } 1114 1115 /* Initialize the RMB validator */ 1116 writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); 1117 1118 ret = q6v5_mpss_init_image(qproc, fw); 1119 if (ret) 1120 goto release_firmware; 1121 1122 ehdr = (struct elf32_hdr *)fw->data; 1123 phdrs = (struct elf32_phdr *)(ehdr + 1); 1124 1125 for (i = 0; i < ehdr->e_phnum; i++) { 1126 phdr = &phdrs[i]; 1127 1128 if (!q6v5_phdr_valid(phdr)) 1129 continue; 1130 1131 if (phdr->p_flags & QCOM_MDT_RELOCATABLE) 1132 relocate = true; 1133 1134 if (phdr->p_paddr < min_addr) 1135 min_addr = phdr->p_paddr; 1136 1137 if (phdr->p_paddr + phdr->p_memsz > max_addr) 1138 max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K); 1139 } 1140 1141 /** 1142 * In case of a modem subsystem restart on secure devices, the modem 1143 * memory can be reclaimed only after MBA is loaded. For modem cold 1144 * boot this will be a nop 1145 */ 1146 q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false, 1147 qproc->mpss_phys, qproc->mpss_size); 1148 1149 /* Share ownership between Linux and MSS, during segment loading */ 1150 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true, 1151 qproc->mpss_phys, qproc->mpss_size); 1152 if (ret) { 1153 dev_err(qproc->dev, 1154 "assigning Q6 access to mpss memory failed: %d\n", ret); 1155 ret = -EAGAIN; 1156 goto release_firmware; 1157 } 1158 1159 mpss_reloc = relocate ? 
min_addr : qproc->mpss_phys; 1160 qproc->mpss_reloc = mpss_reloc; 1161 /* Load firmware segments */ 1162 for (i = 0; i < ehdr->e_phnum; i++) { 1163 phdr = &phdrs[i]; 1164 1165 if (!q6v5_phdr_valid(phdr)) 1166 continue; 1167 1168 offset = phdr->p_paddr - mpss_reloc; 1169 if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) { 1170 dev_err(qproc->dev, "segment outside memory range\n"); 1171 ret = -EINVAL; 1172 goto release_firmware; 1173 } 1174 1175 ptr = ioremap_wc(qproc->mpss_phys + offset, phdr->p_memsz); 1176 if (!ptr) { 1177 dev_err(qproc->dev, 1178 "unable to map memory region: %pa+%zx-%x\n", 1179 &qproc->mpss_phys, offset, phdr->p_memsz); 1180 goto release_firmware; 1181 } 1182 1183 if (phdr->p_filesz && phdr->p_offset < fw->size) { 1184 /* Firmware is large enough to be non-split */ 1185 if (phdr->p_offset + phdr->p_filesz > fw->size) { 1186 dev_err(qproc->dev, 1187 "failed to load segment %d from truncated file %s\n", 1188 i, fw_name); 1189 ret = -EINVAL; 1190 iounmap(ptr); 1191 goto release_firmware; 1192 } 1193 1194 memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz); 1195 } else if (phdr->p_filesz) { 1196 /* Replace "xxx.xxx" with "xxx.bxx" */ 1197 sprintf(fw_name + fw_name_len - 3, "b%02d", i); 1198 ret = request_firmware_into_buf(&seg_fw, fw_name, qproc->dev, 1199 ptr, phdr->p_filesz); 1200 if (ret) { 1201 dev_err(qproc->dev, "failed to load %s\n", fw_name); 1202 iounmap(ptr); 1203 goto release_firmware; 1204 } 1205 1206 release_firmware(seg_fw); 1207 } 1208 1209 if (phdr->p_memsz > phdr->p_filesz) { 1210 memset(ptr + phdr->p_filesz, 0, 1211 phdr->p_memsz - phdr->p_filesz); 1212 } 1213 iounmap(ptr); 1214 size += phdr->p_memsz; 1215 1216 code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); 1217 if (!code_length) { 1218 boot_addr = relocate ? 
qproc->mpss_phys : min_addr; 1219 writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG); 1220 writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG); 1221 } 1222 writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); 1223 1224 ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG); 1225 if (ret < 0) { 1226 dev_err(qproc->dev, "MPSS authentication failed: %d\n", 1227 ret); 1228 goto release_firmware; 1229 } 1230 } 1231 1232 /* Transfer ownership of modem ddr region to q6 */ 1233 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true, 1234 qproc->mpss_phys, qproc->mpss_size); 1235 if (ret) { 1236 dev_err(qproc->dev, 1237 "assigning Q6 access to mpss memory failed: %d\n", ret); 1238 ret = -EAGAIN; 1239 goto release_firmware; 1240 } 1241 1242 ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000); 1243 if (ret == -ETIMEDOUT) 1244 dev_err(qproc->dev, "MPSS authentication timed out\n"); 1245 else if (ret < 0) 1246 dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret); 1247 1248 qcom_pil_info_store("modem", qproc->mpss_phys, qproc->mpss_size); 1249 1250 release_firmware: 1251 release_firmware(fw); 1252 out: 1253 kfree(fw_name); 1254 1255 return ret < 0 ? 
ret : 0; 1256 } 1257 1258 static void qcom_q6v5_dump_segment(struct rproc *rproc, 1259 struct rproc_dump_segment *segment, 1260 void *dest, size_t cp_offset, size_t size) 1261 { 1262 int ret = 0; 1263 struct q6v5 *qproc = rproc->priv; 1264 int offset = segment->da - qproc->mpss_reloc; 1265 void *ptr = NULL; 1266 1267 /* Unlock mba before copying segments */ 1268 if (!qproc->dump_mba_loaded) { 1269 ret = q6v5_reload_mba(rproc); 1270 if (!ret) { 1271 /* Reset ownership back to Linux to copy segments */ 1272 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, 1273 true, false, 1274 qproc->mpss_phys, 1275 qproc->mpss_size); 1276 } 1277 } 1278 1279 if (!ret) 1280 ptr = ioremap_wc(qproc->mpss_phys + offset + cp_offset, size); 1281 1282 if (ptr) { 1283 memcpy(dest, ptr, size); 1284 iounmap(ptr); 1285 } else { 1286 memset(dest, 0xff, size); 1287 } 1288 1289 qproc->current_dump_size += size; 1290 1291 /* Reclaim mba after copying segments */ 1292 if (qproc->current_dump_size == qproc->total_dump_size) { 1293 if (qproc->dump_mba_loaded) { 1294 /* Try to reset ownership back to Q6 */ 1295 q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, 1296 false, true, 1297 qproc->mpss_phys, 1298 qproc->mpss_size); 1299 q6v5_mba_reclaim(qproc); 1300 } 1301 } 1302 } 1303 1304 static int q6v5_start(struct rproc *rproc) 1305 { 1306 struct q6v5 *qproc = (struct q6v5 *)rproc->priv; 1307 int xfermemop_ret; 1308 int ret; 1309 1310 ret = q6v5_mba_load(qproc); 1311 if (ret) 1312 return ret; 1313 1314 dev_info(qproc->dev, "MBA booted with%s debug policy, loading mpss\n", 1315 qproc->dp_size ? 
		 "" : "out");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto reclaim_mpss;

	/* Wait for the modem to signal it is up (handshake via qcom_q6v5) */
	ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "start timed out\n");
		goto reclaim_mpss;
	}

	/* Take the MBA buffer back from Q6 now that boot is complete */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						false, qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret)
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer system may become unstable\n");

	/* Reset Dump Segment Mask */
	qproc->current_dump_size = 0;
	qproc->running = true;

	return 0;

reclaim_mpss:
	q6v5_mba_reclaim(qproc);
	q6v5_dump_mba_logs(qproc);

	return ret;
}

/* rproc .stop op: request a graceful shutdown, then reclaim MBA resources */
static int q6v5_stop(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int ret;

	qproc->running = false;

	ret = qcom_q6v5_request_stop(&qproc->q6v5);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "timed out on wait\n");

	q6v5_mba_reclaim(qproc);

	return 0;
}

/*
 * rproc .parse_fw op: register one coredump segment per valid program
 * header of the modem .mdt image, and record the total dump size so
 * qcom_q6v5_dump_segment() can detect the final segment.
 * Note: parses the mdt image, not the MBA firmware passed in @mba_fw.
 */
static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
					    const struct firmware *mba_fw)
{
	const struct firmware *fw;
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct elf32_hdr *ehdr;
	struct q6v5 *qproc = rproc->priv;
	unsigned long i;
	int ret;

	ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load %s\n",
			qproc->hexagon_mdt_image);
		return ret;
	}

	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);
	qproc->total_dump_size = 0;

	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
							phdr->p_memsz,
							qcom_q6v5_dump_segment,
							NULL);
		if (ret)
			break;

		qproc->total_dump_size += phdr->p_memsz;
	}

	release_firmware(fw);
	return ret;
}

static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.parse_fw = qcom_q6v5_register_dump_segments,
	.load = q6v5_load,
};

/*
 * Called once the modem has taken over its proxy votes: drop the proxy
 * clocks, regulators and power domains held on its behalf during boot.
 */
static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
{
	struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);

	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
}

/*
 * Map the QDSP6 and RMB register regions and resolve the syscon-based
 * halt (and optional spare) register handles from the device tree.
 */
static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
{
	struct of_phandle_args args;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
	qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->reg_base))
		return PTR_ERR(qproc->reg_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
	qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->rmb_base))
		return PTR_ERR(qproc->rmb_base);

	/* "qcom,halt-regs" = <&syscon q6_off modem_off nc_off> */
	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
					       "qcom,halt-regs", 3, 0, &args);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
		return -EINVAL;
	}

	qproc->halt_map = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(qproc->halt_map))
		return PTR_ERR(qproc->halt_map);

	qproc->halt_q6 = args.args[0];
	qproc->halt_modem = args.args[1];
	qproc->halt_nc = args.args[2];

	if (qproc->has_spare_reg) {
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,spare-regs",
						       1, 0, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse spare-regs\n");
			return -EINVAL;
		}

		qproc->conn_map = syscon_node_to_regmap(args.np);
		of_node_put(args.np);
		if (IS_ERR(qproc->conn_map))
			return PTR_ERR(qproc->conn_map);

		qproc->conn_box = args.args[0];
	}

	return 0;
}

/*
 * Acquire the clocks in the NULL-terminated @clk_names list.
 * Returns the number of clocks acquired, or a negative errno.
 */
static int q6v5_init_clocks(struct device *dev, struct clk **clks,
			    char **clk_names)
{
	int i;

	if (!clk_names)
		return 0;

	for (i = 0; clk_names[i]; i++) {
		clks[i] = devm_clk_get(dev, clk_names[i]);
		if (IS_ERR(clks[i])) {
			int rc = PTR_ERR(clks[i]);

			/* Probe deferral is expected; don't log it as error */
			if (rc != -EPROBE_DEFER)
				dev_err(dev, "Failed to get %s clock\n",
					clk_names[i]);
			return rc;
		}
	}

	return i;
}

/*
 * Attach the power domains in the NULL-terminated @pd_names list.
 * Returns the number of domains attached, or a negative errno after
 * detaching any already-attached domains.
 */
static int q6v5_pds_attach(struct device *dev, struct device **devs,
			   char **pd_names)
{
	size_t num_pds = 0;
	int ret;
	int i;

	if (!pd_names)
		return 0;

	while (pd_names[num_pds])
		num_pds++;

	for (i = 0; i < num_pds; i++) {
		devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
		if (IS_ERR_OR_NULL(devs[i])) {
			/* NULL (no domain) is mapped to -ENODATA */
			ret = PTR_ERR(devs[i]) ? : -ENODATA;
			goto unroll_attach;
		}
	}

	return num_pds;

unroll_attach:
	for (i--; i >= 0; i--)
		dev_pm_domain_detach(devs[i], false);

	return ret;
}

/* Detach @pd_count previously attached power domains */
static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
			    size_t pd_count)
{
	int i;

	for (i = 0; i < pd_count; i++)
		dev_pm_domain_detach(pds[i], false);
}

/* Acquire the mss_restart reset line, plus pdc_reset where applicable */
static int q6v5_init_reset(struct q6v5 *qproc)
{
	qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
							      "mss_restart");
	if (IS_ERR(qproc->mss_restart)) {
		dev_err(qproc->dev, "failed to acquire mss restart\n");
		return PTR_ERR(qproc->mss_restart);
	}

	if (qproc->has_alt_reset || qproc->has_spare_reg) {
		qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
								    "pdc_reset");
		if (IS_ERR(qproc->pdc_reset)) {
			dev_err(qproc->dev, "failed to acquire pdc reset\n");
			return PTR_ERR(qproc->pdc_reset);
		}
	}

	return 0;
}

/* Resolve the mba and mpss reserved-memory carveouts and map the MBA one */
static int q6v5_alloc_memory_region(struct q6v5 *qproc)
{
	struct device_node *child;
	struct device_node *node;
	struct resource r;
	int ret;

	/*
	 * In the absence of mba/mpss sub-child, extract the mba and mpss
	 * reserved memory regions from device's memory-region property.
1570 */ 1571 child = of_get_child_by_name(qproc->dev->of_node, "mba"); 1572 if (!child) 1573 node = of_parse_phandle(qproc->dev->of_node, 1574 "memory-region", 0); 1575 else 1576 node = of_parse_phandle(child, "memory-region", 0); 1577 1578 ret = of_address_to_resource(node, 0, &r); 1579 if (ret) { 1580 dev_err(qproc->dev, "unable to resolve mba region\n"); 1581 return ret; 1582 } 1583 of_node_put(node); 1584 1585 qproc->mba_phys = r.start; 1586 qproc->mba_size = resource_size(&r); 1587 qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size); 1588 if (!qproc->mba_region) { 1589 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", 1590 &r.start, qproc->mba_size); 1591 return -EBUSY; 1592 } 1593 1594 if (!child) { 1595 node = of_parse_phandle(qproc->dev->of_node, 1596 "memory-region", 1); 1597 } else { 1598 child = of_get_child_by_name(qproc->dev->of_node, "mpss"); 1599 node = of_parse_phandle(child, "memory-region", 0); 1600 } 1601 1602 ret = of_address_to_resource(node, 0, &r); 1603 if (ret) { 1604 dev_err(qproc->dev, "unable to resolve mpss region\n"); 1605 return ret; 1606 } 1607 of_node_put(node); 1608 1609 qproc->mpss_phys = qproc->mpss_reloc = r.start; 1610 qproc->mpss_size = resource_size(&r); 1611 1612 return 0; 1613 } 1614 1615 #if IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) 1616 1617 /* Register IPA notification function */ 1618 int qcom_register_ipa_notify(struct rproc *rproc, qcom_ipa_notify_t notify, 1619 void *data) 1620 { 1621 struct qcom_rproc_ipa_notify *ipa_notify; 1622 struct q6v5 *qproc = rproc->priv; 1623 1624 if (!notify) 1625 return -EINVAL; 1626 1627 ipa_notify = &qproc->ipa_notify_subdev; 1628 if (ipa_notify->notify) 1629 return -EBUSY; 1630 1631 ipa_notify->notify = notify; 1632 ipa_notify->data = data; 1633 1634 return 0; 1635 } 1636 EXPORT_SYMBOL_GPL(qcom_register_ipa_notify); 1637 1638 /* Deregister IPA notification function */ 1639 void qcom_deregister_ipa_notify(struct rproc *rproc) 1640 { 1641 struct 
q6v5 *qproc = rproc->priv; 1642 1643 qproc->ipa_notify_subdev.notify = NULL; 1644 } 1645 EXPORT_SYMBOL_GPL(qcom_deregister_ipa_notify); 1646 #endif /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */ 1647 1648 static int q6v5_probe(struct platform_device *pdev) 1649 { 1650 const struct rproc_hexagon_res *desc; 1651 struct q6v5 *qproc; 1652 struct rproc *rproc; 1653 const char *mba_image; 1654 int ret; 1655 1656 desc = of_device_get_match_data(&pdev->dev); 1657 if (!desc) 1658 return -EINVAL; 1659 1660 if (desc->need_mem_protection && !qcom_scm_is_available()) 1661 return -EPROBE_DEFER; 1662 1663 mba_image = desc->hexagon_mba_image; 1664 ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name", 1665 0, &mba_image); 1666 if (ret < 0 && ret != -EINVAL) 1667 return ret; 1668 1669 rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops, 1670 mba_image, sizeof(*qproc)); 1671 if (!rproc) { 1672 dev_err(&pdev->dev, "failed to allocate rproc\n"); 1673 return -ENOMEM; 1674 } 1675 1676 rproc->auto_boot = false; 1677 rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE); 1678 1679 qproc = (struct q6v5 *)rproc->priv; 1680 qproc->dev = &pdev->dev; 1681 qproc->rproc = rproc; 1682 qproc->hexagon_mdt_image = "modem.mdt"; 1683 ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name", 1684 1, &qproc->hexagon_mdt_image); 1685 if (ret < 0 && ret != -EINVAL) 1686 goto free_rproc; 1687 1688 platform_set_drvdata(pdev, qproc); 1689 1690 qproc->has_spare_reg = desc->has_spare_reg; 1691 ret = q6v5_init_mem(qproc, pdev); 1692 if (ret) 1693 goto free_rproc; 1694 1695 ret = q6v5_alloc_memory_region(qproc); 1696 if (ret) 1697 goto free_rproc; 1698 1699 ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks, 1700 desc->proxy_clk_names); 1701 if (ret < 0) { 1702 dev_err(&pdev->dev, "Failed to get proxy clocks.\n"); 1703 goto free_rproc; 1704 } 1705 qproc->proxy_clk_count = ret; 1706 1707 ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks, 1708 desc->reset_clk_names); 1709 if 
(ret < 0) { 1710 dev_err(&pdev->dev, "Failed to get reset clocks.\n"); 1711 goto free_rproc; 1712 } 1713 qproc->reset_clk_count = ret; 1714 1715 ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks, 1716 desc->active_clk_names); 1717 if (ret < 0) { 1718 dev_err(&pdev->dev, "Failed to get active clocks.\n"); 1719 goto free_rproc; 1720 } 1721 qproc->active_clk_count = ret; 1722 1723 ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs, 1724 desc->proxy_supply); 1725 if (ret < 0) { 1726 dev_err(&pdev->dev, "Failed to get proxy regulators.\n"); 1727 goto free_rproc; 1728 } 1729 qproc->proxy_reg_count = ret; 1730 1731 ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs, 1732 desc->active_supply); 1733 if (ret < 0) { 1734 dev_err(&pdev->dev, "Failed to get active regulators.\n"); 1735 goto free_rproc; 1736 } 1737 qproc->active_reg_count = ret; 1738 1739 ret = q6v5_pds_attach(&pdev->dev, qproc->active_pds, 1740 desc->active_pd_names); 1741 if (ret < 0) { 1742 dev_err(&pdev->dev, "Failed to attach active power domains\n"); 1743 goto free_rproc; 1744 } 1745 qproc->active_pd_count = ret; 1746 1747 ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds, 1748 desc->proxy_pd_names); 1749 if (ret < 0) { 1750 dev_err(&pdev->dev, "Failed to init power domains\n"); 1751 goto detach_active_pds; 1752 } 1753 qproc->proxy_pd_count = ret; 1754 1755 qproc->has_alt_reset = desc->has_alt_reset; 1756 ret = q6v5_init_reset(qproc); 1757 if (ret) 1758 goto detach_proxy_pds; 1759 1760 qproc->version = desc->version; 1761 qproc->need_mem_protection = desc->need_mem_protection; 1762 qproc->has_mba_logs = desc->has_mba_logs; 1763 1764 ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM, 1765 qcom_msa_handover); 1766 if (ret) 1767 goto detach_proxy_pds; 1768 1769 qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS); 1770 qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS); 1771 qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss"); 1772 qcom_add_smd_subdev(rproc, &qproc->smd_subdev); 1773 
qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss"); 1774 qcom_add_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev); 1775 qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12); 1776 if (IS_ERR(qproc->sysmon)) { 1777 ret = PTR_ERR(qproc->sysmon); 1778 goto remove_subdevs; 1779 } 1780 1781 ret = rproc_add(rproc); 1782 if (ret) 1783 goto remove_sysmon_subdev; 1784 1785 return 0; 1786 1787 remove_sysmon_subdev: 1788 qcom_remove_sysmon_subdev(qproc->sysmon); 1789 remove_subdevs: 1790 qcom_remove_ipa_notify_subdev(qproc->rproc, &qproc->ipa_notify_subdev); 1791 qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev); 1792 qcom_remove_smd_subdev(rproc, &qproc->smd_subdev); 1793 qcom_remove_glink_subdev(rproc, &qproc->glink_subdev); 1794 detach_proxy_pds: 1795 q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count); 1796 detach_active_pds: 1797 q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count); 1798 free_rproc: 1799 rproc_free(rproc); 1800 1801 return ret; 1802 } 1803 1804 static int q6v5_remove(struct platform_device *pdev) 1805 { 1806 struct q6v5 *qproc = platform_get_drvdata(pdev); 1807 struct rproc *rproc = qproc->rproc; 1808 1809 rproc_del(rproc); 1810 1811 qcom_remove_sysmon_subdev(qproc->sysmon); 1812 qcom_remove_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev); 1813 qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev); 1814 qcom_remove_smd_subdev(rproc, &qproc->smd_subdev); 1815 qcom_remove_glink_subdev(rproc, &qproc->glink_subdev); 1816 1817 q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count); 1818 q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count); 1819 1820 rproc_free(rproc); 1821 1822 return 0; 1823 } 1824 1825 static const struct rproc_hexagon_res sc7180_mss = { 1826 .hexagon_mba_image = "mba.mbn", 1827 .proxy_clk_names = (char*[]){ 1828 "xo", 1829 NULL 1830 }, 1831 .reset_clk_names = (char*[]){ 1832 "iface", 1833 "bus", 1834 "snoc_axi", 1835 NULL 1836 }, 1837 .active_clk_names = (char*[]){ 1838 
		"mnoc_axi",
		"nav",
		NULL
	},
	.active_pd_names = (char*[]){
		"load_state",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		"mss",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_mba_logs = true,
	.has_spare_reg = true,
	.version = MSS_SC7180,
};

/* SoC-specific resource table: SDM845 */
static const struct rproc_hexagon_res sdm845_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		"prng",
		NULL
	},
	.reset_clk_names = (char*[]){
		"iface",
		"snoc_axi",
		NULL
	},
	.active_clk_names = (char*[]){
		"bus",
		"mem",
		"gpll0_mss",
		"mnoc_axi",
		NULL
	},
	.active_pd_names = (char*[]){
		"load_state",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		"mss",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = true,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.version = MSS_SDM845,
};

/* SoC-specific resource table: MSM8998 */
static const struct rproc_hexagon_res msm8998_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		"qdss",
		"mem",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"gpll0_mss",
		"mnoc_axi",
		"snoc_axi",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.version = MSS_MSM8998,
};

/* SoC-specific resource table: MSM8996 (regulator-based proxy votes) */
static const struct rproc_hexagon_res msm8996_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		"pnoc",
		"qdss",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		"gpll0_mss",
		"snoc_axi",
		"mnoc_axi",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.version = MSS_MSM8996,
};

/* SoC-specific resource table: MSM8916 (no TrustZone memory protection) */
static const struct rproc_hexagon_res msm8916_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.version = MSS_MSM8916,
};

/* SoC-specific resource table: MSM8974 (split .b00 MBA image) */
static const struct rproc_hexagon_res msm8974_mss = {
	.hexagon_mba_image = "mba.b00",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.active_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mss",
			.uV = 1050000,
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.version = MSS_MSM8974,
};

static const struct of_device_id q6v5_of_match[] = {
	/* "qcom,q6v5-pil" is the legacy generic compatible */
	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
	{ .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
	{ .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
	{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
	{ },
};
MODULE_DEVICE_TABLE(of, q6v5_of_match);

static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-mss",
		.of_match_table = q6v5_of_match,
	},
};
module_platform_driver(q6v5_driver);

MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
MODULE_LICENSE("GPL v2");