1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Qualcomm self-authenticating modem subsystem remoteproc driver 4 * 5 * Copyright (C) 2016 Linaro Ltd. 6 * Copyright (C) 2014 Sony Mobile Communications AB 7 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. 8 */ 9 10 #include <linux/clk.h> 11 #include <linux/delay.h> 12 #include <linux/devcoredump.h> 13 #include <linux/dma-mapping.h> 14 #include <linux/interrupt.h> 15 #include <linux/kernel.h> 16 #include <linux/mfd/syscon.h> 17 #include <linux/module.h> 18 #include <linux/of_address.h> 19 #include <linux/of_device.h> 20 #include <linux/platform_device.h> 21 #include <linux/pm_domain.h> 22 #include <linux/pm_runtime.h> 23 #include <linux/regmap.h> 24 #include <linux/regulator/consumer.h> 25 #include <linux/remoteproc.h> 26 #include <linux/reset.h> 27 #include <linux/soc/qcom/mdt_loader.h> 28 #include <linux/iopoll.h> 29 #include <linux/slab.h> 30 31 #include "remoteproc_internal.h" 32 #include "qcom_common.h" 33 #include "qcom_pil_info.h" 34 #include "qcom_q6v5.h" 35 36 #include <linux/qcom_scm.h> 37 38 #define MPSS_CRASH_REASON_SMEM 421 39 40 #define MBA_LOG_SIZE SZ_4K 41 42 /* RMB Status Register Values */ 43 #define RMB_PBL_SUCCESS 0x1 44 45 #define RMB_MBA_XPU_UNLOCKED 0x1 46 #define RMB_MBA_XPU_UNLOCKED_SCRIBBLED 0x2 47 #define RMB_MBA_META_DATA_AUTH_SUCCESS 0x3 48 #define RMB_MBA_AUTH_COMPLETE 0x4 49 50 /* PBL/MBA interface registers */ 51 #define RMB_MBA_IMAGE_REG 0x00 52 #define RMB_PBL_STATUS_REG 0x04 53 #define RMB_MBA_COMMAND_REG 0x08 54 #define RMB_MBA_STATUS_REG 0x0C 55 #define RMB_PMI_META_DATA_REG 0x10 56 #define RMB_PMI_CODE_START_REG 0x14 57 #define RMB_PMI_CODE_LENGTH_REG 0x18 58 #define RMB_MBA_MSS_STATUS 0x40 59 #define RMB_MBA_ALT_RESET 0x44 60 61 #define RMB_CMD_META_DATA_READY 0x1 62 #define RMB_CMD_LOAD_READY 0x2 63 64 /* QDSP6SS Register Offsets */ 65 #define QDSP6SS_RESET_REG 0x014 66 #define QDSP6SS_GFMUX_CTL_REG 0x020 67 #define QDSP6SS_PWR_CTL_REG 0x030 68 #define 
QDSP6SS_MEM_PWR_CTL 0x0B0 69 #define QDSP6V6SS_MEM_PWR_CTL 0x034 70 #define QDSP6SS_STRAP_ACC 0x110 71 72 /* AXI Halt Register Offsets */ 73 #define AXI_HALTREQ_REG 0x0 74 #define AXI_HALTACK_REG 0x4 75 #define AXI_IDLE_REG 0x8 76 #define AXI_GATING_VALID_OVERRIDE BIT(0) 77 78 #define HALT_ACK_TIMEOUT_US 100000 79 80 /* QACCEPT Register Offsets */ 81 #define QACCEPT_ACCEPT_REG 0x0 82 #define QACCEPT_ACTIVE_REG 0x4 83 #define QACCEPT_DENY_REG 0x8 84 #define QACCEPT_REQ_REG 0xC 85 86 #define QACCEPT_TIMEOUT_US 50 87 88 /* QDSP6SS_RESET */ 89 #define Q6SS_STOP_CORE BIT(0) 90 #define Q6SS_CORE_ARES BIT(1) 91 #define Q6SS_BUS_ARES_ENABLE BIT(2) 92 93 /* QDSP6SS CBCR */ 94 #define Q6SS_CBCR_CLKEN BIT(0) 95 #define Q6SS_CBCR_CLKOFF BIT(31) 96 #define Q6SS_CBCR_TIMEOUT_US 200 97 98 /* QDSP6SS_GFMUX_CTL */ 99 #define Q6SS_CLK_ENABLE BIT(1) 100 101 /* QDSP6SS_PWR_CTL */ 102 #define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0) 103 #define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1) 104 #define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2) 105 #define Q6SS_L2TAG_SLP_NRET_N BIT(16) 106 #define Q6SS_ETB_SLP_NRET_N BIT(17) 107 #define Q6SS_L2DATA_STBY_N BIT(18) 108 #define Q6SS_SLP_RET_N BIT(19) 109 #define Q6SS_CLAMP_IO BIT(20) 110 #define QDSS_BHS_ON BIT(21) 111 #define QDSS_LDO_BYP BIT(22) 112 113 /* QDSP6v56 parameters */ 114 #define QDSP6v56_LDO_BYP BIT(25) 115 #define QDSP6v56_BHS_ON BIT(24) 116 #define QDSP6v56_CLAMP_WL BIT(21) 117 #define QDSP6v56_CLAMP_QMC_MEM BIT(22) 118 #define QDSP6SS_XO_CBCR 0x0038 119 #define QDSP6SS_ACC_OVERRIDE_VAL 0x20 120 121 /* QDSP6v65 parameters */ 122 #define QDSP6SS_CORE_CBCR 0x20 123 #define QDSP6SS_SLEEP 0x3C 124 #define QDSP6SS_BOOT_CORE_START 0x400 125 #define QDSP6SS_BOOT_CMD 0x404 126 #define BOOT_FSM_TIMEOUT 10000 127 128 struct reg_info { 129 struct regulator *reg; 130 int uV; 131 int uA; 132 }; 133 134 struct qcom_mss_reg_res { 135 const char *supply; 136 int uV; 137 int uA; 138 }; 139 140 struct rproc_hexagon_res { 141 const char *hexagon_mba_image; 142 struct 
qcom_mss_reg_res *proxy_supply; 143 struct qcom_mss_reg_res *fallback_proxy_supply; 144 struct qcom_mss_reg_res *active_supply; 145 char **proxy_clk_names; 146 char **reset_clk_names; 147 char **active_clk_names; 148 char **proxy_pd_names; 149 int version; 150 bool need_mem_protection; 151 bool has_alt_reset; 152 bool has_mba_logs; 153 bool has_spare_reg; 154 bool has_qaccept_regs; 155 bool has_ext_cntl_regs; 156 bool has_vq6; 157 }; 158 159 struct q6v5 { 160 struct device *dev; 161 struct rproc *rproc; 162 163 void __iomem *reg_base; 164 void __iomem *rmb_base; 165 166 struct regmap *halt_map; 167 struct regmap *conn_map; 168 169 u32 halt_q6; 170 u32 halt_modem; 171 u32 halt_nc; 172 u32 halt_vq6; 173 u32 conn_box; 174 175 u32 qaccept_mdm; 176 u32 qaccept_cx; 177 u32 qaccept_axi; 178 179 u32 axim1_clk_off; 180 u32 crypto_clk_off; 181 u32 force_clk_on; 182 u32 rscc_disable; 183 184 struct reset_control *mss_restart; 185 struct reset_control *pdc_reset; 186 187 struct qcom_q6v5 q6v5; 188 189 struct clk *active_clks[8]; 190 struct clk *reset_clks[4]; 191 struct clk *proxy_clks[4]; 192 struct device *proxy_pds[3]; 193 int active_clk_count; 194 int reset_clk_count; 195 int proxy_clk_count; 196 int proxy_pd_count; 197 198 struct reg_info active_regs[1]; 199 struct reg_info proxy_regs[1]; 200 struct reg_info fallback_proxy_regs[2]; 201 int active_reg_count; 202 int proxy_reg_count; 203 int fallback_proxy_reg_count; 204 205 bool dump_mba_loaded; 206 size_t current_dump_size; 207 size_t total_dump_size; 208 209 phys_addr_t mba_phys; 210 size_t mba_size; 211 size_t dp_size; 212 213 phys_addr_t mpss_phys; 214 phys_addr_t mpss_reloc; 215 size_t mpss_size; 216 217 struct qcom_rproc_glink glink_subdev; 218 struct qcom_rproc_subdev smd_subdev; 219 struct qcom_rproc_ssr ssr_subdev; 220 struct qcom_sysmon *sysmon; 221 bool need_mem_protection; 222 bool has_alt_reset; 223 bool has_mba_logs; 224 bool has_spare_reg; 225 bool has_qaccept_regs; 226 bool has_ext_cntl_regs; 227 bool 
has_vq6; 228 int mpss_perm; 229 int mba_perm; 230 const char *hexagon_mdt_image; 231 int version; 232 }; 233 234 enum { 235 MSS_MSM8916, 236 MSS_MSM8974, 237 MSS_MSM8996, 238 MSS_MSM8998, 239 MSS_SC7180, 240 MSS_SC7280, 241 MSS_SDM845, 242 }; 243 244 static int q6v5_regulator_init(struct device *dev, struct reg_info *regs, 245 const struct qcom_mss_reg_res *reg_res) 246 { 247 int rc; 248 int i; 249 250 if (!reg_res) 251 return 0; 252 253 for (i = 0; reg_res[i].supply; i++) { 254 regs[i].reg = devm_regulator_get(dev, reg_res[i].supply); 255 if (IS_ERR(regs[i].reg)) { 256 rc = PTR_ERR(regs[i].reg); 257 if (rc != -EPROBE_DEFER) 258 dev_err(dev, "Failed to get %s\n regulator", 259 reg_res[i].supply); 260 return rc; 261 } 262 263 regs[i].uV = reg_res[i].uV; 264 regs[i].uA = reg_res[i].uA; 265 } 266 267 return i; 268 } 269 270 static int q6v5_regulator_enable(struct q6v5 *qproc, 271 struct reg_info *regs, int count) 272 { 273 int ret; 274 int i; 275 276 for (i = 0; i < count; i++) { 277 if (regs[i].uV > 0) { 278 ret = regulator_set_voltage(regs[i].reg, 279 regs[i].uV, INT_MAX); 280 if (ret) { 281 dev_err(qproc->dev, 282 "Failed to request voltage for %d.\n", 283 i); 284 goto err; 285 } 286 } 287 288 if (regs[i].uA > 0) { 289 ret = regulator_set_load(regs[i].reg, 290 regs[i].uA); 291 if (ret < 0) { 292 dev_err(qproc->dev, 293 "Failed to set regulator mode\n"); 294 goto err; 295 } 296 } 297 298 ret = regulator_enable(regs[i].reg); 299 if (ret) { 300 dev_err(qproc->dev, "Regulator enable failed\n"); 301 goto err; 302 } 303 } 304 305 return 0; 306 err: 307 for (; i >= 0; i--) { 308 if (regs[i].uV > 0) 309 regulator_set_voltage(regs[i].reg, 0, INT_MAX); 310 311 if (regs[i].uA > 0) 312 regulator_set_load(regs[i].reg, 0); 313 314 regulator_disable(regs[i].reg); 315 } 316 317 return ret; 318 } 319 320 static void q6v5_regulator_disable(struct q6v5 *qproc, 321 struct reg_info *regs, int count) 322 { 323 int i; 324 325 for (i = 0; i < count; i++) { 326 if (regs[i].uV > 0) 327 
regulator_set_voltage(regs[i].reg, 0, INT_MAX); 328 329 if (regs[i].uA > 0) 330 regulator_set_load(regs[i].reg, 0); 331 332 regulator_disable(regs[i].reg); 333 } 334 } 335 336 static int q6v5_clk_enable(struct device *dev, 337 struct clk **clks, int count) 338 { 339 int rc; 340 int i; 341 342 for (i = 0; i < count; i++) { 343 rc = clk_prepare_enable(clks[i]); 344 if (rc) { 345 dev_err(dev, "Clock enable failed\n"); 346 goto err; 347 } 348 } 349 350 return 0; 351 err: 352 for (i--; i >= 0; i--) 353 clk_disable_unprepare(clks[i]); 354 355 return rc; 356 } 357 358 static void q6v5_clk_disable(struct device *dev, 359 struct clk **clks, int count) 360 { 361 int i; 362 363 for (i = 0; i < count; i++) 364 clk_disable_unprepare(clks[i]); 365 } 366 367 static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds, 368 size_t pd_count) 369 { 370 int ret; 371 int i; 372 373 for (i = 0; i < pd_count; i++) { 374 dev_pm_genpd_set_performance_state(pds[i], INT_MAX); 375 ret = pm_runtime_get_sync(pds[i]); 376 if (ret < 0) { 377 pm_runtime_put_noidle(pds[i]); 378 dev_pm_genpd_set_performance_state(pds[i], 0); 379 goto unroll_pd_votes; 380 } 381 } 382 383 return 0; 384 385 unroll_pd_votes: 386 for (i--; i >= 0; i--) { 387 dev_pm_genpd_set_performance_state(pds[i], 0); 388 pm_runtime_put(pds[i]); 389 } 390 391 return ret; 392 } 393 394 static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds, 395 size_t pd_count) 396 { 397 int i; 398 399 for (i = 0; i < pd_count; i++) { 400 dev_pm_genpd_set_performance_state(pds[i], 0); 401 pm_runtime_put(pds[i]); 402 } 403 } 404 405 static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm, 406 bool local, bool remote, phys_addr_t addr, 407 size_t size) 408 { 409 struct qcom_scm_vmperm next[2]; 410 int perms = 0; 411 412 if (!qproc->need_mem_protection) 413 return 0; 414 415 if (local == !!(*current_perm & BIT(QCOM_SCM_VMID_HLOS)) && 416 remote == !!(*current_perm & BIT(QCOM_SCM_VMID_MSS_MSA))) 417 return 0; 418 419 
if (local) { 420 next[perms].vmid = QCOM_SCM_VMID_HLOS; 421 next[perms].perm = QCOM_SCM_PERM_RWX; 422 perms++; 423 } 424 425 if (remote) { 426 next[perms].vmid = QCOM_SCM_VMID_MSS_MSA; 427 next[perms].perm = QCOM_SCM_PERM_RW; 428 perms++; 429 } 430 431 return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K), 432 current_perm, next, perms); 433 } 434 435 static void q6v5_debug_policy_load(struct q6v5 *qproc, void *mba_region) 436 { 437 const struct firmware *dp_fw; 438 439 if (request_firmware_direct(&dp_fw, "msadp", qproc->dev)) 440 return; 441 442 if (SZ_1M + dp_fw->size <= qproc->mba_size) { 443 memcpy(mba_region + SZ_1M, dp_fw->data, dp_fw->size); 444 qproc->dp_size = dp_fw->size; 445 } 446 447 release_firmware(dp_fw); 448 } 449 450 static int q6v5_load(struct rproc *rproc, const struct firmware *fw) 451 { 452 struct q6v5 *qproc = rproc->priv; 453 void *mba_region; 454 455 /* MBA is restricted to a maximum size of 1M */ 456 if (fw->size > qproc->mba_size || fw->size > SZ_1M) { 457 dev_err(qproc->dev, "MBA firmware load failed\n"); 458 return -EINVAL; 459 } 460 461 mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC); 462 if (!mba_region) { 463 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", 464 &qproc->mba_phys, qproc->mba_size); 465 return -EBUSY; 466 } 467 468 memcpy(mba_region, fw->data, fw->size); 469 q6v5_debug_policy_load(qproc, mba_region); 470 memunmap(mba_region); 471 472 return 0; 473 } 474 475 static int q6v5_reset_assert(struct q6v5 *qproc) 476 { 477 int ret; 478 479 if (qproc->has_alt_reset) { 480 reset_control_assert(qproc->pdc_reset); 481 ret = reset_control_reset(qproc->mss_restart); 482 reset_control_deassert(qproc->pdc_reset); 483 } else if (qproc->has_spare_reg) { 484 /* 485 * When the AXI pipeline is being reset with the Q6 modem partly 486 * operational there is possibility of AXI valid signal to 487 * glitch, leading to spurious transactions and Q6 hangs. 
A work 488 * around is employed by asserting the AXI_GATING_VALID_OVERRIDE 489 * BIT before triggering Q6 MSS reset. AXI_GATING_VALID_OVERRIDE 490 * is withdrawn post MSS assert followed by a MSS deassert, 491 * while holding the PDC reset. 492 */ 493 reset_control_assert(qproc->pdc_reset); 494 regmap_update_bits(qproc->conn_map, qproc->conn_box, 495 AXI_GATING_VALID_OVERRIDE, 1); 496 reset_control_assert(qproc->mss_restart); 497 reset_control_deassert(qproc->pdc_reset); 498 regmap_update_bits(qproc->conn_map, qproc->conn_box, 499 AXI_GATING_VALID_OVERRIDE, 0); 500 ret = reset_control_deassert(qproc->mss_restart); 501 } else if (qproc->has_ext_cntl_regs) { 502 regmap_write(qproc->conn_map, qproc->rscc_disable, 0); 503 reset_control_assert(qproc->pdc_reset); 504 reset_control_assert(qproc->mss_restart); 505 reset_control_deassert(qproc->pdc_reset); 506 ret = reset_control_deassert(qproc->mss_restart); 507 } else { 508 ret = reset_control_assert(qproc->mss_restart); 509 } 510 511 return ret; 512 } 513 514 static int q6v5_reset_deassert(struct q6v5 *qproc) 515 { 516 int ret; 517 518 if (qproc->has_alt_reset) { 519 reset_control_assert(qproc->pdc_reset); 520 writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET); 521 ret = reset_control_reset(qproc->mss_restart); 522 writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET); 523 reset_control_deassert(qproc->pdc_reset); 524 } else if (qproc->has_spare_reg || qproc->has_ext_cntl_regs) { 525 ret = reset_control_reset(qproc->mss_restart); 526 } else { 527 ret = reset_control_deassert(qproc->mss_restart); 528 } 529 530 return ret; 531 } 532 533 static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms) 534 { 535 unsigned long timeout; 536 s32 val; 537 538 timeout = jiffies + msecs_to_jiffies(ms); 539 for (;;) { 540 val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG); 541 if (val) 542 break; 543 544 if (time_after(jiffies, timeout)) 545 return -ETIMEDOUT; 546 547 msleep(1); 548 } 549 550 return val; 551 } 552 553 static int 
q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms) 554 { 555 556 unsigned long timeout; 557 s32 val; 558 559 timeout = jiffies + msecs_to_jiffies(ms); 560 for (;;) { 561 val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG); 562 if (val < 0) 563 break; 564 565 if (!status && val) 566 break; 567 else if (status && val == status) 568 break; 569 570 if (time_after(jiffies, timeout)) 571 return -ETIMEDOUT; 572 573 msleep(1); 574 } 575 576 return val; 577 } 578 579 static void q6v5_dump_mba_logs(struct q6v5 *qproc) 580 { 581 struct rproc *rproc = qproc->rproc; 582 void *data; 583 void *mba_region; 584 585 if (!qproc->has_mba_logs) 586 return; 587 588 if (q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, qproc->mba_phys, 589 qproc->mba_size)) 590 return; 591 592 mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC); 593 if (!mba_region) 594 return; 595 596 data = vmalloc(MBA_LOG_SIZE); 597 if (data) { 598 memcpy(data, mba_region, MBA_LOG_SIZE); 599 dev_coredumpv(&rproc->dev, data, MBA_LOG_SIZE, GFP_KERNEL); 600 } 601 memunmap(mba_region); 602 } 603 604 static int q6v5proc_reset(struct q6v5 *qproc) 605 { 606 u32 val; 607 int ret; 608 int i; 609 610 if (qproc->version == MSS_SDM845) { 611 val = readl(qproc->reg_base + QDSP6SS_SLEEP); 612 val |= Q6SS_CBCR_CLKEN; 613 writel(val, qproc->reg_base + QDSP6SS_SLEEP); 614 615 ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP, 616 val, !(val & Q6SS_CBCR_CLKOFF), 1, 617 Q6SS_CBCR_TIMEOUT_US); 618 if (ret) { 619 dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n"); 620 return -ETIMEDOUT; 621 } 622 623 /* De-assert QDSP6 stop core */ 624 writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START); 625 /* Trigger boot FSM */ 626 writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD); 627 628 ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS, 629 val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT); 630 if (ret) { 631 dev_err(qproc->dev, "Boot FSM failed to complete.\n"); 632 /* Reset the modem so that boot 
FSM is in reset state */ 633 q6v5_reset_deassert(qproc); 634 return ret; 635 } 636 637 goto pbl_wait; 638 } else if (qproc->version == MSS_SC7180 || qproc->version == MSS_SC7280) { 639 val = readl(qproc->reg_base + QDSP6SS_SLEEP); 640 val |= Q6SS_CBCR_CLKEN; 641 writel(val, qproc->reg_base + QDSP6SS_SLEEP); 642 643 ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP, 644 val, !(val & Q6SS_CBCR_CLKOFF), 1, 645 Q6SS_CBCR_TIMEOUT_US); 646 if (ret) { 647 dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n"); 648 return -ETIMEDOUT; 649 } 650 651 /* Turn on the XO clock needed for PLL setup */ 652 val = readl(qproc->reg_base + QDSP6SS_XO_CBCR); 653 val |= Q6SS_CBCR_CLKEN; 654 writel(val, qproc->reg_base + QDSP6SS_XO_CBCR); 655 656 ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR, 657 val, !(val & Q6SS_CBCR_CLKOFF), 1, 658 Q6SS_CBCR_TIMEOUT_US); 659 if (ret) { 660 dev_err(qproc->dev, "QDSP6SS XO clock timed out\n"); 661 return -ETIMEDOUT; 662 } 663 664 /* Configure Q6 core CBCR to auto-enable after reset sequence */ 665 val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR); 666 val |= Q6SS_CBCR_CLKEN; 667 writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR); 668 669 /* De-assert the Q6 stop core signal */ 670 writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START); 671 672 /* Wait for 10 us for any staggering logic to settle */ 673 usleep_range(10, 20); 674 675 /* Trigger the boot FSM to start the Q6 out-of-reset sequence */ 676 writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD); 677 678 /* Poll the MSS_STATUS for FSM completion */ 679 ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS, 680 val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT); 681 if (ret) { 682 dev_err(qproc->dev, "Boot FSM failed to complete.\n"); 683 /* Reset the modem so that boot FSM is in reset state */ 684 q6v5_reset_deassert(qproc); 685 return ret; 686 } 687 goto pbl_wait; 688 } else if (qproc->version == MSS_MSM8996 || 689 qproc->version == MSS_MSM8998) { 690 int mem_pwr_ctl; 691 692 /* 
Override the ACC value if required */ 693 writel(QDSP6SS_ACC_OVERRIDE_VAL, 694 qproc->reg_base + QDSP6SS_STRAP_ACC); 695 696 /* Assert resets, stop core */ 697 val = readl(qproc->reg_base + QDSP6SS_RESET_REG); 698 val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE; 699 writel(val, qproc->reg_base + QDSP6SS_RESET_REG); 700 701 /* BHS require xo cbcr to be enabled */ 702 val = readl(qproc->reg_base + QDSP6SS_XO_CBCR); 703 val |= Q6SS_CBCR_CLKEN; 704 writel(val, qproc->reg_base + QDSP6SS_XO_CBCR); 705 706 /* Read CLKOFF bit to go low indicating CLK is enabled */ 707 ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR, 708 val, !(val & Q6SS_CBCR_CLKOFF), 1, 709 Q6SS_CBCR_TIMEOUT_US); 710 if (ret) { 711 dev_err(qproc->dev, 712 "xo cbcr enabling timed out (rc:%d)\n", ret); 713 return ret; 714 } 715 /* Enable power block headswitch and wait for it to stabilize */ 716 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 717 val |= QDSP6v56_BHS_ON; 718 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 719 val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 720 udelay(1); 721 722 /* Put LDO in bypass mode */ 723 val |= QDSP6v56_LDO_BYP; 724 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 725 726 /* Deassert QDSP6 compiler memory clamp */ 727 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 728 val &= ~QDSP6v56_CLAMP_QMC_MEM; 729 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 730 731 /* Deassert memory peripheral sleep and L2 memory standby */ 732 val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N; 733 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 734 735 /* Turn on L1, L2, ETB and JU memories 1 at a time */ 736 if (qproc->version == MSS_MSM8996) { 737 mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL; 738 i = 19; 739 } else { 740 /* MSS_MSM8998 */ 741 mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL; 742 i = 28; 743 } 744 val = readl(qproc->reg_base + mem_pwr_ctl); 745 for (; i >= 0; i--) { 746 val |= BIT(i); 747 writel(val, qproc->reg_base + mem_pwr_ctl); 748 /* 749 * Read 
back value to ensure the write is done then 750 * wait for 1us for both memory peripheral and data 751 * array to turn on. 752 */ 753 val |= readl(qproc->reg_base + mem_pwr_ctl); 754 udelay(1); 755 } 756 /* Remove word line clamp */ 757 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 758 val &= ~QDSP6v56_CLAMP_WL; 759 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 760 } else { 761 /* Assert resets, stop core */ 762 val = readl(qproc->reg_base + QDSP6SS_RESET_REG); 763 val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE; 764 writel(val, qproc->reg_base + QDSP6SS_RESET_REG); 765 766 /* Enable power block headswitch and wait for it to stabilize */ 767 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 768 val |= QDSS_BHS_ON | QDSS_LDO_BYP; 769 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 770 val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 771 udelay(1); 772 /* 773 * Turn on memories. L2 banks should be done individually 774 * to minimize inrush current. 775 */ 776 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 777 val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N | 778 Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N; 779 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 780 val |= Q6SS_L2DATA_SLP_NRET_N_2; 781 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 782 val |= Q6SS_L2DATA_SLP_NRET_N_1; 783 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 784 val |= Q6SS_L2DATA_SLP_NRET_N_0; 785 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 786 } 787 /* Remove IO clamp */ 788 val &= ~Q6SS_CLAMP_IO; 789 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 790 791 /* Bring core out of reset */ 792 val = readl(qproc->reg_base + QDSP6SS_RESET_REG); 793 val &= ~Q6SS_CORE_ARES; 794 writel(val, qproc->reg_base + QDSP6SS_RESET_REG); 795 796 /* Turn on core clock */ 797 val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG); 798 val |= Q6SS_CLK_ENABLE; 799 writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG); 800 801 /* Start core execution */ 802 
val = readl(qproc->reg_base + QDSP6SS_RESET_REG); 803 val &= ~Q6SS_STOP_CORE; 804 writel(val, qproc->reg_base + QDSP6SS_RESET_REG); 805 806 pbl_wait: 807 /* Wait for PBL status */ 808 ret = q6v5_rmb_pbl_wait(qproc, 1000); 809 if (ret == -ETIMEDOUT) { 810 dev_err(qproc->dev, "PBL boot timed out\n"); 811 } else if (ret != RMB_PBL_SUCCESS) { 812 dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret); 813 ret = -EINVAL; 814 } else { 815 ret = 0; 816 } 817 818 return ret; 819 } 820 821 static int q6v5proc_enable_qchannel(struct q6v5 *qproc, struct regmap *map, u32 offset) 822 { 823 unsigned int val; 824 int ret; 825 826 if (!qproc->has_qaccept_regs) 827 return 0; 828 829 if (qproc->has_ext_cntl_regs) { 830 regmap_write(qproc->conn_map, qproc->rscc_disable, 0); 831 regmap_write(qproc->conn_map, qproc->force_clk_on, 1); 832 833 ret = regmap_read_poll_timeout(qproc->halt_map, qproc->axim1_clk_off, val, 834 !val, 1, Q6SS_CBCR_TIMEOUT_US); 835 if (ret) { 836 dev_err(qproc->dev, "failed to enable axim1 clock\n"); 837 return -ETIMEDOUT; 838 } 839 } 840 841 regmap_write(map, offset + QACCEPT_REQ_REG, 1); 842 843 /* Wait for accept */ 844 ret = regmap_read_poll_timeout(map, offset + QACCEPT_ACCEPT_REG, val, val, 5, 845 QACCEPT_TIMEOUT_US); 846 if (ret) { 847 dev_err(qproc->dev, "qchannel enable failed\n"); 848 return -ETIMEDOUT; 849 } 850 851 return 0; 852 } 853 854 static void q6v5proc_disable_qchannel(struct q6v5 *qproc, struct regmap *map, u32 offset) 855 { 856 int ret; 857 unsigned int val, retry; 858 unsigned int nretry = 10; 859 bool takedown_complete = false; 860 861 if (!qproc->has_qaccept_regs) 862 return; 863 864 while (!takedown_complete && nretry) { 865 nretry--; 866 867 /* Wait for active transactions to complete */ 868 regmap_read_poll_timeout(map, offset + QACCEPT_ACTIVE_REG, val, !val, 5, 869 QACCEPT_TIMEOUT_US); 870 871 /* Request Q-channel transaction takedown */ 872 regmap_write(map, offset + QACCEPT_REQ_REG, 0); 873 874 /* 875 * If the request is 
denied, reset the Q-channel takedown request, 876 * wait for active transactions to complete and retry takedown. 877 */ 878 retry = 10; 879 while (retry) { 880 usleep_range(5, 10); 881 retry--; 882 ret = regmap_read(map, offset + QACCEPT_DENY_REG, &val); 883 if (!ret && val) { 884 regmap_write(map, offset + QACCEPT_REQ_REG, 1); 885 break; 886 } 887 888 ret = regmap_read(map, offset + QACCEPT_ACCEPT_REG, &val); 889 if (!ret && !val) { 890 takedown_complete = true; 891 break; 892 } 893 } 894 895 if (!retry) 896 break; 897 } 898 899 /* Rely on mss_restart to clear out pending transactions on takedown failure */ 900 if (!takedown_complete) 901 dev_err(qproc->dev, "qchannel takedown failed\n"); 902 } 903 904 static void q6v5proc_halt_axi_port(struct q6v5 *qproc, 905 struct regmap *halt_map, 906 u32 offset) 907 { 908 unsigned int val; 909 int ret; 910 911 /* Check if we're already idle */ 912 ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val); 913 if (!ret && val) 914 return; 915 916 /* Assert halt request */ 917 regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1); 918 919 /* Wait for halt */ 920 regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val, 921 val, 1000, HALT_ACK_TIMEOUT_US); 922 923 ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val); 924 if (ret || !val) 925 dev_err(qproc->dev, "port failed halt\n"); 926 927 /* Clear halt request (port will remain halted until reset) */ 928 regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0); 929 } 930 931 static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw, 932 const char *fw_name) 933 { 934 unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS; 935 dma_addr_t phys; 936 void *metadata; 937 int mdata_perm; 938 int xferop_ret; 939 size_t size; 940 void *ptr; 941 int ret; 942 943 metadata = qcom_mdt_read_metadata(fw, &size, fw_name, qproc->dev); 944 if (IS_ERR(metadata)) 945 return PTR_ERR(metadata); 946 947 ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs); 948 
if (!ptr) { 949 kfree(metadata); 950 dev_err(qproc->dev, "failed to allocate mdt buffer\n"); 951 return -ENOMEM; 952 } 953 954 memcpy(ptr, metadata, size); 955 956 /* Hypervisor mapping to access metadata by modem */ 957 mdata_perm = BIT(QCOM_SCM_VMID_HLOS); 958 ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true, 959 phys, size); 960 if (ret) { 961 dev_err(qproc->dev, 962 "assigning Q6 access to metadata failed: %d\n", ret); 963 ret = -EAGAIN; 964 goto free_dma_attrs; 965 } 966 967 writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG); 968 writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG); 969 970 ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000); 971 if (ret == -ETIMEDOUT) 972 dev_err(qproc->dev, "MPSS header authentication timed out\n"); 973 else if (ret < 0) 974 dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret); 975 976 /* Metadata authentication done, remove modem access */ 977 xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false, 978 phys, size); 979 if (xferop_ret) 980 dev_warn(qproc->dev, 981 "mdt buffer not reclaimed system may become unstable\n"); 982 983 free_dma_attrs: 984 dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs); 985 kfree(metadata); 986 987 return ret < 0 ? 
ret : 0; 988 } 989 990 static bool q6v5_phdr_valid(const struct elf32_phdr *phdr) 991 { 992 if (phdr->p_type != PT_LOAD) 993 return false; 994 995 if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH) 996 return false; 997 998 if (!phdr->p_memsz) 999 return false; 1000 1001 return true; 1002 } 1003 1004 static int q6v5_mba_load(struct q6v5 *qproc) 1005 { 1006 int ret; 1007 int xfermemop_ret; 1008 bool mba_load_err = false; 1009 1010 ret = qcom_q6v5_prepare(&qproc->q6v5); 1011 if (ret) 1012 return ret; 1013 1014 ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count); 1015 if (ret < 0) { 1016 dev_err(qproc->dev, "failed to enable proxy power domains\n"); 1017 goto disable_irqs; 1018 } 1019 1020 ret = q6v5_regulator_enable(qproc, qproc->fallback_proxy_regs, 1021 qproc->fallback_proxy_reg_count); 1022 if (ret) { 1023 dev_err(qproc->dev, "failed to enable fallback proxy supplies\n"); 1024 goto disable_proxy_pds; 1025 } 1026 1027 ret = q6v5_regulator_enable(qproc, qproc->proxy_regs, 1028 qproc->proxy_reg_count); 1029 if (ret) { 1030 dev_err(qproc->dev, "failed to enable proxy supplies\n"); 1031 goto disable_fallback_proxy_reg; 1032 } 1033 1034 ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks, 1035 qproc->proxy_clk_count); 1036 if (ret) { 1037 dev_err(qproc->dev, "failed to enable proxy clocks\n"); 1038 goto disable_proxy_reg; 1039 } 1040 1041 ret = q6v5_regulator_enable(qproc, qproc->active_regs, 1042 qproc->active_reg_count); 1043 if (ret) { 1044 dev_err(qproc->dev, "failed to enable supplies\n"); 1045 goto disable_proxy_clk; 1046 } 1047 1048 ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks, 1049 qproc->reset_clk_count); 1050 if (ret) { 1051 dev_err(qproc->dev, "failed to enable reset clocks\n"); 1052 goto disable_vdd; 1053 } 1054 1055 ret = q6v5_reset_deassert(qproc); 1056 if (ret) { 1057 dev_err(qproc->dev, "failed to deassert mss restart\n"); 1058 goto disable_reset_clks; 1059 } 1060 1061 ret = q6v5_clk_enable(qproc->dev, 
qproc->active_clks, 1062 qproc->active_clk_count); 1063 if (ret) { 1064 dev_err(qproc->dev, "failed to enable clocks\n"); 1065 goto assert_reset; 1066 } 1067 1068 ret = q6v5proc_enable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi); 1069 if (ret) { 1070 dev_err(qproc->dev, "failed to enable axi bridge\n"); 1071 goto disable_active_clks; 1072 } 1073 1074 /* 1075 * Some versions of the MBA firmware will upon boot wipe the MPSS region as well, so provide 1076 * the Q6 access to this region. 1077 */ 1078 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true, 1079 qproc->mpss_phys, qproc->mpss_size); 1080 if (ret) { 1081 dev_err(qproc->dev, "assigning Q6 access to mpss memory failed: %d\n", ret); 1082 goto disable_active_clks; 1083 } 1084 1085 /* Assign MBA image access in DDR to q6 */ 1086 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true, 1087 qproc->mba_phys, qproc->mba_size); 1088 if (ret) { 1089 dev_err(qproc->dev, 1090 "assigning Q6 access to mba memory failed: %d\n", ret); 1091 goto disable_active_clks; 1092 } 1093 1094 writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG); 1095 if (qproc->dp_size) { 1096 writel(qproc->mba_phys + SZ_1M, qproc->rmb_base + RMB_PMI_CODE_START_REG); 1097 writel(qproc->dp_size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); 1098 } 1099 1100 ret = q6v5proc_reset(qproc); 1101 if (ret) 1102 goto reclaim_mba; 1103 1104 ret = q6v5_rmb_mba_wait(qproc, 0, 5000); 1105 if (ret == -ETIMEDOUT) { 1106 dev_err(qproc->dev, "MBA boot timed out\n"); 1107 goto halt_axi_ports; 1108 } else if (ret != RMB_MBA_XPU_UNLOCKED && 1109 ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) { 1110 dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret); 1111 ret = -EINVAL; 1112 goto halt_axi_ports; 1113 } 1114 1115 qproc->dump_mba_loaded = true; 1116 return 0; 1117 1118 halt_axi_ports: 1119 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6); 1120 if (qproc->has_vq6) 1121 q6v5proc_halt_axi_port(qproc, qproc->halt_map, 
qproc->halt_vq6); 1122 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem); 1123 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc); 1124 q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_mdm); 1125 q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_cx); 1126 q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi); 1127 mba_load_err = true; 1128 reclaim_mba: 1129 xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, 1130 false, qproc->mba_phys, 1131 qproc->mba_size); 1132 if (xfermemop_ret) { 1133 dev_err(qproc->dev, 1134 "Failed to reclaim mba buffer, system may become unstable\n"); 1135 } else if (mba_load_err) { 1136 q6v5_dump_mba_logs(qproc); 1137 } 1138 1139 disable_active_clks: 1140 q6v5_clk_disable(qproc->dev, qproc->active_clks, 1141 qproc->active_clk_count); 1142 assert_reset: 1143 q6v5_reset_assert(qproc); 1144 disable_reset_clks: 1145 q6v5_clk_disable(qproc->dev, qproc->reset_clks, 1146 qproc->reset_clk_count); 1147 disable_vdd: 1148 q6v5_regulator_disable(qproc, qproc->active_regs, 1149 qproc->active_reg_count); 1150 disable_proxy_clk: 1151 q6v5_clk_disable(qproc->dev, qproc->proxy_clks, 1152 qproc->proxy_clk_count); 1153 disable_proxy_reg: 1154 q6v5_regulator_disable(qproc, qproc->proxy_regs, 1155 qproc->proxy_reg_count); 1156 disable_fallback_proxy_reg: 1157 q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs, 1158 qproc->fallback_proxy_reg_count); 1159 disable_proxy_pds: 1160 q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count); 1161 disable_irqs: 1162 qcom_q6v5_unprepare(&qproc->q6v5); 1163 1164 return ret; 1165 } 1166 1167 static void q6v5_mba_reclaim(struct q6v5 *qproc) 1168 { 1169 int ret; 1170 u32 val; 1171 1172 qproc->dump_mba_loaded = false; 1173 qproc->dp_size = 0; 1174 1175 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6); 1176 if (qproc->has_vq6) 1177 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_vq6); 1178 
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	if (qproc->version == MSS_MSM8996) {
		/*
		 * To avoid high MX current during LPASS/MSS restart.
		 */
		/* Clamp I/O and QMC memory before the reset below. */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
			QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}

	if (qproc->has_ext_cntl_regs) {
		/* Hand RSCC control back to hardware (SC7280-style parts). */
		regmap_write(qproc->conn_map, qproc->rscc_disable, 1);

		/*
		 * Poll until the CLKOFF status bit deasserts, i.e. the clock
		 * is running again before the resources are released.
		 */
		ret = regmap_read_poll_timeout(qproc->halt_map, qproc->axim1_clk_off, val,
					       !val, 1, Q6SS_CBCR_TIMEOUT_US);
		if (ret)
			dev_err(qproc->dev, "failed to enable axim1 clock\n");

		ret = regmap_read_poll_timeout(qproc->halt_map, qproc->crypto_clk_off, val,
					       !val, 1, Q6SS_CBCR_TIMEOUT_US);
		if (ret)
			dev_err(qproc->dev, "failed to enable crypto clock\n");
	}

	/* Drop the Q-channel handshakes taken during MBA load. */
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_mdm);
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_cx);
	q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);

	/* Hold the subsystem in reset, then release clocks and regulators. */
	q6v5_reset_assert(qproc);

	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);

	/* In case of failure or coredump scenario where reclaiming MBA memory
	 * could not happen reclaim it here.
1219 */ 1220 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, 1221 qproc->mba_phys, 1222 qproc->mba_size); 1223 WARN_ON(ret); 1224 1225 ret = qcom_q6v5_unprepare(&qproc->q6v5); 1226 if (ret) { 1227 q6v5_pds_disable(qproc, qproc->proxy_pds, 1228 qproc->proxy_pd_count); 1229 q6v5_clk_disable(qproc->dev, qproc->proxy_clks, 1230 qproc->proxy_clk_count); 1231 q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs, 1232 qproc->fallback_proxy_reg_count); 1233 q6v5_regulator_disable(qproc, qproc->proxy_regs, 1234 qproc->proxy_reg_count); 1235 } 1236 } 1237 1238 static int q6v5_reload_mba(struct rproc *rproc) 1239 { 1240 struct q6v5 *qproc = rproc->priv; 1241 const struct firmware *fw; 1242 int ret; 1243 1244 ret = request_firmware(&fw, rproc->firmware, qproc->dev); 1245 if (ret < 0) 1246 return ret; 1247 1248 q6v5_load(rproc, fw); 1249 ret = q6v5_mba_load(qproc); 1250 release_firmware(fw); 1251 1252 return ret; 1253 } 1254 1255 static int q6v5_mpss_load(struct q6v5 *qproc) 1256 { 1257 const struct elf32_phdr *phdrs; 1258 const struct elf32_phdr *phdr; 1259 const struct firmware *seg_fw; 1260 const struct firmware *fw; 1261 struct elf32_hdr *ehdr; 1262 phys_addr_t mpss_reloc; 1263 phys_addr_t boot_addr; 1264 phys_addr_t min_addr = PHYS_ADDR_MAX; 1265 phys_addr_t max_addr = 0; 1266 u32 code_length; 1267 bool relocate = false; 1268 char *fw_name; 1269 size_t fw_name_len; 1270 ssize_t offset; 1271 size_t size = 0; 1272 void *ptr; 1273 int ret; 1274 int i; 1275 1276 fw_name_len = strlen(qproc->hexagon_mdt_image); 1277 if (fw_name_len <= 4) 1278 return -EINVAL; 1279 1280 fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL); 1281 if (!fw_name) 1282 return -ENOMEM; 1283 1284 ret = request_firmware(&fw, fw_name, qproc->dev); 1285 if (ret < 0) { 1286 dev_err(qproc->dev, "unable to load %s\n", fw_name); 1287 goto out; 1288 } 1289 1290 /* Initialize the RMB validator */ 1291 writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); 1292 1293 ret = 
q6v5_mpss_init_image(qproc, fw, qproc->hexagon_mdt_image); 1294 if (ret) 1295 goto release_firmware; 1296 1297 ehdr = (struct elf32_hdr *)fw->data; 1298 phdrs = (struct elf32_phdr *)(ehdr + 1); 1299 1300 for (i = 0; i < ehdr->e_phnum; i++) { 1301 phdr = &phdrs[i]; 1302 1303 if (!q6v5_phdr_valid(phdr)) 1304 continue; 1305 1306 if (phdr->p_flags & QCOM_MDT_RELOCATABLE) 1307 relocate = true; 1308 1309 if (phdr->p_paddr < min_addr) 1310 min_addr = phdr->p_paddr; 1311 1312 if (phdr->p_paddr + phdr->p_memsz > max_addr) 1313 max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K); 1314 } 1315 1316 /* 1317 * In case of a modem subsystem restart on secure devices, the modem 1318 * memory can be reclaimed only after MBA is loaded. 1319 */ 1320 q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false, 1321 qproc->mpss_phys, qproc->mpss_size); 1322 1323 /* Share ownership between Linux and MSS, during segment loading */ 1324 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true, 1325 qproc->mpss_phys, qproc->mpss_size); 1326 if (ret) { 1327 dev_err(qproc->dev, 1328 "assigning Q6 access to mpss memory failed: %d\n", ret); 1329 ret = -EAGAIN; 1330 goto release_firmware; 1331 } 1332 1333 mpss_reloc = relocate ? 
min_addr : qproc->mpss_phys; 1334 qproc->mpss_reloc = mpss_reloc; 1335 /* Load firmware segments */ 1336 for (i = 0; i < ehdr->e_phnum; i++) { 1337 phdr = &phdrs[i]; 1338 1339 if (!q6v5_phdr_valid(phdr)) 1340 continue; 1341 1342 offset = phdr->p_paddr - mpss_reloc; 1343 if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) { 1344 dev_err(qproc->dev, "segment outside memory range\n"); 1345 ret = -EINVAL; 1346 goto release_firmware; 1347 } 1348 1349 if (phdr->p_filesz > phdr->p_memsz) { 1350 dev_err(qproc->dev, 1351 "refusing to load segment %d with p_filesz > p_memsz\n", 1352 i); 1353 ret = -EINVAL; 1354 goto release_firmware; 1355 } 1356 1357 ptr = memremap(qproc->mpss_phys + offset, phdr->p_memsz, MEMREMAP_WC); 1358 if (!ptr) { 1359 dev_err(qproc->dev, 1360 "unable to map memory region: %pa+%zx-%x\n", 1361 &qproc->mpss_phys, offset, phdr->p_memsz); 1362 goto release_firmware; 1363 } 1364 1365 if (phdr->p_filesz && phdr->p_offset < fw->size) { 1366 /* Firmware is large enough to be non-split */ 1367 if (phdr->p_offset + phdr->p_filesz > fw->size) { 1368 dev_err(qproc->dev, 1369 "failed to load segment %d from truncated file %s\n", 1370 i, fw_name); 1371 ret = -EINVAL; 1372 memunmap(ptr); 1373 goto release_firmware; 1374 } 1375 1376 memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz); 1377 } else if (phdr->p_filesz) { 1378 /* Replace "xxx.xxx" with "xxx.bxx" */ 1379 sprintf(fw_name + fw_name_len - 3, "b%02d", i); 1380 ret = request_firmware_into_buf(&seg_fw, fw_name, qproc->dev, 1381 ptr, phdr->p_filesz); 1382 if (ret) { 1383 dev_err(qproc->dev, "failed to load %s\n", fw_name); 1384 memunmap(ptr); 1385 goto release_firmware; 1386 } 1387 1388 if (seg_fw->size != phdr->p_filesz) { 1389 dev_err(qproc->dev, 1390 "failed to load segment %d from truncated file %s\n", 1391 i, fw_name); 1392 ret = -EINVAL; 1393 release_firmware(seg_fw); 1394 memunmap(ptr); 1395 goto release_firmware; 1396 } 1397 1398 release_firmware(seg_fw); 1399 } 1400 1401 if (phdr->p_memsz > 
phdr->p_filesz) { 1402 memset(ptr + phdr->p_filesz, 0, 1403 phdr->p_memsz - phdr->p_filesz); 1404 } 1405 memunmap(ptr); 1406 size += phdr->p_memsz; 1407 1408 code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); 1409 if (!code_length) { 1410 boot_addr = relocate ? qproc->mpss_phys : min_addr; 1411 writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG); 1412 writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG); 1413 } 1414 writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); 1415 1416 ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG); 1417 if (ret < 0) { 1418 dev_err(qproc->dev, "MPSS authentication failed: %d\n", 1419 ret); 1420 goto release_firmware; 1421 } 1422 } 1423 1424 /* Transfer ownership of modem ddr region to q6 */ 1425 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true, 1426 qproc->mpss_phys, qproc->mpss_size); 1427 if (ret) { 1428 dev_err(qproc->dev, 1429 "assigning Q6 access to mpss memory failed: %d\n", ret); 1430 ret = -EAGAIN; 1431 goto release_firmware; 1432 } 1433 1434 ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000); 1435 if (ret == -ETIMEDOUT) 1436 dev_err(qproc->dev, "MPSS authentication timed out\n"); 1437 else if (ret < 0) 1438 dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret); 1439 1440 qcom_pil_info_store("modem", qproc->mpss_phys, qproc->mpss_size); 1441 1442 release_firmware: 1443 release_firmware(fw); 1444 out: 1445 kfree(fw_name); 1446 1447 return ret < 0 ? 
ret : 0; 1448 } 1449 1450 static void qcom_q6v5_dump_segment(struct rproc *rproc, 1451 struct rproc_dump_segment *segment, 1452 void *dest, size_t cp_offset, size_t size) 1453 { 1454 int ret = 0; 1455 struct q6v5 *qproc = rproc->priv; 1456 int offset = segment->da - qproc->mpss_reloc; 1457 void *ptr = NULL; 1458 1459 /* Unlock mba before copying segments */ 1460 if (!qproc->dump_mba_loaded) { 1461 ret = q6v5_reload_mba(rproc); 1462 if (!ret) { 1463 /* Reset ownership back to Linux to copy segments */ 1464 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, 1465 true, false, 1466 qproc->mpss_phys, 1467 qproc->mpss_size); 1468 } 1469 } 1470 1471 if (!ret) 1472 ptr = memremap(qproc->mpss_phys + offset + cp_offset, size, MEMREMAP_WC); 1473 1474 if (ptr) { 1475 memcpy(dest, ptr, size); 1476 memunmap(ptr); 1477 } else { 1478 memset(dest, 0xff, size); 1479 } 1480 1481 qproc->current_dump_size += size; 1482 1483 /* Reclaim mba after copying segments */ 1484 if (qproc->current_dump_size == qproc->total_dump_size) { 1485 if (qproc->dump_mba_loaded) { 1486 /* Try to reset ownership back to Q6 */ 1487 q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, 1488 false, true, 1489 qproc->mpss_phys, 1490 qproc->mpss_size); 1491 q6v5_mba_reclaim(qproc); 1492 } 1493 } 1494 } 1495 1496 static int q6v5_start(struct rproc *rproc) 1497 { 1498 struct q6v5 *qproc = (struct q6v5 *)rproc->priv; 1499 int xfermemop_ret; 1500 int ret; 1501 1502 ret = q6v5_mba_load(qproc); 1503 if (ret) 1504 return ret; 1505 1506 dev_info(qproc->dev, "MBA booted with%s debug policy, loading mpss\n", 1507 qproc->dp_size ? 
"" : "out"); 1508 1509 ret = q6v5_mpss_load(qproc); 1510 if (ret) 1511 goto reclaim_mpss; 1512 1513 ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000)); 1514 if (ret == -ETIMEDOUT) { 1515 dev_err(qproc->dev, "start timed out\n"); 1516 goto reclaim_mpss; 1517 } 1518 1519 xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, 1520 false, qproc->mba_phys, 1521 qproc->mba_size); 1522 if (xfermemop_ret) 1523 dev_err(qproc->dev, 1524 "Failed to reclaim mba buffer system may become unstable\n"); 1525 1526 /* Reset Dump Segment Mask */ 1527 qproc->current_dump_size = 0; 1528 1529 return 0; 1530 1531 reclaim_mpss: 1532 q6v5_mba_reclaim(qproc); 1533 q6v5_dump_mba_logs(qproc); 1534 1535 return ret; 1536 } 1537 1538 static int q6v5_stop(struct rproc *rproc) 1539 { 1540 struct q6v5 *qproc = (struct q6v5 *)rproc->priv; 1541 int ret; 1542 1543 ret = qcom_q6v5_request_stop(&qproc->q6v5, qproc->sysmon); 1544 if (ret == -ETIMEDOUT) 1545 dev_err(qproc->dev, "timed out on wait\n"); 1546 1547 q6v5_mba_reclaim(qproc); 1548 1549 return 0; 1550 } 1551 1552 static int qcom_q6v5_register_dump_segments(struct rproc *rproc, 1553 const struct firmware *mba_fw) 1554 { 1555 const struct firmware *fw; 1556 const struct elf32_phdr *phdrs; 1557 const struct elf32_phdr *phdr; 1558 const struct elf32_hdr *ehdr; 1559 struct q6v5 *qproc = rproc->priv; 1560 unsigned long i; 1561 int ret; 1562 1563 ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev); 1564 if (ret < 0) { 1565 dev_err(qproc->dev, "unable to load %s\n", 1566 qproc->hexagon_mdt_image); 1567 return ret; 1568 } 1569 1570 rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE); 1571 1572 ehdr = (struct elf32_hdr *)fw->data; 1573 phdrs = (struct elf32_phdr *)(ehdr + 1); 1574 qproc->total_dump_size = 0; 1575 1576 for (i = 0; i < ehdr->e_phnum; i++) { 1577 phdr = &phdrs[i]; 1578 1579 if (!q6v5_phdr_valid(phdr)) 1580 continue; 1581 1582 ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr, 1583 
phdr->p_memsz, 1584 qcom_q6v5_dump_segment, 1585 NULL); 1586 if (ret) 1587 break; 1588 1589 qproc->total_dump_size += phdr->p_memsz; 1590 } 1591 1592 release_firmware(fw); 1593 return ret; 1594 } 1595 1596 static const struct rproc_ops q6v5_ops = { 1597 .start = q6v5_start, 1598 .stop = q6v5_stop, 1599 .parse_fw = qcom_q6v5_register_dump_segments, 1600 .load = q6v5_load, 1601 }; 1602 1603 static void qcom_msa_handover(struct qcom_q6v5 *q6v5) 1604 { 1605 struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5); 1606 1607 q6v5_clk_disable(qproc->dev, qproc->proxy_clks, 1608 qproc->proxy_clk_count); 1609 q6v5_regulator_disable(qproc, qproc->proxy_regs, 1610 qproc->proxy_reg_count); 1611 q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs, 1612 qproc->fallback_proxy_reg_count); 1613 q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count); 1614 } 1615 1616 static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev) 1617 { 1618 struct of_phandle_args args; 1619 int halt_cell_cnt = 3; 1620 int ret; 1621 1622 qproc->reg_base = devm_platform_ioremap_resource_byname(pdev, "qdsp6"); 1623 if (IS_ERR(qproc->reg_base)) 1624 return PTR_ERR(qproc->reg_base); 1625 1626 qproc->rmb_base = devm_platform_ioremap_resource_byname(pdev, "rmb"); 1627 if (IS_ERR(qproc->rmb_base)) 1628 return PTR_ERR(qproc->rmb_base); 1629 1630 if (qproc->has_vq6) 1631 halt_cell_cnt++; 1632 1633 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, 1634 "qcom,halt-regs", halt_cell_cnt, 0, &args); 1635 if (ret < 0) { 1636 dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n"); 1637 return -EINVAL; 1638 } 1639 1640 qproc->halt_map = syscon_node_to_regmap(args.np); 1641 of_node_put(args.np); 1642 if (IS_ERR(qproc->halt_map)) 1643 return PTR_ERR(qproc->halt_map); 1644 1645 qproc->halt_q6 = args.args[0]; 1646 qproc->halt_modem = args.args[1]; 1647 qproc->halt_nc = args.args[2]; 1648 1649 if (qproc->has_vq6) 1650 qproc->halt_vq6 = args.args[3]; 1651 1652 if 
(qproc->has_qaccept_regs) { 1653 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, 1654 "qcom,qaccept-regs", 1655 3, 0, &args); 1656 if (ret < 0) { 1657 dev_err(&pdev->dev, "failed to parse qaccept-regs\n"); 1658 return -EINVAL; 1659 } 1660 1661 qproc->qaccept_mdm = args.args[0]; 1662 qproc->qaccept_cx = args.args[1]; 1663 qproc->qaccept_axi = args.args[2]; 1664 } 1665 1666 if (qproc->has_ext_cntl_regs) { 1667 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, 1668 "qcom,ext-regs", 1669 2, 0, &args); 1670 if (ret < 0) { 1671 dev_err(&pdev->dev, "failed to parse ext-regs index 0\n"); 1672 return -EINVAL; 1673 } 1674 1675 qproc->conn_map = syscon_node_to_regmap(args.np); 1676 of_node_put(args.np); 1677 if (IS_ERR(qproc->conn_map)) 1678 return PTR_ERR(qproc->conn_map); 1679 1680 qproc->force_clk_on = args.args[0]; 1681 qproc->rscc_disable = args.args[1]; 1682 1683 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, 1684 "qcom,ext-regs", 1685 2, 1, &args); 1686 if (ret < 0) { 1687 dev_err(&pdev->dev, "failed to parse ext-regs index 1\n"); 1688 return -EINVAL; 1689 } 1690 1691 qproc->axim1_clk_off = args.args[0]; 1692 qproc->crypto_clk_off = args.args[1]; 1693 } 1694 1695 if (qproc->has_spare_reg) { 1696 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, 1697 "qcom,spare-regs", 1698 1, 0, &args); 1699 if (ret < 0) { 1700 dev_err(&pdev->dev, "failed to parse spare-regs\n"); 1701 return -EINVAL; 1702 } 1703 1704 qproc->conn_map = syscon_node_to_regmap(args.np); 1705 of_node_put(args.np); 1706 if (IS_ERR(qproc->conn_map)) 1707 return PTR_ERR(qproc->conn_map); 1708 1709 qproc->conn_box = args.args[0]; 1710 } 1711 1712 return 0; 1713 } 1714 1715 static int q6v5_init_clocks(struct device *dev, struct clk **clks, 1716 char **clk_names) 1717 { 1718 int i; 1719 1720 if (!clk_names) 1721 return 0; 1722 1723 for (i = 0; clk_names[i]; i++) { 1724 clks[i] = devm_clk_get(dev, clk_names[i]); 1725 if (IS_ERR(clks[i])) { 1726 int rc = PTR_ERR(clks[i]); 1727 
			/* Deferral is expected; only log real failures. */
			if (rc != -EPROBE_DEFER)
				dev_err(dev, "Failed to get %s clock\n",
					clk_names[i]);
			return rc;
		}
	}

	return i;
}

/*
 * Attach the named power domains to @dev; returns the number attached or a
 * negative errno. Domains already attached are detached on failure.
 */
static int q6v5_pds_attach(struct device *dev, struct device **devs,
			   char **pd_names)
{
	size_t num_pds = 0;
	int ret;
	int i;

	/* A NULL list means this SoC variant uses no power domains. */
	if (!pd_names)
		return 0;

	while (pd_names[num_pds])
		num_pds++;

	for (i = 0; i < num_pds; i++) {
		devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
		if (IS_ERR_OR_NULL(devs[i])) {
			/* NULL (no "power-domains" in DT) maps to -ENODATA. */
			ret = PTR_ERR(devs[i]) ? : -ENODATA;
			goto unroll_attach;
		}
	}

	return num_pds;

unroll_attach:
	for (i--; i >= 0; i--)
		dev_pm_domain_detach(devs[i], false);

	return ret;
}

/* Detach all previously attached power domains. */
static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
			    size_t pd_count)
{
	int i;

	for (i = 0; i < pd_count; i++)
		dev_pm_domain_detach(pds[i], false);
}

/*
 * Acquire the mss restart line and, on variants that need it, the PDC reset.
 * Returns 0 or a negative errno from the reset framework.
 */
static int q6v5_init_reset(struct q6v5 *qproc)
{
	qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
							      "mss_restart");
	if (IS_ERR(qproc->mss_restart)) {
		dev_err(qproc->dev, "failed to acquire mss restart\n");
		return PTR_ERR(qproc->mss_restart);
	}

	if (qproc->has_alt_reset || qproc->has_spare_reg || qproc->has_ext_cntl_regs) {
		qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
								    "pdc_reset");
		if (IS_ERR(qproc->pdc_reset)) {
			dev_err(qproc->dev, "failed to acquire pdc reset\n");
			return PTR_ERR(qproc->pdc_reset);
		}
	}

	return 0;
}

/* Resolve the MBA and MPSS carveout addresses/sizes from the devicetree. */
static int q6v5_alloc_memory_region(struct q6v5 *qproc)
{
	struct device_node *child;
	struct device_node *node;
	struct resource r;
	int ret;

	/*
	 * In the absence of mba/mpss sub-child, extract the mba and mpss
	 * reserved memory regions from device's memory-region property.
1808 */ 1809 child = of_get_child_by_name(qproc->dev->of_node, "mba"); 1810 if (!child) 1811 node = of_parse_phandle(qproc->dev->of_node, 1812 "memory-region", 0); 1813 else 1814 node = of_parse_phandle(child, "memory-region", 0); 1815 1816 ret = of_address_to_resource(node, 0, &r); 1817 if (ret) { 1818 dev_err(qproc->dev, "unable to resolve mba region\n"); 1819 return ret; 1820 } 1821 of_node_put(node); 1822 1823 qproc->mba_phys = r.start; 1824 qproc->mba_size = resource_size(&r); 1825 1826 if (!child) { 1827 node = of_parse_phandle(qproc->dev->of_node, 1828 "memory-region", 1); 1829 } else { 1830 child = of_get_child_by_name(qproc->dev->of_node, "mpss"); 1831 node = of_parse_phandle(child, "memory-region", 0); 1832 } 1833 1834 ret = of_address_to_resource(node, 0, &r); 1835 if (ret) { 1836 dev_err(qproc->dev, "unable to resolve mpss region\n"); 1837 return ret; 1838 } 1839 of_node_put(node); 1840 1841 qproc->mpss_phys = qproc->mpss_reloc = r.start; 1842 qproc->mpss_size = resource_size(&r); 1843 1844 return 0; 1845 } 1846 1847 static int q6v5_probe(struct platform_device *pdev) 1848 { 1849 const struct rproc_hexagon_res *desc; 1850 struct q6v5 *qproc; 1851 struct rproc *rproc; 1852 const char *mba_image; 1853 int ret; 1854 1855 desc = of_device_get_match_data(&pdev->dev); 1856 if (!desc) 1857 return -EINVAL; 1858 1859 if (desc->need_mem_protection && !qcom_scm_is_available()) 1860 return -EPROBE_DEFER; 1861 1862 mba_image = desc->hexagon_mba_image; 1863 ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name", 1864 0, &mba_image); 1865 if (ret < 0 && ret != -EINVAL) { 1866 dev_err(&pdev->dev, "unable to read mba firmware-name\n"); 1867 return ret; 1868 } 1869 1870 rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops, 1871 mba_image, sizeof(*qproc)); 1872 if (!rproc) { 1873 dev_err(&pdev->dev, "failed to allocate rproc\n"); 1874 return -ENOMEM; 1875 } 1876 1877 rproc->auto_boot = false; 1878 rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE); 
1879 1880 qproc = (struct q6v5 *)rproc->priv; 1881 qproc->dev = &pdev->dev; 1882 qproc->rproc = rproc; 1883 qproc->hexagon_mdt_image = "modem.mdt"; 1884 ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name", 1885 1, &qproc->hexagon_mdt_image); 1886 if (ret < 0 && ret != -EINVAL) { 1887 dev_err(&pdev->dev, "unable to read mpss firmware-name\n"); 1888 goto free_rproc; 1889 } 1890 1891 platform_set_drvdata(pdev, qproc); 1892 1893 qproc->has_qaccept_regs = desc->has_qaccept_regs; 1894 qproc->has_ext_cntl_regs = desc->has_ext_cntl_regs; 1895 qproc->has_vq6 = desc->has_vq6; 1896 qproc->has_spare_reg = desc->has_spare_reg; 1897 ret = q6v5_init_mem(qproc, pdev); 1898 if (ret) 1899 goto free_rproc; 1900 1901 ret = q6v5_alloc_memory_region(qproc); 1902 if (ret) 1903 goto free_rproc; 1904 1905 ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks, 1906 desc->proxy_clk_names); 1907 if (ret < 0) { 1908 dev_err(&pdev->dev, "Failed to get proxy clocks.\n"); 1909 goto free_rproc; 1910 } 1911 qproc->proxy_clk_count = ret; 1912 1913 ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks, 1914 desc->reset_clk_names); 1915 if (ret < 0) { 1916 dev_err(&pdev->dev, "Failed to get reset clocks.\n"); 1917 goto free_rproc; 1918 } 1919 qproc->reset_clk_count = ret; 1920 1921 ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks, 1922 desc->active_clk_names); 1923 if (ret < 0) { 1924 dev_err(&pdev->dev, "Failed to get active clocks.\n"); 1925 goto free_rproc; 1926 } 1927 qproc->active_clk_count = ret; 1928 1929 ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs, 1930 desc->proxy_supply); 1931 if (ret < 0) { 1932 dev_err(&pdev->dev, "Failed to get proxy regulators.\n"); 1933 goto free_rproc; 1934 } 1935 qproc->proxy_reg_count = ret; 1936 1937 ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs, 1938 desc->active_supply); 1939 if (ret < 0) { 1940 dev_err(&pdev->dev, "Failed to get active regulators.\n"); 1941 goto free_rproc; 1942 } 1943 qproc->active_reg_count = ret; 
1944 1945 ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds, 1946 desc->proxy_pd_names); 1947 /* Fallback to regulators for old device trees */ 1948 if (ret == -ENODATA && desc->fallback_proxy_supply) { 1949 ret = q6v5_regulator_init(&pdev->dev, 1950 qproc->fallback_proxy_regs, 1951 desc->fallback_proxy_supply); 1952 if (ret < 0) { 1953 dev_err(&pdev->dev, "Failed to get fallback proxy regulators.\n"); 1954 goto free_rproc; 1955 } 1956 qproc->fallback_proxy_reg_count = ret; 1957 } else if (ret < 0) { 1958 dev_err(&pdev->dev, "Failed to init power domains\n"); 1959 goto free_rproc; 1960 } else { 1961 qproc->proxy_pd_count = ret; 1962 } 1963 1964 qproc->has_alt_reset = desc->has_alt_reset; 1965 ret = q6v5_init_reset(qproc); 1966 if (ret) 1967 goto detach_proxy_pds; 1968 1969 qproc->version = desc->version; 1970 qproc->need_mem_protection = desc->need_mem_protection; 1971 qproc->has_mba_logs = desc->has_mba_logs; 1972 1973 ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM, "modem", 1974 qcom_msa_handover); 1975 if (ret) 1976 goto detach_proxy_pds; 1977 1978 qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS); 1979 qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS); 1980 qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss"); 1981 qcom_add_smd_subdev(rproc, &qproc->smd_subdev); 1982 qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss"); 1983 qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12); 1984 if (IS_ERR(qproc->sysmon)) { 1985 ret = PTR_ERR(qproc->sysmon); 1986 goto remove_subdevs; 1987 } 1988 1989 ret = rproc_add(rproc); 1990 if (ret) 1991 goto remove_sysmon_subdev; 1992 1993 return 0; 1994 1995 remove_sysmon_subdev: 1996 qcom_remove_sysmon_subdev(qproc->sysmon); 1997 remove_subdevs: 1998 qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev); 1999 qcom_remove_smd_subdev(rproc, &qproc->smd_subdev); 2000 qcom_remove_glink_subdev(rproc, &qproc->glink_subdev); 2001 detach_proxy_pds: 2002 q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count); 
2003 free_rproc: 2004 rproc_free(rproc); 2005 2006 return ret; 2007 } 2008 2009 static int q6v5_remove(struct platform_device *pdev) 2010 { 2011 struct q6v5 *qproc = platform_get_drvdata(pdev); 2012 struct rproc *rproc = qproc->rproc; 2013 2014 rproc_del(rproc); 2015 2016 qcom_q6v5_deinit(&qproc->q6v5); 2017 qcom_remove_sysmon_subdev(qproc->sysmon); 2018 qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev); 2019 qcom_remove_smd_subdev(rproc, &qproc->smd_subdev); 2020 qcom_remove_glink_subdev(rproc, &qproc->glink_subdev); 2021 2022 q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count); 2023 2024 rproc_free(rproc); 2025 2026 return 0; 2027 } 2028 2029 static const struct rproc_hexagon_res sc7180_mss = { 2030 .hexagon_mba_image = "mba.mbn", 2031 .proxy_clk_names = (char*[]){ 2032 "xo", 2033 NULL 2034 }, 2035 .reset_clk_names = (char*[]){ 2036 "iface", 2037 "bus", 2038 "snoc_axi", 2039 NULL 2040 }, 2041 .active_clk_names = (char*[]){ 2042 "mnoc_axi", 2043 "nav", 2044 NULL 2045 }, 2046 .proxy_pd_names = (char*[]){ 2047 "cx", 2048 "mx", 2049 "mss", 2050 NULL 2051 }, 2052 .need_mem_protection = true, 2053 .has_alt_reset = false, 2054 .has_mba_logs = true, 2055 .has_spare_reg = true, 2056 .has_qaccept_regs = false, 2057 .has_ext_cntl_regs = false, 2058 .has_vq6 = false, 2059 .version = MSS_SC7180, 2060 }; 2061 2062 static const struct rproc_hexagon_res sc7280_mss = { 2063 .hexagon_mba_image = "mba.mbn", 2064 .proxy_clk_names = (char*[]){ 2065 "xo", 2066 "pka", 2067 NULL 2068 }, 2069 .active_clk_names = (char*[]){ 2070 "iface", 2071 "offline", 2072 "snoc_axi", 2073 NULL 2074 }, 2075 .proxy_pd_names = (char*[]){ 2076 "cx", 2077 "mss", 2078 NULL 2079 }, 2080 .need_mem_protection = true, 2081 .has_alt_reset = false, 2082 .has_mba_logs = true, 2083 .has_spare_reg = false, 2084 .has_qaccept_regs = true, 2085 .has_ext_cntl_regs = true, 2086 .has_vq6 = true, 2087 .version = MSS_SC7280, 2088 }; 2089 2090 static const struct rproc_hexagon_res sdm845_mss = { 2091 
.hexagon_mba_image = "mba.mbn", 2092 .proxy_clk_names = (char*[]){ 2093 "xo", 2094 "prng", 2095 NULL 2096 }, 2097 .reset_clk_names = (char*[]){ 2098 "iface", 2099 "snoc_axi", 2100 NULL 2101 }, 2102 .active_clk_names = (char*[]){ 2103 "bus", 2104 "mem", 2105 "gpll0_mss", 2106 "mnoc_axi", 2107 NULL 2108 }, 2109 .proxy_pd_names = (char*[]){ 2110 "cx", 2111 "mx", 2112 "mss", 2113 NULL 2114 }, 2115 .need_mem_protection = true, 2116 .has_alt_reset = true, 2117 .has_mba_logs = false, 2118 .has_spare_reg = false, 2119 .has_qaccept_regs = false, 2120 .has_ext_cntl_regs = false, 2121 .has_vq6 = false, 2122 .version = MSS_SDM845, 2123 }; 2124 2125 static const struct rproc_hexagon_res msm8998_mss = { 2126 .hexagon_mba_image = "mba.mbn", 2127 .proxy_clk_names = (char*[]){ 2128 "xo", 2129 "qdss", 2130 "mem", 2131 NULL 2132 }, 2133 .active_clk_names = (char*[]){ 2134 "iface", 2135 "bus", 2136 "gpll0_mss", 2137 "mnoc_axi", 2138 "snoc_axi", 2139 NULL 2140 }, 2141 .proxy_pd_names = (char*[]){ 2142 "cx", 2143 "mx", 2144 NULL 2145 }, 2146 .need_mem_protection = true, 2147 .has_alt_reset = false, 2148 .has_mba_logs = false, 2149 .has_spare_reg = false, 2150 .has_qaccept_regs = false, 2151 .has_ext_cntl_regs = false, 2152 .has_vq6 = false, 2153 .version = MSS_MSM8998, 2154 }; 2155 2156 static const struct rproc_hexagon_res msm8996_mss = { 2157 .hexagon_mba_image = "mba.mbn", 2158 .proxy_supply = (struct qcom_mss_reg_res[]) { 2159 { 2160 .supply = "pll", 2161 .uA = 100000, 2162 }, 2163 {} 2164 }, 2165 .proxy_clk_names = (char*[]){ 2166 "xo", 2167 "pnoc", 2168 "qdss", 2169 NULL 2170 }, 2171 .active_clk_names = (char*[]){ 2172 "iface", 2173 "bus", 2174 "mem", 2175 "gpll0_mss", 2176 "snoc_axi", 2177 "mnoc_axi", 2178 NULL 2179 }, 2180 .need_mem_protection = true, 2181 .has_alt_reset = false, 2182 .has_mba_logs = false, 2183 .has_spare_reg = false, 2184 .has_qaccept_regs = false, 2185 .has_ext_cntl_regs = false, 2186 .has_vq6 = false, 2187 .version = MSS_MSM8996, 2188 }; 2189 2190 static 
const struct rproc_hexagon_res msm8916_mss = { 2191 .hexagon_mba_image = "mba.mbn", 2192 .proxy_supply = (struct qcom_mss_reg_res[]) { 2193 { 2194 .supply = "pll", 2195 .uA = 100000, 2196 }, 2197 {} 2198 }, 2199 .fallback_proxy_supply = (struct qcom_mss_reg_res[]) { 2200 { 2201 .supply = "mx", 2202 .uV = 1050000, 2203 }, 2204 { 2205 .supply = "cx", 2206 .uA = 100000, 2207 }, 2208 {} 2209 }, 2210 .proxy_clk_names = (char*[]){ 2211 "xo", 2212 NULL 2213 }, 2214 .active_clk_names = (char*[]){ 2215 "iface", 2216 "bus", 2217 "mem", 2218 NULL 2219 }, 2220 .proxy_pd_names = (char*[]){ 2221 "mx", 2222 "cx", 2223 NULL 2224 }, 2225 .need_mem_protection = false, 2226 .has_alt_reset = false, 2227 .has_mba_logs = false, 2228 .has_spare_reg = false, 2229 .has_qaccept_regs = false, 2230 .has_ext_cntl_regs = false, 2231 .has_vq6 = false, 2232 .version = MSS_MSM8916, 2233 }; 2234 2235 static const struct rproc_hexagon_res msm8974_mss = { 2236 .hexagon_mba_image = "mba.b00", 2237 .proxy_supply = (struct qcom_mss_reg_res[]) { 2238 { 2239 .supply = "pll", 2240 .uA = 100000, 2241 }, 2242 {} 2243 }, 2244 .fallback_proxy_supply = (struct qcom_mss_reg_res[]) { 2245 { 2246 .supply = "mx", 2247 .uV = 1050000, 2248 }, 2249 { 2250 .supply = "cx", 2251 .uA = 100000, 2252 }, 2253 {} 2254 }, 2255 .active_supply = (struct qcom_mss_reg_res[]) { 2256 { 2257 .supply = "mss", 2258 .uV = 1050000, 2259 .uA = 100000, 2260 }, 2261 {} 2262 }, 2263 .proxy_clk_names = (char*[]){ 2264 "xo", 2265 NULL 2266 }, 2267 .active_clk_names = (char*[]){ 2268 "iface", 2269 "bus", 2270 "mem", 2271 NULL 2272 }, 2273 .proxy_pd_names = (char*[]){ 2274 "mx", 2275 "cx", 2276 NULL 2277 }, 2278 .need_mem_protection = false, 2279 .has_alt_reset = false, 2280 .has_mba_logs = false, 2281 .has_spare_reg = false, 2282 .has_qaccept_regs = false, 2283 .has_ext_cntl_regs = false, 2284 .has_vq6 = false, 2285 .version = MSS_MSM8974, 2286 }; 2287 2288 static const struct of_device_id q6v5_of_match[] = { 2289 { .compatible = 
"qcom,q6v5-pil", .data = &msm8916_mss}, 2290 { .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss}, 2291 { .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss}, 2292 { .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss}, 2293 { .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss}, 2294 { .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss}, 2295 { .compatible = "qcom,sc7280-mss-pil", .data = &sc7280_mss}, 2296 { .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss}, 2297 { }, 2298 }; 2299 MODULE_DEVICE_TABLE(of, q6v5_of_match); 2300 2301 static struct platform_driver q6v5_driver = { 2302 .probe = q6v5_probe, 2303 .remove = q6v5_remove, 2304 .driver = { 2305 .name = "qcom-q6v5-mss", 2306 .of_match_table = q6v5_of_match, 2307 }, 2308 }; 2309 module_platform_driver(q6v5_driver); 2310 2311 MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver"); 2312 MODULE_LICENSE("GPL v2"); 2313