1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Qualcomm self-authenticating modem subsystem remoteproc driver 4 * 5 * Copyright (C) 2016 Linaro Ltd. 6 * Copyright (C) 2014 Sony Mobile Communications AB 7 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. 8 */ 9 10 #include <linux/clk.h> 11 #include <linux/delay.h> 12 #include <linux/devcoredump.h> 13 #include <linux/dma-mapping.h> 14 #include <linux/interrupt.h> 15 #include <linux/kernel.h> 16 #include <linux/mfd/syscon.h> 17 #include <linux/module.h> 18 #include <linux/of_address.h> 19 #include <linux/of_device.h> 20 #include <linux/platform_device.h> 21 #include <linux/pm_domain.h> 22 #include <linux/pm_runtime.h> 23 #include <linux/regmap.h> 24 #include <linux/regulator/consumer.h> 25 #include <linux/remoteproc.h> 26 #include <linux/reset.h> 27 #include <linux/soc/qcom/mdt_loader.h> 28 #include <linux/iopoll.h> 29 30 #include "remoteproc_internal.h" 31 #include "qcom_common.h" 32 #include "qcom_pil_info.h" 33 #include "qcom_q6v5.h" 34 35 #include <linux/qcom_scm.h> 36 37 #define MPSS_CRASH_REASON_SMEM 421 38 39 #define MBA_LOG_SIZE SZ_4K 40 41 /* RMB Status Register Values */ 42 #define RMB_PBL_SUCCESS 0x1 43 44 #define RMB_MBA_XPU_UNLOCKED 0x1 45 #define RMB_MBA_XPU_UNLOCKED_SCRIBBLED 0x2 46 #define RMB_MBA_META_DATA_AUTH_SUCCESS 0x3 47 #define RMB_MBA_AUTH_COMPLETE 0x4 48 49 /* PBL/MBA interface registers */ 50 #define RMB_MBA_IMAGE_REG 0x00 51 #define RMB_PBL_STATUS_REG 0x04 52 #define RMB_MBA_COMMAND_REG 0x08 53 #define RMB_MBA_STATUS_REG 0x0C 54 #define RMB_PMI_META_DATA_REG 0x10 55 #define RMB_PMI_CODE_START_REG 0x14 56 #define RMB_PMI_CODE_LENGTH_REG 0x18 57 #define RMB_MBA_MSS_STATUS 0x40 58 #define RMB_MBA_ALT_RESET 0x44 59 60 #define RMB_CMD_META_DATA_READY 0x1 61 #define RMB_CMD_LOAD_READY 0x2 62 63 /* QDSP6SS Register Offsets */ 64 #define QDSP6SS_RESET_REG 0x014 65 #define QDSP6SS_GFMUX_CTL_REG 0x020 66 #define QDSP6SS_PWR_CTL_REG 0x030 67 #define QDSP6SS_MEM_PWR_CTL 0x0B0 68 
/* QDSP6v6 subsystem uses a different memory power control offset than v5.6 */
#define QDSP6V6SS_MEM_PWR_CTL	0x034
#define QDSP6SS_STRAP_ACC	0x110

/* AXI Halt Register Offsets */
#define AXI_HALTREQ_REG		0x0
#define AXI_HALTACK_REG		0x4
#define AXI_IDLE_REG		0x8
#define AXI_GATING_VALID_OVERRIDE	BIT(0)

#define HALT_ACK_TIMEOUT_US	100000

/* QDSP6SS_RESET */
#define Q6SS_STOP_CORE		BIT(0)
#define Q6SS_CORE_ARES		BIT(1)
#define Q6SS_BUS_ARES_ENABLE	BIT(2)

/* QDSP6SS CBCR */
#define Q6SS_CBCR_CLKEN		BIT(0)
#define Q6SS_CBCR_CLKOFF	BIT(31)
#define Q6SS_CBCR_TIMEOUT_US	200

/* QDSP6SS_GFMUX_CTL */
#define Q6SS_CLK_ENABLE		BIT(1)

/* QDSP6SS_PWR_CTL */
#define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
#define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
#define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
#define Q6SS_L2TAG_SLP_NRET_N	BIT(16)
#define Q6SS_ETB_SLP_NRET_N	BIT(17)
#define Q6SS_L2DATA_STBY_N	BIT(18)
#define Q6SS_SLP_RET_N		BIT(19)
#define Q6SS_CLAMP_IO		BIT(20)
#define QDSS_BHS_ON		BIT(21)
#define QDSS_LDO_BYP		BIT(22)

/* QDSP6v56 parameters */
#define QDSP6v56_LDO_BYP	BIT(25)
#define QDSP6v56_BHS_ON		BIT(24)
#define QDSP6v56_CLAMP_WL	BIT(21)
#define QDSP6v56_CLAMP_QMC_MEM	BIT(22)
#define QDSP6SS_XO_CBCR		0x0038
#define QDSP6SS_ACC_OVERRIDE_VAL	0x20

/* QDSP6v65 parameters */
#define QDSP6SS_CORE_CBCR	0x20
#define QDSP6SS_SLEEP		0x3C
#define QDSP6SS_BOOT_CORE_START	0x400
#define QDSP6SS_BOOT_CMD	0x404
#define BOOT_FSM_TIMEOUT	10000

/* Runtime state of one regulator supply: handle plus requested levels */
struct reg_info {
	struct regulator *reg;
	int uV;	/* requested voltage, 0 means "do not set" */
	int uA;	/* requested load, 0 means "do not set" */
};

/* Static description of a regulator requirement (from match data) */
struct qcom_mss_reg_res {
	const char *supply;
	int uV;
	int uA;
};

/*
 * Per-SoC resource description matched from the compatible string:
 * names of clocks/supplies/power-domains plus feature flags that select
 * the reset and boot sequences used by this driver.
 */
struct rproc_hexagon_res {
	const char *hexagon_mba_image;
	struct qcom_mss_reg_res *proxy_supply;
	struct qcom_mss_reg_res *active_supply;
	char **proxy_clk_names;
	char **reset_clk_names;
	char **active_clk_names;
	char **active_pd_names;
	char **proxy_pd_names;
	int version;
	bool need_mem_protection;	/* use SCM assign-mem calls */
	bool has_alt_reset;		/* SDM845-style alt reset via RMB */
	bool has_mba_logs;		/* MBA region holds crash logs */
	bool has_spare_reg;		/* AXI_GATING_VALID_OVERRIDE workaround */
};

/* Driver instance state */
struct q6v5 {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *reg_base;	/* QDSP6SS register space */
	void __iomem *rmb_base;	/* PBL/MBA mailbox (RMB) register space */

	struct regmap *halt_map;
	struct regmap *conn_map;

	/* offsets into halt_map/conn_map for the AXI halt/override blocks */
	u32 halt_q6;
	u32 halt_modem;
	u32 halt_nc;
	u32 conn_box;

	struct reset_control *mss_restart;
	struct reset_control *pdc_reset;

	struct qcom_q6v5 q6v5;

	struct clk *active_clks[8];
	struct clk *reset_clks[4];
	struct clk *proxy_clks[4];
	struct device *active_pds[1];
	struct device *proxy_pds[3];
	int active_clk_count;
	int reset_clk_count;
	int proxy_clk_count;
	int active_pd_count;
	int proxy_pd_count;

	struct reg_info active_regs[1];
	struct reg_info proxy_regs[3];
	int active_reg_count;
	int proxy_reg_count;

	bool dump_mba_loaded;
	size_t current_dump_size;
	size_t total_dump_size;

	phys_addr_t mba_phys;
	void *mba_region;	/* CPU mapping of the MBA carveout */
	size_t mba_size;
	size_t dp_size;		/* size of loaded debug policy, 0 if none */

	phys_addr_t mpss_phys;
	phys_addr_t mpss_reloc;
	size_t mpss_size;

	struct qcom_rproc_glink glink_subdev;
	struct qcom_rproc_subdev smd_subdev;
	struct qcom_rproc_ssr ssr_subdev;
	struct qcom_sysmon *sysmon;
	bool need_mem_protection;
	bool has_alt_reset;
	bool has_mba_logs;
	bool has_spare_reg;
	int mpss_perm;	/* current SCM ownership state of the MPSS region */
	int mba_perm;	/* current SCM ownership state of the MBA region */
	const char *hexagon_mdt_image;
	int version;
};

/* Hexagon MSS generations handled by this driver */
enum {
	MSS_MSM8916,
	MSS_MSM8974,
	MSS_MSM8996,
	MSS_MSM8998,
	MSS_SC7180,
	MSS_SDM845,
};

/*
 * Look up the regulators named in @reg_res and record the requested
 * voltage/load levels in @regs.  Returns the number of supplies found,
 * or a negative errno from devm_regulator_get().
 */
static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
			       const struct qcom_mss_reg_res *reg_res)
{
	int rc;
	int i;

	if (!reg_res)
		return 0;

	for (i = 0; reg_res[i].supply; i++) {
		regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
		if (IS_ERR(regs[i].reg)) {
			rc = PTR_ERR(regs[i].reg);
if (rc != -EPROBE_DEFER) 233 dev_err(dev, "Failed to get %s\n regulator", 234 reg_res[i].supply); 235 return rc; 236 } 237 238 regs[i].uV = reg_res[i].uV; 239 regs[i].uA = reg_res[i].uA; 240 } 241 242 return i; 243 } 244 245 static int q6v5_regulator_enable(struct q6v5 *qproc, 246 struct reg_info *regs, int count) 247 { 248 int ret; 249 int i; 250 251 for (i = 0; i < count; i++) { 252 if (regs[i].uV > 0) { 253 ret = regulator_set_voltage(regs[i].reg, 254 regs[i].uV, INT_MAX); 255 if (ret) { 256 dev_err(qproc->dev, 257 "Failed to request voltage for %d.\n", 258 i); 259 goto err; 260 } 261 } 262 263 if (regs[i].uA > 0) { 264 ret = regulator_set_load(regs[i].reg, 265 regs[i].uA); 266 if (ret < 0) { 267 dev_err(qproc->dev, 268 "Failed to set regulator mode\n"); 269 goto err; 270 } 271 } 272 273 ret = regulator_enable(regs[i].reg); 274 if (ret) { 275 dev_err(qproc->dev, "Regulator enable failed\n"); 276 goto err; 277 } 278 } 279 280 return 0; 281 err: 282 for (; i >= 0; i--) { 283 if (regs[i].uV > 0) 284 regulator_set_voltage(regs[i].reg, 0, INT_MAX); 285 286 if (regs[i].uA > 0) 287 regulator_set_load(regs[i].reg, 0); 288 289 regulator_disable(regs[i].reg); 290 } 291 292 return ret; 293 } 294 295 static void q6v5_regulator_disable(struct q6v5 *qproc, 296 struct reg_info *regs, int count) 297 { 298 int i; 299 300 for (i = 0; i < count; i++) { 301 if (regs[i].uV > 0) 302 regulator_set_voltage(regs[i].reg, 0, INT_MAX); 303 304 if (regs[i].uA > 0) 305 regulator_set_load(regs[i].reg, 0); 306 307 regulator_disable(regs[i].reg); 308 } 309 } 310 311 static int q6v5_clk_enable(struct device *dev, 312 struct clk **clks, int count) 313 { 314 int rc; 315 int i; 316 317 for (i = 0; i < count; i++) { 318 rc = clk_prepare_enable(clks[i]); 319 if (rc) { 320 dev_err(dev, "Clock enable failed\n"); 321 goto err; 322 } 323 } 324 325 return 0; 326 err: 327 for (i--; i >= 0; i--) 328 clk_disable_unprepare(clks[i]); 329 330 return rc; 331 } 332 333 static void q6v5_clk_disable(struct device 
			     *dev,
			     struct clk **clks, int count)
{
	int i;

	for (i = 0; i < count; i++)
		clk_disable_unprepare(clks[i]);
}

/*
 * Vote all power domains in @pds to their maximum performance state and
 * power them on.  On failure the already-enabled domains are unwound.
 */
static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
			   size_t pd_count)
{
	int ret;
	int i;

	for (i = 0; i < pd_count; i++) {
		dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
		ret = pm_runtime_get_sync(pds[i]);
		if (ret < 0)
			goto unroll_pd_votes;
	}

	return 0;

unroll_pd_votes:
	/*
	 * NOTE(review): pm_runtime_get_sync() increments the usage count
	 * even on failure, but the failed pds[i] is skipped here (loop
	 * starts at i-1) — confirm whether its reference is dropped
	 * elsewhere.
	 */
	for (i--; i >= 0; i--) {
		dev_pm_genpd_set_performance_state(pds[i], 0);
		pm_runtime_put(pds[i]);
	}

	return ret;
}

/* Drop the performance-state votes and runtime PM references of @pds. */
static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
			     size_t pd_count)
{
	int i;

	for (i = 0; i < pd_count; i++) {
		dev_pm_genpd_set_performance_state(pds[i], 0);
		pm_runtime_put(pds[i]);
	}
}

/*
 * Reassign ownership of the [addr, addr + size) region between Linux
 * (HLOS, RWX) and the modem (MSS MSA, RW) via an SCM call.  @current_perm
 * tracks the present assignment and is updated by qcom_scm_assign_mem().
 * No-op when memory protection is not required or the assignment already
 * matches.
 */
static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
				   bool local, bool remote, phys_addr_t addr,
				   size_t size)
{
	struct qcom_scm_vmperm next[2];
	int perms = 0;

	if (!qproc->need_mem_protection)
		return 0;

	/* Already in the requested state? */
	if (local == !!(*current_perm & BIT(QCOM_SCM_VMID_HLOS)) &&
	    remote == !!(*current_perm & BIT(QCOM_SCM_VMID_MSS_MSA)))
		return 0;

	if (local) {
		next[perms].vmid = QCOM_SCM_VMID_HLOS;
		next[perms].perm = QCOM_SCM_PERM_RWX;
		perms++;
	}

	if (remote) {
		next[perms].vmid = QCOM_SCM_VMID_MSS_MSA;
		next[perms].perm = QCOM_SCM_PERM_RW;
		perms++;
	}

	return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
				   current_perm, next, perms);
}

/*
 * Best-effort load of the "msadp" debug policy blob at offset 1M inside
 * the MBA region; silently skipped when the firmware file is absent or
 * does not fit.
 */
static void q6v5_debug_policy_load(struct q6v5 *qproc)
{
	const struct firmware *dp_fw;

	if (request_firmware_direct(&dp_fw, "msadp", qproc->dev))
		return;

	if (SZ_1M + dp_fw->size <= qproc->mba_size) {
		memcpy(qproc->mba_region + SZ_1M, dp_fw->data, dp_fw->size);
		qproc->dp_size = dp_fw->size;
	}

	release_firmware(dp_fw);
}

/*
 * rproc .load callback: copy the MBA firmware image into the MBA carveout
 * and append the optional debug policy.  The MBA image itself may not
 * exceed 1M (the debug policy lives above that offset).
 */
static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
{
	struct q6v5 *qproc = rproc->priv;

	/* MBA is restricted to a maximum size of 1M */
	if (fw->size > qproc->mba_size || fw->size > SZ_1M) {
		dev_err(qproc->dev, "MBA firmware load failed\n");
		return -EINVAL;
	}

	memcpy(qproc->mba_region, fw->data, fw->size);
	q6v5_debug_policy_load(qproc);

	return 0;
}

/*
 * Put the modem subsystem into reset, using whichever of the three
 * per-SoC sequences (alt-reset, spare-reg workaround, or plain assert)
 * this hardware requires.
 */
static int q6v5_reset_assert(struct q6v5 *qproc)
{
	int ret;

	if (qproc->has_alt_reset) {
		reset_control_assert(qproc->pdc_reset);
		ret = reset_control_reset(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
	} else if (qproc->has_spare_reg) {
		/*
		 * When the AXI pipeline is being reset with the Q6 modem partly
		 * operational there is possibility of AXI valid signal to
		 * glitch, leading to spurious transactions and Q6 hangs. A work
		 * around is employed by asserting the AXI_GATING_VALID_OVERRIDE
		 * BIT before triggering Q6 MSS reset. AXI_GATING_VALID_OVERRIDE
		 * is withdrawn post MSS assert followed by a MSS deassert,
		 * while holding the PDC reset.
		 */
		reset_control_assert(qproc->pdc_reset);
		regmap_update_bits(qproc->conn_map, qproc->conn_box,
				   AXI_GATING_VALID_OVERRIDE, 1);
		reset_control_assert(qproc->mss_restart);
		reset_control_deassert(qproc->pdc_reset);
		regmap_update_bits(qproc->conn_map, qproc->conn_box,
				   AXI_GATING_VALID_OVERRIDE, 0);
		ret = reset_control_deassert(qproc->mss_restart);
	} else {
		ret = reset_control_assert(qproc->mss_restart);
	}

	return ret;
}

/*
 * Bring the modem subsystem out of reset, mirroring the per-SoC variants
 * of q6v5_reset_assert().  On alt-reset hardware the RMB_MBA_ALT_RESET
 * register is toggled around the reset pulse.
 */
static int q6v5_reset_deassert(struct q6v5 *qproc)
{
	int ret;

	if (qproc->has_alt_reset) {
		reset_control_assert(qproc->pdc_reset);
		writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
		ret = reset_control_reset(qproc->mss_restart);
		writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
		reset_control_deassert(qproc->pdc_reset);
	} else if (qproc->has_spare_reg) {
		ret = reset_control_reset(qproc->mss_restart);
	} else {
		ret = reset_control_deassert(qproc->mss_restart);
	}

	return ret;
}

/*
 * Poll the PBL status register until it reports a non-zero status or @ms
 * milliseconds elapse.  Returns the raw status value, or -ETIMEDOUT.
 */
static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
{
	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
		if (val)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

/*
 * Poll the MBA status register until it matches @status (or, when
 * @status is 0, until any non-zero value appears), a negative error
 * status is reported, or @ms milliseconds elapse.  Returns the raw
 * (possibly negative) status value, or -ETIMEDOUT.
 */
static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
{
	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
		if (val < 0)
			break;

		if (!status && val)
			break;
		else if (status && val == status)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

/*
 * Capture the MBA log area (first 4K of the MBA region) into a device
 * coredump.  Requires reclaiming the region from the modem first; all
 * failures are silent since this is diagnostic-only.
 */
static void q6v5_dump_mba_logs(struct q6v5 *qproc)
{
	struct rproc *rproc = qproc->rproc;
	void *data;
	if (!qproc->has_mba_logs)
		return;

	if (q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, qproc->mba_phys,
				    qproc->mba_size))
		return;

	data = vmalloc(MBA_LOG_SIZE);
	if (!data)
		return;

	memcpy(data, qproc->mba_region, MBA_LOG_SIZE);
	/* dev_coredumpv takes ownership of the vmalloc'd buffer */
	dev_coredumpv(&rproc->dev, data, MBA_LOG_SIZE, GFP_KERNEL);
}

/*
 * Power up and release the Hexagon core using the generation-specific
 * sequence (boot FSM on SDM845/SC7180, manual BHS/memory bring-up on
 * v56/v6, legacy sequence otherwise), then wait for PBL to report
 * success.  The exact register ordering below is mandated by hardware.
 */
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;
	int i;

	if (qproc->version == MSS_SDM845) {
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* De-assert QDSP6 stop core */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
		/* Trigger boot FSM */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
					 val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}

		goto pbl_wait;
	} else if (qproc->version == MSS_SC7180) {
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Turn on the XO clock needed for PLL setup */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS XO clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Configure Q6 core CBCR to auto-enable after reset sequence */
		val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR);

		/* De-assert the Q6 stop core signal */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);

		/* Wait for 10 us for any staggering logic to settle */
		usleep_range(10, 20);

		/* Trigger the boot FSM to start the Q6 out-of-reset sequence */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		/* Poll the MSS_STATUS for FSM completion */
		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
					 val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}
		goto pbl_wait;
	} else if (qproc->version == MSS_MSM8996 ||
		   qproc->version == MSS_MSM8998) {
		int mem_pwr_ctl;

		/* Override the ACC value if required */
		writel(QDSP6SS_ACC_OVERRIDE_VAL,
		       qproc->reg_base + QDSP6SS_STRAP_ACC);

		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* BHS require xo cbcr to be enabled */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		/* Read CLKOFF bit to go low indicating CLK is enabled */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev,
				"xo cbcr enabling timed out (rc:%d)\n", ret);
			return ret;
		}
		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSP6v56_BHS_ON;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);

		/* Put LDO in bypass mode */
		val |= QDSP6v56_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert QDSP6 compiler memory clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert memory peripheral sleep and L2 memory standby */
		val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Turn on L1, L2, ETB and JU memories 1 at a time */
		if (qproc->version == MSS_MSM8996) {
			mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL;
			i = 19;
		} else {
			/* MSS_MSM8998 */
			mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL;
			i = 28;
		}
		val = readl(qproc->reg_base + mem_pwr_ctl);
		for (; i >= 0; i--) {
			val |= BIT(i);
			writel(val, qproc->reg_base + mem_pwr_ctl);
			/*
			 * Read back value to ensure the write is done then
			 * wait for 1us for both memory peripheral and data
			 * array to turn on.
			 */
			val |= readl(qproc->reg_base + mem_pwr_ctl);
			udelay(1);
		}
		/* Remove word line clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_WL;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	} else {
		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);
		/*
		 * Turn on memories. L2 banks should be done individually
		 * to minimize inrush current.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_2;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_1;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_0;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}
	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

pbl_wait:
	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}

/*
 * Halt one AXI port: request a halt via the halt register block at
 * @offset, wait for the acknowledgment, then clear the request (the port
 * stays halted until the subsystem is reset).
 */
static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
				   struct regmap *halt_map,
				   u32 offset)
{
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (!ret && val)
		return;

	/* Assert halt request */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);

	/* Wait for halt */
	regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val,
				 val, 1000, HALT_ACK_TIMEOUT_US);

	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (ret || !val)
		dev_err(qproc->dev, "port failed halt\n");

	/* Clear halt request (port will remain halted until reset) */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}

/*
 * Hand the MDT metadata of the MPSS firmware to the MBA for
 * authentication: copy it into a DMA buffer, assign the buffer to the
 * modem, signal it via the RMB registers, wait for the result and
 * reclaim the buffer.
 */
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	void *metadata;
	int mdata_perm;
	int xferop_ret;
	size_t size;
	void *ptr;
	int ret;

	metadata = qcom_mdt_read_metadata(fw, &size);
	if (IS_ERR(metadata))
		return PTR_ERR(metadata);

	ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
	if (!ptr) {
		kfree(metadata);
		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
		return -ENOMEM;
	}

	memcpy(ptr, metadata, size);

	/* Hypervisor mapping to access metadata by modem */
	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true,
				      phys, size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to metadata failed: %d\n", ret);
		ret = -EAGAIN;
		goto free_dma_attrs;
	}

	/* Tell the MBA where the metadata lives and start authentication */
	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	/* Metadata authentication done, remove modem access */
	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false,
					     phys, size);
	if (xferop_ret)
		dev_warn(qproc->dev,
			 "mdt buffer not reclaimed system may become unstable\n");

free_dma_attrs:
	dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
	kfree(metadata);

	return ret < 0 ? ret : 0;
}

/*
 * A program header is loadable when it is PT_LOAD, not the MDT hash
 * segment, and has a non-zero memory size.
 */
static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
{
	if (phdr->p_type != PT_LOAD)
		return false;

	if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
		return false;

	if (!phdr->p_memsz)
		return false;

	return true;
}

/*
 * Boot the MBA: enable power domains, supplies and clocks in proxy-then-
 * active order, deassert reset, hand the MBA region to the modem and run
 * the core.  On any failure the acquired resources are released in
 * reverse order via the label ladder below; the label names indicate the
 * first resource each one releases.
 */
static int q6v5_mba_load(struct q6v5 *qproc)
{
	int ret;
	int xfermemop_ret;
	bool mba_load_err = false;

	qcom_q6v5_prepare(&qproc->q6v5);

	ret = q6v5_pds_enable(qproc, qproc->active_pds, qproc->active_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable active power domains\n");
		goto disable_irqs;
	}

	ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable proxy power domains\n");
		goto disable_active_pds;
	}

	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
				    qproc->proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy supplies\n");
		goto disable_proxy_pds;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
			      qproc->proxy_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy clocks\n");
		goto disable_proxy_reg;
	}

	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
				    qproc->active_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		goto disable_proxy_clk;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
			      qproc->reset_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable reset clocks\n");
		goto disable_vdd;
	}

	ret = q6v5_reset_deassert(qproc);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_reset_clks;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
			      qproc->active_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable clocks\n");
		goto assert_reset;
	}

	/* Assign MBA image access in DDR to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true,
				      qproc->mba_phys, qproc->mba_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mba memory failed: %d\n", ret);
		goto disable_active_clks;
	}

	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
	if (qproc->dp_size) {
		/* Debug policy sits at +1M inside the MBA region */
		writel(qproc->mba_phys + SZ_1M, qproc->rmb_base + RMB_PMI_CODE_START_REG);
		writel(qproc->dp_size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
	}

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto reclaim_mba;

	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	qproc->dump_mba_loaded = true;
	return 0;

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	mba_load_err = true;
reclaim_mba:
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						false, qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret) {
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");
	} else if (mba_load_err) {
		q6v5_dump_mba_logs(qproc);
	}

disable_active_clks:
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
assert_reset:
	q6v5_reset_assert(qproc);
disable_reset_clks:
	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
disable_vdd:
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
disable_proxy_clk:
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
disable_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
disable_proxy_pds:
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
disable_active_pds:
	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
disable_irqs:
	qcom_q6v5_unprepare(&qproc->q6v5);

	return ret;
}

/*
 * Undo q6v5_mba_load(): halt the AXI ports, assert reset, drop clocks,
 * supplies and active power domains, and reclaim the MBA region from the
 * modem.
 */
static void q6v5_mba_reclaim(struct q6v5 *qproc)
{
	int ret;
	u32 val;

	qproc->dump_mba_loaded = false;
	qproc->dp_size = 0;

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	if (qproc->version == MSS_MSM8996) {
		/*
		 * To avoid high MX current during LPASS/MSS restart.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
			QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}

	q6v5_reset_assert(qproc);

	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);

	/* In case of failure or coredump scenario where reclaiming MBA memory
	 * could not happen reclaim it here.
	 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false,
				      qproc->mba_phys,
				      qproc->mba_size);
	WARN_ON(ret);

	ret = qcom_q6v5_unprepare(&qproc->q6v5);
	if (ret) {
		/* Proxy votes are still held; drop them here */
		q6v5_pds_disable(qproc, qproc->proxy_pds,
				 qproc->proxy_pd_count);
		q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
				 qproc->proxy_clk_count);
		q6v5_regulator_disable(qproc, qproc->proxy_regs,
				       qproc->proxy_reg_count);
	}
}

/*
 * Re-fetch the MBA firmware named in rproc->firmware, load it into the
 * MBA region and boot the MBA again (used for the coredump path).
 */
static int q6v5_reload_mba(struct rproc *rproc)
{
	struct q6v5 *qproc = rproc->priv;
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, rproc->firmware, qproc->dev);
	if (ret < 0)
		return ret;

	q6v5_load(rproc, fw);
	ret = q6v5_mba_load(qproc);
	release_firmware(fw);

	return ret;
}

/*
 * Load the MPSS (modem) firmware: authenticate the MDT metadata, copy
 * each loadable ELF segment into the MPSS carveout (split .bNN files are
 * fetched individually), report progress to the MBA through the RMB
 * code-start/length registers, then hand the whole region to the modem
 * and wait for authentication to complete.
 */
static int q6v5_mpss_load(struct q6v5 *qproc)
{
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct firmware *seg_fw;
	const struct firmware *fw;
	struct elf32_hdr *ehdr;
	phys_addr_t mpss_reloc;
	phys_addr_t boot_addr;
	phys_addr_t min_addr = PHYS_ADDR_MAX;
	phys_addr_t max_addr = 0;
	u32 code_length;
	bool relocate = false;
	char *fw_name;
	size_t fw_name_len;
	ssize_t offset;
	size_t size = 0;
	void *ptr;
	int ret;
	int i;

	fw_name_len = strlen(qproc->hexagon_mdt_image);
	/* Need room for the ".mdt"/".bNN" suffix rewriting below */
	if (fw_name_len <= 4)
		return -EINVAL;

	fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL);
	if (!fw_name)
		return -ENOMEM;

	ret = request_firmware(&fw, fw_name, qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load %s\n", fw_name);
		goto out;
	}

	/* Initialize the RMB validator */
	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_mpss_init_image(qproc, fw);
	if (ret)
		goto release_firmware;

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);

	/* First pass: find the firmware's address range */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
			relocate = true;

		if (phdr->p_paddr < min_addr)
			min_addr = phdr->p_paddr;

		if (phdr->p_paddr + phdr->p_memsz > max_addr)
			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
	}

	/*
	 * In case of a modem subsystem restart on secure devices, the modem
	 * memory can be reclaimed only after MBA is loaded. For modem cold
	 * boot this will be a nop
	 */
	q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false,
				qproc->mpss_phys, qproc->mpss_size);

	/* Share ownership between Linux and MSS, during segment loading */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true,
				      qproc->mpss_phys, qproc->mpss_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mpss memory failed: %d\n", ret);
		ret = -EAGAIN;
		goto release_firmware;
	}

	mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
	qproc->mpss_reloc = mpss_reloc;
	/* Load firmware segments */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		offset = phdr->p_paddr - mpss_reloc;
		if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
			dev_err(qproc->dev, "segment outside memory range\n");
			ret = -EINVAL;
			goto release_firmware;
		}

		ptr = ioremap_wc(qproc->mpss_phys + offset, phdr->p_memsz);
		if (!ptr) {
			dev_err(qproc->dev,
				"unable to map memory region: %pa+%zx-%x\n",
				&qproc->mpss_phys, offset, phdr->p_memsz);
			/*
			 * NOTE(review): ret is not set on this path, so the
			 * function may return 0 despite the failure — confirm
			 * an error code (e.g. -EBUSY) should be assigned here.
			 */
			goto release_firmware;
		}

		if (phdr->p_filesz && phdr->p_offset < fw->size) {
			/* Firmware is large enough to be non-split */
			if (phdr->p_offset + phdr->p_filesz > fw->size) {
				dev_err(qproc->dev,
					"failed to load segment %d from truncated file %s\n",
					i, fw_name);
				ret = -EINVAL;
				iounmap(ptr);
				goto release_firmware;
			}

			memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
		} else if (phdr->p_filesz) {
			/* Replace "xxx.xxx" with "xxx.bxx" */
			sprintf(fw_name + fw_name_len - 3, "b%02d", i);
			ret = request_firmware_into_buf(&seg_fw, fw_name, qproc->dev,
							ptr, phdr->p_filesz);
			if (ret) {
				dev_err(qproc->dev, "failed to load %s\n", fw_name);
				iounmap(ptr);
				goto release_firmware;
			}

			release_firmware(seg_fw);
		}

		/* Zero the BSS tail of the segment */
		if (phdr->p_memsz > phdr->p_filesz) {
			memset(ptr + phdr->p_filesz, 0,
			       phdr->p_memsz - phdr->p_filesz);
		}
		iounmap(ptr);
		size += phdr->p_memsz;

		code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
		if (!code_length) {
			/* First segment: announce start address and readiness */
			boot_addr = relocate ? qproc->mpss_phys : min_addr;
			writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
			writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
		}
		writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

		ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
		if (ret < 0) {
			dev_err(qproc->dev, "MPSS authentication failed: %d\n",
				ret);
			goto release_firmware;
		}
	}

	/* Transfer ownership of modem ddr region to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
				      qproc->mpss_phys, qproc->mpss_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mpss memory failed: %d\n", ret);
		ret = -EAGAIN;
		goto release_firmware;
	}

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);

	qcom_pil_info_store("modem", qproc->mpss_phys, qproc->mpss_size);

release_firmware:
	release_firmware(fw);
out:
	kfree(fw_name);

	return ret < 0 ?
ret : 0; 1252 } 1253 1254 static void qcom_q6v5_dump_segment(struct rproc *rproc, 1255 struct rproc_dump_segment *segment, 1256 void *dest, size_t cp_offset, size_t size) 1257 { 1258 int ret = 0; 1259 struct q6v5 *qproc = rproc->priv; 1260 int offset = segment->da - qproc->mpss_reloc; 1261 void *ptr = NULL; 1262 1263 /* Unlock mba before copying segments */ 1264 if (!qproc->dump_mba_loaded) { 1265 ret = q6v5_reload_mba(rproc); 1266 if (!ret) { 1267 /* Reset ownership back to Linux to copy segments */ 1268 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, 1269 true, false, 1270 qproc->mpss_phys, 1271 qproc->mpss_size); 1272 } 1273 } 1274 1275 if (!ret) 1276 ptr = ioremap_wc(qproc->mpss_phys + offset + cp_offset, size); 1277 1278 if (ptr) { 1279 memcpy(dest, ptr, size); 1280 iounmap(ptr); 1281 } else { 1282 memset(dest, 0xff, size); 1283 } 1284 1285 qproc->current_dump_size += size; 1286 1287 /* Reclaim mba after copying segments */ 1288 if (qproc->current_dump_size == qproc->total_dump_size) { 1289 if (qproc->dump_mba_loaded) { 1290 /* Try to reset ownership back to Q6 */ 1291 q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, 1292 false, true, 1293 qproc->mpss_phys, 1294 qproc->mpss_size); 1295 q6v5_mba_reclaim(qproc); 1296 } 1297 } 1298 } 1299 1300 static int q6v5_start(struct rproc *rproc) 1301 { 1302 struct q6v5 *qproc = (struct q6v5 *)rproc->priv; 1303 int xfermemop_ret; 1304 int ret; 1305 1306 ret = q6v5_mba_load(qproc); 1307 if (ret) 1308 return ret; 1309 1310 dev_info(qproc->dev, "MBA booted with%s debug policy, loading mpss\n", 1311 qproc->dp_size ? 
"" : "out"); 1312 1313 ret = q6v5_mpss_load(qproc); 1314 if (ret) 1315 goto reclaim_mpss; 1316 1317 ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000)); 1318 if (ret == -ETIMEDOUT) { 1319 dev_err(qproc->dev, "start timed out\n"); 1320 goto reclaim_mpss; 1321 } 1322 1323 xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, 1324 false, qproc->mba_phys, 1325 qproc->mba_size); 1326 if (xfermemop_ret) 1327 dev_err(qproc->dev, 1328 "Failed to reclaim mba buffer system may become unstable\n"); 1329 1330 /* Reset Dump Segment Mask */ 1331 qproc->current_dump_size = 0; 1332 1333 return 0; 1334 1335 reclaim_mpss: 1336 q6v5_mba_reclaim(qproc); 1337 q6v5_dump_mba_logs(qproc); 1338 1339 return ret; 1340 } 1341 1342 static int q6v5_stop(struct rproc *rproc) 1343 { 1344 struct q6v5 *qproc = (struct q6v5 *)rproc->priv; 1345 int ret; 1346 1347 ret = qcom_q6v5_request_stop(&qproc->q6v5); 1348 if (ret == -ETIMEDOUT) 1349 dev_err(qproc->dev, "timed out on wait\n"); 1350 1351 q6v5_mba_reclaim(qproc); 1352 1353 return 0; 1354 } 1355 1356 static int qcom_q6v5_register_dump_segments(struct rproc *rproc, 1357 const struct firmware *mba_fw) 1358 { 1359 const struct firmware *fw; 1360 const struct elf32_phdr *phdrs; 1361 const struct elf32_phdr *phdr; 1362 const struct elf32_hdr *ehdr; 1363 struct q6v5 *qproc = rproc->priv; 1364 unsigned long i; 1365 int ret; 1366 1367 ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev); 1368 if (ret < 0) { 1369 dev_err(qproc->dev, "unable to load %s\n", 1370 qproc->hexagon_mdt_image); 1371 return ret; 1372 } 1373 1374 rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE); 1375 1376 ehdr = (struct elf32_hdr *)fw->data; 1377 phdrs = (struct elf32_phdr *)(ehdr + 1); 1378 qproc->total_dump_size = 0; 1379 1380 for (i = 0; i < ehdr->e_phnum; i++) { 1381 phdr = &phdrs[i]; 1382 1383 if (!q6v5_phdr_valid(phdr)) 1384 continue; 1385 1386 ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr, 1387 phdr->p_memsz, 
1388 qcom_q6v5_dump_segment, 1389 NULL); 1390 if (ret) 1391 break; 1392 1393 qproc->total_dump_size += phdr->p_memsz; 1394 } 1395 1396 release_firmware(fw); 1397 return ret; 1398 } 1399 1400 static const struct rproc_ops q6v5_ops = { 1401 .start = q6v5_start, 1402 .stop = q6v5_stop, 1403 .parse_fw = qcom_q6v5_register_dump_segments, 1404 .load = q6v5_load, 1405 }; 1406 1407 static void qcom_msa_handover(struct qcom_q6v5 *q6v5) 1408 { 1409 struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5); 1410 1411 q6v5_clk_disable(qproc->dev, qproc->proxy_clks, 1412 qproc->proxy_clk_count); 1413 q6v5_regulator_disable(qproc, qproc->proxy_regs, 1414 qproc->proxy_reg_count); 1415 q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count); 1416 } 1417 1418 static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev) 1419 { 1420 struct of_phandle_args args; 1421 struct resource *res; 1422 int ret; 1423 1424 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6"); 1425 qproc->reg_base = devm_ioremap_resource(&pdev->dev, res); 1426 if (IS_ERR(qproc->reg_base)) 1427 return PTR_ERR(qproc->reg_base); 1428 1429 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb"); 1430 qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res); 1431 if (IS_ERR(qproc->rmb_base)) 1432 return PTR_ERR(qproc->rmb_base); 1433 1434 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, 1435 "qcom,halt-regs", 3, 0, &args); 1436 if (ret < 0) { 1437 dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n"); 1438 return -EINVAL; 1439 } 1440 1441 qproc->halt_map = syscon_node_to_regmap(args.np); 1442 of_node_put(args.np); 1443 if (IS_ERR(qproc->halt_map)) 1444 return PTR_ERR(qproc->halt_map); 1445 1446 qproc->halt_q6 = args.args[0]; 1447 qproc->halt_modem = args.args[1]; 1448 qproc->halt_nc = args.args[2]; 1449 1450 if (qproc->has_spare_reg) { 1451 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, 1452 "qcom,spare-regs", 1453 1, 0, &args); 1454 if (ret < 0) { 
			dev_err(&pdev->dev, "failed to parse spare-regs\n");
			return -EINVAL;
		}

		qproc->conn_map = syscon_node_to_regmap(args.np);
		of_node_put(args.np);
		if (IS_ERR(qproc->conn_map))
			return PTR_ERR(qproc->conn_map);

		qproc->conn_box = args.args[0];
	}

	return 0;
}

/* Acquire the named clocks listed in the NULL-terminated @clk_names.
 * Returns the number of clocks obtained, or a negative errno.
 */
static int q6v5_init_clocks(struct device *dev, struct clk **clks,
			    char **clk_names)
{
	int i;

	if (!clk_names)
		return 0;

	for (i = 0; clk_names[i]; i++) {
		clks[i] = devm_clk_get(dev, clk_names[i]);
		if (IS_ERR(clks[i])) {
			int rc = PTR_ERR(clks[i]);

			/* Probe deferral is expected; don't spam the log */
			if (rc != -EPROBE_DEFER)
				dev_err(dev, "Failed to get %s clock\n",
					clk_names[i]);
			return rc;
		}
	}

	return i;
}

/* Attach the power domains named in the NULL-terminated @pd_names.
 * Returns the number of domains attached, or a negative errno after
 * detaching any domains attached so far.
 */
static int q6v5_pds_attach(struct device *dev, struct device **devs,
			   char **pd_names)
{
	size_t num_pds = 0;
	int ret;
	int i;

	if (!pd_names)
		return 0;

	while (pd_names[num_pds])
		num_pds++;

	for (i = 0; i < num_pds; i++) {
		devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
		if (IS_ERR_OR_NULL(devs[i])) {
			/* NULL (no such domain) maps to -ENODATA */
			ret = PTR_ERR(devs[i]) ? : -ENODATA;
			goto unroll_attach;
		}
	}

	return num_pds;

unroll_attach:
	for (i--; i >= 0; i--)
		dev_pm_domain_detach(devs[i], false);

	return ret;
}

/* Detach @pd_count previously attached power domains */
static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
			    size_t pd_count)
{
	int i;

	for (i = 0; i < pd_count; i++)
		dev_pm_domain_detach(pds[i], false);
}

/* Acquire the mss_restart reset line, plus pdc_reset on platforms that
 * have an alternate reset or spare register.
 */
static int q6v5_init_reset(struct q6v5 *qproc)
{
	qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
							      "mss_restart");
	if (IS_ERR(qproc->mss_restart)) {
		dev_err(qproc->dev, "failed to acquire mss restart\n");
		return PTR_ERR(qproc->mss_restart);
	}

	if (qproc->has_alt_reset || qproc->has_spare_reg) {
		qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
								    "pdc_reset");
		if (IS_ERR(qproc->pdc_reset)) {
			dev_err(qproc->dev, "failed to acquire pdc reset\n");
			return PTR_ERR(qproc->pdc_reset);
		}
	}

	return 0;
}

/* Resolve the MBA and MPSS reserved-memory regions from the device tree
 * and map the MBA region.
 */
static int q6v5_alloc_memory_region(struct q6v5 *qproc)
{
	struct device_node *child;
	struct device_node *node;
	struct resource r;
	int ret;

	/*
	 * In the absence of mba/mpss sub-child, extract the mba and mpss
	 * reserved memory regions from device's memory-region property.
1563 */ 1564 child = of_get_child_by_name(qproc->dev->of_node, "mba"); 1565 if (!child) 1566 node = of_parse_phandle(qproc->dev->of_node, 1567 "memory-region", 0); 1568 else 1569 node = of_parse_phandle(child, "memory-region", 0); 1570 1571 ret = of_address_to_resource(node, 0, &r); 1572 if (ret) { 1573 dev_err(qproc->dev, "unable to resolve mba region\n"); 1574 return ret; 1575 } 1576 of_node_put(node); 1577 1578 qproc->mba_phys = r.start; 1579 qproc->mba_size = resource_size(&r); 1580 qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size); 1581 if (!qproc->mba_region) { 1582 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", 1583 &r.start, qproc->mba_size); 1584 return -EBUSY; 1585 } 1586 1587 if (!child) { 1588 node = of_parse_phandle(qproc->dev->of_node, 1589 "memory-region", 1); 1590 } else { 1591 child = of_get_child_by_name(qproc->dev->of_node, "mpss"); 1592 node = of_parse_phandle(child, "memory-region", 0); 1593 } 1594 1595 ret = of_address_to_resource(node, 0, &r); 1596 if (ret) { 1597 dev_err(qproc->dev, "unable to resolve mpss region\n"); 1598 return ret; 1599 } 1600 of_node_put(node); 1601 1602 qproc->mpss_phys = qproc->mpss_reloc = r.start; 1603 qproc->mpss_size = resource_size(&r); 1604 1605 return 0; 1606 } 1607 1608 static int q6v5_probe(struct platform_device *pdev) 1609 { 1610 const struct rproc_hexagon_res *desc; 1611 struct q6v5 *qproc; 1612 struct rproc *rproc; 1613 const char *mba_image; 1614 int ret; 1615 1616 desc = of_device_get_match_data(&pdev->dev); 1617 if (!desc) 1618 return -EINVAL; 1619 1620 if (desc->need_mem_protection && !qcom_scm_is_available()) 1621 return -EPROBE_DEFER; 1622 1623 mba_image = desc->hexagon_mba_image; 1624 ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name", 1625 0, &mba_image); 1626 if (ret < 0 && ret != -EINVAL) 1627 return ret; 1628 1629 rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops, 1630 mba_image, sizeof(*qproc)); 1631 if (!rproc) { 1632 
dev_err(&pdev->dev, "failed to allocate rproc\n"); 1633 return -ENOMEM; 1634 } 1635 1636 rproc->auto_boot = false; 1637 rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE); 1638 1639 qproc = (struct q6v5 *)rproc->priv; 1640 qproc->dev = &pdev->dev; 1641 qproc->rproc = rproc; 1642 qproc->hexagon_mdt_image = "modem.mdt"; 1643 ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name", 1644 1, &qproc->hexagon_mdt_image); 1645 if (ret < 0 && ret != -EINVAL) 1646 goto free_rproc; 1647 1648 platform_set_drvdata(pdev, qproc); 1649 1650 qproc->has_spare_reg = desc->has_spare_reg; 1651 ret = q6v5_init_mem(qproc, pdev); 1652 if (ret) 1653 goto free_rproc; 1654 1655 ret = q6v5_alloc_memory_region(qproc); 1656 if (ret) 1657 goto free_rproc; 1658 1659 ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks, 1660 desc->proxy_clk_names); 1661 if (ret < 0) { 1662 dev_err(&pdev->dev, "Failed to get proxy clocks.\n"); 1663 goto free_rproc; 1664 } 1665 qproc->proxy_clk_count = ret; 1666 1667 ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks, 1668 desc->reset_clk_names); 1669 if (ret < 0) { 1670 dev_err(&pdev->dev, "Failed to get reset clocks.\n"); 1671 goto free_rproc; 1672 } 1673 qproc->reset_clk_count = ret; 1674 1675 ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks, 1676 desc->active_clk_names); 1677 if (ret < 0) { 1678 dev_err(&pdev->dev, "Failed to get active clocks.\n"); 1679 goto free_rproc; 1680 } 1681 qproc->active_clk_count = ret; 1682 1683 ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs, 1684 desc->proxy_supply); 1685 if (ret < 0) { 1686 dev_err(&pdev->dev, "Failed to get proxy regulators.\n"); 1687 goto free_rproc; 1688 } 1689 qproc->proxy_reg_count = ret; 1690 1691 ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs, 1692 desc->active_supply); 1693 if (ret < 0) { 1694 dev_err(&pdev->dev, "Failed to get active regulators.\n"); 1695 goto free_rproc; 1696 } 1697 qproc->active_reg_count = ret; 1698 1699 ret = q6v5_pds_attach(&pdev->dev, 
qproc->active_pds, 1700 desc->active_pd_names); 1701 if (ret < 0) { 1702 dev_err(&pdev->dev, "Failed to attach active power domains\n"); 1703 goto free_rproc; 1704 } 1705 qproc->active_pd_count = ret; 1706 1707 ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds, 1708 desc->proxy_pd_names); 1709 if (ret < 0) { 1710 dev_err(&pdev->dev, "Failed to init power domains\n"); 1711 goto detach_active_pds; 1712 } 1713 qproc->proxy_pd_count = ret; 1714 1715 qproc->has_alt_reset = desc->has_alt_reset; 1716 ret = q6v5_init_reset(qproc); 1717 if (ret) 1718 goto detach_proxy_pds; 1719 1720 qproc->version = desc->version; 1721 qproc->need_mem_protection = desc->need_mem_protection; 1722 qproc->has_mba_logs = desc->has_mba_logs; 1723 1724 ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM, 1725 qcom_msa_handover); 1726 if (ret) 1727 goto detach_proxy_pds; 1728 1729 qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS); 1730 qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS); 1731 qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss"); 1732 qcom_add_smd_subdev(rproc, &qproc->smd_subdev); 1733 qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss"); 1734 qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12); 1735 if (IS_ERR(qproc->sysmon)) { 1736 ret = PTR_ERR(qproc->sysmon); 1737 goto remove_subdevs; 1738 } 1739 1740 ret = rproc_add(rproc); 1741 if (ret) 1742 goto remove_sysmon_subdev; 1743 1744 return 0; 1745 1746 remove_sysmon_subdev: 1747 qcom_remove_sysmon_subdev(qproc->sysmon); 1748 remove_subdevs: 1749 qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev); 1750 qcom_remove_smd_subdev(rproc, &qproc->smd_subdev); 1751 qcom_remove_glink_subdev(rproc, &qproc->glink_subdev); 1752 detach_proxy_pds: 1753 q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count); 1754 detach_active_pds: 1755 q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count); 1756 free_rproc: 1757 rproc_free(rproc); 1758 1759 return ret; 1760 } 1761 1762 static int q6v5_remove(struct 
platform_device *pdev)
{
	struct q6v5 *qproc = platform_get_drvdata(pdev);
	struct rproc *rproc = qproc->rproc;

	rproc_del(rproc);

	/* Tear down subdevices in reverse order of their registration */
	qcom_remove_sysmon_subdev(qproc->sysmon);
	qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
	qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);

	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
	q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);

	rproc_free(rproc);

	return 0;
}

/* Per-SoC resource descriptions: firmware name, clock/regulator/power
 * domain lists and feature flags consumed by q6v5_probe().
 */
static const struct rproc_hexagon_res sc7180_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.reset_clk_names = (char*[]){
		"iface",
		"bus",
		"snoc_axi",
		NULL
	},
	.active_clk_names = (char*[]){
		"mnoc_axi",
		"nav",
		NULL
	},
	.active_pd_names = (char*[]){
		"load_state",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		"mss",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_mba_logs = true,
	.has_spare_reg = true,
	.version = MSS_SC7180,
};

static const struct rproc_hexagon_res sdm845_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		"prng",
		NULL
	},
	.reset_clk_names = (char*[]){
		"iface",
		"snoc_axi",
		NULL
	},
	.active_clk_names = (char*[]){
		"bus",
		"mem",
		"gpll0_mss",
		"mnoc_axi",
		NULL
	},
	.active_pd_names = (char*[]){
		"load_state",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		"mss",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = true,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.version = MSS_SDM845,
};

static const struct rproc_hexagon_res msm8998_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		"qdss",
		"mem",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"gpll0_mss",
		"mnoc_axi",
		"snoc_axi",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.version = MSS_MSM8998,
};

static const struct rproc_hexagon_res msm8996_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		"pnoc",
		"qdss",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		"gpll0_mss",
		"snoc_axi",
		"mnoc_axi",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.version = MSS_MSM8996,
};

static const struct rproc_hexagon_res msm8916_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.version = MSS_MSM8916,
};

static const struct rproc_hexagon_res msm8974_mss = {
	.hexagon_mba_image = "mba.b00",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.active_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mss",
			.uV = 1050000,
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.has_mba_logs = false,
	.has_spare_reg = false,
	.version = MSS_MSM8974,
};

static const struct of_device_id q6v5_of_match[] = {
	/* "qcom,q6v5-pil" is the legacy compatible, kept for old DTs */
	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
	{ .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
	{ .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
	{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
	{ },
};
MODULE_DEVICE_TABLE(of, q6v5_of_match);

static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-mss",
		.of_match_table = q6v5_of_match,
	},
};
module_platform_driver(q6v5_driver);

MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
MODULE_LICENSE("GPL v2");