1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Qualcomm self-authenticating modem subsystem remoteproc driver 4 * 5 * Copyright (C) 2016 Linaro Ltd. 6 * Copyright (C) 2014 Sony Mobile Communications AB 7 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. 8 */ 9 10 #include <linux/clk.h> 11 #include <linux/delay.h> 12 #include <linux/dma-mapping.h> 13 #include <linux/interrupt.h> 14 #include <linux/kernel.h> 15 #include <linux/mfd/syscon.h> 16 #include <linux/module.h> 17 #include <linux/of_address.h> 18 #include <linux/of_device.h> 19 #include <linux/platform_device.h> 20 #include <linux/pm_domain.h> 21 #include <linux/pm_runtime.h> 22 #include <linux/regmap.h> 23 #include <linux/regulator/consumer.h> 24 #include <linux/remoteproc.h> 25 #include "linux/remoteproc/qcom_q6v5_ipa_notify.h" 26 #include <linux/reset.h> 27 #include <linux/soc/qcom/mdt_loader.h> 28 #include <linux/iopoll.h> 29 30 #include "remoteproc_internal.h" 31 #include "qcom_common.h" 32 #include "qcom_q6v5.h" 33 34 #include <linux/qcom_scm.h> 35 36 #define MPSS_CRASH_REASON_SMEM 421 37 38 /* RMB Status Register Values */ 39 #define RMB_PBL_SUCCESS 0x1 40 41 #define RMB_MBA_XPU_UNLOCKED 0x1 42 #define RMB_MBA_XPU_UNLOCKED_SCRIBBLED 0x2 43 #define RMB_MBA_META_DATA_AUTH_SUCCESS 0x3 44 #define RMB_MBA_AUTH_COMPLETE 0x4 45 46 /* PBL/MBA interface registers */ 47 #define RMB_MBA_IMAGE_REG 0x00 48 #define RMB_PBL_STATUS_REG 0x04 49 #define RMB_MBA_COMMAND_REG 0x08 50 #define RMB_MBA_STATUS_REG 0x0C 51 #define RMB_PMI_META_DATA_REG 0x10 52 #define RMB_PMI_CODE_START_REG 0x14 53 #define RMB_PMI_CODE_LENGTH_REG 0x18 54 #define RMB_MBA_MSS_STATUS 0x40 55 #define RMB_MBA_ALT_RESET 0x44 56 57 #define RMB_CMD_META_DATA_READY 0x1 58 #define RMB_CMD_LOAD_READY 0x2 59 60 /* QDSP6SS Register Offsets */ 61 #define QDSP6SS_RESET_REG 0x014 62 #define QDSP6SS_GFMUX_CTL_REG 0x020 63 #define QDSP6SS_PWR_CTL_REG 0x030 64 #define QDSP6SS_MEM_PWR_CTL 0x0B0 65 #define QDSP6V6SS_MEM_PWR_CTL 0x034 66 
#define QDSP6SS_STRAP_ACC		0x110

/* AXI Halt Register Offsets */
#define AXI_HALTREQ_REG			0x0
#define AXI_HALTACK_REG			0x4
#define AXI_IDLE_REG			0x8
/* NAV (navigation subsystem) halt uses packed bits in a single register */
#define NAV_AXI_HALTREQ_BIT		BIT(0)
#define NAV_AXI_HALTACK_BIT		BIT(1)
#define NAV_AXI_IDLE_BIT		BIT(2)
#define AXI_GATING_VALID_OVERRIDE	BIT(0)

#define HALT_ACK_TIMEOUT_US		100000
#define NAV_HALT_ACK_TIMEOUT_US		200

/* QDSP6SS_RESET */
#define Q6SS_STOP_CORE			BIT(0)
#define Q6SS_CORE_ARES			BIT(1)
#define Q6SS_BUS_ARES_ENABLE		BIT(2)

/* QDSP6SS CBCR */
#define Q6SS_CBCR_CLKEN			BIT(0)
#define Q6SS_CBCR_CLKOFF		BIT(31)
#define Q6SS_CBCR_TIMEOUT_US		200

/* QDSP6SS_GFMUX_CTL */
#define Q6SS_CLK_ENABLE			BIT(1)

/* QDSP6SS_PWR_CTL */
#define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
#define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
#define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
#define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
#define Q6SS_ETB_SLP_NRET_N		BIT(17)
#define Q6SS_L2DATA_STBY_N		BIT(18)
#define Q6SS_SLP_RET_N			BIT(19)
#define Q6SS_CLAMP_IO			BIT(20)
#define QDSS_BHS_ON			BIT(21)
#define QDSS_LDO_BYP			BIT(22)

/* QDSP6v56 parameters */
#define QDSP6v56_LDO_BYP		BIT(25)
#define QDSP6v56_BHS_ON			BIT(24)
#define QDSP6v56_CLAMP_WL		BIT(21)
#define QDSP6v56_CLAMP_QMC_MEM		BIT(22)
#define QDSP6SS_XO_CBCR			0x0038
#define QDSP6SS_ACC_OVERRIDE_VAL	0x20

/* QDSP6v65 parameters */
#define QDSP6SS_CORE_CBCR		0x20
#define QDSP6SS_SLEEP			0x3C
#define QDSP6SS_BOOT_CORE_START		0x400
#define QDSP6SS_BOOT_CMD		0x404
#define QDSP6SS_BOOT_STATUS		0x408
#define BOOT_STATUS_TIMEOUT_US		200
#define BOOT_FSM_TIMEOUT		10000

/* Runtime state of one regulator supply: handle plus requested vote */
struct reg_info {
	struct regulator *reg;
	int uV;	/* voltage vote; 0 means "no voltage request" */
	int uA;	/* load vote; 0 means "no load request" */
};

/* Static per-SoC description of a regulator supply */
struct qcom_mss_reg_res {
	const char *supply;
	int uV;
	int uA;
};

/*
 * Per-SoC match data: names of the resources (supplies, clocks, power
 * domains) this variant needs, plus feature flags for the boot sequence.
 */
struct rproc_hexagon_res {
	const char *hexagon_mba_image;
	struct qcom_mss_reg_res *proxy_supply;
	struct qcom_mss_reg_res *active_supply;
	char **proxy_clk_names;
	char **reset_clk_names;
	char **active_clk_names;
	char **active_pd_names;
	char **proxy_pd_names;
	int version;	/* one of the MSS_* enum values below */
	bool need_mem_protection;
	bool has_alt_reset;
	bool has_halt_nav;
};

/* Driver instance state */
struct q6v5 {
	struct device *dev;
	struct rproc *rproc;

	void __iomem *reg_base;	/* QDSP6SS register block */
	void __iomem *rmb_base;	/* PBL/MBA "RMB" mailbox registers */

	struct regmap *halt_map;
	struct regmap *halt_nav_map;
	struct regmap *conn_map;

	/* offsets into the halt/conn regmaps for each AXI port */
	u32 halt_q6;
	u32 halt_modem;
	u32 halt_nc;
	u32 halt_nav;
	u32 conn_box;

	struct reset_control *mss_restart;
	struct reset_control *pdc_reset;

	struct qcom_q6v5 q6v5;

	struct clk *active_clks[8];
	struct clk *reset_clks[4];
	struct clk *proxy_clks[4];
	struct device *active_pds[1];
	struct device *proxy_pds[3];
	int active_clk_count;
	int reset_clk_count;
	int proxy_clk_count;
	int active_pd_count;
	int proxy_pd_count;

	struct reg_info active_regs[1];
	struct reg_info proxy_regs[3];
	int active_reg_count;
	int proxy_reg_count;

	bool running;

	/* coredump bookkeeping */
	bool dump_mba_loaded;
	unsigned long dump_segment_mask;
	unsigned long dump_complete_mask;

	/* MBA (modem boot authenticator) carveout */
	phys_addr_t mba_phys;
	void *mba_region;
	size_t mba_size;

	/* MPSS (modem firmware) carveout */
	phys_addr_t mpss_phys;
	phys_addr_t mpss_reloc;
	void *mpss_region;
	size_t mpss_size;

	struct qcom_rproc_glink glink_subdev;
	struct qcom_rproc_subdev smd_subdev;
	struct qcom_rproc_ssr ssr_subdev;
	struct qcom_rproc_ipa_notify ipa_notify_subdev;
	struct qcom_sysmon *sysmon;
	bool need_mem_protection;
	bool has_alt_reset;
	bool has_halt_nav;
	/* current SCM ownership bitmaps for the two carveouts */
	int mpss_perm;
	int mba_perm;
	const char *hexagon_mdt_image;
	int version;
};

/* Supported SoC generations (selects the q6v5proc_reset() sequence) */
enum {
	MSS_MSM8916,
	MSS_MSM8974,
	MSS_MSM8996,
	MSS_MSM8998,
	MSS_SC7180,
	MSS_SDM845,
};
const struct qcom_mss_reg_res *reg_res) 227 { 228 int rc; 229 int i; 230 231 if (!reg_res) 232 return 0; 233 234 for (i = 0; reg_res[i].supply; i++) { 235 regs[i].reg = devm_regulator_get(dev, reg_res[i].supply); 236 if (IS_ERR(regs[i].reg)) { 237 rc = PTR_ERR(regs[i].reg); 238 if (rc != -EPROBE_DEFER) 239 dev_err(dev, "Failed to get %s\n regulator", 240 reg_res[i].supply); 241 return rc; 242 } 243 244 regs[i].uV = reg_res[i].uV; 245 regs[i].uA = reg_res[i].uA; 246 } 247 248 return i; 249 } 250 251 static int q6v5_regulator_enable(struct q6v5 *qproc, 252 struct reg_info *regs, int count) 253 { 254 int ret; 255 int i; 256 257 for (i = 0; i < count; i++) { 258 if (regs[i].uV > 0) { 259 ret = regulator_set_voltage(regs[i].reg, 260 regs[i].uV, INT_MAX); 261 if (ret) { 262 dev_err(qproc->dev, 263 "Failed to request voltage for %d.\n", 264 i); 265 goto err; 266 } 267 } 268 269 if (regs[i].uA > 0) { 270 ret = regulator_set_load(regs[i].reg, 271 regs[i].uA); 272 if (ret < 0) { 273 dev_err(qproc->dev, 274 "Failed to set regulator mode\n"); 275 goto err; 276 } 277 } 278 279 ret = regulator_enable(regs[i].reg); 280 if (ret) { 281 dev_err(qproc->dev, "Regulator enable failed\n"); 282 goto err; 283 } 284 } 285 286 return 0; 287 err: 288 for (; i >= 0; i--) { 289 if (regs[i].uV > 0) 290 regulator_set_voltage(regs[i].reg, 0, INT_MAX); 291 292 if (regs[i].uA > 0) 293 regulator_set_load(regs[i].reg, 0); 294 295 regulator_disable(regs[i].reg); 296 } 297 298 return ret; 299 } 300 301 static void q6v5_regulator_disable(struct q6v5 *qproc, 302 struct reg_info *regs, int count) 303 { 304 int i; 305 306 for (i = 0; i < count; i++) { 307 if (regs[i].uV > 0) 308 regulator_set_voltage(regs[i].reg, 0, INT_MAX); 309 310 if (regs[i].uA > 0) 311 regulator_set_load(regs[i].reg, 0); 312 313 regulator_disable(regs[i].reg); 314 } 315 } 316 317 static int q6v5_clk_enable(struct device *dev, 318 struct clk **clks, int count) 319 { 320 int rc; 321 int i; 322 323 for (i = 0; i < count; i++) { 324 rc = 
clk_prepare_enable(clks[i]); 325 if (rc) { 326 dev_err(dev, "Clock enable failed\n"); 327 goto err; 328 } 329 } 330 331 return 0; 332 err: 333 for (i--; i >= 0; i--) 334 clk_disable_unprepare(clks[i]); 335 336 return rc; 337 } 338 339 static void q6v5_clk_disable(struct device *dev, 340 struct clk **clks, int count) 341 { 342 int i; 343 344 for (i = 0; i < count; i++) 345 clk_disable_unprepare(clks[i]); 346 } 347 348 static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds, 349 size_t pd_count) 350 { 351 int ret; 352 int i; 353 354 for (i = 0; i < pd_count; i++) { 355 dev_pm_genpd_set_performance_state(pds[i], INT_MAX); 356 ret = pm_runtime_get_sync(pds[i]); 357 if (ret < 0) 358 goto unroll_pd_votes; 359 } 360 361 return 0; 362 363 unroll_pd_votes: 364 for (i--; i >= 0; i--) { 365 dev_pm_genpd_set_performance_state(pds[i], 0); 366 pm_runtime_put(pds[i]); 367 } 368 369 return ret; 370 } 371 372 static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds, 373 size_t pd_count) 374 { 375 int i; 376 377 for (i = 0; i < pd_count; i++) { 378 dev_pm_genpd_set_performance_state(pds[i], 0); 379 pm_runtime_put(pds[i]); 380 } 381 } 382 383 static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm, 384 bool local, bool remote, phys_addr_t addr, 385 size_t size) 386 { 387 struct qcom_scm_vmperm next[2]; 388 int perms = 0; 389 390 if (!qproc->need_mem_protection) 391 return 0; 392 393 if (local == !!(*current_perm & BIT(QCOM_SCM_VMID_HLOS)) && 394 remote == !!(*current_perm & BIT(QCOM_SCM_VMID_MSS_MSA))) 395 return 0; 396 397 if (local) { 398 next[perms].vmid = QCOM_SCM_VMID_HLOS; 399 next[perms].perm = QCOM_SCM_PERM_RWX; 400 perms++; 401 } 402 403 if (remote) { 404 next[perms].vmid = QCOM_SCM_VMID_MSS_MSA; 405 next[perms].perm = QCOM_SCM_PERM_RW; 406 perms++; 407 } 408 409 return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K), 410 current_perm, next, perms); 411 } 412 413 static int q6v5_load(struct rproc *rproc, const struct firmware *fw) 414 { 
415 struct q6v5 *qproc = rproc->priv; 416 417 memcpy(qproc->mba_region, fw->data, fw->size); 418 419 return 0; 420 } 421 422 static int q6v5_reset_assert(struct q6v5 *qproc) 423 { 424 int ret; 425 426 if (qproc->has_alt_reset) { 427 reset_control_assert(qproc->pdc_reset); 428 ret = reset_control_reset(qproc->mss_restart); 429 reset_control_deassert(qproc->pdc_reset); 430 } else if (qproc->has_halt_nav) { 431 /* 432 * When the AXI pipeline is being reset with the Q6 modem partly 433 * operational there is possibility of AXI valid signal to 434 * glitch, leading to spurious transactions and Q6 hangs. A work 435 * around is employed by asserting the AXI_GATING_VALID_OVERRIDE 436 * BIT before triggering Q6 MSS reset. Both the HALTREQ and 437 * AXI_GATING_VALID_OVERRIDE are withdrawn post MSS assert 438 * followed by a MSS deassert, while holding the PDC reset. 439 */ 440 reset_control_assert(qproc->pdc_reset); 441 regmap_update_bits(qproc->conn_map, qproc->conn_box, 442 AXI_GATING_VALID_OVERRIDE, 1); 443 regmap_update_bits(qproc->halt_nav_map, qproc->halt_nav, 444 NAV_AXI_HALTREQ_BIT, 0); 445 reset_control_assert(qproc->mss_restart); 446 reset_control_deassert(qproc->pdc_reset); 447 regmap_update_bits(qproc->conn_map, qproc->conn_box, 448 AXI_GATING_VALID_OVERRIDE, 0); 449 ret = reset_control_deassert(qproc->mss_restart); 450 } else { 451 ret = reset_control_assert(qproc->mss_restart); 452 } 453 454 return ret; 455 } 456 457 static int q6v5_reset_deassert(struct q6v5 *qproc) 458 { 459 int ret; 460 461 if (qproc->has_alt_reset) { 462 reset_control_assert(qproc->pdc_reset); 463 writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET); 464 ret = reset_control_reset(qproc->mss_restart); 465 writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET); 466 reset_control_deassert(qproc->pdc_reset); 467 } else if (qproc->has_halt_nav) { 468 ret = reset_control_reset(qproc->mss_restart); 469 } else { 470 ret = reset_control_deassert(qproc->mss_restart); 471 } 472 473 return ret; 474 } 475 476 static 
/*
 * q6v5_rmb_pbl_wait() - poll the RMB PBL status register for up to @ms ms
 *
 * Return: the first non-zero PBL status value, or -ETIMEDOUT.
 */
static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
{
	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
		if (val)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

/*
 * q6v5_rmb_mba_wait() - poll the RMB MBA status register for up to @ms ms
 * @status: specific status value to wait for; 0 means "any non-zero value"
 *
 * The register value is treated as signed: negative values are MBA error
 * codes and terminate the poll immediately.
 *
 * Return: the matching (or negative error) status value, or -ETIMEDOUT.
 */
static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
{

	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
		if (val < 0)
			break;

		if (!status && val)
			break;
		else if (status && val == status)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

/*
 * q6v5proc_reset() - bring the Hexagon core out of reset
 *
 * The power-up sequence differs per SoC generation (qproc->version):
 * SDM845 and SC7180 use a hardware boot FSM; MSM8996/MSM8998 and older
 * parts require a manual headswitch/memory/clamp bring-up. All variants
 * end by waiting for the PBL to report a status.
 *
 * The register write order below is mandated by hardware; do not reorder.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;
	int i;

	if (qproc->version == MSS_SDM845) {
		/* Enable the QDSP6SS sleep clock branch */
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		/* CLKOFF going low confirms the branch clock is running */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* De-assert QDSP6 stop core */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
		/* Trigger boot FSM */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
				val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}

		goto pbl_wait;
	} else if (qproc->version == MSS_SC7180) {
		/* Enable the QDSP6SS sleep clock branch */
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Turn on the XO clock needed for PLL setup */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS XO clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Configure Q6 core CBCR to auto-enable after reset sequence */
		val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR);

		/* De-assert the Q6 stop core signal */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);

		/* Trigger the boot FSM to start the Q6 out-of-reset sequence */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		/* Poll the QDSP6SS_BOOT_STATUS for FSM completion */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_BOOT_STATUS,
					 val, (val & BIT(0)) != 0, 1,
					 BOOT_STATUS_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}
		goto pbl_wait;
	} else if (qproc->version == MSS_MSM8996 ||
		   qproc->version == MSS_MSM8998) {
		int mem_pwr_ctl;

		/* Override the ACC value if required */
		writel(QDSP6SS_ACC_OVERRIDE_VAL,
		       qproc->reg_base + QDSP6SS_STRAP_ACC);

		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* BHS require xo cbcr to be enabled */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		/* Read CLKOFF bit to go low indicating CLK is enabled */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev,
				"xo cbcr enabling timed out (rc:%d)\n", ret);
			return ret;
		}
		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSP6v56_BHS_ON;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		/* Read back to ensure the write posted before the delay */
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);

		/* Put LDO in bypass mode */
		val |= QDSP6v56_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert QDSP6 compiler memory clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert memory peripheral sleep and L2 memory standby */
		val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Turn on L1, L2, ETB and JU memories 1 at a time */
		if (qproc->version == MSS_MSM8996) {
			mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL;
			i = 19;
		} else {
			/* MSS_MSM8998 */
			mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL;
			i = 28;
		}
		val = readl(qproc->reg_base + mem_pwr_ctl);
		for (; i >= 0; i--) {
			val |= BIT(i);
			writel(val, qproc->reg_base + mem_pwr_ctl);
			/*
			 * Read back value to ensure the write is done then
			 * wait for 1us for both memory peripheral and data
			 * array to turn on.
			 */
			val |= readl(qproc->reg_base + mem_pwr_ctl);
			udelay(1);
		}
		/* Remove word line clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_WL;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	} else {
		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);
		/*
		 * Turn on memories. L2 banks should be done individually
		 * to minimize inrush current.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_2;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_1;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_0;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}
	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

pbl_wait:
	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}

/*
 * q6v5proc_halt_axi_port() - force an AXI port idle before reset
 *
 * Skips the request if the port already reports idle; otherwise raises
 * HALTREQ, waits for HALTACK/idle, and logs (but does not fail) if the
 * port never went idle. The port stays halted until the next reset.
 */
static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
				   struct regmap *halt_map,
				   u32 offset)
{
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (!ret && val)
		return;

	/* Assert halt request */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);

	/* Wait for halt */
	regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val,
				 val, 1000, HALT_ACK_TIMEOUT_US);

	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (ret || !val)
		dev_err(qproc->dev, "port failed halt\n");

	/* Clear halt request (port will remain halted until reset) */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}

/*
 * q6v5proc_halt_nav_axi_port() - halt the NAV AXI port
 *
 * Same idea as q6v5proc_halt_axi_port(), but the NAV block packs request,
 * ack and idle as bits of a single register, and the HALTREQ bit is left
 * asserted here (it is withdrawn in q6v5_reset_assert()).
 */
static void q6v5proc_halt_nav_axi_port(struct q6v5 *qproc,
				       struct regmap *halt_map,
				       u32 offset)
{
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset, &val);
	if (!ret && (val & NAV_AXI_IDLE_BIT))
		return;

	/* Assert halt request */
	regmap_update_bits(halt_map, offset, NAV_AXI_HALTREQ_BIT,
			   NAV_AXI_HALTREQ_BIT);

	/* Wait for halt ack*/
	regmap_read_poll_timeout(halt_map, offset, val,
				 (val & NAV_AXI_HALTACK_BIT),
				 5, NAV_HALT_ACK_TIMEOUT_US);

	ret = regmap_read(halt_map, offset, &val);
	if (ret || !(val & NAV_AXI_IDLE_BIT))
		dev_err(qproc->dev, "port failed halt\n");
}
/*
 * q6v5_mpss_init_image() - hand the MPSS metadata (hash segment) to the MBA
 * @fw: the firmware blob containing the mdt header
 *
 * Copies the metadata into a physically contiguous DMA buffer, transfers
 * ownership of that buffer to the modem via SCM, and asks the MBA to
 * authenticate it through the RMB registers. Ownership is always reclaimed
 * (and the buffer freed) before returning, even if authentication failed.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
	/* metadata must be physically contiguous for the modem to read it */
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	void *metadata;
	int mdata_perm;
	int xferop_ret;
	size_t size;
	void *ptr;
	int ret;

	metadata = qcom_mdt_read_metadata(fw, &size);
	if (IS_ERR(metadata))
		return PTR_ERR(metadata);

	ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
	if (!ptr) {
		kfree(metadata);
		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
		return -ENOMEM;
	}

	memcpy(ptr, metadata, size);

	/* Hypervisor mapping to access metadata by modem */
	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true,
				      phys, size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to metadata failed: %d\n", ret);
		ret = -EAGAIN;
		goto free_dma_attrs;
	}

	/* Tell the MBA where the metadata lives and start authentication */
	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	/* Metadata authentication done, remove modem access */
	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false,
					     phys, size);
	if (xferop_ret)
		dev_warn(qproc->dev,
			 "mdt buffer not reclaimed system may become unstable\n");

free_dma_attrs:
	dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
	kfree(metadata);

	/* positive values are RMB status codes, not errors */
	return ret < 0 ? ret : 0;
}
ret : 0; 846 } 847 848 static bool q6v5_phdr_valid(const struct elf32_phdr *phdr) 849 { 850 if (phdr->p_type != PT_LOAD) 851 return false; 852 853 if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH) 854 return false; 855 856 if (!phdr->p_memsz) 857 return false; 858 859 return true; 860 } 861 862 static int q6v5_mba_load(struct q6v5 *qproc) 863 { 864 int ret; 865 int xfermemop_ret; 866 867 qcom_q6v5_prepare(&qproc->q6v5); 868 869 ret = q6v5_pds_enable(qproc, qproc->active_pds, qproc->active_pd_count); 870 if (ret < 0) { 871 dev_err(qproc->dev, "failed to enable active power domains\n"); 872 goto disable_irqs; 873 } 874 875 ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count); 876 if (ret < 0) { 877 dev_err(qproc->dev, "failed to enable proxy power domains\n"); 878 goto disable_active_pds; 879 } 880 881 ret = q6v5_regulator_enable(qproc, qproc->proxy_regs, 882 qproc->proxy_reg_count); 883 if (ret) { 884 dev_err(qproc->dev, "failed to enable proxy supplies\n"); 885 goto disable_proxy_pds; 886 } 887 888 ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks, 889 qproc->proxy_clk_count); 890 if (ret) { 891 dev_err(qproc->dev, "failed to enable proxy clocks\n"); 892 goto disable_proxy_reg; 893 } 894 895 ret = q6v5_regulator_enable(qproc, qproc->active_regs, 896 qproc->active_reg_count); 897 if (ret) { 898 dev_err(qproc->dev, "failed to enable supplies\n"); 899 goto disable_proxy_clk; 900 } 901 902 ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks, 903 qproc->reset_clk_count); 904 if (ret) { 905 dev_err(qproc->dev, "failed to enable reset clocks\n"); 906 goto disable_vdd; 907 } 908 909 ret = q6v5_reset_deassert(qproc); 910 if (ret) { 911 dev_err(qproc->dev, "failed to deassert mss restart\n"); 912 goto disable_reset_clks; 913 } 914 915 ret = q6v5_clk_enable(qproc->dev, qproc->active_clks, 916 qproc->active_clk_count); 917 if (ret) { 918 dev_err(qproc->dev, "failed to enable clocks\n"); 919 goto assert_reset; 920 } 921 922 /* Assign MBA image 
access in DDR to q6 */ 923 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true, 924 qproc->mba_phys, qproc->mba_size); 925 if (ret) { 926 dev_err(qproc->dev, 927 "assigning Q6 access to mba memory failed: %d\n", ret); 928 goto disable_active_clks; 929 } 930 931 writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG); 932 933 ret = q6v5proc_reset(qproc); 934 if (ret) 935 goto reclaim_mba; 936 937 ret = q6v5_rmb_mba_wait(qproc, 0, 5000); 938 if (ret == -ETIMEDOUT) { 939 dev_err(qproc->dev, "MBA boot timed out\n"); 940 goto halt_axi_ports; 941 } else if (ret != RMB_MBA_XPU_UNLOCKED && 942 ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) { 943 dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret); 944 ret = -EINVAL; 945 goto halt_axi_ports; 946 } 947 948 qproc->dump_mba_loaded = true; 949 return 0; 950 951 halt_axi_ports: 952 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6); 953 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem); 954 if (qproc->has_halt_nav) 955 q6v5proc_halt_nav_axi_port(qproc, qproc->halt_nav_map, 956 qproc->halt_nav); 957 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc); 958 959 reclaim_mba: 960 xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, 961 false, qproc->mba_phys, 962 qproc->mba_size); 963 if (xfermemop_ret) { 964 dev_err(qproc->dev, 965 "Failed to reclaim mba buffer, system may become unstable\n"); 966 } 967 968 disable_active_clks: 969 q6v5_clk_disable(qproc->dev, qproc->active_clks, 970 qproc->active_clk_count); 971 assert_reset: 972 q6v5_reset_assert(qproc); 973 disable_reset_clks: 974 q6v5_clk_disable(qproc->dev, qproc->reset_clks, 975 qproc->reset_clk_count); 976 disable_vdd: 977 q6v5_regulator_disable(qproc, qproc->active_regs, 978 qproc->active_reg_count); 979 disable_proxy_clk: 980 q6v5_clk_disable(qproc->dev, qproc->proxy_clks, 981 qproc->proxy_clk_count); 982 disable_proxy_reg: 983 q6v5_regulator_disable(qproc, qproc->proxy_regs, 984 
qproc->proxy_reg_count); 985 disable_proxy_pds: 986 q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count); 987 disable_active_pds: 988 q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count); 989 disable_irqs: 990 qcom_q6v5_unprepare(&qproc->q6v5); 991 992 return ret; 993 } 994 995 static void q6v5_mba_reclaim(struct q6v5 *qproc) 996 { 997 int ret; 998 u32 val; 999 1000 qproc->dump_mba_loaded = false; 1001 1002 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6); 1003 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem); 1004 if (qproc->has_halt_nav) 1005 q6v5proc_halt_nav_axi_port(qproc, qproc->halt_nav_map, 1006 qproc->halt_nav); 1007 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc); 1008 if (qproc->version == MSS_MSM8996) { 1009 /* 1010 * To avoid high MX current during LPASS/MSS restart. 1011 */ 1012 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 1013 val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL | 1014 QDSP6v56_CLAMP_QMC_MEM; 1015 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 1016 } 1017 1018 q6v5_reset_assert(qproc); 1019 1020 q6v5_clk_disable(qproc->dev, qproc->reset_clks, 1021 qproc->reset_clk_count); 1022 q6v5_clk_disable(qproc->dev, qproc->active_clks, 1023 qproc->active_clk_count); 1024 q6v5_regulator_disable(qproc, qproc->active_regs, 1025 qproc->active_reg_count); 1026 q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count); 1027 1028 /* In case of failure or coredump scenario where reclaiming MBA memory 1029 * could not happen reclaim it here. 
1030 */ 1031 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, 1032 qproc->mba_phys, 1033 qproc->mba_size); 1034 WARN_ON(ret); 1035 1036 ret = qcom_q6v5_unprepare(&qproc->q6v5); 1037 if (ret) { 1038 q6v5_pds_disable(qproc, qproc->proxy_pds, 1039 qproc->proxy_pd_count); 1040 q6v5_clk_disable(qproc->dev, qproc->proxy_clks, 1041 qproc->proxy_clk_count); 1042 q6v5_regulator_disable(qproc, qproc->proxy_regs, 1043 qproc->proxy_reg_count); 1044 } 1045 } 1046 1047 static int q6v5_reload_mba(struct rproc *rproc) 1048 { 1049 struct q6v5 *qproc = rproc->priv; 1050 const struct firmware *fw; 1051 int ret; 1052 1053 ret = request_firmware(&fw, rproc->firmware, qproc->dev); 1054 if (ret < 0) 1055 return ret; 1056 1057 q6v5_load(rproc, fw); 1058 ret = q6v5_mba_load(qproc); 1059 release_firmware(fw); 1060 1061 return ret; 1062 } 1063 1064 static int q6v5_mpss_load(struct q6v5 *qproc) 1065 { 1066 const struct elf32_phdr *phdrs; 1067 const struct elf32_phdr *phdr; 1068 const struct firmware *seg_fw; 1069 const struct firmware *fw; 1070 struct elf32_hdr *ehdr; 1071 phys_addr_t mpss_reloc; 1072 phys_addr_t boot_addr; 1073 phys_addr_t min_addr = PHYS_ADDR_MAX; 1074 phys_addr_t max_addr = 0; 1075 u32 code_length; 1076 bool relocate = false; 1077 char *fw_name; 1078 size_t fw_name_len; 1079 ssize_t offset; 1080 size_t size = 0; 1081 void *ptr; 1082 int ret; 1083 int i; 1084 1085 fw_name_len = strlen(qproc->hexagon_mdt_image); 1086 if (fw_name_len <= 4) 1087 return -EINVAL; 1088 1089 fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL); 1090 if (!fw_name) 1091 return -ENOMEM; 1092 1093 ret = request_firmware(&fw, fw_name, qproc->dev); 1094 if (ret < 0) { 1095 dev_err(qproc->dev, "unable to load %s\n", fw_name); 1096 goto out; 1097 } 1098 1099 /* Initialize the RMB validator */ 1100 writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); 1101 1102 ret = q6v5_mpss_init_image(qproc, fw); 1103 if (ret) 1104 goto release_firmware; 1105 1106 ehdr = (struct elf32_hdr 
*)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);

	/*
	 * First pass over the program headers: detect whether the image is
	 * relocatable and compute the covered physical address range.
	 */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
			relocate = true;

		if (phdr->p_paddr < min_addr)
			min_addr = phdr->p_paddr;

		if (phdr->p_paddr + phdr->p_memsz > max_addr)
			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
	}

	/**
	 * In case of a modem subsystem restart on secure devices, the modem
	 * memory can be reclaimed only after MBA is loaded. For modem cold
	 * boot this will be a nop
	 */
	q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false,
				qproc->mpss_phys, qproc->mpss_size);

	/* Share ownership between Linux and MSS, during segment loading */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true,
				      qproc->mpss_phys, qproc->mpss_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mpss memory failed: %d\n", ret);
		ret = -EAGAIN;
		goto release_firmware;
	}

	/* Relocatable images are rebased to the start of the mpss region */
	mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
	qproc->mpss_reloc = mpss_reloc;
	/* Load firmware segments */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		offset = phdr->p_paddr - mpss_reloc;
		if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
			dev_err(qproc->dev, "segment outside memory range\n");
			ret = -EINVAL;
			goto release_firmware;
		}

		ptr = qproc->mpss_region + offset;

		if (phdr->p_filesz && phdr->p_offset < fw->size) {
			/* Firmware is large enough to be non-split */
			if (phdr->p_offset + phdr->p_filesz > fw->size) {
				dev_err(qproc->dev,
					"failed to load segment %d from truncated file %s\n",
					i, fw_name);
				ret = -EINVAL;
				goto release_firmware;
			}

			memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
		} else if (phdr->p_filesz) {
			/* Replace "xxx.xxx" with "xxx.bxx" */
			sprintf(fw_name + fw_name_len - 3, "b%02d", i);
			ret = request_firmware(&seg_fw, fw_name, qproc->dev);
			if (ret) {
				dev_err(qproc->dev, "failed to load %s\n", fw_name);
				goto release_firmware;
			}

			memcpy(ptr, seg_fw->data, seg_fw->size);

			release_firmware(seg_fw);
		}

		/* Zero-fill any bss-like tail beyond the file contents */
		if (phdr->p_memsz > phdr->p_filesz) {
			memset(ptr + phdr->p_filesz, 0,
			       phdr->p_memsz - phdr->p_filesz);
		}
		size += phdr->p_memsz;

		/*
		 * A zero code length means this is the first segment handed
		 * to the validator; tell the MBA where the image starts.
		 */
		code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
		if (!code_length) {
			boot_addr = relocate ? qproc->mpss_phys : min_addr;
			writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
			writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
		}
		writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

		/* MBA reports authentication errors as negative status values */
		ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
		if (ret < 0) {
			dev_err(qproc->dev, "MPSS authentication failed: %d\n",
				ret);
			goto release_firmware;
		}
	}

	/* Transfer ownership of modem ddr region to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
				      qproc->mpss_phys, qproc->mpss_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mpss memory failed: %d\n", ret);
		ret = -EAGAIN;
		goto release_firmware;
	}

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);

release_firmware:
	release_firmware(fw);
out:
	kfree(fw_name);

	/* Positive values from the status register are not errors */
	return ret < 0 ?
 ret : 0;
}

/*
 * Coredump callback: copy one registered segment into the dump buffer.
 * The MBA must be (re)loaded first so Linux can regain access to the
 * protected modem memory; segments that cannot be read are filled with 0xff.
 */
static void qcom_q6v5_dump_segment(struct rproc *rproc,
				   struct rproc_dump_segment *segment,
				   void *dest)
{
	int ret = 0;
	struct q6v5 *qproc = rproc->priv;
	/* segment->priv carries the program-header index (see register path) */
	unsigned long mask = BIT((unsigned long)segment->priv);
	void *ptr = rproc_da_to_va(rproc, segment->da, segment->size);

	/* Unlock mba before copying segments */
	if (!qproc->dump_mba_loaded) {
		ret = q6v5_reload_mba(rproc);
		if (!ret) {
			/* Reset ownership back to Linux to copy segments */
			ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						      true, false,
						      qproc->mpss_phys,
						      qproc->mpss_size);
		}
	}

	if (!ptr || ret)
		memset(dest, 0xff, segment->size);
	else
		memcpy(dest, ptr, segment->size);

	qproc->dump_segment_mask |= mask;

	/* Reclaim mba after copying segments */
	if (qproc->dump_segment_mask == qproc->dump_complete_mask) {
		if (qproc->dump_mba_loaded) {
			/* Try to reset ownership back to Q6 */
			q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						false, true,
						qproc->mpss_phys,
						qproc->mpss_size);
			q6v5_mba_reclaim(qproc);
		}
	}
}

/*
 * rproc .start op: boot the MBA, load/authenticate the MPSS image and wait
 * for the modem to signal it has started.
 */
static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int xfermemop_ret;
	int ret;

	ret = q6v5_mba_load(qproc);
	if (ret)
		return ret;

	dev_info(qproc->dev, "MBA booted, loading mpss\n");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto reclaim_mpss;

	ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "start timed out\n");
		goto reclaim_mpss;
	}

	/* The MBA buffer is no longer needed once the modem is running */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						false, qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret)
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer system may become unstable\n");
	/* Reset Dump Segment Mask */
	qproc->dump_segment_mask = 0;
	qproc->running = true;

	return 0;

reclaim_mpss:
	q6v5_mba_reclaim(qproc);

	return ret;
}

/*
 * rproc .stop op: request a graceful shutdown from the modem, then tear
 * down the MBA and release resources. Always reports success.
 */
static int q6v5_stop(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int ret;

	qproc->running = false;

	ret = qcom_q6v5_request_stop(&qproc->q6v5);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "timed out on wait\n");

	q6v5_mba_reclaim(qproc);

	return 0;
}

/*
 * rproc .da_to_va op: translate a modem device address into a kernel
 * virtual address within the mapped mpss region, or NULL if out of range.
 */
static void *q6v5_da_to_va(struct rproc *rproc, u64 da, size_t len)
{
	struct q6v5 *qproc = rproc->priv;
	int offset;

	offset = da - qproc->mpss_reloc;
	if (offset < 0 || offset + len > qproc->mpss_size)
		return NULL;

	return qproc->mpss_region + offset;
}

/*
 * rproc .parse_fw op: parse the modem mdt and register each valid ELF
 * segment as a custom coredump segment, keyed by its program-header index.
 * Note: mba_fw (the image rproc just fetched) is unused; the modem mdt is
 * requested separately here.
 */
static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
					    const struct firmware *mba_fw)
{
	const struct firmware *fw;
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct elf32_hdr *ehdr;
	struct q6v5 *qproc = rproc->priv;
	unsigned long i;
	int ret;

	ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load %s\n",
			qproc->hexagon_mdt_image);
		return ret;
	}

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);
	qproc->dump_complete_mask = 0;

	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		/* Index is smuggled through the opaque priv pointer */
		ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
							phdr->p_memsz,
							qcom_q6v5_dump_segment,
							(void *)i);
		if (ret)
			break;

		qproc->dump_complete_mask |= BIT(i);
	}

	release_firmware(fw);
	return ret;
}

/* remoteproc operations for the modem subsystem */
static const struct rproc_ops q6v5_ops = {
	.start =
q6v5_start,
	.stop = q6v5_stop,
	.da_to_va = q6v5_da_to_va,
	.parse_fw = qcom_q6v5_register_dump_segments,
	.load = q6v5_load,
};

/*
 * Handover callback: the modem has taken over its proxy resources, so
 * Linux can drop its proxy votes (clocks, regulators, power domains).
 */
static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
{
	struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);

	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
}

/*
 * Map the QDSP6 and RMB register spaces and resolve the syscon-based halt
 * (and, where present, halt-nav) register handles from the device tree.
 */
static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
{
	struct of_phandle_args args;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
	qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->reg_base))
		return PTR_ERR(qproc->reg_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
	qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->rmb_base))
		return PTR_ERR(qproc->rmb_base);

	/* qcom,halt-regs = <&syscon q6_off modem_off nc_off> */
	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
					       "qcom,halt-regs", 3, 0, &args);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
		return -EINVAL;
	}

	qproc->halt_map = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(qproc->halt_map))
		return PTR_ERR(qproc->halt_map);

	qproc->halt_q6 = args.args[0];
	qproc->halt_modem = args.args[1];
	qproc->halt_nc = args.args[2];

	if (qproc->has_halt_nav) {
		struct platform_device *nav_pdev;

		/* First halt-nav entry: regmap provided by another device */
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,halt-nav-regs",
						       1, 0, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse halt-nav-regs\n");
			return -EINVAL;
		}

		nav_pdev = of_find_device_by_node(args.np);
		of_node_put(args.np);
		if (!nav_pdev) {
			/* Provider not probed yet; retry this probe later */
			dev_err(&pdev->dev, "failed to get mss clock device\n");
			return -EPROBE_DEFER;
		}

		qproc->halt_nav_map = dev_get_regmap(&nav_pdev->dev, NULL);
		if (!qproc->halt_nav_map) {
			dev_err(&pdev->dev, "failed to get map from device\n");
			return -EINVAL;
		}
		qproc->halt_nav = args.args[0];

		/* Second halt-nav entry: a syscon for the conn box */
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,halt-nav-regs",
						       1, 1, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse halt-nav-regs\n");
			return -EINVAL;
		}

		qproc->conn_map = syscon_node_to_regmap(args.np);
		of_node_put(args.np);
		if (IS_ERR(qproc->conn_map))
			return PTR_ERR(qproc->conn_map);

		qproc->conn_box = args.args[0];
	}

	return 0;
}

/*
 * Look up each named clock in the NULL-terminated clk_names list.
 * Returns the number of clocks acquired, or a negative errno.
 */
static int q6v5_init_clocks(struct device *dev, struct clk **clks,
			    char **clk_names)
{
	int i;

	if (!clk_names)
		return 0;

	for (i = 0; clk_names[i]; i++) {
		clks[i] = devm_clk_get(dev, clk_names[i]);
		if (IS_ERR(clks[i])) {
			int rc = PTR_ERR(clks[i]);

			/* Deferral is expected; only log real failures */
			if (rc != -EPROBE_DEFER)
				dev_err(dev, "Failed to get %s clock\n",
					clk_names[i]);
			return rc;
		}
	}

	return i;
}

/*
 * Attach each named power domain in the NULL-terminated pd_names list.
 * Returns the number attached, or a negative errno after detaching any
 * domains already acquired.
 */
static int q6v5_pds_attach(struct device *dev, struct device **devs,
			   char **pd_names)
{
	size_t num_pds = 0;
	int ret;
	int i;

	if (!pd_names)
		return 0;

	while (pd_names[num_pds])
		num_pds++;

	for (i = 0; i < num_pds; i++) {
		devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
		if (IS_ERR_OR_NULL(devs[i])) {
			/* NULL (no PD) maps to -ENODATA via the elvis op */
			ret = PTR_ERR(devs[i]) ?
: -ENODATA; 1519 goto unroll_attach; 1520 } 1521 } 1522 1523 return num_pds; 1524 1525 unroll_attach: 1526 for (i--; i >= 0; i--) 1527 dev_pm_domain_detach(devs[i], false); 1528 1529 return ret; 1530 } 1531 1532 static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds, 1533 size_t pd_count) 1534 { 1535 int i; 1536 1537 for (i = 0; i < pd_count; i++) 1538 dev_pm_domain_detach(pds[i], false); 1539 } 1540 1541 static int q6v5_init_reset(struct q6v5 *qproc) 1542 { 1543 qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev, 1544 "mss_restart"); 1545 if (IS_ERR(qproc->mss_restart)) { 1546 dev_err(qproc->dev, "failed to acquire mss restart\n"); 1547 return PTR_ERR(qproc->mss_restart); 1548 } 1549 1550 if (qproc->has_alt_reset || qproc->has_halt_nav) { 1551 qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev, 1552 "pdc_reset"); 1553 if (IS_ERR(qproc->pdc_reset)) { 1554 dev_err(qproc->dev, "failed to acquire pdc reset\n"); 1555 return PTR_ERR(qproc->pdc_reset); 1556 } 1557 } 1558 1559 return 0; 1560 } 1561 1562 static int q6v5_alloc_memory_region(struct q6v5 *qproc) 1563 { 1564 struct device_node *child; 1565 struct device_node *node; 1566 struct resource r; 1567 int ret; 1568 1569 child = of_get_child_by_name(qproc->dev->of_node, "mba"); 1570 node = of_parse_phandle(child, "memory-region", 0); 1571 ret = of_address_to_resource(node, 0, &r); 1572 if (ret) { 1573 dev_err(qproc->dev, "unable to resolve mba region\n"); 1574 return ret; 1575 } 1576 of_node_put(node); 1577 1578 qproc->mba_phys = r.start; 1579 qproc->mba_size = resource_size(&r); 1580 qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size); 1581 if (!qproc->mba_region) { 1582 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", 1583 &r.start, qproc->mba_size); 1584 return -EBUSY; 1585 } 1586 1587 child = of_get_child_by_name(qproc->dev->of_node, "mpss"); 1588 node = of_parse_phandle(child, "memory-region", 0); 1589 ret = 
of_address_to_resource(node, 0, &r); 1590 if (ret) { 1591 dev_err(qproc->dev, "unable to resolve mpss region\n"); 1592 return ret; 1593 } 1594 of_node_put(node); 1595 1596 qproc->mpss_phys = qproc->mpss_reloc = r.start; 1597 qproc->mpss_size = resource_size(&r); 1598 qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size); 1599 if (!qproc->mpss_region) { 1600 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", 1601 &r.start, qproc->mpss_size); 1602 return -EBUSY; 1603 } 1604 1605 return 0; 1606 } 1607 1608 #if IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) 1609 1610 /* Register IPA notification function */ 1611 int qcom_register_ipa_notify(struct rproc *rproc, qcom_ipa_notify_t notify, 1612 void *data) 1613 { 1614 struct qcom_rproc_ipa_notify *ipa_notify; 1615 struct q6v5 *qproc = rproc->priv; 1616 1617 if (!notify) 1618 return -EINVAL; 1619 1620 ipa_notify = &qproc->ipa_notify_subdev; 1621 if (ipa_notify->notify) 1622 return -EBUSY; 1623 1624 ipa_notify->notify = notify; 1625 ipa_notify->data = data; 1626 1627 return 0; 1628 } 1629 EXPORT_SYMBOL_GPL(qcom_register_ipa_notify); 1630 1631 /* Deregister IPA notification function */ 1632 void qcom_deregister_ipa_notify(struct rproc *rproc) 1633 { 1634 struct q6v5 *qproc = rproc->priv; 1635 1636 qproc->ipa_notify_subdev.notify = NULL; 1637 } 1638 EXPORT_SYMBOL_GPL(qcom_deregister_ipa_notify); 1639 #endif /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */ 1640 1641 static int q6v5_probe(struct platform_device *pdev) 1642 { 1643 const struct rproc_hexagon_res *desc; 1644 struct q6v5 *qproc; 1645 struct rproc *rproc; 1646 const char *mba_image; 1647 int ret; 1648 1649 desc = of_device_get_match_data(&pdev->dev); 1650 if (!desc) 1651 return -EINVAL; 1652 1653 if (desc->need_mem_protection && !qcom_scm_is_available()) 1654 return -EPROBE_DEFER; 1655 1656 mba_image = desc->hexagon_mba_image; 1657 ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name", 1658 0, &mba_image); 1659 if 
 (ret < 0 && ret != -EINVAL)
		return ret;

	rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
			    mba_image, sizeof(*qproc));
	if (!rproc) {
		dev_err(&pdev->dev, "failed to allocate rproc\n");
		return -ENOMEM;
	}

	rproc->auto_boot = false;

	qproc = (struct q6v5 *)rproc->priv;
	qproc->dev = &pdev->dev;
	qproc->rproc = rproc;
	/* firmware-name[1] may override the default modem mdt name */
	qproc->hexagon_mdt_image = "modem.mdt";
	ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
					    1, &qproc->hexagon_mdt_image);
	if (ret < 0 && ret != -EINVAL)
		goto free_rproc;

	platform_set_drvdata(pdev, qproc);

	qproc->has_halt_nav = desc->has_halt_nav;
	ret = q6v5_init_mem(qproc, pdev);
	if (ret)
		goto free_rproc;

	ret = q6v5_alloc_memory_region(qproc);
	if (ret)
		goto free_rproc;

	ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
			       desc->proxy_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
		goto free_rproc;
	}
	qproc->proxy_clk_count = ret;

	ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
			       desc->reset_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get reset clocks.\n");
		goto free_rproc;
	}
	qproc->reset_clk_count = ret;

	ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
			       desc->active_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active clocks.\n");
		goto free_rproc;
	}
	qproc->active_clk_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
				  desc->proxy_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
		goto free_rproc;
	}
	qproc->proxy_reg_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs,
				  desc->active_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active regulators.\n");
		goto free_rproc;
	}
	qproc->active_reg_count = ret;

	ret = q6v5_pds_attach(&pdev->dev, qproc->active_pds,
			      desc->active_pd_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to attach active power domains\n");
		goto free_rproc;
	}
	qproc->active_pd_count = ret;

	ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
			      desc->proxy_pd_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to init power domains\n");
		goto detach_active_pds;
	}
	qproc->proxy_pd_count = ret;

	qproc->has_alt_reset = desc->has_alt_reset;
	ret = q6v5_init_reset(qproc);
	if (ret)
		goto detach_proxy_pds;

	qproc->version = desc->version;
	qproc->need_mem_protection = desc->need_mem_protection;

	ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
			     qcom_msa_handover);
	if (ret)
		goto detach_proxy_pds;

	/* Both regions start out owned by Linux (HLOS) */
	qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
	qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
	qcom_add_glink_subdev(rproc, &qproc->glink_subdev);
	qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
	qcom_add_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev);
	qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
	if (IS_ERR(qproc->sysmon)) {
		ret = PTR_ERR(qproc->sysmon);
		goto remove_subdevs;
	}

	ret = rproc_add(rproc);
	if (ret)
		goto remove_sysmon_subdev;

	return 0;

remove_sysmon_subdev:
	qcom_remove_sysmon_subdev(qproc->sysmon);
remove_subdevs:
	qcom_remove_ipa_notify_subdev(qproc->rproc, &qproc->ipa_notify_subdev);
	qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
	qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
detach_proxy_pds:
	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
detach_active_pds:
	q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
free_rproc:
	rproc_free(rproc);

	return ret;
}

/* Remove: unwind everything q6v5_probe() set up, in reverse order */
static int q6v5_remove(struct platform_device *pdev)
{
	struct q6v5 *qproc = platform_get_drvdata(pdev);
	struct rproc *rproc = qproc->rproc;

	rproc_del(rproc);

	qcom_remove_sysmon_subdev(qproc->sysmon);
	qcom_remove_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev);
	qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
	qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);

	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
	q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);

	rproc_free(rproc);

	return 0;
}

/* SC7180: secure boot, PDC/halt-nav handling, no alt reset */
static const struct rproc_hexagon_res sc7180_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.reset_clk_names = (char*[]){
		"iface",
		"bus",
		"snoc_axi",
		NULL
	},
	.active_clk_names = (char*[]){
		"mnoc_axi",
		"nav",
		"mss_nav",
		"mss_crypto",
		NULL
	},
	.active_pd_names = (char*[]){
		"load_state",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		"mss",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_halt_nav = true,
	.version = MSS_SC7180,
};

/* SDM845: secure boot with alternate-reset boot FSM */
static const struct rproc_hexagon_res sdm845_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		"prng",
		NULL
	},
	.reset_clk_names = (char*[]){
		"iface",
		"snoc_axi",
		NULL
	},
	.active_clk_names = (char*[]){
		"bus",
		"mem",
		"gpll0_mss",
		"mnoc_axi",
		NULL
	},
	.active_pd_names = (char*[]){
		"load_state",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
1877 "mss", 1878 NULL 1879 }, 1880 .need_mem_protection = true, 1881 .has_alt_reset = true, 1882 .has_halt_nav = false, 1883 .version = MSS_SDM845, 1884 }; 1885 1886 static const struct rproc_hexagon_res msm8998_mss = { 1887 .hexagon_mba_image = "mba.mbn", 1888 .proxy_clk_names = (char*[]){ 1889 "xo", 1890 "qdss", 1891 "mem", 1892 NULL 1893 }, 1894 .active_clk_names = (char*[]){ 1895 "iface", 1896 "bus", 1897 "gpll0_mss", 1898 "mnoc_axi", 1899 "snoc_axi", 1900 NULL 1901 }, 1902 .proxy_pd_names = (char*[]){ 1903 "cx", 1904 "mx", 1905 NULL 1906 }, 1907 .need_mem_protection = true, 1908 .has_alt_reset = false, 1909 .has_halt_nav = false, 1910 .version = MSS_MSM8998, 1911 }; 1912 1913 static const struct rproc_hexagon_res msm8996_mss = { 1914 .hexagon_mba_image = "mba.mbn", 1915 .proxy_supply = (struct qcom_mss_reg_res[]) { 1916 { 1917 .supply = "pll", 1918 .uA = 100000, 1919 }, 1920 {} 1921 }, 1922 .proxy_clk_names = (char*[]){ 1923 "xo", 1924 "pnoc", 1925 "qdss", 1926 NULL 1927 }, 1928 .active_clk_names = (char*[]){ 1929 "iface", 1930 "bus", 1931 "mem", 1932 "gpll0_mss", 1933 "snoc_axi", 1934 "mnoc_axi", 1935 NULL 1936 }, 1937 .need_mem_protection = true, 1938 .has_alt_reset = false, 1939 .has_halt_nav = false, 1940 .version = MSS_MSM8996, 1941 }; 1942 1943 static const struct rproc_hexagon_res msm8916_mss = { 1944 .hexagon_mba_image = "mba.mbn", 1945 .proxy_supply = (struct qcom_mss_reg_res[]) { 1946 { 1947 .supply = "mx", 1948 .uV = 1050000, 1949 }, 1950 { 1951 .supply = "cx", 1952 .uA = 100000, 1953 }, 1954 { 1955 .supply = "pll", 1956 .uA = 100000, 1957 }, 1958 {} 1959 }, 1960 .proxy_clk_names = (char*[]){ 1961 "xo", 1962 NULL 1963 }, 1964 .active_clk_names = (char*[]){ 1965 "iface", 1966 "bus", 1967 "mem", 1968 NULL 1969 }, 1970 .need_mem_protection = false, 1971 .has_alt_reset = false, 1972 .has_halt_nav = false, 1973 .version = MSS_MSM8916, 1974 }; 1975 1976 static const struct rproc_hexagon_res msm8974_mss = { 1977 .hexagon_mba_image = "mba.b00", 1978 
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.active_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mss",
			.uV = 1050000,
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.has_halt_nav = false,
	.version = MSS_MSM8974,
};

static const struct of_device_id q6v5_of_match[] = {
	/* "qcom,q6v5-pil" is the legacy compatible, kept for old DTs */
	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
	{ .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
	{ .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
	{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
	{ },
};
MODULE_DEVICE_TABLE(of, q6v5_of_match);

static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-mss",
		.of_match_table = q6v5_of_match,
	},
};
module_platform_driver(q6v5_driver);

MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
MODULE_LICENSE("GPL v2");