1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Qualcomm self-authenticating modem subsystem remoteproc driver 4 * 5 * Copyright (C) 2016 Linaro Ltd. 6 * Copyright (C) 2014 Sony Mobile Communications AB 7 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. 8 */ 9 10 #include <linux/clk.h> 11 #include <linux/delay.h> 12 #include <linux/dma-mapping.h> 13 #include <linux/interrupt.h> 14 #include <linux/kernel.h> 15 #include <linux/mfd/syscon.h> 16 #include <linux/module.h> 17 #include <linux/of_address.h> 18 #include <linux/of_device.h> 19 #include <linux/platform_device.h> 20 #include <linux/pm_domain.h> 21 #include <linux/pm_runtime.h> 22 #include <linux/regmap.h> 23 #include <linux/regulator/consumer.h> 24 #include <linux/remoteproc.h> 25 #include <linux/reset.h> 26 #include <linux/soc/qcom/mdt_loader.h> 27 #include <linux/iopoll.h> 28 29 #include "remoteproc_internal.h" 30 #include "qcom_common.h" 31 #include "qcom_q6v5.h" 32 33 #include <linux/qcom_scm.h> 34 35 #define MPSS_CRASH_REASON_SMEM 421 36 37 /* RMB Status Register Values */ 38 #define RMB_PBL_SUCCESS 0x1 39 40 #define RMB_MBA_XPU_UNLOCKED 0x1 41 #define RMB_MBA_XPU_UNLOCKED_SCRIBBLED 0x2 42 #define RMB_MBA_META_DATA_AUTH_SUCCESS 0x3 43 #define RMB_MBA_AUTH_COMPLETE 0x4 44 45 /* PBL/MBA interface registers */ 46 #define RMB_MBA_IMAGE_REG 0x00 47 #define RMB_PBL_STATUS_REG 0x04 48 #define RMB_MBA_COMMAND_REG 0x08 49 #define RMB_MBA_STATUS_REG 0x0C 50 #define RMB_PMI_META_DATA_REG 0x10 51 #define RMB_PMI_CODE_START_REG 0x14 52 #define RMB_PMI_CODE_LENGTH_REG 0x18 53 #define RMB_MBA_MSS_STATUS 0x40 54 #define RMB_MBA_ALT_RESET 0x44 55 56 #define RMB_CMD_META_DATA_READY 0x1 57 #define RMB_CMD_LOAD_READY 0x2 58 59 /* QDSP6SS Register Offsets */ 60 #define QDSP6SS_RESET_REG 0x014 61 #define QDSP6SS_GFMUX_CTL_REG 0x020 62 #define QDSP6SS_PWR_CTL_REG 0x030 63 #define QDSP6SS_MEM_PWR_CTL 0x0B0 64 #define QDSP6V6SS_MEM_PWR_CTL 0x034 65 #define QDSP6SS_STRAP_ACC 0x110 66 67 /* AXI Halt 
Register Offsets */ 68 #define AXI_HALTREQ_REG 0x0 69 #define AXI_HALTACK_REG 0x4 70 #define AXI_IDLE_REG 0x8 71 #define NAV_AXI_HALTREQ_BIT BIT(0) 72 #define NAV_AXI_HALTACK_BIT BIT(1) 73 #define NAV_AXI_IDLE_BIT BIT(2) 74 #define AXI_GATING_VALID_OVERRIDE BIT(0) 75 76 #define HALT_ACK_TIMEOUT_US 100000 77 #define NAV_HALT_ACK_TIMEOUT_US 200 78 79 /* QDSP6SS_RESET */ 80 #define Q6SS_STOP_CORE BIT(0) 81 #define Q6SS_CORE_ARES BIT(1) 82 #define Q6SS_BUS_ARES_ENABLE BIT(2) 83 84 /* QDSP6SS CBCR */ 85 #define Q6SS_CBCR_CLKEN BIT(0) 86 #define Q6SS_CBCR_CLKOFF BIT(31) 87 #define Q6SS_CBCR_TIMEOUT_US 200 88 89 /* QDSP6SS_GFMUX_CTL */ 90 #define Q6SS_CLK_ENABLE BIT(1) 91 92 /* QDSP6SS_PWR_CTL */ 93 #define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0) 94 #define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1) 95 #define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2) 96 #define Q6SS_L2TAG_SLP_NRET_N BIT(16) 97 #define Q6SS_ETB_SLP_NRET_N BIT(17) 98 #define Q6SS_L2DATA_STBY_N BIT(18) 99 #define Q6SS_SLP_RET_N BIT(19) 100 #define Q6SS_CLAMP_IO BIT(20) 101 #define QDSS_BHS_ON BIT(21) 102 #define QDSS_LDO_BYP BIT(22) 103 104 /* QDSP6v56 parameters */ 105 #define QDSP6v56_LDO_BYP BIT(25) 106 #define QDSP6v56_BHS_ON BIT(24) 107 #define QDSP6v56_CLAMP_WL BIT(21) 108 #define QDSP6v56_CLAMP_QMC_MEM BIT(22) 109 #define QDSP6SS_XO_CBCR 0x0038 110 #define QDSP6SS_ACC_OVERRIDE_VAL 0x20 111 112 /* QDSP6v65 parameters */ 113 #define QDSP6SS_CORE_CBCR 0x20 114 #define QDSP6SS_SLEEP 0x3C 115 #define QDSP6SS_BOOT_CORE_START 0x400 116 #define QDSP6SS_BOOT_CMD 0x404 117 #define QDSP6SS_BOOT_STATUS 0x408 118 #define BOOT_STATUS_TIMEOUT_US 200 119 #define BOOT_FSM_TIMEOUT 10000 120 121 struct reg_info { 122 struct regulator *reg; 123 int uV; 124 int uA; 125 }; 126 127 struct qcom_mss_reg_res { 128 const char *supply; 129 int uV; 130 int uA; 131 }; 132 133 struct rproc_hexagon_res { 134 const char *hexagon_mba_image; 135 struct qcom_mss_reg_res *proxy_supply; 136 struct qcom_mss_reg_res *active_supply; 137 char **proxy_clk_names; 138 char 
**reset_clk_names; 139 char **active_clk_names; 140 char **active_pd_names; 141 char **proxy_pd_names; 142 int version; 143 bool need_mem_protection; 144 bool has_alt_reset; 145 bool has_halt_nav; 146 }; 147 148 struct q6v5 { 149 struct device *dev; 150 struct rproc *rproc; 151 152 void __iomem *reg_base; 153 void __iomem *rmb_base; 154 155 struct regmap *halt_map; 156 struct regmap *halt_nav_map; 157 struct regmap *conn_map; 158 159 u32 halt_q6; 160 u32 halt_modem; 161 u32 halt_nc; 162 u32 halt_nav; 163 u32 conn_box; 164 165 struct reset_control *mss_restart; 166 struct reset_control *pdc_reset; 167 168 struct qcom_q6v5 q6v5; 169 170 struct clk *active_clks[8]; 171 struct clk *reset_clks[4]; 172 struct clk *proxy_clks[4]; 173 struct device *active_pds[1]; 174 struct device *proxy_pds[3]; 175 int active_clk_count; 176 int reset_clk_count; 177 int proxy_clk_count; 178 int active_pd_count; 179 int proxy_pd_count; 180 181 struct reg_info active_regs[1]; 182 struct reg_info proxy_regs[3]; 183 int active_reg_count; 184 int proxy_reg_count; 185 186 bool running; 187 188 bool dump_mba_loaded; 189 unsigned long dump_segment_mask; 190 unsigned long dump_complete_mask; 191 192 phys_addr_t mba_phys; 193 void *mba_region; 194 size_t mba_size; 195 196 phys_addr_t mpss_phys; 197 phys_addr_t mpss_reloc; 198 void *mpss_region; 199 size_t mpss_size; 200 201 struct qcom_rproc_glink glink_subdev; 202 struct qcom_rproc_subdev smd_subdev; 203 struct qcom_rproc_ssr ssr_subdev; 204 struct qcom_sysmon *sysmon; 205 bool need_mem_protection; 206 bool has_alt_reset; 207 bool has_halt_nav; 208 int mpss_perm; 209 int mba_perm; 210 const char *hexagon_mdt_image; 211 int version; 212 }; 213 214 enum { 215 MSS_MSM8916, 216 MSS_MSM8974, 217 MSS_MSM8996, 218 MSS_MSM8998, 219 MSS_SC7180, 220 MSS_SDM845, 221 }; 222 223 static int q6v5_regulator_init(struct device *dev, struct reg_info *regs, 224 const struct qcom_mss_reg_res *reg_res) 225 { 226 int rc; 227 int i; 228 229 if (!reg_res) 230 return 0; 
231 232 for (i = 0; reg_res[i].supply; i++) { 233 regs[i].reg = devm_regulator_get(dev, reg_res[i].supply); 234 if (IS_ERR(regs[i].reg)) { 235 rc = PTR_ERR(regs[i].reg); 236 if (rc != -EPROBE_DEFER) 237 dev_err(dev, "Failed to get %s\n regulator", 238 reg_res[i].supply); 239 return rc; 240 } 241 242 regs[i].uV = reg_res[i].uV; 243 regs[i].uA = reg_res[i].uA; 244 } 245 246 return i; 247 } 248 249 static int q6v5_regulator_enable(struct q6v5 *qproc, 250 struct reg_info *regs, int count) 251 { 252 int ret; 253 int i; 254 255 for (i = 0; i < count; i++) { 256 if (regs[i].uV > 0) { 257 ret = regulator_set_voltage(regs[i].reg, 258 regs[i].uV, INT_MAX); 259 if (ret) { 260 dev_err(qproc->dev, 261 "Failed to request voltage for %d.\n", 262 i); 263 goto err; 264 } 265 } 266 267 if (regs[i].uA > 0) { 268 ret = regulator_set_load(regs[i].reg, 269 regs[i].uA); 270 if (ret < 0) { 271 dev_err(qproc->dev, 272 "Failed to set regulator mode\n"); 273 goto err; 274 } 275 } 276 277 ret = regulator_enable(regs[i].reg); 278 if (ret) { 279 dev_err(qproc->dev, "Regulator enable failed\n"); 280 goto err; 281 } 282 } 283 284 return 0; 285 err: 286 for (; i >= 0; i--) { 287 if (regs[i].uV > 0) 288 regulator_set_voltage(regs[i].reg, 0, INT_MAX); 289 290 if (regs[i].uA > 0) 291 regulator_set_load(regs[i].reg, 0); 292 293 regulator_disable(regs[i].reg); 294 } 295 296 return ret; 297 } 298 299 static void q6v5_regulator_disable(struct q6v5 *qproc, 300 struct reg_info *regs, int count) 301 { 302 int i; 303 304 for (i = 0; i < count; i++) { 305 if (regs[i].uV > 0) 306 regulator_set_voltage(regs[i].reg, 0, INT_MAX); 307 308 if (regs[i].uA > 0) 309 regulator_set_load(regs[i].reg, 0); 310 311 regulator_disable(regs[i].reg); 312 } 313 } 314 315 static int q6v5_clk_enable(struct device *dev, 316 struct clk **clks, int count) 317 { 318 int rc; 319 int i; 320 321 for (i = 0; i < count; i++) { 322 rc = clk_prepare_enable(clks[i]); 323 if (rc) { 324 dev_err(dev, "Clock enable failed\n"); 325 goto err; 326 } 
327 } 328 329 return 0; 330 err: 331 for (i--; i >= 0; i--) 332 clk_disable_unprepare(clks[i]); 333 334 return rc; 335 } 336 337 static void q6v5_clk_disable(struct device *dev, 338 struct clk **clks, int count) 339 { 340 int i; 341 342 for (i = 0; i < count; i++) 343 clk_disable_unprepare(clks[i]); 344 } 345 346 static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds, 347 size_t pd_count) 348 { 349 int ret; 350 int i; 351 352 for (i = 0; i < pd_count; i++) { 353 dev_pm_genpd_set_performance_state(pds[i], INT_MAX); 354 ret = pm_runtime_get_sync(pds[i]); 355 if (ret < 0) 356 goto unroll_pd_votes; 357 } 358 359 return 0; 360 361 unroll_pd_votes: 362 for (i--; i >= 0; i--) { 363 dev_pm_genpd_set_performance_state(pds[i], 0); 364 pm_runtime_put(pds[i]); 365 } 366 367 return ret; 368 }; 369 370 static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds, 371 size_t pd_count) 372 { 373 int i; 374 375 for (i = 0; i < pd_count; i++) { 376 dev_pm_genpd_set_performance_state(pds[i], 0); 377 pm_runtime_put(pds[i]); 378 } 379 } 380 381 static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm, 382 bool local, bool remote, phys_addr_t addr, 383 size_t size) 384 { 385 struct qcom_scm_vmperm next[2]; 386 int perms = 0; 387 388 if (!qproc->need_mem_protection) 389 return 0; 390 391 if (local == !!(*current_perm & BIT(QCOM_SCM_VMID_HLOS)) && 392 remote == !!(*current_perm & BIT(QCOM_SCM_VMID_MSS_MSA))) 393 return 0; 394 395 if (local) { 396 next[perms].vmid = QCOM_SCM_VMID_HLOS; 397 next[perms].perm = QCOM_SCM_PERM_RWX; 398 perms++; 399 } 400 401 if (remote) { 402 next[perms].vmid = QCOM_SCM_VMID_MSS_MSA; 403 next[perms].perm = QCOM_SCM_PERM_RW; 404 perms++; 405 } 406 407 return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K), 408 current_perm, next, perms); 409 } 410 411 static int q6v5_load(struct rproc *rproc, const struct firmware *fw) 412 { 413 struct q6v5 *qproc = rproc->priv; 414 415 memcpy(qproc->mba_region, fw->data, fw->size); 416 417 
return 0; 418 } 419 420 static int q6v5_reset_assert(struct q6v5 *qproc) 421 { 422 int ret; 423 424 if (qproc->has_alt_reset) { 425 reset_control_assert(qproc->pdc_reset); 426 ret = reset_control_reset(qproc->mss_restart); 427 reset_control_deassert(qproc->pdc_reset); 428 } else if (qproc->has_halt_nav) { 429 /* 430 * When the AXI pipeline is being reset with the Q6 modem partly 431 * operational there is possibility of AXI valid signal to 432 * glitch, leading to spurious transactions and Q6 hangs. A work 433 * around is employed by asserting the AXI_GATING_VALID_OVERRIDE 434 * BIT before triggering Q6 MSS reset. Both the HALTREQ and 435 * AXI_GATING_VALID_OVERRIDE are withdrawn post MSS assert 436 * followed by a MSS deassert, while holding the PDC reset. 437 */ 438 reset_control_assert(qproc->pdc_reset); 439 regmap_update_bits(qproc->conn_map, qproc->conn_box, 440 AXI_GATING_VALID_OVERRIDE, 1); 441 regmap_update_bits(qproc->halt_nav_map, qproc->halt_nav, 442 NAV_AXI_HALTREQ_BIT, 0); 443 reset_control_assert(qproc->mss_restart); 444 reset_control_deassert(qproc->pdc_reset); 445 regmap_update_bits(qproc->conn_map, qproc->conn_box, 446 AXI_GATING_VALID_OVERRIDE, 0); 447 ret = reset_control_deassert(qproc->mss_restart); 448 } else { 449 ret = reset_control_assert(qproc->mss_restart); 450 } 451 452 return ret; 453 } 454 455 static int q6v5_reset_deassert(struct q6v5 *qproc) 456 { 457 int ret; 458 459 if (qproc->has_alt_reset) { 460 reset_control_assert(qproc->pdc_reset); 461 writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET); 462 ret = reset_control_reset(qproc->mss_restart); 463 writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET); 464 reset_control_deassert(qproc->pdc_reset); 465 } else if (qproc->has_halt_nav) { 466 ret = reset_control_reset(qproc->mss_restart); 467 } else { 468 ret = reset_control_deassert(qproc->mss_restart); 469 } 470 471 return ret; 472 } 473 474 static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms) 475 { 476 unsigned long timeout; 477 s32 val; 478 
479 timeout = jiffies + msecs_to_jiffies(ms); 480 for (;;) { 481 val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG); 482 if (val) 483 break; 484 485 if (time_after(jiffies, timeout)) 486 return -ETIMEDOUT; 487 488 msleep(1); 489 } 490 491 return val; 492 } 493 494 static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms) 495 { 496 497 unsigned long timeout; 498 s32 val; 499 500 timeout = jiffies + msecs_to_jiffies(ms); 501 for (;;) { 502 val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG); 503 if (val < 0) 504 break; 505 506 if (!status && val) 507 break; 508 else if (status && val == status) 509 break; 510 511 if (time_after(jiffies, timeout)) 512 return -ETIMEDOUT; 513 514 msleep(1); 515 } 516 517 return val; 518 } 519 520 static int q6v5proc_reset(struct q6v5 *qproc) 521 { 522 u32 val; 523 int ret; 524 int i; 525 526 if (qproc->version == MSS_SDM845) { 527 val = readl(qproc->reg_base + QDSP6SS_SLEEP); 528 val |= Q6SS_CBCR_CLKEN; 529 writel(val, qproc->reg_base + QDSP6SS_SLEEP); 530 531 ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP, 532 val, !(val & Q6SS_CBCR_CLKOFF), 1, 533 Q6SS_CBCR_TIMEOUT_US); 534 if (ret) { 535 dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n"); 536 return -ETIMEDOUT; 537 } 538 539 /* De-assert QDSP6 stop core */ 540 writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START); 541 /* Trigger boot FSM */ 542 writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD); 543 544 ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS, 545 val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT); 546 if (ret) { 547 dev_err(qproc->dev, "Boot FSM failed to complete.\n"); 548 /* Reset the modem so that boot FSM is in reset state */ 549 q6v5_reset_deassert(qproc); 550 return ret; 551 } 552 553 goto pbl_wait; 554 } else if (qproc->version == MSS_SC7180) { 555 val = readl(qproc->reg_base + QDSP6SS_SLEEP); 556 val |= Q6SS_CBCR_CLKEN; 557 writel(val, qproc->reg_base + QDSP6SS_SLEEP); 558 559 ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP, 560 
val, !(val & Q6SS_CBCR_CLKOFF), 1, 561 Q6SS_CBCR_TIMEOUT_US); 562 if (ret) { 563 dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n"); 564 return -ETIMEDOUT; 565 } 566 567 /* Turn on the XO clock needed for PLL setup */ 568 val = readl(qproc->reg_base + QDSP6SS_XO_CBCR); 569 val |= Q6SS_CBCR_CLKEN; 570 writel(val, qproc->reg_base + QDSP6SS_XO_CBCR); 571 572 ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR, 573 val, !(val & Q6SS_CBCR_CLKOFF), 1, 574 Q6SS_CBCR_TIMEOUT_US); 575 if (ret) { 576 dev_err(qproc->dev, "QDSP6SS XO clock timed out\n"); 577 return -ETIMEDOUT; 578 } 579 580 /* Configure Q6 core CBCR to auto-enable after reset sequence */ 581 val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR); 582 val |= Q6SS_CBCR_CLKEN; 583 writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR); 584 585 /* De-assert the Q6 stop core signal */ 586 writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START); 587 588 /* Trigger the boot FSM to start the Q6 out-of-reset sequence */ 589 writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD); 590 591 /* Poll the QDSP6SS_BOOT_STATUS for FSM completion */ 592 ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_BOOT_STATUS, 593 val, (val & BIT(0)) != 0, 1, 594 BOOT_STATUS_TIMEOUT_US); 595 if (ret) { 596 dev_err(qproc->dev, "Boot FSM failed to complete.\n"); 597 /* Reset the modem so that boot FSM is in reset state */ 598 q6v5_reset_deassert(qproc); 599 return ret; 600 } 601 goto pbl_wait; 602 } else if (qproc->version == MSS_MSM8996 || 603 qproc->version == MSS_MSM8998) { 604 int mem_pwr_ctl; 605 606 /* Override the ACC value if required */ 607 writel(QDSP6SS_ACC_OVERRIDE_VAL, 608 qproc->reg_base + QDSP6SS_STRAP_ACC); 609 610 /* Assert resets, stop core */ 611 val = readl(qproc->reg_base + QDSP6SS_RESET_REG); 612 val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE; 613 writel(val, qproc->reg_base + QDSP6SS_RESET_REG); 614 615 /* BHS require xo cbcr to be enabled */ 616 val = readl(qproc->reg_base + QDSP6SS_XO_CBCR); 617 val |= 
Q6SS_CBCR_CLKEN; 618 writel(val, qproc->reg_base + QDSP6SS_XO_CBCR); 619 620 /* Read CLKOFF bit to go low indicating CLK is enabled */ 621 ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR, 622 val, !(val & Q6SS_CBCR_CLKOFF), 1, 623 Q6SS_CBCR_TIMEOUT_US); 624 if (ret) { 625 dev_err(qproc->dev, 626 "xo cbcr enabling timed out (rc:%d)\n", ret); 627 return ret; 628 } 629 /* Enable power block headswitch and wait for it to stabilize */ 630 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 631 val |= QDSP6v56_BHS_ON; 632 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 633 val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 634 udelay(1); 635 636 /* Put LDO in bypass mode */ 637 val |= QDSP6v56_LDO_BYP; 638 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 639 640 /* Deassert QDSP6 compiler memory clamp */ 641 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 642 val &= ~QDSP6v56_CLAMP_QMC_MEM; 643 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 644 645 /* Deassert memory peripheral sleep and L2 memory standby */ 646 val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N; 647 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 648 649 /* Turn on L1, L2, ETB and JU memories 1 at a time */ 650 if (qproc->version == MSS_MSM8996) { 651 mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL; 652 i = 19; 653 } else { 654 /* MSS_MSM8998 */ 655 mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL; 656 i = 28; 657 } 658 val = readl(qproc->reg_base + mem_pwr_ctl); 659 for (; i >= 0; i--) { 660 val |= BIT(i); 661 writel(val, qproc->reg_base + mem_pwr_ctl); 662 /* 663 * Read back value to ensure the write is done then 664 * wait for 1us for both memory peripheral and data 665 * array to turn on. 
666 */ 667 val |= readl(qproc->reg_base + mem_pwr_ctl); 668 udelay(1); 669 } 670 /* Remove word line clamp */ 671 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 672 val &= ~QDSP6v56_CLAMP_WL; 673 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 674 } else { 675 /* Assert resets, stop core */ 676 val = readl(qproc->reg_base + QDSP6SS_RESET_REG); 677 val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE; 678 writel(val, qproc->reg_base + QDSP6SS_RESET_REG); 679 680 /* Enable power block headswitch and wait for it to stabilize */ 681 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 682 val |= QDSS_BHS_ON | QDSS_LDO_BYP; 683 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 684 val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 685 udelay(1); 686 /* 687 * Turn on memories. L2 banks should be done individually 688 * to minimize inrush current. 689 */ 690 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 691 val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N | 692 Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N; 693 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 694 val |= Q6SS_L2DATA_SLP_NRET_N_2; 695 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 696 val |= Q6SS_L2DATA_SLP_NRET_N_1; 697 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 698 val |= Q6SS_L2DATA_SLP_NRET_N_0; 699 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 700 } 701 /* Remove IO clamp */ 702 val &= ~Q6SS_CLAMP_IO; 703 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 704 705 /* Bring core out of reset */ 706 val = readl(qproc->reg_base + QDSP6SS_RESET_REG); 707 val &= ~Q6SS_CORE_ARES; 708 writel(val, qproc->reg_base + QDSP6SS_RESET_REG); 709 710 /* Turn on core clock */ 711 val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG); 712 val |= Q6SS_CLK_ENABLE; 713 writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG); 714 715 /* Start core execution */ 716 val = readl(qproc->reg_base + QDSP6SS_RESET_REG); 717 val &= ~Q6SS_STOP_CORE; 718 writel(val, qproc->reg_base + 
QDSP6SS_RESET_REG); 719 720 pbl_wait: 721 /* Wait for PBL status */ 722 ret = q6v5_rmb_pbl_wait(qproc, 1000); 723 if (ret == -ETIMEDOUT) { 724 dev_err(qproc->dev, "PBL boot timed out\n"); 725 } else if (ret != RMB_PBL_SUCCESS) { 726 dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret); 727 ret = -EINVAL; 728 } else { 729 ret = 0; 730 } 731 732 return ret; 733 } 734 735 static void q6v5proc_halt_axi_port(struct q6v5 *qproc, 736 struct regmap *halt_map, 737 u32 offset) 738 { 739 unsigned int val; 740 int ret; 741 742 /* Check if we're already idle */ 743 ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val); 744 if (!ret && val) 745 return; 746 747 /* Assert halt request */ 748 regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1); 749 750 /* Wait for halt */ 751 regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val, 752 val, 1000, HALT_ACK_TIMEOUT_US); 753 754 ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val); 755 if (ret || !val) 756 dev_err(qproc->dev, "port failed halt\n"); 757 758 /* Clear halt request (port will remain halted until reset) */ 759 regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0); 760 } 761 762 static void q6v5proc_halt_nav_axi_port(struct q6v5 *qproc, 763 struct regmap *halt_map, 764 u32 offset) 765 { 766 unsigned int val; 767 int ret; 768 769 /* Check if we're already idle */ 770 ret = regmap_read(halt_map, offset, &val); 771 if (!ret && (val & NAV_AXI_IDLE_BIT)) 772 return; 773 774 /* Assert halt request */ 775 regmap_update_bits(halt_map, offset, NAV_AXI_HALTREQ_BIT, 776 NAV_AXI_HALTREQ_BIT); 777 778 /* Wait for halt ack*/ 779 regmap_read_poll_timeout(halt_map, offset, val, 780 (val & NAV_AXI_HALTACK_BIT), 781 5, NAV_HALT_ACK_TIMEOUT_US); 782 783 ret = regmap_read(halt_map, offset, &val); 784 if (ret || !(val & NAV_AXI_IDLE_BIT)) 785 dev_err(qproc->dev, "port failed halt\n"); 786 } 787 788 static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw) 789 { 790 unsigned long dma_attrs = 
DMA_ATTR_FORCE_CONTIGUOUS; 791 dma_addr_t phys; 792 void *metadata; 793 int mdata_perm; 794 int xferop_ret; 795 size_t size; 796 void *ptr; 797 int ret; 798 799 metadata = qcom_mdt_read_metadata(fw, &size); 800 if (IS_ERR(metadata)) 801 return PTR_ERR(metadata); 802 803 ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs); 804 if (!ptr) { 805 kfree(metadata); 806 dev_err(qproc->dev, "failed to allocate mdt buffer\n"); 807 return -ENOMEM; 808 } 809 810 memcpy(ptr, metadata, size); 811 812 /* Hypervisor mapping to access metadata by modem */ 813 mdata_perm = BIT(QCOM_SCM_VMID_HLOS); 814 ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true, 815 phys, size); 816 if (ret) { 817 dev_err(qproc->dev, 818 "assigning Q6 access to metadata failed: %d\n", ret); 819 ret = -EAGAIN; 820 goto free_dma_attrs; 821 } 822 823 writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG); 824 writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG); 825 826 ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000); 827 if (ret == -ETIMEDOUT) 828 dev_err(qproc->dev, "MPSS header authentication timed out\n"); 829 else if (ret < 0) 830 dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret); 831 832 /* Metadata authentication done, remove modem access */ 833 xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false, 834 phys, size); 835 if (xferop_ret) 836 dev_warn(qproc->dev, 837 "mdt buffer not reclaimed system may become unstable\n"); 838 839 free_dma_attrs: 840 dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs); 841 kfree(metadata); 842 843 return ret < 0 ? 
ret : 0; 844 } 845 846 static bool q6v5_phdr_valid(const struct elf32_phdr *phdr) 847 { 848 if (phdr->p_type != PT_LOAD) 849 return false; 850 851 if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH) 852 return false; 853 854 if (!phdr->p_memsz) 855 return false; 856 857 return true; 858 } 859 860 static int q6v5_mba_load(struct q6v5 *qproc) 861 { 862 int ret; 863 int xfermemop_ret; 864 865 qcom_q6v5_prepare(&qproc->q6v5); 866 867 ret = q6v5_pds_enable(qproc, qproc->active_pds, qproc->active_pd_count); 868 if (ret < 0) { 869 dev_err(qproc->dev, "failed to enable active power domains\n"); 870 goto disable_irqs; 871 } 872 873 ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count); 874 if (ret < 0) { 875 dev_err(qproc->dev, "failed to enable proxy power domains\n"); 876 goto disable_active_pds; 877 } 878 879 ret = q6v5_regulator_enable(qproc, qproc->proxy_regs, 880 qproc->proxy_reg_count); 881 if (ret) { 882 dev_err(qproc->dev, "failed to enable proxy supplies\n"); 883 goto disable_proxy_pds; 884 } 885 886 ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks, 887 qproc->proxy_clk_count); 888 if (ret) { 889 dev_err(qproc->dev, "failed to enable proxy clocks\n"); 890 goto disable_proxy_reg; 891 } 892 893 ret = q6v5_regulator_enable(qproc, qproc->active_regs, 894 qproc->active_reg_count); 895 if (ret) { 896 dev_err(qproc->dev, "failed to enable supplies\n"); 897 goto disable_proxy_clk; 898 } 899 900 ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks, 901 qproc->reset_clk_count); 902 if (ret) { 903 dev_err(qproc->dev, "failed to enable reset clocks\n"); 904 goto disable_vdd; 905 } 906 907 ret = q6v5_reset_deassert(qproc); 908 if (ret) { 909 dev_err(qproc->dev, "failed to deassert mss restart\n"); 910 goto disable_reset_clks; 911 } 912 913 ret = q6v5_clk_enable(qproc->dev, qproc->active_clks, 914 qproc->active_clk_count); 915 if (ret) { 916 dev_err(qproc->dev, "failed to enable clocks\n"); 917 goto assert_reset; 918 } 919 920 /* Assign MBA image 
access in DDR to q6 */ 921 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true, 922 qproc->mba_phys, qproc->mba_size); 923 if (ret) { 924 dev_err(qproc->dev, 925 "assigning Q6 access to mba memory failed: %d\n", ret); 926 goto disable_active_clks; 927 } 928 929 writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG); 930 931 ret = q6v5proc_reset(qproc); 932 if (ret) 933 goto reclaim_mba; 934 935 ret = q6v5_rmb_mba_wait(qproc, 0, 5000); 936 if (ret == -ETIMEDOUT) { 937 dev_err(qproc->dev, "MBA boot timed out\n"); 938 goto halt_axi_ports; 939 } else if (ret != RMB_MBA_XPU_UNLOCKED && 940 ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) { 941 dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret); 942 ret = -EINVAL; 943 goto halt_axi_ports; 944 } 945 946 qproc->dump_mba_loaded = true; 947 return 0; 948 949 halt_axi_ports: 950 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6); 951 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem); 952 if (qproc->has_halt_nav) 953 q6v5proc_halt_nav_axi_port(qproc, qproc->halt_nav_map, 954 qproc->halt_nav); 955 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc); 956 957 reclaim_mba: 958 xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, 959 false, qproc->mba_phys, 960 qproc->mba_size); 961 if (xfermemop_ret) { 962 dev_err(qproc->dev, 963 "Failed to reclaim mba buffer, system may become unstable\n"); 964 } 965 966 disable_active_clks: 967 q6v5_clk_disable(qproc->dev, qproc->active_clks, 968 qproc->active_clk_count); 969 assert_reset: 970 q6v5_reset_assert(qproc); 971 disable_reset_clks: 972 q6v5_clk_disable(qproc->dev, qproc->reset_clks, 973 qproc->reset_clk_count); 974 disable_vdd: 975 q6v5_regulator_disable(qproc, qproc->active_regs, 976 qproc->active_reg_count); 977 disable_proxy_clk: 978 q6v5_clk_disable(qproc->dev, qproc->proxy_clks, 979 qproc->proxy_clk_count); 980 disable_proxy_reg: 981 q6v5_regulator_disable(qproc, qproc->proxy_regs, 982 
qproc->proxy_reg_count); 983 disable_proxy_pds: 984 q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count); 985 disable_active_pds: 986 q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count); 987 disable_irqs: 988 qcom_q6v5_unprepare(&qproc->q6v5); 989 990 return ret; 991 } 992 993 static void q6v5_mba_reclaim(struct q6v5 *qproc) 994 { 995 int ret; 996 u32 val; 997 998 qproc->dump_mba_loaded = false; 999 1000 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6); 1001 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem); 1002 if (qproc->has_halt_nav) 1003 q6v5proc_halt_nav_axi_port(qproc, qproc->halt_nav_map, 1004 qproc->halt_nav); 1005 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc); 1006 if (qproc->version == MSS_MSM8996) { 1007 /* 1008 * To avoid high MX current during LPASS/MSS restart. 1009 */ 1010 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 1011 val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL | 1012 QDSP6v56_CLAMP_QMC_MEM; 1013 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 1014 } 1015 1016 q6v5_reset_assert(qproc); 1017 1018 q6v5_clk_disable(qproc->dev, qproc->reset_clks, 1019 qproc->reset_clk_count); 1020 q6v5_clk_disable(qproc->dev, qproc->active_clks, 1021 qproc->active_clk_count); 1022 q6v5_regulator_disable(qproc, qproc->active_regs, 1023 qproc->active_reg_count); 1024 q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count); 1025 1026 /* In case of failure or coredump scenario where reclaiming MBA memory 1027 * could not happen reclaim it here. 
1028 */ 1029 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, 1030 qproc->mba_phys, 1031 qproc->mba_size); 1032 WARN_ON(ret); 1033 1034 ret = qcom_q6v5_unprepare(&qproc->q6v5); 1035 if (ret) { 1036 q6v5_pds_disable(qproc, qproc->proxy_pds, 1037 qproc->proxy_pd_count); 1038 q6v5_clk_disable(qproc->dev, qproc->proxy_clks, 1039 qproc->proxy_clk_count); 1040 q6v5_regulator_disable(qproc, qproc->proxy_regs, 1041 qproc->proxy_reg_count); 1042 } 1043 } 1044 1045 static int q6v5_reload_mba(struct rproc *rproc) 1046 { 1047 struct q6v5 *qproc = rproc->priv; 1048 const struct firmware *fw; 1049 int ret; 1050 1051 ret = request_firmware(&fw, rproc->firmware, qproc->dev); 1052 if (ret < 0) 1053 return ret; 1054 1055 q6v5_load(rproc, fw); 1056 ret = q6v5_mba_load(qproc); 1057 release_firmware(fw); 1058 1059 return ret; 1060 } 1061 1062 static int q6v5_mpss_load(struct q6v5 *qproc) 1063 { 1064 const struct elf32_phdr *phdrs; 1065 const struct elf32_phdr *phdr; 1066 const struct firmware *seg_fw; 1067 const struct firmware *fw; 1068 struct elf32_hdr *ehdr; 1069 phys_addr_t mpss_reloc; 1070 phys_addr_t boot_addr; 1071 phys_addr_t min_addr = PHYS_ADDR_MAX; 1072 phys_addr_t max_addr = 0; 1073 u32 code_length; 1074 bool relocate = false; 1075 char *fw_name; 1076 size_t fw_name_len; 1077 ssize_t offset; 1078 size_t size = 0; 1079 void *ptr; 1080 int ret; 1081 int i; 1082 1083 fw_name_len = strlen(qproc->hexagon_mdt_image); 1084 if (fw_name_len <= 4) 1085 return -EINVAL; 1086 1087 fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL); 1088 if (!fw_name) 1089 return -ENOMEM; 1090 1091 ret = request_firmware(&fw, fw_name, qproc->dev); 1092 if (ret < 0) { 1093 dev_err(qproc->dev, "unable to load %s\n", fw_name); 1094 goto out; 1095 } 1096 1097 /* Initialize the RMB validator */ 1098 writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); 1099 1100 ret = q6v5_mpss_init_image(qproc, fw); 1101 if (ret) 1102 goto release_firmware; 1103 1104 ehdr = (struct elf32_hdr 
*)fw->data; 1105 phdrs = (struct elf32_phdr *)(ehdr + 1); 1106 1107 for (i = 0; i < ehdr->e_phnum; i++) { 1108 phdr = &phdrs[i]; 1109 1110 if (!q6v5_phdr_valid(phdr)) 1111 continue; 1112 1113 if (phdr->p_flags & QCOM_MDT_RELOCATABLE) 1114 relocate = true; 1115 1116 if (phdr->p_paddr < min_addr) 1117 min_addr = phdr->p_paddr; 1118 1119 if (phdr->p_paddr + phdr->p_memsz > max_addr) 1120 max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K); 1121 } 1122 1123 /** 1124 * In case of a modem subsystem restart on secure devices, the modem 1125 * memory can be reclaimed only after MBA is loaded. For modem cold 1126 * boot this will be a nop 1127 */ 1128 q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false, 1129 qproc->mpss_phys, qproc->mpss_size); 1130 1131 /* Share ownership between Linux and MSS, during segment loading */ 1132 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true, 1133 qproc->mpss_phys, qproc->mpss_size); 1134 if (ret) { 1135 dev_err(qproc->dev, 1136 "assigning Q6 access to mpss memory failed: %d\n", ret); 1137 ret = -EAGAIN; 1138 goto release_firmware; 1139 } 1140 1141 mpss_reloc = relocate ? 
min_addr : qproc->mpss_phys; 1142 qproc->mpss_reloc = mpss_reloc; 1143 /* Load firmware segments */ 1144 for (i = 0; i < ehdr->e_phnum; i++) { 1145 phdr = &phdrs[i]; 1146 1147 if (!q6v5_phdr_valid(phdr)) 1148 continue; 1149 1150 offset = phdr->p_paddr - mpss_reloc; 1151 if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) { 1152 dev_err(qproc->dev, "segment outside memory range\n"); 1153 ret = -EINVAL; 1154 goto release_firmware; 1155 } 1156 1157 ptr = qproc->mpss_region + offset; 1158 1159 if (phdr->p_filesz && phdr->p_offset < fw->size) { 1160 /* Firmware is large enough to be non-split */ 1161 if (phdr->p_offset + phdr->p_filesz > fw->size) { 1162 dev_err(qproc->dev, 1163 "failed to load segment %d from truncated file %s\n", 1164 i, fw_name); 1165 ret = -EINVAL; 1166 goto release_firmware; 1167 } 1168 1169 memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz); 1170 } else if (phdr->p_filesz) { 1171 /* Replace "xxx.xxx" with "xxx.bxx" */ 1172 sprintf(fw_name + fw_name_len - 3, "b%02d", i); 1173 ret = request_firmware(&seg_fw, fw_name, qproc->dev); 1174 if (ret) { 1175 dev_err(qproc->dev, "failed to load %s\n", fw_name); 1176 goto release_firmware; 1177 } 1178 1179 memcpy(ptr, seg_fw->data, seg_fw->size); 1180 1181 release_firmware(seg_fw); 1182 } 1183 1184 if (phdr->p_memsz > phdr->p_filesz) { 1185 memset(ptr + phdr->p_filesz, 0, 1186 phdr->p_memsz - phdr->p_filesz); 1187 } 1188 size += phdr->p_memsz; 1189 1190 code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); 1191 if (!code_length) { 1192 boot_addr = relocate ? 
qproc->mpss_phys : min_addr; 1193 writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG); 1194 writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG); 1195 } 1196 writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); 1197 1198 ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG); 1199 if (ret < 0) { 1200 dev_err(qproc->dev, "MPSS authentication failed: %d\n", 1201 ret); 1202 goto release_firmware; 1203 } 1204 } 1205 1206 /* Transfer ownership of modem ddr region to q6 */ 1207 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true, 1208 qproc->mpss_phys, qproc->mpss_size); 1209 if (ret) { 1210 dev_err(qproc->dev, 1211 "assigning Q6 access to mpss memory failed: %d\n", ret); 1212 ret = -EAGAIN; 1213 goto release_firmware; 1214 } 1215 1216 ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000); 1217 if (ret == -ETIMEDOUT) 1218 dev_err(qproc->dev, "MPSS authentication timed out\n"); 1219 else if (ret < 0) 1220 dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret); 1221 1222 release_firmware: 1223 release_firmware(fw); 1224 out: 1225 kfree(fw_name); 1226 1227 return ret < 0 ? 
		       ret : 0;
}

/*
 * Coredump copy callback, invoked by the remoteproc core once per segment
 * registered in qcom_q6v5_register_dump_segments().
 *
 * On secure devices the mpss carveout is owned by Q6 after a crash, so
 * before the first segment is read the MBA firmware is reloaded and
 * ownership of the region is transferred back to Linux.  Once every bit in
 * dump_complete_mask has been collected, ownership is handed back to Q6
 * and the MBA is reclaimed.
 */
static void qcom_q6v5_dump_segment(struct rproc *rproc,
				   struct rproc_dump_segment *segment,
				   void *dest)
{
	int ret = 0;
	struct q6v5 *qproc = rproc->priv;
	/* segment->priv carries the program-header index; map it to a bit */
	unsigned long mask = BIT((unsigned long)segment->priv);
	void *ptr = rproc_da_to_va(rproc, segment->da, segment->size);

	/* Unlock mba before copying segments */
	if (!qproc->dump_mba_loaded) {
		ret = q6v5_reload_mba(rproc);
		if (!ret) {
			/* Reset ownership back to Linux to copy segments */
			ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						      true, false,
						      qproc->mpss_phys,
						      qproc->mpss_size);
		}
	}

	/* Fill unreadable segments with 0xff so the dump stays well-formed */
	if (!ptr || ret)
		memset(dest, 0xff, segment->size);
	else
		memcpy(dest, ptr, segment->size);

	qproc->dump_segment_mask |= mask;

	/* Reclaim mba after copying segments */
	if (qproc->dump_segment_mask == qproc->dump_complete_mask) {
		if (qproc->dump_mba_loaded) {
			/* Try to reset ownership back to Q6 */
			q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						false, true,
						qproc->mpss_phys,
						qproc->mpss_size);
			q6v5_mba_reclaim(qproc);
		}
	}
}

/*
 * rproc .start callback: boot the MBA, load and authenticate the mpss
 * firmware, then wait up to 5s for the modem's start acknowledgment.
 *
 * Returns 0 on success or a negative errno; on failure the MBA and any
 * partially loaded mpss state are reclaimed via q6v5_mba_reclaim().
 */
static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int xfermemop_ret;
	int ret;

	ret = q6v5_mba_load(qproc);
	if (ret)
		return ret;

	dev_info(qproc->dev, "MBA booted, loading mpss\n");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto reclaim_mpss;

	ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "start timed out\n");
		goto reclaim_mpss;
	}

	/*
	 * The MBA buffer is no longer needed once the modem runs; take it
	 * back from Q6.  Failure is logged but deliberately not fatal.
	 */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						false, qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret)
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer system may become unstable\n");

	/* Reset Dump Segment Mask */
	qproc->dump_segment_mask = 0;
	qproc->running = true;

	return 0;

reclaim_mpss:
	q6v5_mba_reclaim(qproc);

	return ret;
}

/*
 * rproc .stop callback: request a graceful stop from the modem (timeout is
 * only logged) and reclaim the MBA resources.  Always returns 0.
 */
static int q6v5_stop(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int ret;

	qproc->running = false;

	ret = qcom_q6v5_request_stop(&qproc->q6v5);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "timed out on wait\n");

	q6v5_mba_reclaim(qproc);

	return 0;
}

/*
 * Translate a modem device address into a kernel virtual address within
 * the mapped mpss region, or NULL if the range falls outside it.
 *
 * NOTE(review): @offset is an int while @da is u64 — this assumes device
 * addresses relative to mpss_reloc fit in 32 bits; confirm per platform.
 */
static void *q6v5_da_to_va(struct rproc *rproc, u64 da, size_t len)
{
	struct q6v5 *qproc = rproc->priv;
	int offset;

	offset = da - qproc->mpss_reloc;
	if (offset < 0 || offset + len > qproc->mpss_size)
		return NULL;

	return qproc->mpss_region + offset;
}

/*
 * rproc .parse_fw callback: parse the modem mdt image (not @mba_fw) and
 * register one coredump segment per valid program header.  The header
 * index is stashed in segment->priv and mirrored in dump_complete_mask so
 * qcom_q6v5_dump_segment() can tell when the last segment was copied.
 */
static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
					    const struct firmware *mba_fw)
{
	const struct firmware *fw;
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct elf32_hdr *ehdr;
	struct q6v5 *qproc = rproc->priv;
	unsigned long i;
	int ret;

	ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load %s\n",
			qproc->hexagon_mdt_image);
		return ret;
	}

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);
	qproc->dump_complete_mask = 0;

	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
							phdr->p_memsz,
							qcom_q6v5_dump_segment,
							(void *)i);
		if (ret)
			break;

		qproc->dump_complete_mask |= BIT(i);
	}

	release_firmware(fw);
	return ret;
}

/* remoteproc operations for the self-authenticating modem subsystem */
static const struct rproc_ops q6v5_ops = {
	.start =
q6v5_start,
	.stop = q6v5_stop,
	.da_to_va = q6v5_da_to_va,
	.parse_fw = qcom_q6v5_register_dump_segments,
	.load = q6v5_load,
};

/*
 * Called by the qcom_q6v5 helper when the modem signals MSA handover:
 * the proxy votes (clocks, regulators, power domains) held only for the
 * boot phase are dropped.
 */
static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
{
	struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);

	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
}

/*
 * Map the "qdsp6" and "rmb" register banks and resolve the syscon-based
 * halt registers from "qcom,halt-regs" (and, when has_halt_nav is set, the
 * NAV halt regmap and connection box from "qcom,halt-nav-regs").
 *
 * Returns 0 on success or a negative errno (-EPROBE_DEFER when the NAV
 * clock device has not been registered yet).
 */
static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
{
	struct of_phandle_args args;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
	qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->reg_base))
		return PTR_ERR(qproc->reg_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
	qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->rmb_base))
		return PTR_ERR(qproc->rmb_base);

	/* "qcom,halt-regs" = <&syscon q6_off modem_off nc_off> */
	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
					       "qcom,halt-regs", 3, 0, &args);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
		return -EINVAL;
	}

	qproc->halt_map = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(qproc->halt_map))
		return PTR_ERR(qproc->halt_map);

	qproc->halt_q6 = args.args[0];
	qproc->halt_modem = args.args[1];
	qproc->halt_nc = args.args[2];

	if (qproc->has_halt_nav) {
		struct platform_device *nav_pdev;

		/* First phandle: device providing the NAV halt regmap */
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,halt-nav-regs",
						       1, 0, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse halt-nav-regs\n");
			return -EINVAL;
		}

		nav_pdev = of_find_device_by_node(args.np);
		of_node_put(args.np);
		if (!nav_pdev) {
			dev_err(&pdev->dev, "failed to get mss clock device\n");
			return -EPROBE_DEFER;
		}

		qproc->halt_nav_map = dev_get_regmap(&nav_pdev->dev, NULL);
		if (!qproc->halt_nav_map) {
			dev_err(&pdev->dev, "failed to get map from device\n");
			return -EINVAL;
		}
		qproc->halt_nav = args.args[0];

		/* Second phandle: syscon holding the connection box */
		ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
						       "qcom,halt-nav-regs",
						       1, 1, &args);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to parse halt-nav-regs\n");
			return -EINVAL;
		}

		qproc->conn_map = syscon_node_to_regmap(args.np);
		of_node_put(args.np);
		if (IS_ERR(qproc->conn_map))
			return PTR_ERR(qproc->conn_map);

		qproc->conn_box = args.args[0];
	}

	return 0;
}

/*
 * Acquire every clock named in the NULL-terminated @clk_names array.
 *
 * Returns the number of clocks obtained (0 when @clk_names is NULL) or a
 * negative errno; -EPROBE_DEFER is propagated silently.
 */
static int q6v5_init_clocks(struct device *dev, struct clk **clks,
			    char **clk_names)
{
	int i;

	if (!clk_names)
		return 0;

	for (i = 0; clk_names[i]; i++) {
		clks[i] = devm_clk_get(dev, clk_names[i]);
		if (IS_ERR(clks[i])) {
			int rc = PTR_ERR(clks[i]);

			if (rc != -EPROBE_DEFER)
				dev_err(dev, "Failed to get %s clock\n",
					clk_names[i]);
			return rc;
		}
	}

	return i;
}

/*
 * Attach every power domain named in the NULL-terminated @pd_names array.
 *
 * Returns the number of domains attached (0 when @pd_names is NULL) or a
 * negative errno after detaching any domains attached so far.
 */
static int q6v5_pds_attach(struct device *dev, struct device **devs,
			   char **pd_names)
{
	size_t num_pds = 0;
	int ret;
	int i;

	if (!pd_names)
		return 0;

	while (pd_names[num_pds])
		num_pds++;

	for (i = 0; i < num_pds; i++) {
		devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
		if (IS_ERR_OR_NULL(devs[i])) {
			ret = PTR_ERR(devs[i]) ?
: -ENODATA; 1517 goto unroll_attach; 1518 } 1519 } 1520 1521 return num_pds; 1522 1523 unroll_attach: 1524 for (i--; i >= 0; i--) 1525 dev_pm_domain_detach(devs[i], false); 1526 1527 return ret; 1528 }; 1529 1530 static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds, 1531 size_t pd_count) 1532 { 1533 int i; 1534 1535 for (i = 0; i < pd_count; i++) 1536 dev_pm_domain_detach(pds[i], false); 1537 } 1538 1539 static int q6v5_init_reset(struct q6v5 *qproc) 1540 { 1541 qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev, 1542 "mss_restart"); 1543 if (IS_ERR(qproc->mss_restart)) { 1544 dev_err(qproc->dev, "failed to acquire mss restart\n"); 1545 return PTR_ERR(qproc->mss_restart); 1546 } 1547 1548 if (qproc->has_alt_reset || qproc->has_halt_nav) { 1549 qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev, 1550 "pdc_reset"); 1551 if (IS_ERR(qproc->pdc_reset)) { 1552 dev_err(qproc->dev, "failed to acquire pdc reset\n"); 1553 return PTR_ERR(qproc->pdc_reset); 1554 } 1555 } 1556 1557 return 0; 1558 } 1559 1560 static int q6v5_alloc_memory_region(struct q6v5 *qproc) 1561 { 1562 struct device_node *child; 1563 struct device_node *node; 1564 struct resource r; 1565 int ret; 1566 1567 child = of_get_child_by_name(qproc->dev->of_node, "mba"); 1568 node = of_parse_phandle(child, "memory-region", 0); 1569 ret = of_address_to_resource(node, 0, &r); 1570 if (ret) { 1571 dev_err(qproc->dev, "unable to resolve mba region\n"); 1572 return ret; 1573 } 1574 of_node_put(node); 1575 1576 qproc->mba_phys = r.start; 1577 qproc->mba_size = resource_size(&r); 1578 qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size); 1579 if (!qproc->mba_region) { 1580 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", 1581 &r.start, qproc->mba_size); 1582 return -EBUSY; 1583 } 1584 1585 child = of_get_child_by_name(qproc->dev->of_node, "mpss"); 1586 node = of_parse_phandle(child, "memory-region", 0); 1587 ret = 
of_address_to_resource(node, 0, &r); 1588 if (ret) { 1589 dev_err(qproc->dev, "unable to resolve mpss region\n"); 1590 return ret; 1591 } 1592 of_node_put(node); 1593 1594 qproc->mpss_phys = qproc->mpss_reloc = r.start; 1595 qproc->mpss_size = resource_size(&r); 1596 qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size); 1597 if (!qproc->mpss_region) { 1598 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", 1599 &r.start, qproc->mpss_size); 1600 return -EBUSY; 1601 } 1602 1603 return 0; 1604 } 1605 1606 static int q6v5_probe(struct platform_device *pdev) 1607 { 1608 const struct rproc_hexagon_res *desc; 1609 struct q6v5 *qproc; 1610 struct rproc *rproc; 1611 const char *mba_image; 1612 int ret; 1613 1614 desc = of_device_get_match_data(&pdev->dev); 1615 if (!desc) 1616 return -EINVAL; 1617 1618 if (desc->need_mem_protection && !qcom_scm_is_available()) 1619 return -EPROBE_DEFER; 1620 1621 mba_image = desc->hexagon_mba_image; 1622 ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name", 1623 0, &mba_image); 1624 if (ret < 0 && ret != -EINVAL) 1625 return ret; 1626 1627 rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops, 1628 mba_image, sizeof(*qproc)); 1629 if (!rproc) { 1630 dev_err(&pdev->dev, "failed to allocate rproc\n"); 1631 return -ENOMEM; 1632 } 1633 1634 rproc->auto_boot = false; 1635 1636 qproc = (struct q6v5 *)rproc->priv; 1637 qproc->dev = &pdev->dev; 1638 qproc->rproc = rproc; 1639 qproc->hexagon_mdt_image = "modem.mdt"; 1640 ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name", 1641 1, &qproc->hexagon_mdt_image); 1642 if (ret < 0 && ret != -EINVAL) 1643 return ret; 1644 1645 platform_set_drvdata(pdev, qproc); 1646 1647 qproc->has_halt_nav = desc->has_halt_nav; 1648 ret = q6v5_init_mem(qproc, pdev); 1649 if (ret) 1650 goto free_rproc; 1651 1652 ret = q6v5_alloc_memory_region(qproc); 1653 if (ret) 1654 goto free_rproc; 1655 1656 ret = q6v5_init_clocks(&pdev->dev, 
qproc->proxy_clks, 1657 desc->proxy_clk_names); 1658 if (ret < 0) { 1659 dev_err(&pdev->dev, "Failed to get proxy clocks.\n"); 1660 goto free_rproc; 1661 } 1662 qproc->proxy_clk_count = ret; 1663 1664 ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks, 1665 desc->reset_clk_names); 1666 if (ret < 0) { 1667 dev_err(&pdev->dev, "Failed to get reset clocks.\n"); 1668 goto free_rproc; 1669 } 1670 qproc->reset_clk_count = ret; 1671 1672 ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks, 1673 desc->active_clk_names); 1674 if (ret < 0) { 1675 dev_err(&pdev->dev, "Failed to get active clocks.\n"); 1676 goto free_rproc; 1677 } 1678 qproc->active_clk_count = ret; 1679 1680 ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs, 1681 desc->proxy_supply); 1682 if (ret < 0) { 1683 dev_err(&pdev->dev, "Failed to get proxy regulators.\n"); 1684 goto free_rproc; 1685 } 1686 qproc->proxy_reg_count = ret; 1687 1688 ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs, 1689 desc->active_supply); 1690 if (ret < 0) { 1691 dev_err(&pdev->dev, "Failed to get active regulators.\n"); 1692 goto free_rproc; 1693 } 1694 qproc->active_reg_count = ret; 1695 1696 ret = q6v5_pds_attach(&pdev->dev, qproc->active_pds, 1697 desc->active_pd_names); 1698 if (ret < 0) { 1699 dev_err(&pdev->dev, "Failed to attach active power domains\n"); 1700 goto free_rproc; 1701 } 1702 qproc->active_pd_count = ret; 1703 1704 ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds, 1705 desc->proxy_pd_names); 1706 if (ret < 0) { 1707 dev_err(&pdev->dev, "Failed to init power domains\n"); 1708 goto detach_active_pds; 1709 } 1710 qproc->proxy_pd_count = ret; 1711 1712 qproc->has_alt_reset = desc->has_alt_reset; 1713 ret = q6v5_init_reset(qproc); 1714 if (ret) 1715 goto detach_proxy_pds; 1716 1717 qproc->version = desc->version; 1718 qproc->need_mem_protection = desc->need_mem_protection; 1719 1720 ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM, 1721 qcom_msa_handover); 1722 if (ret) 1723 
goto detach_proxy_pds; 1724 1725 qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS); 1726 qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS); 1727 qcom_add_glink_subdev(rproc, &qproc->glink_subdev); 1728 qcom_add_smd_subdev(rproc, &qproc->smd_subdev); 1729 qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss"); 1730 qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12); 1731 if (IS_ERR(qproc->sysmon)) { 1732 ret = PTR_ERR(qproc->sysmon); 1733 goto detach_proxy_pds; 1734 } 1735 1736 ret = rproc_add(rproc); 1737 if (ret) 1738 goto detach_proxy_pds; 1739 1740 return 0; 1741 1742 detach_proxy_pds: 1743 q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count); 1744 detach_active_pds: 1745 q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count); 1746 free_rproc: 1747 rproc_free(rproc); 1748 1749 return ret; 1750 } 1751 1752 static int q6v5_remove(struct platform_device *pdev) 1753 { 1754 struct q6v5 *qproc = platform_get_drvdata(pdev); 1755 1756 rproc_del(qproc->rproc); 1757 1758 qcom_remove_sysmon_subdev(qproc->sysmon); 1759 qcom_remove_glink_subdev(qproc->rproc, &qproc->glink_subdev); 1760 qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev); 1761 qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev); 1762 1763 q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count); 1764 q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count); 1765 1766 rproc_free(qproc->rproc); 1767 1768 return 0; 1769 } 1770 1771 static const struct rproc_hexagon_res sc7180_mss = { 1772 .hexagon_mba_image = "mba.mbn", 1773 .proxy_clk_names = (char*[]){ 1774 "xo", 1775 NULL 1776 }, 1777 .reset_clk_names = (char*[]){ 1778 "iface", 1779 "bus", 1780 "snoc_axi", 1781 NULL 1782 }, 1783 .active_clk_names = (char*[]){ 1784 "mnoc_axi", 1785 "nav", 1786 "mss_nav", 1787 "mss_crypto", 1788 NULL 1789 }, 1790 .active_pd_names = (char*[]){ 1791 "load_state", 1792 NULL 1793 }, 1794 .proxy_pd_names = (char*[]){ 1795 "cx", 1796 "mx", 1797 "mss", 1798 NULL 1799 }, 1800 
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_halt_nav = true,
	.version = MSS_SC7180,
};

/* SDM845: secure boot with alternate (PDC) reset sequence */
static const struct rproc_hexagon_res sdm845_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		"prng",
		NULL
	},
	.reset_clk_names = (char*[]){
		"iface",
		"snoc_axi",
		NULL
	},
	.active_clk_names = (char*[]){
		"bus",
		"mem",
		"gpll0_mss",
		"mnoc_axi",
		NULL
	},
	.active_pd_names = (char*[]){
		"load_state",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		"mss",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = true,
	.has_halt_nav = false,
	.version = MSS_SDM845,
};

/* MSM8998: secure boot, power domains instead of explicit supplies */
static const struct rproc_hexagon_res msm8998_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		"qdss",
		"mem",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"gpll0_mss",
		"mnoc_axi",
		"snoc_axi",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_halt_nav = false,
	.version = MSS_MSM8998,
};

/* MSM8996: secure boot, proxy-voted pll supply */
static const struct rproc_hexagon_res msm8996_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		"pnoc",
		"qdss",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		"gpll0_mss",
		"snoc_axi",
		"mnoc_axi",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_halt_nav = false,
	.version = MSS_MSM8996,
};

/* MSM8916: non-secure boot, explicit mx/cx/pll proxy supplies */
static const struct rproc_hexagon_res msm8916_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.has_halt_nav = false,
	.version = MSS_MSM8916,
};

/* MSM8974: non-secure boot; note the split-image "mba.b00" MBA name */
static const struct rproc_hexagon_res msm8974_mss = {
	.hexagon_mba_image = "mba.b00",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.active_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mss",
			.uV = 1050000,
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.has_halt_nav = false,
	.version = MSS_MSM8974,
};

/* "qcom,q6v5-pil" is the legacy compatible, kept for old device trees */
static const struct of_device_id q6v5_of_match[] = {
	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
	{ .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
	{ .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
	{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
	{ },
};
MODULE_DEVICE_TABLE(of, q6v5_of_match);

static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-mss",
		.of_match_table = q6v5_of_match,
	},
};
module_platform_driver(q6v5_driver);

MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
MODULE_LICENSE("GPL v2");