1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Qualcomm self-authenticating modem subsystem remoteproc driver 4 * 5 * Copyright (C) 2016 Linaro Ltd. 6 * Copyright (C) 2014 Sony Mobile Communications AB 7 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. 8 */ 9 10 #include <linux/clk.h> 11 #include <linux/delay.h> 12 #include <linux/dma-mapping.h> 13 #include <linux/interrupt.h> 14 #include <linux/kernel.h> 15 #include <linux/mfd/syscon.h> 16 #include <linux/module.h> 17 #include <linux/of_address.h> 18 #include <linux/of_device.h> 19 #include <linux/platform_device.h> 20 #include <linux/pm_domain.h> 21 #include <linux/pm_runtime.h> 22 #include <linux/regmap.h> 23 #include <linux/regulator/consumer.h> 24 #include <linux/remoteproc.h> 25 #include "linux/remoteproc/qcom_q6v5_ipa_notify.h" 26 #include <linux/reset.h> 27 #include <linux/soc/qcom/mdt_loader.h> 28 #include <linux/iopoll.h> 29 30 #include "remoteproc_internal.h" 31 #include "qcom_common.h" 32 #include "qcom_pil_info.h" 33 #include "qcom_q6v5.h" 34 35 #include <linux/qcom_scm.h> 36 37 #define MPSS_CRASH_REASON_SMEM 421 38 39 /* RMB Status Register Values */ 40 #define RMB_PBL_SUCCESS 0x1 41 42 #define RMB_MBA_XPU_UNLOCKED 0x1 43 #define RMB_MBA_XPU_UNLOCKED_SCRIBBLED 0x2 44 #define RMB_MBA_META_DATA_AUTH_SUCCESS 0x3 45 #define RMB_MBA_AUTH_COMPLETE 0x4 46 47 /* PBL/MBA interface registers */ 48 #define RMB_MBA_IMAGE_REG 0x00 49 #define RMB_PBL_STATUS_REG 0x04 50 #define RMB_MBA_COMMAND_REG 0x08 51 #define RMB_MBA_STATUS_REG 0x0C 52 #define RMB_PMI_META_DATA_REG 0x10 53 #define RMB_PMI_CODE_START_REG 0x14 54 #define RMB_PMI_CODE_LENGTH_REG 0x18 55 #define RMB_MBA_MSS_STATUS 0x40 56 #define RMB_MBA_ALT_RESET 0x44 57 58 #define RMB_CMD_META_DATA_READY 0x1 59 #define RMB_CMD_LOAD_READY 0x2 60 61 /* QDSP6SS Register Offsets */ 62 #define QDSP6SS_RESET_REG 0x014 63 #define QDSP6SS_GFMUX_CTL_REG 0x020 64 #define QDSP6SS_PWR_CTL_REG 0x030 65 #define QDSP6SS_MEM_PWR_CTL 0x0B0 66 #define 
QDSP6V6SS_MEM_PWR_CTL 0x034 67 #define QDSP6SS_STRAP_ACC 0x110 68 69 /* AXI Halt Register Offsets */ 70 #define AXI_HALTREQ_REG 0x0 71 #define AXI_HALTACK_REG 0x4 72 #define AXI_IDLE_REG 0x8 73 #define AXI_GATING_VALID_OVERRIDE BIT(0) 74 75 #define HALT_ACK_TIMEOUT_US 100000 76 77 /* QDSP6SS_RESET */ 78 #define Q6SS_STOP_CORE BIT(0) 79 #define Q6SS_CORE_ARES BIT(1) 80 #define Q6SS_BUS_ARES_ENABLE BIT(2) 81 82 /* QDSP6SS CBCR */ 83 #define Q6SS_CBCR_CLKEN BIT(0) 84 #define Q6SS_CBCR_CLKOFF BIT(31) 85 #define Q6SS_CBCR_TIMEOUT_US 200 86 87 /* QDSP6SS_GFMUX_CTL */ 88 #define Q6SS_CLK_ENABLE BIT(1) 89 90 /* QDSP6SS_PWR_CTL */ 91 #define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0) 92 #define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1) 93 #define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2) 94 #define Q6SS_L2TAG_SLP_NRET_N BIT(16) 95 #define Q6SS_ETB_SLP_NRET_N BIT(17) 96 #define Q6SS_L2DATA_STBY_N BIT(18) 97 #define Q6SS_SLP_RET_N BIT(19) 98 #define Q6SS_CLAMP_IO BIT(20) 99 #define QDSS_BHS_ON BIT(21) 100 #define QDSS_LDO_BYP BIT(22) 101 102 /* QDSP6v56 parameters */ 103 #define QDSP6v56_LDO_BYP BIT(25) 104 #define QDSP6v56_BHS_ON BIT(24) 105 #define QDSP6v56_CLAMP_WL BIT(21) 106 #define QDSP6v56_CLAMP_QMC_MEM BIT(22) 107 #define QDSP6SS_XO_CBCR 0x0038 108 #define QDSP6SS_ACC_OVERRIDE_VAL 0x20 109 110 /* QDSP6v65 parameters */ 111 #define QDSP6SS_CORE_CBCR 0x20 112 #define QDSP6SS_SLEEP 0x3C 113 #define QDSP6SS_BOOT_CORE_START 0x400 114 #define QDSP6SS_BOOT_CMD 0x404 115 #define QDSP6SS_BOOT_STATUS 0x408 116 #define BOOT_STATUS_TIMEOUT_US 200 117 #define BOOT_FSM_TIMEOUT 10000 118 119 struct reg_info { 120 struct regulator *reg; 121 int uV; 122 int uA; 123 }; 124 125 struct qcom_mss_reg_res { 126 const char *supply; 127 int uV; 128 int uA; 129 }; 130 131 struct rproc_hexagon_res { 132 const char *hexagon_mba_image; 133 struct qcom_mss_reg_res *proxy_supply; 134 struct qcom_mss_reg_res *active_supply; 135 char **proxy_clk_names; 136 char **reset_clk_names; 137 char **active_clk_names; 138 char 
**active_pd_names; 139 char **proxy_pd_names; 140 int version; 141 bool need_mem_protection; 142 bool has_alt_reset; 143 bool has_spare_reg; 144 }; 145 146 struct q6v5 { 147 struct device *dev; 148 struct rproc *rproc; 149 150 void __iomem *reg_base; 151 void __iomem *rmb_base; 152 153 struct regmap *halt_map; 154 struct regmap *conn_map; 155 156 u32 halt_q6; 157 u32 halt_modem; 158 u32 halt_nc; 159 u32 conn_box; 160 161 struct reset_control *mss_restart; 162 struct reset_control *pdc_reset; 163 164 struct qcom_q6v5 q6v5; 165 166 struct clk *active_clks[8]; 167 struct clk *reset_clks[4]; 168 struct clk *proxy_clks[4]; 169 struct device *active_pds[1]; 170 struct device *proxy_pds[3]; 171 int active_clk_count; 172 int reset_clk_count; 173 int proxy_clk_count; 174 int active_pd_count; 175 int proxy_pd_count; 176 177 struct reg_info active_regs[1]; 178 struct reg_info proxy_regs[3]; 179 int active_reg_count; 180 int proxy_reg_count; 181 182 bool running; 183 184 bool dump_mba_loaded; 185 unsigned long dump_segment_mask; 186 unsigned long dump_complete_mask; 187 188 phys_addr_t mba_phys; 189 void *mba_region; 190 size_t mba_size; 191 192 phys_addr_t mpss_phys; 193 phys_addr_t mpss_reloc; 194 size_t mpss_size; 195 196 struct qcom_rproc_glink glink_subdev; 197 struct qcom_rproc_subdev smd_subdev; 198 struct qcom_rproc_ssr ssr_subdev; 199 struct qcom_rproc_ipa_notify ipa_notify_subdev; 200 struct qcom_sysmon *sysmon; 201 bool need_mem_protection; 202 bool has_alt_reset; 203 bool has_spare_reg; 204 int mpss_perm; 205 int mba_perm; 206 const char *hexagon_mdt_image; 207 int version; 208 }; 209 210 enum { 211 MSS_MSM8916, 212 MSS_MSM8974, 213 MSS_MSM8996, 214 MSS_MSM8998, 215 MSS_SC7180, 216 MSS_SDM845, 217 }; 218 219 static int q6v5_regulator_init(struct device *dev, struct reg_info *regs, 220 const struct qcom_mss_reg_res *reg_res) 221 { 222 int rc; 223 int i; 224 225 if (!reg_res) 226 return 0; 227 228 for (i = 0; reg_res[i].supply; i++) { 229 regs[i].reg = 
devm_regulator_get(dev, reg_res[i].supply); 230 if (IS_ERR(regs[i].reg)) { 231 rc = PTR_ERR(regs[i].reg); 232 if (rc != -EPROBE_DEFER) 233 dev_err(dev, "Failed to get %s\n regulator", 234 reg_res[i].supply); 235 return rc; 236 } 237 238 regs[i].uV = reg_res[i].uV; 239 regs[i].uA = reg_res[i].uA; 240 } 241 242 return i; 243 } 244 245 static int q6v5_regulator_enable(struct q6v5 *qproc, 246 struct reg_info *regs, int count) 247 { 248 int ret; 249 int i; 250 251 for (i = 0; i < count; i++) { 252 if (regs[i].uV > 0) { 253 ret = regulator_set_voltage(regs[i].reg, 254 regs[i].uV, INT_MAX); 255 if (ret) { 256 dev_err(qproc->dev, 257 "Failed to request voltage for %d.\n", 258 i); 259 goto err; 260 } 261 } 262 263 if (regs[i].uA > 0) { 264 ret = regulator_set_load(regs[i].reg, 265 regs[i].uA); 266 if (ret < 0) { 267 dev_err(qproc->dev, 268 "Failed to set regulator mode\n"); 269 goto err; 270 } 271 } 272 273 ret = regulator_enable(regs[i].reg); 274 if (ret) { 275 dev_err(qproc->dev, "Regulator enable failed\n"); 276 goto err; 277 } 278 } 279 280 return 0; 281 err: 282 for (; i >= 0; i--) { 283 if (regs[i].uV > 0) 284 regulator_set_voltage(regs[i].reg, 0, INT_MAX); 285 286 if (regs[i].uA > 0) 287 regulator_set_load(regs[i].reg, 0); 288 289 regulator_disable(regs[i].reg); 290 } 291 292 return ret; 293 } 294 295 static void q6v5_regulator_disable(struct q6v5 *qproc, 296 struct reg_info *regs, int count) 297 { 298 int i; 299 300 for (i = 0; i < count; i++) { 301 if (regs[i].uV > 0) 302 regulator_set_voltage(regs[i].reg, 0, INT_MAX); 303 304 if (regs[i].uA > 0) 305 regulator_set_load(regs[i].reg, 0); 306 307 regulator_disable(regs[i].reg); 308 } 309 } 310 311 static int q6v5_clk_enable(struct device *dev, 312 struct clk **clks, int count) 313 { 314 int rc; 315 int i; 316 317 for (i = 0; i < count; i++) { 318 rc = clk_prepare_enable(clks[i]); 319 if (rc) { 320 dev_err(dev, "Clock enable failed\n"); 321 goto err; 322 } 323 } 324 325 return 0; 326 err: 327 for (i--; i >= 0; i--) 328 
clk_disable_unprepare(clks[i]); 329 330 return rc; 331 } 332 333 static void q6v5_clk_disable(struct device *dev, 334 struct clk **clks, int count) 335 { 336 int i; 337 338 for (i = 0; i < count; i++) 339 clk_disable_unprepare(clks[i]); 340 } 341 342 static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds, 343 size_t pd_count) 344 { 345 int ret; 346 int i; 347 348 for (i = 0; i < pd_count; i++) { 349 dev_pm_genpd_set_performance_state(pds[i], INT_MAX); 350 ret = pm_runtime_get_sync(pds[i]); 351 if (ret < 0) 352 goto unroll_pd_votes; 353 } 354 355 return 0; 356 357 unroll_pd_votes: 358 for (i--; i >= 0; i--) { 359 dev_pm_genpd_set_performance_state(pds[i], 0); 360 pm_runtime_put(pds[i]); 361 } 362 363 return ret; 364 } 365 366 static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds, 367 size_t pd_count) 368 { 369 int i; 370 371 for (i = 0; i < pd_count; i++) { 372 dev_pm_genpd_set_performance_state(pds[i], 0); 373 pm_runtime_put(pds[i]); 374 } 375 } 376 377 static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm, 378 bool local, bool remote, phys_addr_t addr, 379 size_t size) 380 { 381 struct qcom_scm_vmperm next[2]; 382 int perms = 0; 383 384 if (!qproc->need_mem_protection) 385 return 0; 386 387 if (local == !!(*current_perm & BIT(QCOM_SCM_VMID_HLOS)) && 388 remote == !!(*current_perm & BIT(QCOM_SCM_VMID_MSS_MSA))) 389 return 0; 390 391 if (local) { 392 next[perms].vmid = QCOM_SCM_VMID_HLOS; 393 next[perms].perm = QCOM_SCM_PERM_RWX; 394 perms++; 395 } 396 397 if (remote) { 398 next[perms].vmid = QCOM_SCM_VMID_MSS_MSA; 399 next[perms].perm = QCOM_SCM_PERM_RW; 400 perms++; 401 } 402 403 return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K), 404 current_perm, next, perms); 405 } 406 407 static int q6v5_load(struct rproc *rproc, const struct firmware *fw) 408 { 409 struct q6v5 *qproc = rproc->priv; 410 411 memcpy(qproc->mba_region, fw->data, fw->size); 412 413 return 0; 414 } 415 416 static int q6v5_reset_assert(struct q6v5 
*qproc) 417 { 418 int ret; 419 420 if (qproc->has_alt_reset) { 421 reset_control_assert(qproc->pdc_reset); 422 ret = reset_control_reset(qproc->mss_restart); 423 reset_control_deassert(qproc->pdc_reset); 424 } else if (qproc->has_spare_reg) { 425 /* 426 * When the AXI pipeline is being reset with the Q6 modem partly 427 * operational there is possibility of AXI valid signal to 428 * glitch, leading to spurious transactions and Q6 hangs. A work 429 * around is employed by asserting the AXI_GATING_VALID_OVERRIDE 430 * BIT before triggering Q6 MSS reset. AXI_GATING_VALID_OVERRIDE 431 * is withdrawn post MSS assert followed by a MSS deassert, 432 * while holding the PDC reset. 433 */ 434 reset_control_assert(qproc->pdc_reset); 435 regmap_update_bits(qproc->conn_map, qproc->conn_box, 436 AXI_GATING_VALID_OVERRIDE, 1); 437 reset_control_assert(qproc->mss_restart); 438 reset_control_deassert(qproc->pdc_reset); 439 regmap_update_bits(qproc->conn_map, qproc->conn_box, 440 AXI_GATING_VALID_OVERRIDE, 0); 441 ret = reset_control_deassert(qproc->mss_restart); 442 } else { 443 ret = reset_control_assert(qproc->mss_restart); 444 } 445 446 return ret; 447 } 448 449 static int q6v5_reset_deassert(struct q6v5 *qproc) 450 { 451 int ret; 452 453 if (qproc->has_alt_reset) { 454 reset_control_assert(qproc->pdc_reset); 455 writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET); 456 ret = reset_control_reset(qproc->mss_restart); 457 writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET); 458 reset_control_deassert(qproc->pdc_reset); 459 } else if (qproc->has_spare_reg) { 460 ret = reset_control_reset(qproc->mss_restart); 461 } else { 462 ret = reset_control_deassert(qproc->mss_restart); 463 } 464 465 return ret; 466 } 467 468 static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms) 469 { 470 unsigned long timeout; 471 s32 val; 472 473 timeout = jiffies + msecs_to_jiffies(ms); 474 for (;;) { 475 val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG); 476 if (val) 477 break; 478 479 if (time_after(jiffies, 
/*
 * Poll the MBA status register until @status is observed (or any nonzero
 * value when @status is 0), a negative error code is reported by the MBA,
 * or @ms milliseconds elapse.  Returns the register value or -ETIMEDOUT.
 */
static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
{

	unsigned long timeout;
	s32 val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
		if (val < 0)
			break;

		if (!status && val)
			break;
		else if (status && val == status)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

/*
 * Bring the Hexagon core out of reset using the power-up sequence for this
 * SoC generation, then wait for the PBL to report success.
 *
 * NOTE(review): the register write ordering below is hardware-mandated for
 * each variant; do not reorder.
 */
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;
	int i;

	if (qproc->version == MSS_SDM845) {
		/* Enable the QDSP6SS sleep clock and wait for it to run */
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* De-assert QDSP6 stop core */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
		/* Trigger boot FSM */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
				val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}

		goto pbl_wait;
	} else if (qproc->version == MSS_SC7180) {
		/* Enable the QDSP6SS sleep clock and wait for it to run */
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Turn on the XO clock needed for PLL setup */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS XO clock timed out\n");
			return -ETIMEDOUT;
		}

		/* Configure Q6 core CBCR to auto-enable after reset sequence */
		val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR);

		/* De-assert the Q6 stop core signal */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);

		/* Trigger the boot FSM to start the Q6 out-of-reset sequence */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		/* Poll the QDSP6SS_BOOT_STATUS for FSM completion */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_BOOT_STATUS,
					 val, (val & BIT(0)) != 0, 1,
					 BOOT_STATUS_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/* Reset the modem so that boot FSM is in reset state */
			q6v5_reset_deassert(qproc);
			return ret;
		}
		goto pbl_wait;
	} else if (qproc->version == MSS_MSM8996 ||
		   qproc->version == MSS_MSM8998) {
		int mem_pwr_ctl;

		/* Override the ACC value if required */
		writel(QDSP6SS_ACC_OVERRIDE_VAL,
		       qproc->reg_base + QDSP6SS_STRAP_ACC);

		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* BHS require xo cbcr to be enabled */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= Q6SS_CBCR_CLKEN;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		/* Read CLKOFF bit to go low indicating CLK is enabled */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & Q6SS_CBCR_CLKOFF), 1,
					 Q6SS_CBCR_TIMEOUT_US);
		if (ret) {
			dev_err(qproc->dev,
				"xo cbcr enabling timed out (rc:%d)\n", ret);
			return ret;
		}
		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSP6v56_BHS_ON;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		/* Read back to ensure the write landed before the delay */
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);

		/* Put LDO in bypass mode */
		val |= QDSP6v56_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert QDSP6 compiler memory clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert memory peripheral sleep and L2 memory standby */
		val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Turn on L1, L2, ETB and JU memories 1 at a time */
		if (qproc->version == MSS_MSM8996) {
			mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL;
			i = 19;
		} else {
			/* MSS_MSM8998 */
			mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL;
			i = 28;
		}
		val = readl(qproc->reg_base + mem_pwr_ctl);
		for (; i >= 0; i--) {
			val |= BIT(i);
			writel(val, qproc->reg_base + mem_pwr_ctl);
			/*
			 * Read back value to ensure the write is done then
			 * wait for 1us for both memory peripheral and data
			 * array to turn on.
			 */
			val |= readl(qproc->reg_base + mem_pwr_ctl);
			udelay(1);
		}
		/* Remove word line clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_WL;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	} else {
		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);
		/*
		 * Turn on memories. L2 banks should be done individually
		 * to minimize inrush current.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_2;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_1;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_0;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}
	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

pbl_wait:
	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}

/*
 * Halt one AXI port (at @offset in @halt_map) so the bus fabric stops
 * accepting transactions from the modem.  Best-effort: failure to see the
 * ack/idle state is logged but not fatal; the port remains halted until the
 * next subsystem reset.
 */
static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
				   struct regmap *halt_map,
				   u32 offset)
{
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (!ret && val)
		return;

	/* Assert halt request */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);

	/* Wait for halt */
	regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val,
				 val, 1000, HALT_ACK_TIMEOUT_US);

	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (ret || !val)
		dev_err(qproc->dev, "port failed halt\n");

	/* Clear halt request (port will remain halted until reset) */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}

/*
 * Extract the MDT metadata from @fw, place it in a DMA buffer the modem is
 * granted access to, and ask the MBA to authenticate the headers.  Modem
 * access to the buffer is always revoked again before returning.
 * Returns 0 on success or -errno.
 */
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	void *metadata;
	int mdata_perm;
	int xferop_ret;
	size_t size;
	void *ptr;
	int ret;

	metadata = qcom_mdt_read_metadata(fw, &size);
	if (IS_ERR(metadata))
		return PTR_ERR(metadata);

	ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
	if (!ptr) {
		kfree(metadata);
		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
		return -ENOMEM;
	}

	memcpy(ptr, metadata, size);

	/* Hypervisor mapping to access metadata by modem */
	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true,
				      phys, size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to metadata failed: %d\n", ret);
		ret = -EAGAIN;
		goto free_dma_attrs;
	}

	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	/* Metadata authentication done, remove modem access */
	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false,
					     phys, size);
	if (xferop_ret)
		dev_warn(qproc->dev,
			 "mdt buffer not reclaimed system may become unstable\n");

free_dma_attrs:
	dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
	kfree(metadata);

	/* Positive RMB status values mean success */
	return ret < 0 ? ret : 0;
}
/*
 * An ELF program header describes loadable modem data only if it is a
 * PT_LOAD segment, is not the MDT hash segment, and has a nonzero size.
 */
static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
{
	if (phdr->p_type != PT_LOAD)
		return false;

	if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
		return false;

	if (!phdr->p_memsz)
		return false;

	return true;
}

/*
 * Power up the modem subsystem and boot it into the Modem Boot Authenticator
 * (MBA): enable power domains, supplies and clocks, release resets, hand the
 * MBA carveout to the modem and run the boot FSM until the MBA reports XPU
 * unlock.  On any failure the sequence is unwound in reverse order.
 * Returns 0 on success or -errno.
 */
static int q6v5_mba_load(struct q6v5 *qproc)
{
	int ret;
	int xfermemop_ret;

	qcom_q6v5_prepare(&qproc->q6v5);

	ret = q6v5_pds_enable(qproc, qproc->active_pds, qproc->active_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable active power domains\n");
		goto disable_irqs;
	}

	ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
	if (ret < 0) {
		dev_err(qproc->dev, "failed to enable proxy power domains\n");
		goto disable_active_pds;
	}

	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
				    qproc->proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy supplies\n");
		goto disable_proxy_pds;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
			      qproc->proxy_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy clocks\n");
		goto disable_proxy_reg;
	}

	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
				    qproc->active_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		goto disable_proxy_clk;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
			      qproc->reset_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable reset clocks\n");
		goto disable_vdd;
	}

	ret = q6v5_reset_deassert(qproc);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_reset_clks;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
			      qproc->active_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable clocks\n");
		goto assert_reset;
	}

	/* Assign MBA image access in DDR to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true,
				      qproc->mba_phys, qproc->mba_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mba memory failed: %d\n", ret);
		goto disable_active_clks;
	}

	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto reclaim_mba;

	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	qproc->dump_mba_loaded = true;
	return 0;

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);

reclaim_mba:
	/* Take the MBA carveout back from the modem before powering down */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						false, qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret) {
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");
	}

disable_active_clks:
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
assert_reset:
	q6v5_reset_assert(qproc);
disable_reset_clks:
	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
disable_vdd:
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
disable_proxy_clk:
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
disable_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
disable_proxy_pds:
	q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
disable_active_pds:
	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
disable_irqs:
	qcom_q6v5_unprepare(&qproc->q6v5);

	return ret;
}

/*
 * Power the modem subsystem back down after the MBA has been used (shutdown
 * or post-coredump): halt the AXI ports, assert reset, drop clocks/supplies
 * and reclaim the MBA carveout for Linux.  Proxy resources are only released
 * here if the handover interrupt never fired (qcom_q6v5_unprepare() != 0).
 */
static void q6v5_mba_reclaim(struct q6v5 *qproc)
{
	int ret;
	u32 val;

	qproc->dump_mba_loaded = false;

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	if (qproc->version == MSS_MSM8996) {
		/*
		 * To avoid high MX current during LPASS/MSS restart.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
			QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}

	q6v5_reset_assert(qproc);

	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
	q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);

	/* In case of failure or coredump scenario where reclaiming MBA memory
	 * could not happen reclaim it here.
	 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false,
				      qproc->mba_phys,
				      qproc->mba_size);
	WARN_ON(ret);

	ret = qcom_q6v5_unprepare(&qproc->q6v5);
	if (ret) {
		q6v5_pds_disable(qproc, qproc->proxy_pds,
				 qproc->proxy_pd_count);
		q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
				 qproc->proxy_clk_count);
		q6v5_regulator_disable(qproc, qproc->proxy_regs,
				       qproc->proxy_reg_count);
	}
}
971 */ 972 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 973 val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL | 974 QDSP6v56_CLAMP_QMC_MEM; 975 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 976 } 977 978 q6v5_reset_assert(qproc); 979 980 q6v5_clk_disable(qproc->dev, qproc->reset_clks, 981 qproc->reset_clk_count); 982 q6v5_clk_disable(qproc->dev, qproc->active_clks, 983 qproc->active_clk_count); 984 q6v5_regulator_disable(qproc, qproc->active_regs, 985 qproc->active_reg_count); 986 q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count); 987 988 /* In case of failure or coredump scenario where reclaiming MBA memory 989 * could not happen reclaim it here. 990 */ 991 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, 992 qproc->mba_phys, 993 qproc->mba_size); 994 WARN_ON(ret); 995 996 ret = qcom_q6v5_unprepare(&qproc->q6v5); 997 if (ret) { 998 q6v5_pds_disable(qproc, qproc->proxy_pds, 999 qproc->proxy_pd_count); 1000 q6v5_clk_disable(qproc->dev, qproc->proxy_clks, 1001 qproc->proxy_clk_count); 1002 q6v5_regulator_disable(qproc, qproc->proxy_regs, 1003 qproc->proxy_reg_count); 1004 } 1005 } 1006 1007 static int q6v5_reload_mba(struct rproc *rproc) 1008 { 1009 struct q6v5 *qproc = rproc->priv; 1010 const struct firmware *fw; 1011 int ret; 1012 1013 ret = request_firmware(&fw, rproc->firmware, qproc->dev); 1014 if (ret < 0) 1015 return ret; 1016 1017 q6v5_load(rproc, fw); 1018 ret = q6v5_mba_load(qproc); 1019 release_firmware(fw); 1020 1021 return ret; 1022 } 1023 1024 static int q6v5_mpss_load(struct q6v5 *qproc) 1025 { 1026 const struct elf32_phdr *phdrs; 1027 const struct elf32_phdr *phdr; 1028 const struct firmware *seg_fw; 1029 const struct firmware *fw; 1030 struct elf32_hdr *ehdr; 1031 phys_addr_t mpss_reloc; 1032 phys_addr_t boot_addr; 1033 phys_addr_t min_addr = PHYS_ADDR_MAX; 1034 phys_addr_t max_addr = 0; 1035 u32 code_length; 1036 bool relocate = false; 1037 char *fw_name; 1038 size_t fw_name_len; 1039 ssize_t offset; 1040 
size_t size = 0; 1041 void *ptr; 1042 int ret; 1043 int i; 1044 1045 fw_name_len = strlen(qproc->hexagon_mdt_image); 1046 if (fw_name_len <= 4) 1047 return -EINVAL; 1048 1049 fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL); 1050 if (!fw_name) 1051 return -ENOMEM; 1052 1053 ret = request_firmware(&fw, fw_name, qproc->dev); 1054 if (ret < 0) { 1055 dev_err(qproc->dev, "unable to load %s\n", fw_name); 1056 goto out; 1057 } 1058 1059 /* Initialize the RMB validator */ 1060 writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); 1061 1062 ret = q6v5_mpss_init_image(qproc, fw); 1063 if (ret) 1064 goto release_firmware; 1065 1066 ehdr = (struct elf32_hdr *)fw->data; 1067 phdrs = (struct elf32_phdr *)(ehdr + 1); 1068 1069 for (i = 0; i < ehdr->e_phnum; i++) { 1070 phdr = &phdrs[i]; 1071 1072 if (!q6v5_phdr_valid(phdr)) 1073 continue; 1074 1075 if (phdr->p_flags & QCOM_MDT_RELOCATABLE) 1076 relocate = true; 1077 1078 if (phdr->p_paddr < min_addr) 1079 min_addr = phdr->p_paddr; 1080 1081 if (phdr->p_paddr + phdr->p_memsz > max_addr) 1082 max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K); 1083 } 1084 1085 /** 1086 * In case of a modem subsystem restart on secure devices, the modem 1087 * memory can be reclaimed only after MBA is loaded. For modem cold 1088 * boot this will be a nop 1089 */ 1090 q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false, 1091 qproc->mpss_phys, qproc->mpss_size); 1092 1093 /* Share ownership between Linux and MSS, during segment loading */ 1094 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true, 1095 qproc->mpss_phys, qproc->mpss_size); 1096 if (ret) { 1097 dev_err(qproc->dev, 1098 "assigning Q6 access to mpss memory failed: %d\n", ret); 1099 ret = -EAGAIN; 1100 goto release_firmware; 1101 } 1102 1103 mpss_reloc = relocate ? 
min_addr : qproc->mpss_phys; 1104 qproc->mpss_reloc = mpss_reloc; 1105 /* Load firmware segments */ 1106 for (i = 0; i < ehdr->e_phnum; i++) { 1107 phdr = &phdrs[i]; 1108 1109 if (!q6v5_phdr_valid(phdr)) 1110 continue; 1111 1112 offset = phdr->p_paddr - mpss_reloc; 1113 if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) { 1114 dev_err(qproc->dev, "segment outside memory range\n"); 1115 ret = -EINVAL; 1116 goto release_firmware; 1117 } 1118 1119 ptr = ioremap_wc(qproc->mpss_phys + offset, phdr->p_memsz); 1120 if (!ptr) { 1121 dev_err(qproc->dev, 1122 "unable to map memory region: %pa+%zx-%x\n", 1123 &qproc->mpss_phys, offset, phdr->p_memsz); 1124 goto release_firmware; 1125 } 1126 1127 if (phdr->p_filesz && phdr->p_offset < fw->size) { 1128 /* Firmware is large enough to be non-split */ 1129 if (phdr->p_offset + phdr->p_filesz > fw->size) { 1130 dev_err(qproc->dev, 1131 "failed to load segment %d from truncated file %s\n", 1132 i, fw_name); 1133 ret = -EINVAL; 1134 iounmap(ptr); 1135 goto release_firmware; 1136 } 1137 1138 memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz); 1139 } else if (phdr->p_filesz) { 1140 /* Replace "xxx.xxx" with "xxx.bxx" */ 1141 sprintf(fw_name + fw_name_len - 3, "b%02d", i); 1142 ret = request_firmware(&seg_fw, fw_name, qproc->dev); 1143 if (ret) { 1144 dev_err(qproc->dev, "failed to load %s\n", fw_name); 1145 iounmap(ptr); 1146 goto release_firmware; 1147 } 1148 1149 memcpy(ptr, seg_fw->data, seg_fw->size); 1150 1151 release_firmware(seg_fw); 1152 } 1153 1154 if (phdr->p_memsz > phdr->p_filesz) { 1155 memset(ptr + phdr->p_filesz, 0, 1156 phdr->p_memsz - phdr->p_filesz); 1157 } 1158 iounmap(ptr); 1159 size += phdr->p_memsz; 1160 1161 code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); 1162 if (!code_length) { 1163 boot_addr = relocate ? 
qproc->mpss_phys : min_addr; 1164 writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG); 1165 writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG); 1166 } 1167 writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); 1168 1169 ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG); 1170 if (ret < 0) { 1171 dev_err(qproc->dev, "MPSS authentication failed: %d\n", 1172 ret); 1173 goto release_firmware; 1174 } 1175 } 1176 1177 /* Transfer ownership of modem ddr region to q6 */ 1178 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true, 1179 qproc->mpss_phys, qproc->mpss_size); 1180 if (ret) { 1181 dev_err(qproc->dev, 1182 "assigning Q6 access to mpss memory failed: %d\n", ret); 1183 ret = -EAGAIN; 1184 goto release_firmware; 1185 } 1186 1187 ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000); 1188 if (ret == -ETIMEDOUT) 1189 dev_err(qproc->dev, "MPSS authentication timed out\n"); 1190 else if (ret < 0) 1191 dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret); 1192 1193 qcom_pil_info_store("modem", qproc->mpss_phys, qproc->mpss_size); 1194 1195 release_firmware: 1196 release_firmware(fw); 1197 out: 1198 kfree(fw_name); 1199 1200 return ret < 0 ? 
	       ret : 0;
}

/*
 * Coredump callback: copy one previously registered segment of the modem
 * region into @dest.  If the MBA is not currently loaded it is reloaded and
 * memory ownership is pulled back to Linux so the region is readable; once
 * the last segment (tracked in dump_segment_mask) has been copied, ownership
 * is handed back to Q6 and the MBA is reclaimed.
 */
static void qcom_q6v5_dump_segment(struct rproc *rproc,
				   struct rproc_dump_segment *segment,
				   void *dest)
{
	int ret = 0;
	struct q6v5 *qproc = rproc->priv;
	/* segment->priv carries the ELF program-header index (see registration) */
	unsigned long mask = BIT((unsigned long)segment->priv);
	int offset = segment->da - qproc->mpss_reloc;
	void *ptr = NULL;

	/* Unlock mba before copying segments */
	if (!qproc->dump_mba_loaded) {
		ret = q6v5_reload_mba(rproc);
		if (!ret) {
			/* Reset ownership back to Linux to copy segments */
			ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						      true, false,
						      qproc->mpss_phys,
						      qproc->mpss_size);
		}
	}

	if (!ret)
		ptr = ioremap_wc(qproc->mpss_phys + offset, segment->size);

	if (ptr) {
		memcpy(dest, ptr, segment->size);
		iounmap(ptr);
	} else {
		/* Segment unreadable: fill with a recognizable 0xff pattern */
		memset(dest, 0xff, segment->size);
	}

	qproc->dump_segment_mask |= mask;

	/* Reclaim mba after copying segments */
	if (qproc->dump_segment_mask == qproc->dump_complete_mask) {
		if (qproc->dump_mba_loaded) {
			/* Try to reset ownership back to Q6 */
			q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						false, true,
						qproc->mpss_phys,
						qproc->mpss_size);
			q6v5_mba_reclaim(qproc);
		}
	}
}

/*
 * rproc .start op: boot the MBA, load and authenticate the modem (mpss)
 * firmware, then wait up to 5s for the modem to signal it has started.
 * On failure after the MBA booted, the MBA/mpss resources are reclaimed.
 */
static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int xfermemop_ret;
	int ret;

	ret = q6v5_mba_load(qproc);
	if (ret)
		return ret;

	dev_info(qproc->dev, "MBA booted, loading mpss\n");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto reclaim_mpss;

	ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "start timed out\n");
		goto reclaim_mpss;
	}

	/* Give the MBA buffer back to Linux now that boot has completed */
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
						false, qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret)
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer system may become unstable\n");

	/* Reset Dump Segment Mask */
	qproc->dump_segment_mask = 0;
	qproc->running = true;

	return 0;

reclaim_mpss:
	q6v5_mba_reclaim(qproc);

	return ret;
}

/*
 * rproc .stop op: request a graceful stop from the modem, then tear down
 * the MBA.  Returns 0 even if the stop request timed out (only logged).
 */
static int q6v5_stop(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int ret;

	qproc->running = false;

	ret = qcom_q6v5_request_stop(&qproc->q6v5);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "timed out on wait\n");

	q6v5_mba_reclaim(qproc);

	return 0;
}

/*
 * rproc .parse_fw op: walk the mdt program headers and register one coredump
 * segment per valid loadable segment.  The header index is stashed in the
 * segment's priv pointer and mirrored into dump_complete_mask so the dump
 * callback can tell when the last segment has been copied.
 */
static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
					    const struct firmware *mba_fw)
{
	const struct firmware *fw;
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct elf32_hdr *ehdr;
	struct q6v5 *qproc = rproc->priv;
	unsigned long i;
	int ret;

	/* Segments are described by the mdt header, not the MBA firmware */
	ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load %s\n",
			qproc->hexagon_mdt_image);
		return ret;
	}

	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);
	qproc->dump_complete_mask = 0;

	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
							phdr->p_memsz,
							qcom_q6v5_dump_segment,
							(void *)i);
		if (ret)
			break;

		qproc->dump_complete_mask |= BIT(i);
	}

	release_firmware(fw);
	return ret;
}

static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.parse_fw = qcom_q6v5_register_dump_segments,
	.load = q6v5_load,
};

static void qcom_msa_handover(struct qcom_q6v5 *q6v5) 1359 { 1360 struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5); 1361 1362 q6v5_clk_disable(qproc->dev, qproc->proxy_clks, 1363 qproc->proxy_clk_count); 1364 q6v5_regulator_disable(qproc, qproc->proxy_regs, 1365 qproc->proxy_reg_count); 1366 q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count); 1367 } 1368 1369 static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev) 1370 { 1371 struct of_phandle_args args; 1372 struct resource *res; 1373 int ret; 1374 1375 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6"); 1376 qproc->reg_base = devm_ioremap_resource(&pdev->dev, res); 1377 if (IS_ERR(qproc->reg_base)) 1378 return PTR_ERR(qproc->reg_base); 1379 1380 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb"); 1381 qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res); 1382 if (IS_ERR(qproc->rmb_base)) 1383 return PTR_ERR(qproc->rmb_base); 1384 1385 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, 1386 "qcom,halt-regs", 3, 0, &args); 1387 if (ret < 0) { 1388 dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n"); 1389 return -EINVAL; 1390 } 1391 1392 qproc->halt_map = syscon_node_to_regmap(args.np); 1393 of_node_put(args.np); 1394 if (IS_ERR(qproc->halt_map)) 1395 return PTR_ERR(qproc->halt_map); 1396 1397 qproc->halt_q6 = args.args[0]; 1398 qproc->halt_modem = args.args[1]; 1399 qproc->halt_nc = args.args[2]; 1400 1401 if (qproc->has_spare_reg) { 1402 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, 1403 "qcom,spare-regs", 1404 1, 0, &args); 1405 if (ret < 0) { 1406 dev_err(&pdev->dev, "failed to parse spare-regs\n"); 1407 return -EINVAL; 1408 } 1409 1410 qproc->conn_map = syscon_node_to_regmap(args.np); 1411 of_node_put(args.np); 1412 if (IS_ERR(qproc->conn_map)) 1413 return PTR_ERR(qproc->conn_map); 1414 1415 qproc->conn_box = args.args[0]; 1416 } 1417 1418 return 0; 1419 } 1420 1421 static int q6v5_init_clocks(struct device *dev, 
struct clk **clks, 1422 char **clk_names) 1423 { 1424 int i; 1425 1426 if (!clk_names) 1427 return 0; 1428 1429 for (i = 0; clk_names[i]; i++) { 1430 clks[i] = devm_clk_get(dev, clk_names[i]); 1431 if (IS_ERR(clks[i])) { 1432 int rc = PTR_ERR(clks[i]); 1433 1434 if (rc != -EPROBE_DEFER) 1435 dev_err(dev, "Failed to get %s clock\n", 1436 clk_names[i]); 1437 return rc; 1438 } 1439 } 1440 1441 return i; 1442 } 1443 1444 static int q6v5_pds_attach(struct device *dev, struct device **devs, 1445 char **pd_names) 1446 { 1447 size_t num_pds = 0; 1448 int ret; 1449 int i; 1450 1451 if (!pd_names) 1452 return 0; 1453 1454 while (pd_names[num_pds]) 1455 num_pds++; 1456 1457 for (i = 0; i < num_pds; i++) { 1458 devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]); 1459 if (IS_ERR_OR_NULL(devs[i])) { 1460 ret = PTR_ERR(devs[i]) ? : -ENODATA; 1461 goto unroll_attach; 1462 } 1463 } 1464 1465 return num_pds; 1466 1467 unroll_attach: 1468 for (i--; i >= 0; i--) 1469 dev_pm_domain_detach(devs[i], false); 1470 1471 return ret; 1472 } 1473 1474 static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds, 1475 size_t pd_count) 1476 { 1477 int i; 1478 1479 for (i = 0; i < pd_count; i++) 1480 dev_pm_domain_detach(pds[i], false); 1481 } 1482 1483 static int q6v5_init_reset(struct q6v5 *qproc) 1484 { 1485 qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev, 1486 "mss_restart"); 1487 if (IS_ERR(qproc->mss_restart)) { 1488 dev_err(qproc->dev, "failed to acquire mss restart\n"); 1489 return PTR_ERR(qproc->mss_restart); 1490 } 1491 1492 if (qproc->has_alt_reset || qproc->has_spare_reg) { 1493 qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev, 1494 "pdc_reset"); 1495 if (IS_ERR(qproc->pdc_reset)) { 1496 dev_err(qproc->dev, "failed to acquire pdc reset\n"); 1497 return PTR_ERR(qproc->pdc_reset); 1498 } 1499 } 1500 1501 return 0; 1502 } 1503 1504 static int q6v5_alloc_memory_region(struct q6v5 *qproc) 1505 { 1506 struct device_node *child; 1507 struct 
device_node *node; 1508 struct resource r; 1509 int ret; 1510 1511 /* 1512 * In the absence of mba/mpss sub-child, extract the mba and mpss 1513 * reserved memory regions from device's memory-region property. 1514 */ 1515 child = of_get_child_by_name(qproc->dev->of_node, "mba"); 1516 if (!child) 1517 node = of_parse_phandle(qproc->dev->of_node, 1518 "memory-region", 0); 1519 else 1520 node = of_parse_phandle(child, "memory-region", 0); 1521 1522 ret = of_address_to_resource(node, 0, &r); 1523 if (ret) { 1524 dev_err(qproc->dev, "unable to resolve mba region\n"); 1525 return ret; 1526 } 1527 of_node_put(node); 1528 1529 qproc->mba_phys = r.start; 1530 qproc->mba_size = resource_size(&r); 1531 qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size); 1532 if (!qproc->mba_region) { 1533 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", 1534 &r.start, qproc->mba_size); 1535 return -EBUSY; 1536 } 1537 1538 if (!child) { 1539 node = of_parse_phandle(qproc->dev->of_node, 1540 "memory-region", 1); 1541 } else { 1542 child = of_get_child_by_name(qproc->dev->of_node, "mpss"); 1543 node = of_parse_phandle(child, "memory-region", 0); 1544 } 1545 1546 ret = of_address_to_resource(node, 0, &r); 1547 if (ret) { 1548 dev_err(qproc->dev, "unable to resolve mpss region\n"); 1549 return ret; 1550 } 1551 of_node_put(node); 1552 1553 qproc->mpss_phys = qproc->mpss_reloc = r.start; 1554 qproc->mpss_size = resource_size(&r); 1555 1556 return 0; 1557 } 1558 1559 #if IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) 1560 1561 /* Register IPA notification function */ 1562 int qcom_register_ipa_notify(struct rproc *rproc, qcom_ipa_notify_t notify, 1563 void *data) 1564 { 1565 struct qcom_rproc_ipa_notify *ipa_notify; 1566 struct q6v5 *qproc = rproc->priv; 1567 1568 if (!notify) 1569 return -EINVAL; 1570 1571 ipa_notify = &qproc->ipa_notify_subdev; 1572 if (ipa_notify->notify) 1573 return -EBUSY; 1574 1575 ipa_notify->notify = notify; 1576 ipa_notify->data = data; 
1577 1578 return 0; 1579 } 1580 EXPORT_SYMBOL_GPL(qcom_register_ipa_notify); 1581 1582 /* Deregister IPA notification function */ 1583 void qcom_deregister_ipa_notify(struct rproc *rproc) 1584 { 1585 struct q6v5 *qproc = rproc->priv; 1586 1587 qproc->ipa_notify_subdev.notify = NULL; 1588 } 1589 EXPORT_SYMBOL_GPL(qcom_deregister_ipa_notify); 1590 #endif /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */ 1591 1592 static int q6v5_probe(struct platform_device *pdev) 1593 { 1594 const struct rproc_hexagon_res *desc; 1595 struct q6v5 *qproc; 1596 struct rproc *rproc; 1597 const char *mba_image; 1598 int ret; 1599 1600 desc = of_device_get_match_data(&pdev->dev); 1601 if (!desc) 1602 return -EINVAL; 1603 1604 if (desc->need_mem_protection && !qcom_scm_is_available()) 1605 return -EPROBE_DEFER; 1606 1607 mba_image = desc->hexagon_mba_image; 1608 ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name", 1609 0, &mba_image); 1610 if (ret < 0 && ret != -EINVAL) 1611 return ret; 1612 1613 rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops, 1614 mba_image, sizeof(*qproc)); 1615 if (!rproc) { 1616 dev_err(&pdev->dev, "failed to allocate rproc\n"); 1617 return -ENOMEM; 1618 } 1619 1620 rproc->auto_boot = false; 1621 rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE); 1622 1623 qproc = (struct q6v5 *)rproc->priv; 1624 qproc->dev = &pdev->dev; 1625 qproc->rproc = rproc; 1626 qproc->hexagon_mdt_image = "modem.mdt"; 1627 ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name", 1628 1, &qproc->hexagon_mdt_image); 1629 if (ret < 0 && ret != -EINVAL) 1630 goto free_rproc; 1631 1632 platform_set_drvdata(pdev, qproc); 1633 1634 qproc->has_spare_reg = desc->has_spare_reg; 1635 ret = q6v5_init_mem(qproc, pdev); 1636 if (ret) 1637 goto free_rproc; 1638 1639 ret = q6v5_alloc_memory_region(qproc); 1640 if (ret) 1641 goto free_rproc; 1642 1643 ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks, 1644 desc->proxy_clk_names); 1645 if (ret < 0) { 1646 
		dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
		goto free_rproc;
	}
	qproc->proxy_clk_count = ret;

	ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
			       desc->reset_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get reset clocks.\n");
		goto free_rproc;
	}
	qproc->reset_clk_count = ret;

	ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
			       desc->active_clk_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active clocks.\n");
		goto free_rproc;
	}
	qproc->active_clk_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
				  desc->proxy_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
		goto free_rproc;
	}
	qproc->proxy_reg_count = ret;

	ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs,
				  desc->active_supply);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to get active regulators.\n");
		goto free_rproc;
	}
	qproc->active_reg_count = ret;

	ret = q6v5_pds_attach(&pdev->dev, qproc->active_pds,
			      desc->active_pd_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to attach active power domains\n");
		goto free_rproc;
	}
	qproc->active_pd_count = ret;

	ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
			      desc->proxy_pd_names);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to init power domains\n");
		goto detach_active_pds;
	}
	qproc->proxy_pd_count = ret;

	qproc->has_alt_reset = desc->has_alt_reset;
	ret = q6v5_init_reset(qproc);
	if (ret)
		goto detach_proxy_pds;

	qproc->version = desc->version;
	qproc->need_mem_protection = desc->need_mem_protection;

	ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
			     qcom_msa_handover);
	if (ret)
		goto detach_proxy_pds;

	/* Both memory regions start out owned by Linux (HLOS) */
	qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
	qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
	qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss");
	qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
	qcom_add_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev);
	qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
	if (IS_ERR(qproc->sysmon)) {
		ret = PTR_ERR(qproc->sysmon);
		goto remove_subdevs;
	}

	ret = rproc_add(rproc);
	if (ret)
		goto remove_sysmon_subdev;

	return 0;

	/* Unwind in strict reverse order of the setup above */
remove_sysmon_subdev:
	qcom_remove_sysmon_subdev(qproc->sysmon);
remove_subdevs:
	qcom_remove_ipa_notify_subdev(qproc->rproc, &qproc->ipa_notify_subdev);
	qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
	qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
detach_proxy_pds:
	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
detach_active_pds:
	q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
free_rproc:
	rproc_free(rproc);

	return ret;
}

/* Platform remove: mirror of q6v5_probe(), in reverse order */
static int q6v5_remove(struct platform_device *pdev)
{
	struct q6v5 *qproc = platform_get_drvdata(pdev);
	struct rproc *rproc = qproc->rproc;

	rproc_del(rproc);

	qcom_remove_sysmon_subdev(qproc->sysmon);
	qcom_remove_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev);
	qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
	qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
	qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);

	q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
	q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);

	rproc_free(rproc);

	return 0;
}

/* Per-SoC resource description: SC7180 */
static const struct rproc_hexagon_res sc7180_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.reset_clk_names = (char*[]){
		"iface",
		"bus",
		"snoc_axi",
		NULL
	},
	.active_clk_names = (char*[]){
		"mnoc_axi",
		"nav",
		NULL
	},
	.active_pd_names = (char*[]){
		"load_state",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		"mss",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_spare_reg = true,
	.version = MSS_SC7180,
};

/* Per-SoC resource description: SDM845 */
static const struct rproc_hexagon_res sdm845_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		"prng",
		NULL
	},
	.reset_clk_names = (char*[]){
		"iface",
		"snoc_axi",
		NULL
	},
	.active_clk_names = (char*[]){
		"bus",
		"mem",
		"gpll0_mss",
		"mnoc_axi",
		NULL
	},
	.active_pd_names = (char*[]){
		"load_state",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		"mss",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = true,
	.has_spare_reg = false,
	.version = MSS_SDM845,
};

/* Per-SoC resource description: MSM8998 */
static const struct rproc_hexagon_res msm8998_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_clk_names = (char*[]){
		"xo",
		"qdss",
		"mem",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"gpll0_mss",
		"mnoc_axi",
		"snoc_axi",
		NULL
	},
	.proxy_pd_names = (char*[]){
		"cx",
		"mx",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_spare_reg = false,
	.version = MSS_MSM8998,
};

/* Per-SoC resource description: MSM8996 */
static const struct rproc_hexagon_res msm8996_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		"pnoc",
		"qdss",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		"gpll0_mss",
		"snoc_axi",
		"mnoc_axi",
		NULL
	},
	.need_mem_protection = true,
	.has_alt_reset = false,
	.has_spare_reg = false,
	.version = MSS_MSM8996,
};

/* Per-SoC resource description: MSM8916 (no TrustZone memory protection) */
static const struct rproc_hexagon_res msm8916_mss = {
	.hexagon_mba_image = "mba.mbn",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.has_spare_reg = false,
	.version = MSS_MSM8916,
};

/* Per-SoC resource description: MSM8974 (split firmware, "mba.b00" image) */
static const struct rproc_hexagon_res msm8974_mss = {
	.hexagon_mba_image = "mba.b00",
	.proxy_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mx",
			.uV = 1050000,
		},
		{
			.supply = "cx",
			.uA = 100000,
		},
		{
			.supply = "pll",
			.uA = 100000,
		},
		{}
	},
	.active_supply = (struct qcom_mss_reg_res[]) {
		{
			.supply = "mss",
			.uV = 1050000,
			.uA = 100000,
		},
		{}
	},
	.proxy_clk_names = (char*[]){
		"xo",
		NULL
	},
	.active_clk_names = (char*[]){
		"iface",
		"bus",
		"mem",
		NULL
	},
	.need_mem_protection = false,
	.has_alt_reset = false,
	.has_spare_reg = false,
	.version = MSS_MSM8974,
};

static const struct of_device_id q6v5_of_match[] = {
	/* "qcom,q6v5-pil" is the legacy compatible, equivalent to msm8916 */
	{ .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
	{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
	{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
	{ .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
	{ .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
	{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
	{ },
};
MODULE_DEVICE_TABLE(of, q6v5_of_match);

static struct platform_driver q6v5_driver = {
	.probe = q6v5_probe,
	.remove = q6v5_remove,
	.driver = {
		.name = "qcom-q6v5-mss",
		.of_match_table = q6v5_of_match,
	},
};
module_platform_driver(q6v5_driver);

MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
MODULE_LICENSE("GPL v2");