1 /* 2 * Qualcomm self-authenticating modem subsystem remoteproc driver 3 * 4 * Copyright (C) 2016 Linaro Ltd. 5 * Copyright (C) 2014 Sony Mobile Communications AB 6 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. 7 * 8 * This program is free software; you can redistribute it and/or 9 * modify it under the terms of the GNU General Public License 10 * version 2 as published by the Free Software Foundation. 11 * 12 * This program is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * GNU General Public License for more details. 16 */ 17 18 #include <linux/clk.h> 19 #include <linux/delay.h> 20 #include <linux/dma-mapping.h> 21 #include <linux/interrupt.h> 22 #include <linux/kernel.h> 23 #include <linux/mfd/syscon.h> 24 #include <linux/module.h> 25 #include <linux/of_address.h> 26 #include <linux/of_device.h> 27 #include <linux/platform_device.h> 28 #include <linux/regmap.h> 29 #include <linux/regulator/consumer.h> 30 #include <linux/remoteproc.h> 31 #include <linux/reset.h> 32 #include <linux/soc/qcom/mdt_loader.h> 33 #include <linux/iopoll.h> 34 35 #include "remoteproc_internal.h" 36 #include "qcom_common.h" 37 #include "qcom_q6v5.h" 38 39 #include <linux/qcom_scm.h> 40 41 #define MPSS_CRASH_REASON_SMEM 421 42 43 /* RMB Status Register Values */ 44 #define RMB_PBL_SUCCESS 0x1 45 46 #define RMB_MBA_XPU_UNLOCKED 0x1 47 #define RMB_MBA_XPU_UNLOCKED_SCRIBBLED 0x2 48 #define RMB_MBA_META_DATA_AUTH_SUCCESS 0x3 49 #define RMB_MBA_AUTH_COMPLETE 0x4 50 51 /* PBL/MBA interface registers */ 52 #define RMB_MBA_IMAGE_REG 0x00 53 #define RMB_PBL_STATUS_REG 0x04 54 #define RMB_MBA_COMMAND_REG 0x08 55 #define RMB_MBA_STATUS_REG 0x0C 56 #define RMB_PMI_META_DATA_REG 0x10 57 #define RMB_PMI_CODE_START_REG 0x14 58 #define RMB_PMI_CODE_LENGTH_REG 0x18 59 #define RMB_MBA_MSS_STATUS 0x40 60 #define RMB_MBA_ALT_RESET 0x44 61 62 #define 
RMB_CMD_META_DATA_READY	0x1
#define RMB_CMD_LOAD_READY	0x2

/* QDSP6SS Register Offsets */
#define QDSP6SS_RESET_REG		0x014
#define QDSP6SS_GFMUX_CTL_REG		0x020
#define QDSP6SS_PWR_CTL_REG		0x030
#define QDSP6SS_MEM_PWR_CTL		0x0B0
#define QDSP6SS_STRAP_ACC		0x110

/* AXI Halt Register Offsets (within the halt-regs syscon) */
#define AXI_HALTREQ_REG			0x0
#define AXI_HALTACK_REG			0x4
#define AXI_IDLE_REG			0x8

#define HALT_ACK_TIMEOUT_MS		100

/* QDSP6SS_RESET */
#define Q6SS_STOP_CORE			BIT(0)
#define Q6SS_CORE_ARES			BIT(1)
#define Q6SS_BUS_ARES_ENABLE		BIT(2)

/* QDSP6SS_GFMUX_CTL */
#define Q6SS_CLK_ENABLE			BIT(1)

/* QDSP6SS_PWR_CTL */
#define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
#define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
#define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
#define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
#define Q6SS_ETB_SLP_NRET_N		BIT(17)
#define Q6SS_L2DATA_STBY_N		BIT(18)
#define Q6SS_SLP_RET_N			BIT(19)
#define Q6SS_CLAMP_IO			BIT(20)
#define QDSS_BHS_ON			BIT(21)
#define QDSS_LDO_BYP			BIT(22)

/* QDSP6v56 parameters */
#define QDSP6v56_LDO_BYP		BIT(25)
#define QDSP6v56_BHS_ON			BIT(24)
#define QDSP6v56_CLAMP_WL		BIT(21)
#define QDSP6v56_CLAMP_QMC_MEM		BIT(22)
#define HALT_CHECK_MAX_LOOPS		200
#define QDSP6SS_XO_CBCR			0x0038
#define QDSP6SS_ACC_OVERRIDE_VAL	0x20

/* QDSP6v65 parameters */
#define QDSP6SS_SLEEP			0x3C
#define QDSP6SS_BOOT_CORE_START		0x400
#define QDSP6SS_BOOT_CMD		0x404
#define SLEEP_CHECK_MAX_LOOPS		200
#define BOOT_FSM_TIMEOUT		10000

/*
 * Runtime state of one regulator supplying the Hexagon subsystem.
 * uV/uA of 0 mean "do not program voltage/load for this supply".
 */
struct reg_info {
	struct regulator *reg;
	int uV;
	int uA;
};

/*
 * Static description of one required supply; arrays of these are
 * NULL-supply terminated (see q6v5_regulator_init()).
 */
struct qcom_mss_reg_res {
	const char *supply;
	int uV;
	int uA;
};

/*
 * Per-SoC match data: firmware name, supply/clock tables and quirk flags.
 * "proxy" resources are only held until the modem takes over its own votes;
 * "active" resources are held for the whole run time.
 */
struct rproc_hexagon_res {
	const char *hexagon_mba_image;
	struct qcom_mss_reg_res *proxy_supply;
	struct qcom_mss_reg_res *active_supply;
	char **proxy_clk_names;
	char **reset_clk_names;
	char **active_clk_names;
	int version;
bool need_mem_protection; 136 bool has_alt_reset; 137 }; 138 139 struct q6v5 { 140 struct device *dev; 141 struct rproc *rproc; 142 143 void __iomem *reg_base; 144 void __iomem *rmb_base; 145 146 struct regmap *halt_map; 147 u32 halt_q6; 148 u32 halt_modem; 149 u32 halt_nc; 150 151 struct reset_control *mss_restart; 152 struct reset_control *pdc_reset; 153 154 struct qcom_q6v5 q6v5; 155 156 struct clk *active_clks[8]; 157 struct clk *reset_clks[4]; 158 struct clk *proxy_clks[4]; 159 int active_clk_count; 160 int reset_clk_count; 161 int proxy_clk_count; 162 163 struct reg_info active_regs[1]; 164 struct reg_info proxy_regs[3]; 165 int active_reg_count; 166 int proxy_reg_count; 167 168 bool running; 169 170 bool dump_mba_loaded; 171 unsigned long dump_segment_mask; 172 unsigned long dump_complete_mask; 173 174 phys_addr_t mba_phys; 175 void *mba_region; 176 size_t mba_size; 177 178 phys_addr_t mpss_phys; 179 phys_addr_t mpss_reloc; 180 void *mpss_region; 181 size_t mpss_size; 182 183 struct qcom_rproc_glink glink_subdev; 184 struct qcom_rproc_subdev smd_subdev; 185 struct qcom_rproc_ssr ssr_subdev; 186 struct qcom_sysmon *sysmon; 187 bool need_mem_protection; 188 bool has_alt_reset; 189 int mpss_perm; 190 int mba_perm; 191 int version; 192 }; 193 194 enum { 195 MSS_MSM8916, 196 MSS_MSM8974, 197 MSS_MSM8996, 198 MSS_SDM845, 199 }; 200 201 static int q6v5_regulator_init(struct device *dev, struct reg_info *regs, 202 const struct qcom_mss_reg_res *reg_res) 203 { 204 int rc; 205 int i; 206 207 if (!reg_res) 208 return 0; 209 210 for (i = 0; reg_res[i].supply; i++) { 211 regs[i].reg = devm_regulator_get(dev, reg_res[i].supply); 212 if (IS_ERR(regs[i].reg)) { 213 rc = PTR_ERR(regs[i].reg); 214 if (rc != -EPROBE_DEFER) 215 dev_err(dev, "Failed to get %s\n regulator", 216 reg_res[i].supply); 217 return rc; 218 } 219 220 regs[i].uV = reg_res[i].uV; 221 regs[i].uA = reg_res[i].uA; 222 } 223 224 return i; 225 } 226 227 static int q6v5_regulator_enable(struct q6v5 *qproc, 228 
struct reg_info *regs, int count) 229 { 230 int ret; 231 int i; 232 233 for (i = 0; i < count; i++) { 234 if (regs[i].uV > 0) { 235 ret = regulator_set_voltage(regs[i].reg, 236 regs[i].uV, INT_MAX); 237 if (ret) { 238 dev_err(qproc->dev, 239 "Failed to request voltage for %d.\n", 240 i); 241 goto err; 242 } 243 } 244 245 if (regs[i].uA > 0) { 246 ret = regulator_set_load(regs[i].reg, 247 regs[i].uA); 248 if (ret < 0) { 249 dev_err(qproc->dev, 250 "Failed to set regulator mode\n"); 251 goto err; 252 } 253 } 254 255 ret = regulator_enable(regs[i].reg); 256 if (ret) { 257 dev_err(qproc->dev, "Regulator enable failed\n"); 258 goto err; 259 } 260 } 261 262 return 0; 263 err: 264 for (; i >= 0; i--) { 265 if (regs[i].uV > 0) 266 regulator_set_voltage(regs[i].reg, 0, INT_MAX); 267 268 if (regs[i].uA > 0) 269 regulator_set_load(regs[i].reg, 0); 270 271 regulator_disable(regs[i].reg); 272 } 273 274 return ret; 275 } 276 277 static void q6v5_regulator_disable(struct q6v5 *qproc, 278 struct reg_info *regs, int count) 279 { 280 int i; 281 282 for (i = 0; i < count; i++) { 283 if (regs[i].uV > 0) 284 regulator_set_voltage(regs[i].reg, 0, INT_MAX); 285 286 if (regs[i].uA > 0) 287 regulator_set_load(regs[i].reg, 0); 288 289 regulator_disable(regs[i].reg); 290 } 291 } 292 293 static int q6v5_clk_enable(struct device *dev, 294 struct clk **clks, int count) 295 { 296 int rc; 297 int i; 298 299 for (i = 0; i < count; i++) { 300 rc = clk_prepare_enable(clks[i]); 301 if (rc) { 302 dev_err(dev, "Clock enable failed\n"); 303 goto err; 304 } 305 } 306 307 return 0; 308 err: 309 for (i--; i >= 0; i--) 310 clk_disable_unprepare(clks[i]); 311 312 return rc; 313 } 314 315 static void q6v5_clk_disable(struct device *dev, 316 struct clk **clks, int count) 317 { 318 int i; 319 320 for (i = 0; i < count; i++) 321 clk_disable_unprepare(clks[i]); 322 } 323 324 static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm, 325 bool remote_owner, phys_addr_t addr, 326 size_t size) 327 { 
328 struct qcom_scm_vmperm next; 329 330 if (!qproc->need_mem_protection) 331 return 0; 332 if (remote_owner && *current_perm == BIT(QCOM_SCM_VMID_MSS_MSA)) 333 return 0; 334 if (!remote_owner && *current_perm == BIT(QCOM_SCM_VMID_HLOS)) 335 return 0; 336 337 next.vmid = remote_owner ? QCOM_SCM_VMID_MSS_MSA : QCOM_SCM_VMID_HLOS; 338 next.perm = remote_owner ? QCOM_SCM_PERM_RW : QCOM_SCM_PERM_RWX; 339 340 return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K), 341 current_perm, &next, 1); 342 } 343 344 static int q6v5_load(struct rproc *rproc, const struct firmware *fw) 345 { 346 struct q6v5 *qproc = rproc->priv; 347 348 memcpy(qproc->mba_region, fw->data, fw->size); 349 350 return 0; 351 } 352 353 static int q6v5_reset_assert(struct q6v5 *qproc) 354 { 355 int ret; 356 357 if (qproc->has_alt_reset) { 358 reset_control_assert(qproc->pdc_reset); 359 ret = reset_control_reset(qproc->mss_restart); 360 reset_control_deassert(qproc->pdc_reset); 361 } else { 362 ret = reset_control_assert(qproc->mss_restart); 363 } 364 365 return ret; 366 } 367 368 static int q6v5_reset_deassert(struct q6v5 *qproc) 369 { 370 int ret; 371 372 if (qproc->has_alt_reset) { 373 reset_control_assert(qproc->pdc_reset); 374 writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET); 375 ret = reset_control_reset(qproc->mss_restart); 376 writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET); 377 reset_control_deassert(qproc->pdc_reset); 378 } else { 379 ret = reset_control_deassert(qproc->mss_restart); 380 } 381 382 return ret; 383 } 384 385 static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms) 386 { 387 unsigned long timeout; 388 s32 val; 389 390 timeout = jiffies + msecs_to_jiffies(ms); 391 for (;;) { 392 val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG); 393 if (val) 394 break; 395 396 if (time_after(jiffies, timeout)) 397 return -ETIMEDOUT; 398 399 msleep(1); 400 } 401 402 return val; 403 } 404 405 static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms) 406 { 407 408 unsigned long timeout; 409 s32 
	val;

	timeout = jiffies + msecs_to_jiffies(ms);
	for (;;) {
		val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
		if (val < 0)
			break;

		if (!status && val)
			break;
		else if (status && val == status)
			break;

		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;

		msleep(1);
	}

	return val;
}

/*
 * Bring the QDSP6 core out of reset and wait for the PBL to report status.
 * The power-up ordering below is hardware-mandated and version specific:
 * SDM845 uses a boot FSM, MSM8996 uses the v56 power sequencing, everything
 * else the older BHS/LDO sequence.  Do not reorder these register accesses.
 */
static int q6v5proc_reset(struct q6v5 *qproc)
{
	u32 val;
	int ret;
	int i;

	if (qproc->version == MSS_SDM845) {
		/* Request the sleep clock and wait for CLKOFF (bit 31) clear */
		val = readl(qproc->reg_base + QDSP6SS_SLEEP);
		val |= 0x1;
		writel(val, qproc->reg_base + QDSP6SS_SLEEP);

		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
					 val, !(val & BIT(31)), 1,
					 SLEEP_CHECK_MAX_LOOPS);
		if (ret) {
			dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
			return -ETIMEDOUT;
		}

		/* De-assert QDSP6 stop core */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
		/* Trigger boot FSM */
		writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);

		ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
				val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
		if (ret) {
			dev_err(qproc->dev, "Boot FSM failed to complete.\n");
			/*
			 * Reset the modem so that boot FSM is in reset state.
			 * NOTE(review): with has_alt_reset the "deassert"
			 * helper performs a reset_control_reset() pulse,
			 * which is what actually resets the FSM here.
			 */
			q6v5_reset_deassert(qproc);
			return ret;
		}

		goto pbl_wait;
	} else if (qproc->version == MSS_MSM8996) {
		/* Override the ACC value if required */
		writel(QDSP6SS_ACC_OVERRIDE_VAL,
		       qproc->reg_base + QDSP6SS_STRAP_ACC);

		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* BHS require xo cbcr to be enabled */
		val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
		val |= 0x1;
		writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);

		/* Read CLKOFF bit to go low indicating CLK is enabled */
		ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
					 val, !(val & BIT(31)), 1,
					 HALT_CHECK_MAX_LOOPS);
		if (ret) {
			dev_err(qproc->dev,
				"xo cbcr enabling timed out (rc:%d)\n", ret);
			return ret;
		}
		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSP6v56_BHS_ON;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);

		/* Put LDO in bypass mode */
		val |= QDSP6v56_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert QDSP6 compiler memory clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Deassert memory peripheral sleep and L2 memory standby */
		val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

		/* Turn on L1, L2, ETB and JU memories 1 at a time */
		val = readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
		for (i = 19; i >= 0; i--) {
			val |= BIT(i);
			writel(val, qproc->reg_base +
						QDSP6SS_MEM_PWR_CTL);
			/*
			 * Read back value to ensure the write is done then
			 * wait for 1us for both memory peripheral and data
			 * array to turn on.
			 */
			val |= readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
			udelay(1);
		}
		/* Remove word line clamp */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val &= ~QDSP6v56_CLAMP_WL;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	} else {
		/* Assert resets, stop core */
		val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
		val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
		writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

		/* Enable power block headswitch and wait for it to stabilize */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= QDSS_BHS_ON | QDSS_LDO_BYP;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		udelay(1);
		/*
		 * Turn on memories. L2 banks should be done individually
		 * to minimize inrush current.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
			Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_2;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_1;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_L2DATA_SLP_NRET_N_0;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}
	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);

	/* Bring core out of reset */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_CORE_ARES;
	writel(val, qproc->reg_base + QDSP6SS_RESET_REG);

	/* Turn on core clock */
	val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
	val |= Q6SS_CLK_ENABLE;
	writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);

	/* Start core execution */
	val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
	val &= ~Q6SS_STOP_CORE;
	writel(val, qproc->reg_base +
		QDSP6SS_RESET_REG);

pbl_wait:
	/* Wait for PBL status */
	ret = q6v5_rmb_pbl_wait(qproc, 1000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "PBL boot timed out\n");
	} else if (ret != RMB_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
		ret = -EINVAL;
	} else {
		ret = 0;
	}

	return ret;
}

/*
 * Halt the AXI port at @offset in @halt_map: request a halt, wait up to
 * HALT_ACK_TIMEOUT_MS for the ack, verify the port reports idle, then clear
 * the request bit (the port stays halted until the next reset).
 */
static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
				   struct regmap *halt_map,
				   u32 offset)
{
	unsigned long timeout;
	unsigned int val;
	int ret;

	/* Check if we're already idle */
	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (!ret && val)
		return;

	/* Assert halt request */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);

	/* Wait for halt */
	timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
	for (;;) {
		ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val);
		if (ret || val || time_after(jiffies, timeout))
			break;

		msleep(1);
	}

	ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
	if (ret || !val)
		dev_err(qproc->dev, "port failed halt\n");

	/* Clear halt request (port will remain halted until reset) */
	regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}

/*
 * Hand the MPSS mdt header to the MBA for authentication: copy it into a
 * physically contiguous DMA buffer, give the modem ownership via SCM, kick
 * the MBA through the RMB registers and wait for the auth result.  Modem
 * access to the buffer is always revoked again before the buffer is freed.
 */
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
	unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
	dma_addr_t phys;
	int mdata_perm;
	int xferop_ret;
	void *ptr;
	int ret;

	ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, dma_attrs);
	if (!ptr) {
		dev_err(qproc->dev, "failed to allocate mdt buffer\n");
		return -ENOMEM;
	}

	memcpy(ptr, fw->data, fw->size);

	/* Hypervisor mapping to access metadata by modem */
	mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
	ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm,
				      true, phys, fw->size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to metadata failed: %d\n", ret);
		ret = -EAGAIN;
		goto free_dma_attrs;
	}

	writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS header authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);

	/* Metadata authentication done, remove modem access */
	xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm,
					     false, phys, fw->size);
	if (xferop_ret)
		dev_warn(qproc->dev,
			 "mdt buffer not reclaimed system may become unstable\n");

free_dma_attrs:
	dma_free_attrs(qproc->dev, fw->size, ptr, phys, dma_attrs);

	/* positive RMB status values mean success */
	return ret < 0 ? ret : 0;
}

/* A loadable MPSS segment: PT_LOAD, not the hash segment, non-zero size. */
static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
{
	if (phdr->p_type != PT_LOAD)
		return false;

	if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
		return false;

	if (!phdr->p_memsz)
		return false;

	return true;
}

/*
 * Power up the subsystem and boot the Hexagon into the MBA image that
 * q6v5_load() placed in the MBA region.  Resources are acquired in order
 * (proxy supplies/clocks, active supplies, reset clocks, reset deassert,
 * active clocks, MBA memory handover) and the goto ladder below unwinds
 * them in exact reverse order on failure.
 */
static int q6v5_mba_load(struct q6v5 *qproc)
{
	int ret;
	int xfermemop_ret;

	qcom_q6v5_prepare(&qproc->q6v5);

	ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
				    qproc->proxy_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy supplies\n");
		goto disable_irqs;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
			      qproc->proxy_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable proxy clocks\n");
		goto disable_proxy_reg;
	}

	ret = q6v5_regulator_enable(qproc, qproc->active_regs,
				    qproc->active_reg_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable supplies\n");
		goto disable_proxy_clk;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
			      qproc->reset_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable reset clocks\n");
		goto disable_vdd;
	}

	ret = q6v5_reset_deassert(qproc);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_reset_clks;
	}

	ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
			      qproc->active_clk_count);
	if (ret) {
		dev_err(qproc->dev, "failed to enable clocks\n");
		goto assert_reset;
	}

	/* Assign MBA image access in DDR to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
				      qproc->mba_phys, qproc->mba_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mba memory failed: %d\n", ret);
		goto disable_active_clks;
	}

	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);

	ret = q6v5proc_reset(qproc);
	if (ret)
		goto reclaim_mba;

	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		goto halt_axi_ports;
	} else if (ret != RMB_MBA_XPU_UNLOCKED &&
		   ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	qproc->dump_mba_loaded = true;
	return 0;

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);

reclaim_mba:
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
						qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret) {
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer, system may become unstable\n");
	}

disable_active_clks:
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
assert_reset:
	q6v5_reset_assert(qproc);
disable_reset_clks:
	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
disable_vdd:
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);
disable_proxy_clk:
	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
disable_proxy_reg:
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
disable_irqs:
	qcom_q6v5_unprepare(&qproc->q6v5);

	return ret;
}

/*
 * Undo q6v5_mba_load(): halt the AXI ports, reclaim memory ownership from
 * the modem, assert the reset and release clocks/regulators.  Proxy
 * resources are only dropped here if the MSA handover never happened
 * (qcom_q6v5_unprepare() returning non-zero).
 */
static void q6v5_mba_reclaim(struct q6v5 *qproc)
{
	int ret;
	u32 val;

	qproc->dump_mba_loaded = false;

	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
	q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
	if (qproc->version == MSS_MSM8996) {
		/*
		 * To avoid high MX current during LPASS/MSS restart.
		 */
		val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
		val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
			QDSP6v56_CLAMP_QMC_MEM;
		writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
	}

	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
				      false, qproc->mpss_phys,
				      qproc->mpss_size);
	WARN_ON(ret);

	q6v5_reset_assert(qproc);

	q6v5_clk_disable(qproc->dev, qproc->reset_clks,
			 qproc->reset_clk_count);
	q6v5_clk_disable(qproc->dev, qproc->active_clks,
			 qproc->active_clk_count);
	q6v5_regulator_disable(qproc, qproc->active_regs,
			       qproc->active_reg_count);

	/* In case of failure or coredump scenario where reclaiming MBA memory
	 * could not happen reclaim it here.
	 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
				      qproc->mba_phys,
				      qproc->mba_size);
	WARN_ON(ret);

	ret = qcom_q6v5_unprepare(&qproc->q6v5);
	if (ret) {
		q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
				 qproc->proxy_clk_count);
		q6v5_regulator_disable(qproc, qproc->proxy_regs,
				       qproc->proxy_reg_count);
	}
}

/*
 * Load the MPSS firmware: authenticate the mdt header via the MBA, copy
 * every valid segment (modem.bNN blobs) into the MPSS region, hand the
 * region to the modem via SCM, then ask the MBA to authenticate the image.
 */
static int q6v5_mpss_load(struct q6v5 *qproc)
{
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct firmware *seg_fw;
	const struct firmware *fw;
	struct elf32_hdr *ehdr;
	phys_addr_t mpss_reloc;
	phys_addr_t boot_addr;
	phys_addr_t min_addr = PHYS_ADDR_MAX;
	phys_addr_t max_addr = 0;
	bool relocate = false;
	char seg_name[10];
	ssize_t offset;
	size_t size = 0;
	void *ptr;
	int ret;
	int i;

	ret = request_firmware(&fw, "modem.mdt", qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load modem.mdt\n");
		return ret;
	}

	/* Initialize the RMB validator */
	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_mpss_init_image(qproc, fw);
	if (ret)
		goto release_firmware;

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);

	/* First pass: find the load-address range and relocatability */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
			relocate = true;

		if (phdr->p_paddr < min_addr)
			min_addr = phdr->p_paddr;

		if (phdr->p_paddr + phdr->p_memsz > max_addr)
			max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
	}

	mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
	qproc->mpss_reloc = mpss_reloc;
	/* Load firmware segments */
	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		offset = phdr->p_paddr - mpss_reloc;
		if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
			dev_err(qproc->dev, "segment outside memory range\n");
			ret = -EINVAL;
			goto release_firmware;
		}

		ptr = qproc->mpss_region + offset;

		if (phdr->p_filesz) {
			snprintf(seg_name, sizeof(seg_name), "modem.b%02d", i);
			ret = request_firmware(&seg_fw, seg_name, qproc->dev);
			if (ret) {
				dev_err(qproc->dev, "failed to load %s\n", seg_name);
				goto release_firmware;
			}

			memcpy(ptr, seg_fw->data, seg_fw->size);

			release_firmware(seg_fw);
		}

		/* Zero-fill the BSS-like tail of the segment */
		if (phdr->p_memsz > phdr->p_filesz) {
			memset(ptr + phdr->p_filesz, 0,
			       phdr->p_memsz - phdr->p_filesz);
		}
		size += phdr->p_memsz;
	}

	/* Transfer ownership of modem ddr region to q6 */
	ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true,
				      qproc->mpss_phys, qproc->mpss_size);
	if (ret) {
		dev_err(qproc->dev,
			"assigning Q6 access to mpss memory failed: %d\n", ret);
		ret = -EAGAIN;
		goto release_firmware;
	}

	boot_addr = relocate ? qproc->mpss_phys : min_addr;
	writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
	writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
	writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);

	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "MPSS authentication timed out\n");
	else if (ret < 0)
		dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);

release_firmware:
	release_firmware(fw);

	/* positive RMB status values mean success */
	return ret < 0 ?
		ret : 0;
}

/*
 * rproc coredump callback for one registered segment.  segment->priv holds
 * the segment index; the MBA is (re)loaded before the first copy so the
 * memory is accessible, and reclaimed again once every registered segment
 * (tracked in dump_segment_mask) has been copied.  Unreadable segments are
 * filled with 0xff.
 */
static void qcom_q6v5_dump_segment(struct rproc *rproc,
				   struct rproc_dump_segment *segment,
				   void *dest)
{
	int ret = 0;
	struct q6v5 *qproc = rproc->priv;
	unsigned long mask = BIT((unsigned long)segment->priv);
	void *ptr = rproc_da_to_va(rproc, segment->da, segment->size);

	/* Unlock mba before copying segments */
	if (!qproc->dump_mba_loaded)
		ret = q6v5_mba_load(qproc);

	if (!ptr || ret)
		memset(dest, 0xff, segment->size);
	else
		memcpy(dest, ptr, segment->size);

	qproc->dump_segment_mask |= mask;

	/* Reclaim mba after copying segments */
	if (qproc->dump_segment_mask == qproc->dump_complete_mask) {
		if (qproc->dump_mba_loaded)
			q6v5_mba_reclaim(qproc);
	}
}

/*
 * rproc_ops::start — boot the MBA, load and authenticate the MPSS image,
 * wait for the modem's start acknowledgement, then reclaim the MBA region
 * from the modem.  On failure the MPSS region is reclaimed and the whole
 * MBA bring-up is unwound.
 */
static int q6v5_start(struct rproc *rproc)
{
	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
	int xfermemop_ret;
	int ret;

	ret = q6v5_mba_load(qproc);
	if (ret)
		return ret;

	dev_info(qproc->dev, "MBA booted, loading mpss\n");

	ret = q6v5_mpss_load(qproc);
	if (ret)
		goto reclaim_mpss;

	ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
	if (ret == -ETIMEDOUT) {
		dev_err(qproc->dev, "start timed out\n");
		goto reclaim_mpss;
	}

	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
						qproc->mba_phys,
						qproc->mba_size);
	if (xfermemop_ret)
		dev_err(qproc->dev,
			"Failed to reclaim mba buffer system may become unstable\n");

	/* Reset Dump Segment Mask */
	qproc->dump_segment_mask = 0;
	qproc->running = true;

	return 0;

reclaim_mpss:
	xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
						false, qproc->mpss_phys,
						qproc->mpss_size);
	WARN_ON(xfermemop_ret);
	q6v5_mba_reclaim(qproc);

	return ret;
}

/*
 * rproc_ops::stop — request a graceful stop from the modem (best effort;
 * a timeout is only logged) and tear the subsystem down.
 */
static int q6v5_stop(struct rproc *rproc)
{
	struct q6v5
		*qproc = (struct q6v5 *)rproc->priv;
	int ret;

	qproc->running = false;

	ret = qcom_q6v5_request_stop(&qproc->q6v5);
	if (ret == -ETIMEDOUT)
		dev_err(qproc->dev, "timed out on wait\n");

	q6v5_mba_reclaim(qproc);

	return 0;
}

/*
 * rproc_ops::da_to_va — translate a modem device address into a kernel
 * pointer within the mapped MPSS region, or NULL when out of range.
 */
static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len)
{
	struct q6v5 *qproc = rproc->priv;
	int offset;

	offset = da - qproc->mpss_reloc;
	if (offset < 0 || offset + len > qproc->mpss_size)
		return NULL;

	return qproc->mpss_region + offset;
}

/*
 * rproc_ops::parse_fw — register one coredump segment per valid MPSS
 * program header, keyed by its index (stashed in the segment priv pointer
 * and mirrored in dump_complete_mask for qcom_q6v5_dump_segment()).
 */
static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
					    const struct firmware *mba_fw)
{
	const struct firmware *fw;
	const struct elf32_phdr *phdrs;
	const struct elf32_phdr *phdr;
	const struct elf32_hdr *ehdr;
	struct q6v5 *qproc = rproc->priv;
	unsigned long i;
	int ret;

	ret = request_firmware(&fw, "modem.mdt", qproc->dev);
	if (ret < 0) {
		dev_err(qproc->dev, "unable to load modem.mdt\n");
		return ret;
	}

	ehdr = (struct elf32_hdr *)fw->data;
	phdrs = (struct elf32_phdr *)(ehdr + 1);
	qproc->dump_complete_mask = 0;

	for (i = 0; i < ehdr->e_phnum; i++) {
		phdr = &phdrs[i];

		if (!q6v5_phdr_valid(phdr))
			continue;

		ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
							phdr->p_memsz,
							qcom_q6v5_dump_segment,
							(void *)i);
		if (ret)
			break;

		qproc->dump_complete_mask |= BIT(i);
	}

	release_firmware(fw);
	return ret;
}

static const struct rproc_ops q6v5_ops = {
	.start = q6v5_start,
	.stop = q6v5_stop,
	.da_to_va = q6v5_da_to_va,
	.parse_fw = qcom_q6v5_register_dump_segments,
	.load = q6v5_load,
};

/*
 * Called once the modem has taken over its own resource votes (MSA
 * handover): drop the proxy clocks and regulators held on its behalf.
 */
static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
{
	struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);

	q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
			 qproc->proxy_clk_count);
	q6v5_regulator_disable(qproc, qproc->proxy_regs,
			       qproc->proxy_reg_count);
}

/*
 * Map the "qdsp6" and "rmb" register regions and resolve the
 * "qcom,halt-regs" syscon phandle + its three per-port offsets.
 */
static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
{
	struct of_phandle_args args;
	struct resource *res;
	int ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
	qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->reg_base))
		return PTR_ERR(qproc->reg_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
	qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qproc->rmb_base))
		return PTR_ERR(qproc->rmb_base);

	ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
					       "qcom,halt-regs", 3, 0, &args);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
		return -EINVAL;
	}

	qproc->halt_map = syscon_node_to_regmap(args.np);
	of_node_put(args.np);
	if (IS_ERR(qproc->halt_map))
		return PTR_ERR(qproc->halt_map);

	qproc->halt_q6 = args.args[0];
	qproc->halt_modem = args.args[1];
	qproc->halt_nc = args.args[2];

	return 0;
}

/*
 * Acquire the clocks named in the NULL-terminated @clk_names table.
 * Returns the number of clocks acquired, or a negative errno.
 */
static int q6v5_init_clocks(struct device *dev, struct clk **clks,
			    char **clk_names)
{
	int i;

	if (!clk_names)
		return 0;

	for (i = 0; clk_names[i]; i++) {
		clks[i] = devm_clk_get(dev, clk_names[i]);
		if (IS_ERR(clks[i])) {
			int rc = PTR_ERR(clks[i]);

			if (rc != -EPROBE_DEFER)
				dev_err(dev, "Failed to get %s clock\n",
					clk_names[i]);
			return rc;
		}
	}

	return i;
}

/*
 * Acquire the mss_restart reset line, plus the pdc_reset line on SoCs
 * using the alternate reset sequence (has_alt_reset).
 */
static int q6v5_init_reset(struct q6v5 *qproc)
{
	qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
							      "mss_restart");
	if (IS_ERR(qproc->mss_restart)) {
		dev_err(qproc->dev, "failed to acquire mss restart\n");
1190 return PTR_ERR(qproc->mss_restart); 1191 } 1192 1193 if (qproc->has_alt_reset) { 1194 qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev, 1195 "pdc_reset"); 1196 if (IS_ERR(qproc->pdc_reset)) { 1197 dev_err(qproc->dev, "failed to acquire pdc reset\n"); 1198 return PTR_ERR(qproc->pdc_reset); 1199 } 1200 } 1201 1202 return 0; 1203 } 1204 1205 static int q6v5_alloc_memory_region(struct q6v5 *qproc) 1206 { 1207 struct device_node *child; 1208 struct device_node *node; 1209 struct resource r; 1210 int ret; 1211 1212 child = of_get_child_by_name(qproc->dev->of_node, "mba"); 1213 node = of_parse_phandle(child, "memory-region", 0); 1214 ret = of_address_to_resource(node, 0, &r); 1215 if (ret) { 1216 dev_err(qproc->dev, "unable to resolve mba region\n"); 1217 return ret; 1218 } 1219 of_node_put(node); 1220 1221 qproc->mba_phys = r.start; 1222 qproc->mba_size = resource_size(&r); 1223 qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size); 1224 if (!qproc->mba_region) { 1225 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", 1226 &r.start, qproc->mba_size); 1227 return -EBUSY; 1228 } 1229 1230 child = of_get_child_by_name(qproc->dev->of_node, "mpss"); 1231 node = of_parse_phandle(child, "memory-region", 0); 1232 ret = of_address_to_resource(node, 0, &r); 1233 if (ret) { 1234 dev_err(qproc->dev, "unable to resolve mpss region\n"); 1235 return ret; 1236 } 1237 of_node_put(node); 1238 1239 qproc->mpss_phys = qproc->mpss_reloc = r.start; 1240 qproc->mpss_size = resource_size(&r); 1241 qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size); 1242 if (!qproc->mpss_region) { 1243 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", 1244 &r.start, qproc->mpss_size); 1245 return -EBUSY; 1246 } 1247 1248 return 0; 1249 } 1250 1251 static int q6v5_probe(struct platform_device *pdev) 1252 { 1253 const struct rproc_hexagon_res *desc; 1254 struct q6v5 *qproc; 1255 struct rproc *rproc; 1256 int 
ret; 1257 1258 desc = of_device_get_match_data(&pdev->dev); 1259 if (!desc) 1260 return -EINVAL; 1261 1262 if (desc->need_mem_protection && !qcom_scm_is_available()) 1263 return -EPROBE_DEFER; 1264 1265 rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops, 1266 desc->hexagon_mba_image, sizeof(*qproc)); 1267 if (!rproc) { 1268 dev_err(&pdev->dev, "failed to allocate rproc\n"); 1269 return -ENOMEM; 1270 } 1271 1272 qproc = (struct q6v5 *)rproc->priv; 1273 qproc->dev = &pdev->dev; 1274 qproc->rproc = rproc; 1275 platform_set_drvdata(pdev, qproc); 1276 1277 ret = q6v5_init_mem(qproc, pdev); 1278 if (ret) 1279 goto free_rproc; 1280 1281 ret = q6v5_alloc_memory_region(qproc); 1282 if (ret) 1283 goto free_rproc; 1284 1285 ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks, 1286 desc->proxy_clk_names); 1287 if (ret < 0) { 1288 dev_err(&pdev->dev, "Failed to get proxy clocks.\n"); 1289 goto free_rproc; 1290 } 1291 qproc->proxy_clk_count = ret; 1292 1293 ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks, 1294 desc->reset_clk_names); 1295 if (ret < 0) { 1296 dev_err(&pdev->dev, "Failed to get reset clocks.\n"); 1297 goto free_rproc; 1298 } 1299 qproc->reset_clk_count = ret; 1300 1301 ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks, 1302 desc->active_clk_names); 1303 if (ret < 0) { 1304 dev_err(&pdev->dev, "Failed to get active clocks.\n"); 1305 goto free_rproc; 1306 } 1307 qproc->active_clk_count = ret; 1308 1309 ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs, 1310 desc->proxy_supply); 1311 if (ret < 0) { 1312 dev_err(&pdev->dev, "Failed to get proxy regulators.\n"); 1313 goto free_rproc; 1314 } 1315 qproc->proxy_reg_count = ret; 1316 1317 ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs, 1318 desc->active_supply); 1319 if (ret < 0) { 1320 dev_err(&pdev->dev, "Failed to get active regulators.\n"); 1321 goto free_rproc; 1322 } 1323 qproc->active_reg_count = ret; 1324 1325 qproc->has_alt_reset = desc->has_alt_reset; 1326 ret = 
q6v5_init_reset(qproc); 1327 if (ret) 1328 goto free_rproc; 1329 1330 qproc->version = desc->version; 1331 qproc->need_mem_protection = desc->need_mem_protection; 1332 1333 ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM, 1334 qcom_msa_handover); 1335 if (ret) 1336 goto free_rproc; 1337 1338 qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS); 1339 qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS); 1340 qcom_add_glink_subdev(rproc, &qproc->glink_subdev); 1341 qcom_add_smd_subdev(rproc, &qproc->smd_subdev); 1342 qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss"); 1343 qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12); 1344 1345 ret = rproc_add(rproc); 1346 if (ret) 1347 goto free_rproc; 1348 1349 return 0; 1350 1351 free_rproc: 1352 rproc_free(rproc); 1353 1354 return ret; 1355 } 1356 1357 static int q6v5_remove(struct platform_device *pdev) 1358 { 1359 struct q6v5 *qproc = platform_get_drvdata(pdev); 1360 1361 rproc_del(qproc->rproc); 1362 1363 qcom_remove_sysmon_subdev(qproc->sysmon); 1364 qcom_remove_glink_subdev(qproc->rproc, &qproc->glink_subdev); 1365 qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev); 1366 qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev); 1367 rproc_free(qproc->rproc); 1368 1369 return 0; 1370 } 1371 1372 static const struct rproc_hexagon_res sdm845_mss = { 1373 .hexagon_mba_image = "mba.mbn", 1374 .proxy_clk_names = (char*[]){ 1375 "xo", 1376 "prng", 1377 NULL 1378 }, 1379 .reset_clk_names = (char*[]){ 1380 "iface", 1381 "snoc_axi", 1382 NULL 1383 }, 1384 .active_clk_names = (char*[]){ 1385 "bus", 1386 "mem", 1387 "gpll0_mss", 1388 "mnoc_axi", 1389 NULL 1390 }, 1391 .need_mem_protection = true, 1392 .has_alt_reset = true, 1393 .version = MSS_SDM845, 1394 }; 1395 1396 static const struct rproc_hexagon_res msm8996_mss = { 1397 .hexagon_mba_image = "mba.mbn", 1398 .proxy_clk_names = (char*[]){ 1399 "xo", 1400 "pnoc", 1401 NULL 1402 }, 1403 .active_clk_names = (char*[]){ 1404 "iface", 1405 "bus", 1406 "mem", 
1407 "gpll0_mss_clk", 1408 NULL 1409 }, 1410 .need_mem_protection = true, 1411 .has_alt_reset = false, 1412 .version = MSS_MSM8996, 1413 }; 1414 1415 static const struct rproc_hexagon_res msm8916_mss = { 1416 .hexagon_mba_image = "mba.mbn", 1417 .proxy_supply = (struct qcom_mss_reg_res[]) { 1418 { 1419 .supply = "mx", 1420 .uV = 1050000, 1421 }, 1422 { 1423 .supply = "cx", 1424 .uA = 100000, 1425 }, 1426 { 1427 .supply = "pll", 1428 .uA = 100000, 1429 }, 1430 {} 1431 }, 1432 .proxy_clk_names = (char*[]){ 1433 "xo", 1434 NULL 1435 }, 1436 .active_clk_names = (char*[]){ 1437 "iface", 1438 "bus", 1439 "mem", 1440 NULL 1441 }, 1442 .need_mem_protection = false, 1443 .has_alt_reset = false, 1444 .version = MSS_MSM8916, 1445 }; 1446 1447 static const struct rproc_hexagon_res msm8974_mss = { 1448 .hexagon_mba_image = "mba.b00", 1449 .proxy_supply = (struct qcom_mss_reg_res[]) { 1450 { 1451 .supply = "mx", 1452 .uV = 1050000, 1453 }, 1454 { 1455 .supply = "cx", 1456 .uA = 100000, 1457 }, 1458 { 1459 .supply = "pll", 1460 .uA = 100000, 1461 }, 1462 {} 1463 }, 1464 .active_supply = (struct qcom_mss_reg_res[]) { 1465 { 1466 .supply = "mss", 1467 .uV = 1050000, 1468 .uA = 100000, 1469 }, 1470 {} 1471 }, 1472 .proxy_clk_names = (char*[]){ 1473 "xo", 1474 NULL 1475 }, 1476 .active_clk_names = (char*[]){ 1477 "iface", 1478 "bus", 1479 "mem", 1480 NULL 1481 }, 1482 .need_mem_protection = false, 1483 .has_alt_reset = false, 1484 .version = MSS_MSM8974, 1485 }; 1486 1487 static const struct of_device_id q6v5_of_match[] = { 1488 { .compatible = "qcom,q6v5-pil", .data = &msm8916_mss}, 1489 { .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss}, 1490 { .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss}, 1491 { .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss}, 1492 { .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss}, 1493 { }, 1494 }; 1495 MODULE_DEVICE_TABLE(of, q6v5_of_match); 1496 1497 static struct platform_driver q6v5_driver = { 1498 .probe = 
q6v5_probe, 1499 .remove = q6v5_remove, 1500 .driver = { 1501 .name = "qcom-q6v5-mss", 1502 .of_match_table = q6v5_of_match, 1503 }, 1504 }; 1505 module_platform_driver(q6v5_driver); 1506 1507 MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver"); 1508 MODULE_LICENSE("GPL v2"); 1509