1 /* 2 * Qualcomm self-authenticating modem subsystem remoteproc driver 3 * 4 * Copyright (C) 2016 Linaro Ltd. 5 * Copyright (C) 2014 Sony Mobile Communications AB 6 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. 7 * 8 * This program is free software; you can redistribute it and/or 9 * modify it under the terms of the GNU General Public License 10 * version 2 as published by the Free Software Foundation. 11 * 12 * This program is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * GNU General Public License for more details. 16 */ 17 18 #include <linux/clk.h> 19 #include <linux/delay.h> 20 #include <linux/dma-mapping.h> 21 #include <linux/interrupt.h> 22 #include <linux/kernel.h> 23 #include <linux/mfd/syscon.h> 24 #include <linux/module.h> 25 #include <linux/of_address.h> 26 #include <linux/of_device.h> 27 #include <linux/platform_device.h> 28 #include <linux/regmap.h> 29 #include <linux/regulator/consumer.h> 30 #include <linux/remoteproc.h> 31 #include <linux/reset.h> 32 #include <linux/soc/qcom/mdt_loader.h> 33 #include <linux/iopoll.h> 34 35 #include "remoteproc_internal.h" 36 #include "qcom_common.h" 37 #include "qcom_q6v5.h" 38 39 #include <linux/qcom_scm.h> 40 41 #define MPSS_CRASH_REASON_SMEM 421 42 43 /* RMB Status Register Values */ 44 #define RMB_PBL_SUCCESS 0x1 45 46 #define RMB_MBA_XPU_UNLOCKED 0x1 47 #define RMB_MBA_XPU_UNLOCKED_SCRIBBLED 0x2 48 #define RMB_MBA_META_DATA_AUTH_SUCCESS 0x3 49 #define RMB_MBA_AUTH_COMPLETE 0x4 50 51 /* PBL/MBA interface registers */ 52 #define RMB_MBA_IMAGE_REG 0x00 53 #define RMB_PBL_STATUS_REG 0x04 54 #define RMB_MBA_COMMAND_REG 0x08 55 #define RMB_MBA_STATUS_REG 0x0C 56 #define RMB_PMI_META_DATA_REG 0x10 57 #define RMB_PMI_CODE_START_REG 0x14 58 #define RMB_PMI_CODE_LENGTH_REG 0x18 59 #define RMB_MBA_MSS_STATUS 0x40 60 #define RMB_MBA_ALT_RESET 0x44 61 62 #define RMB_CMD_META_DATA_READY 0x1 63 #define RMB_CMD_LOAD_READY 0x2 64 65 /* QDSP6SS Register Offsets */ 66 #define QDSP6SS_RESET_REG 0x014 67 #define QDSP6SS_GFMUX_CTL_REG 0x020 68 #define QDSP6SS_PWR_CTL_REG 0x030 69 #define QDSP6SS_MEM_PWR_CTL 0x0B0 70 #define QDSP6SS_STRAP_ACC 0x110 71 72 /* AXI Halt Register Offsets */ 73 #define AXI_HALTREQ_REG 0x0 74 #define AXI_HALTACK_REG 0x4 75 #define AXI_IDLE_REG 0x8 76 77 #define HALT_ACK_TIMEOUT_MS 100 78 79 /* QDSP6SS_RESET */ 80 #define Q6SS_STOP_CORE BIT(0) 81 #define Q6SS_CORE_ARES BIT(1) 82 #define Q6SS_BUS_ARES_ENABLE BIT(2) 83 84 /* QDSP6SS_GFMUX_CTL */ 85 #define Q6SS_CLK_ENABLE BIT(1) 86 87 /* QDSP6SS_PWR_CTL */ 88 #define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0) 89 #define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1) 90 #define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2) 91 #define Q6SS_L2TAG_SLP_NRET_N BIT(16) 92 #define Q6SS_ETB_SLP_NRET_N BIT(17) 93 #define Q6SS_L2DATA_STBY_N BIT(18) 94 #define Q6SS_SLP_RET_N BIT(19) 95 #define Q6SS_CLAMP_IO BIT(20) 96 #define QDSS_BHS_ON BIT(21) 97 #define QDSS_LDO_BYP BIT(22) 98 99 /* QDSP6v56 parameters */ 100 #define QDSP6v56_LDO_BYP BIT(25) 101 #define QDSP6v56_BHS_ON BIT(24) 102 #define QDSP6v56_CLAMP_WL BIT(21) 103 #define QDSP6v56_CLAMP_QMC_MEM BIT(22) 104 #define HALT_CHECK_MAX_LOOPS 200 105 #define QDSP6SS_XO_CBCR 0x0038 106 #define QDSP6SS_ACC_OVERRIDE_VAL 0x20 107 108 /* QDSP6v65 parameters */ 109 #define QDSP6SS_SLEEP 0x3C 110 #define QDSP6SS_BOOT_CORE_START 0x400 111 #define QDSP6SS_BOOT_CMD 0x404 112 #define SLEEP_CHECK_MAX_LOOPS 200 113 #define BOOT_FSM_TIMEOUT 10000 114 115 struct reg_info { 116 struct regulator *reg; 117 int uV; 118 int uA; 119 }; 120 121 struct qcom_mss_reg_res { 122 const char *supply; 123 int uV; 124 int uA; 125 }; 126 127 struct rproc_hexagon_res { 128 const char *hexagon_mba_image; 129 struct qcom_mss_reg_res *proxy_supply; 130 struct qcom_mss_reg_res *active_supply; 131 char **proxy_clk_names; 132 char **reset_clk_names; 133 char **active_clk_names; 134 int version; 135 bool need_mem_protection; 136 bool has_alt_reset; 137 }; 138 139 struct q6v5 { 140 struct device *dev; 141 struct rproc *rproc; 142 143 void __iomem *reg_base; 144 void __iomem *rmb_base; 145 146 struct regmap *halt_map; 147 u32 halt_q6; 148 u32 halt_modem; 149 u32 halt_nc; 150 151 struct reset_control *mss_restart; 152 struct reset_control *pdc_reset; 153 154 struct qcom_q6v5 q6v5; 155 156 struct clk *active_clks[8]; 157 struct clk *reset_clks[4]; 158 struct clk *proxy_clks[4]; 159 int active_clk_count; 160 int reset_clk_count; 161 int proxy_clk_count; 162 163 struct reg_info active_regs[1]; 164 struct reg_info proxy_regs[3]; 165 int active_reg_count; 166 int proxy_reg_count; 167 168 bool running; 169 170 bool dump_mba_loaded; 171 unsigned long dump_segment_mask; 172 unsigned long dump_complete_mask; 173 174 phys_addr_t mba_phys; 175 void *mba_region; 176 size_t mba_size; 177 178 phys_addr_t mpss_phys; 179 phys_addr_t mpss_reloc; 180 void *mpss_region; 181 size_t mpss_size; 182 183 struct qcom_rproc_glink glink_subdev; 184 struct qcom_rproc_subdev smd_subdev; 185 struct qcom_rproc_ssr ssr_subdev; 186 struct qcom_sysmon *sysmon; 187 bool need_mem_protection; 188 bool has_alt_reset; 189 int mpss_perm; 190 int mba_perm; 191 int version; 192 }; 193 194 enum { 195 MSS_MSM8916, 196 MSS_MSM8974, 197 MSS_MSM8996, 198 MSS_SDM845, 199 }; 200 201 static int q6v5_regulator_init(struct device *dev, struct reg_info *regs, 202 const struct qcom_mss_reg_res *reg_res) 203 { 204 int rc; 205 int i; 206 207 if (!reg_res) 208 return 0; 209 210 for (i = 0; reg_res[i].supply; i++) { 211 regs[i].reg = devm_regulator_get(dev, reg_res[i].supply); 212 if (IS_ERR(regs[i].reg)) { 213 rc = PTR_ERR(regs[i].reg); 214 if (rc != -EPROBE_DEFER) 215 dev_err(dev, "Failed to get %s\n regulator", 216 reg_res[i].supply); 217 return rc; 218 } 219 220 regs[i].uV = reg_res[i].uV; 221 regs[i].uA = reg_res[i].uA; 222 } 223 224 return i; 225 } 226 227 static int q6v5_regulator_enable(struct q6v5 *qproc, 228 struct reg_info *regs, int count) 229 { 230 int ret; 231 int i; 232 233 for (i = 0; i < count; i++) { 234 if (regs[i].uV > 0) { 235 ret = regulator_set_voltage(regs[i].reg, 236 regs[i].uV, INT_MAX); 237 if (ret) { 238 dev_err(qproc->dev, 239 "Failed to request voltage for %d.\n", 240 i); 241 goto err; 242 } 243 } 244 245 if (regs[i].uA > 0) { 246 ret = regulator_set_load(regs[i].reg, 247 regs[i].uA); 248 if (ret < 0) { 249 dev_err(qproc->dev, 250 "Failed to set regulator mode\n"); 251 goto err; 252 } 253 } 254 255 ret = regulator_enable(regs[i].reg); 256 if (ret) { 257 dev_err(qproc->dev, "Regulator enable failed\n"); 258 goto err; 259 } 260 } 261 262 return 0; 263 err: 264 for (; i >= 0; i--) { 265 if (regs[i].uV > 0) 266 regulator_set_voltage(regs[i].reg, 0, INT_MAX); 267 268 if (regs[i].uA > 0) 269 regulator_set_load(regs[i].reg, 0); 270 271 regulator_disable(regs[i].reg); 272 } 273 274 return ret; 275 } 276 277 static void q6v5_regulator_disable(struct q6v5 *qproc, 278 struct reg_info *regs, int count) 279 { 280 int i; 281 282 for (i = 0; i < count; i++) { 283 if (regs[i].uV > 0) 284 regulator_set_voltage(regs[i].reg, 0, INT_MAX); 285 286 if (regs[i].uA > 0) 287 regulator_set_load(regs[i].reg, 0); 288 289 regulator_disable(regs[i].reg); 290 } 291 } 292 293 static int q6v5_clk_enable(struct device *dev, 294 struct clk **clks, int count) 295 { 296 int rc; 297 int i; 298 299 for (i = 0; i < count; i++) { 300 rc = clk_prepare_enable(clks[i]); 301 if (rc) { 302 dev_err(dev, "Clock enable failed\n"); 303 goto err; 304 } 305 } 306 307 return 0; 308 err: 309 for (i--; i >= 0; i--) 310 clk_disable_unprepare(clks[i]); 311 312 return rc; 313 } 314 315 static void q6v5_clk_disable(struct device *dev, 316 struct clk **clks, int count) 317 { 318 int i; 319 320 for (i = 0; i < count; i++) 321 clk_disable_unprepare(clks[i]); 322 } 323 324 static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm, 325 bool remote_owner, phys_addr_t addr, 326 size_t size) 327 { 328 struct qcom_scm_vmperm next; 329 330 if (!qproc->need_mem_protection) 331 return 0; 332 if (remote_owner && *current_perm == BIT(QCOM_SCM_VMID_MSS_MSA)) 333 return 0; 334 if (!remote_owner && *current_perm == BIT(QCOM_SCM_VMID_HLOS)) 335 return 0; 336 337 next.vmid = remote_owner ? QCOM_SCM_VMID_MSS_MSA : QCOM_SCM_VMID_HLOS; 338 next.perm = remote_owner ? QCOM_SCM_PERM_RW : QCOM_SCM_PERM_RWX; 339 340 return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K), 341 current_perm, &next, 1); 342 } 343 344 static int q6v5_load(struct rproc *rproc, const struct firmware *fw) 345 { 346 struct q6v5 *qproc = rproc->priv; 347 348 memcpy(qproc->mba_region, fw->data, fw->size); 349 350 return 0; 351 } 352 353 static int q6v5_reset_assert(struct q6v5 *qproc) 354 { 355 int ret; 356 357 if (qproc->has_alt_reset) { 358 reset_control_assert(qproc->pdc_reset); 359 ret = reset_control_reset(qproc->mss_restart); 360 reset_control_deassert(qproc->pdc_reset); 361 } else { 362 ret = reset_control_assert(qproc->mss_restart); 363 } 364 365 return ret; 366 } 367 368 static int q6v5_reset_deassert(struct q6v5 *qproc) 369 { 370 int ret; 371 372 if (qproc->has_alt_reset) { 373 reset_control_assert(qproc->pdc_reset); 374 writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET); 375 ret = reset_control_reset(qproc->mss_restart); 376 writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET); 377 reset_control_deassert(qproc->pdc_reset); 378 } else { 379 ret = reset_control_deassert(qproc->mss_restart); 380 } 381 382 return ret; 383 } 384 385 static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms) 386 { 387 unsigned long timeout; 388 s32 val; 389 390 timeout = jiffies + msecs_to_jiffies(ms); 391 for (;;) { 392 val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG); 393 if (val) 394 break; 395 396 if (time_after(jiffies, timeout)) 397 return -ETIMEDOUT; 398 399 msleep(1); 400 } 401 402 return val; 403 } 404 405 static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms) 406 { 407 408 unsigned long timeout; 409 s32 val; 410 411 timeout = jiffies + msecs_to_jiffies(ms); 412 for (;;) { 413 val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG); 414 if (val < 0) 415 break; 416 417 if (!status && val) 418 break; 419 else if (status && val == status) 420 break; 421 422 if (time_after(jiffies, timeout)) 423 return -ETIMEDOUT; 424 425 msleep(1); 426 } 427 428 return val; 429 } 430 431 static int q6v5proc_reset(struct q6v5 *qproc) 432 { 433 u32 val; 434 int ret; 435 int i; 436 437 if (qproc->version == MSS_SDM845) { 438 val = readl(qproc->reg_base + QDSP6SS_SLEEP); 439 val |= 0x1; 440 writel(val, qproc->reg_base + QDSP6SS_SLEEP); 441 442 ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP, 443 val, !(val & BIT(31)), 1, 444 SLEEP_CHECK_MAX_LOOPS); 445 if (ret) { 446 dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n"); 447 return -ETIMEDOUT; 448 } 449 450 /* De-assert QDSP6 stop core */ 451 writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START); 452 /* Trigger boot FSM */ 453 writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD); 454 455 ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS, 456 val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT); 457 if (ret) { 458 dev_err(qproc->dev, "Boot FSM failed to complete.\n"); 459 /* Reset the modem so that boot FSM is in reset state */ 460 q6v5_reset_deassert(qproc); 461 return ret; 462 } 463 464 goto pbl_wait; 465 } else if (qproc->version == MSS_MSM8996) { 466 /* Override the ACC value if required */ 467 writel(QDSP6SS_ACC_OVERRIDE_VAL, 468 qproc->reg_base + QDSP6SS_STRAP_ACC); 469 470 /* Assert resets, stop core */ 471 val = readl(qproc->reg_base + QDSP6SS_RESET_REG); 472 val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE; 473 writel(val, qproc->reg_base + QDSP6SS_RESET_REG); 474 475 /* BHS require xo cbcr to be enabled */ 476 val = readl(qproc->reg_base + QDSP6SS_XO_CBCR); 477 val |= 0x1; 478 writel(val, qproc->reg_base + QDSP6SS_XO_CBCR); 479 480 /* Read CLKOFF bit to go low indicating CLK is enabled */ 481 ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR, 482 val, !(val & BIT(31)), 1, 483 HALT_CHECK_MAX_LOOPS); 484 if (ret) { 485 dev_err(qproc->dev, 486 "xo cbcr enabling timed out (rc:%d)\n", ret); 487 return ret; 488 } 489 /* Enable power block headswitch and wait for it to stabilize */ 490 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 491 val |= QDSP6v56_BHS_ON; 492 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 493 val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 494 udelay(1); 495 496 /* Put LDO in bypass mode */ 497 val |= QDSP6v56_LDO_BYP; 498 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 499 500 /* Deassert QDSP6 compiler memory clamp */ 501 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 502 val &= ~QDSP6v56_CLAMP_QMC_MEM; 503 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 504 505 /* Deassert memory peripheral sleep and L2 memory standby */ 506 val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N; 507 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 508 509 /* Turn on L1, L2, ETB and JU memories 1 at a time */ 510 val = readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL); 511 for (i = 19; i >= 0; i--) { 512 val |= BIT(i); 513 writel(val, qproc->reg_base + 514 QDSP6SS_MEM_PWR_CTL); 515 /* 516 * Read back value to ensure the write is done then 517 * wait for 1us for both memory peripheral and data 518 * array to turn on. 519 */ 520 val |= readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL); 521 udelay(1); 522 } 523 /* Remove word line clamp */ 524 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 525 val &= ~QDSP6v56_CLAMP_WL; 526 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 527 } else { 528 /* Assert resets, stop core */ 529 val = readl(qproc->reg_base + QDSP6SS_RESET_REG); 530 val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE; 531 writel(val, qproc->reg_base + QDSP6SS_RESET_REG); 532 533 /* Enable power block headswitch and wait for it to stabilize */ 534 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 535 val |= QDSS_BHS_ON | QDSS_LDO_BYP; 536 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 537 val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 538 udelay(1); 539 /* 540 * Turn on memories. L2 banks should be done individually 541 * to minimize inrush current. 542 */ 543 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 544 val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N | 545 Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N; 546 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 547 val |= Q6SS_L2DATA_SLP_NRET_N_2; 548 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 549 val |= Q6SS_L2DATA_SLP_NRET_N_1; 550 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 551 val |= Q6SS_L2DATA_SLP_NRET_N_0; 552 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 553 } 554 /* Remove IO clamp */ 555 val &= ~Q6SS_CLAMP_IO; 556 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 557 558 /* Bring core out of reset */ 559 val = readl(qproc->reg_base + QDSP6SS_RESET_REG); 560 val &= ~Q6SS_CORE_ARES; 561 writel(val, qproc->reg_base + QDSP6SS_RESET_REG); 562 563 /* Turn on core clock */ 564 val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG); 565 val |= Q6SS_CLK_ENABLE; 566 writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG); 567 568 /* Start core execution */ 569 val = readl(qproc->reg_base + QDSP6SS_RESET_REG); 570 val &= ~Q6SS_STOP_CORE; 571 writel(val, qproc->reg_base + QDSP6SS_RESET_REG); 572 573 pbl_wait: 574 /* Wait for PBL status */ 575 ret = q6v5_rmb_pbl_wait(qproc, 1000); 576 if (ret == -ETIMEDOUT) { 577 dev_err(qproc->dev, "PBL boot timed out\n"); 578 } else if (ret != RMB_PBL_SUCCESS) { 579 dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret); 580 ret = -EINVAL; 581 } else { 582 ret = 0; 583 } 584 585 return ret; 586 } 587 588 static void q6v5proc_halt_axi_port(struct q6v5 *qproc, 589 struct regmap *halt_map, 590 u32 offset) 591 { 592 unsigned long timeout; 593 unsigned int val; 594 int ret; 595 596 /* Check if we're already idle */ 597 ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val); 598 if (!ret && val) 599 return; 600 601 /* Assert halt request */ 602 regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1); 603 604 /* Wait for halt */ 605 timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS); 606 for (;;) { 607 ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val); 608 if (ret || val || time_after(jiffies, timeout)) 609 break; 610 611 msleep(1); 612 } 613 614 ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val); 615 if (ret || !val) 616 dev_err(qproc->dev, "port failed halt\n"); 617 618 /* Clear halt request (port will remain halted until reset) */ 619 regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0); 620 } 621 622 static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw) 623 { 624 unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS; 625 dma_addr_t phys; 626 int mdata_perm; 627 int xferop_ret; 628 void *ptr; 629 int ret; 630 631 ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, dma_attrs); 632 if (!ptr) { 633 dev_err(qproc->dev, "failed to allocate mdt buffer\n"); 634 return -ENOMEM; 635 } 636 637 memcpy(ptr, fw->data, fw->size); 638 639 /* Hypervisor mapping to access metadata by modem */ 640 mdata_perm = BIT(QCOM_SCM_VMID_HLOS); 641 ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, 642 true, phys, fw->size); 643 if (ret) { 644 dev_err(qproc->dev, 645 "assigning Q6 access to metadata failed: %d\n", ret); 646 ret = -EAGAIN; 647 goto free_dma_attrs; 648 } 649 650 writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG); 651 writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG); 652 653 ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000); 654 if (ret == -ETIMEDOUT) 655 dev_err(qproc->dev, "MPSS header authentication timed out\n"); 656 else if (ret < 0) 657 dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret); 658 659 /* Metadata authentication done, remove modem access */ 660 xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, 661 false, phys, fw->size); 662 if (xferop_ret) 663 dev_warn(qproc->dev, 664 "mdt buffer not reclaimed system may become unstable\n"); 665 666 free_dma_attrs: 667 dma_free_attrs(qproc->dev, fw->size, ptr, phys, dma_attrs); 668 669 return ret < 0 ? ret : 0; 670 } 671 672 static bool q6v5_phdr_valid(const struct elf32_phdr *phdr) 673 { 674 if (phdr->p_type != PT_LOAD) 675 return false; 676 677 if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH) 678 return false; 679 680 if (!phdr->p_memsz) 681 return false; 682 683 return true; 684 } 685 686 static int q6v5_mba_load(struct q6v5 *qproc) 687 { 688 int ret; 689 int xfermemop_ret; 690 691 qcom_q6v5_prepare(&qproc->q6v5); 692 693 ret = q6v5_regulator_enable(qproc, qproc->proxy_regs, 694 qproc->proxy_reg_count); 695 if (ret) { 696 dev_err(qproc->dev, "failed to enable proxy supplies\n"); 697 goto disable_irqs; 698 } 699 700 ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks, 701 qproc->proxy_clk_count); 702 if (ret) { 703 dev_err(qproc->dev, "failed to enable proxy clocks\n"); 704 goto disable_proxy_reg; 705 } 706 707 ret = q6v5_regulator_enable(qproc, qproc->active_regs, 708 qproc->active_reg_count); 709 if (ret) { 710 dev_err(qproc->dev, "failed to enable supplies\n"); 711 goto disable_proxy_clk; 712 } 713 714 ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks, 715 qproc->reset_clk_count); 716 if (ret) { 717 dev_err(qproc->dev, "failed to enable reset clocks\n"); 718 goto disable_vdd; 719 } 720 721 ret = q6v5_reset_deassert(qproc); 722 if (ret) { 723 dev_err(qproc->dev, "failed to deassert mss restart\n"); 724 goto disable_reset_clks; 725 } 726 727 ret = q6v5_clk_enable(qproc->dev, qproc->active_clks, 728 qproc->active_clk_count); 729 if (ret) { 730 dev_err(qproc->dev, "failed to enable clocks\n"); 731 goto assert_reset; 732 } 733 734 /* Assign MBA image access in DDR to q6 */ 735 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, 736 qproc->mba_phys, qproc->mba_size); 737 if (ret) { 738 dev_err(qproc->dev, 739 "assigning Q6 access to mba memory failed: %d\n", ret); 740 goto disable_active_clks; 741 } 742 743 writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG); 744 745 ret = q6v5proc_reset(qproc); 746 if (ret) 747 goto reclaim_mba; 748 749 ret = q6v5_rmb_mba_wait(qproc, 0, 5000); 750 if (ret == -ETIMEDOUT) { 751 dev_err(qproc->dev, "MBA boot timed out\n"); 752 goto halt_axi_ports; 753 } else if (ret != RMB_MBA_XPU_UNLOCKED && 754 ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) { 755 dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret); 756 ret = -EINVAL; 757 goto halt_axi_ports; 758 } 759 760 qproc->dump_mba_loaded = true; 761 return 0; 762 763 halt_axi_ports: 764 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6); 765 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem); 766 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc); 767 768 reclaim_mba: 769 xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, 770 qproc->mba_phys, 771 qproc->mba_size); 772 if (xfermemop_ret) { 773 dev_err(qproc->dev, 774 "Failed to reclaim mba buffer, system may become unstable\n"); 775 } 776 777 disable_active_clks: 778 q6v5_clk_disable(qproc->dev, qproc->active_clks, 779 qproc->active_clk_count); 780 assert_reset: 781 q6v5_reset_assert(qproc); 782 disable_reset_clks: 783 q6v5_clk_disable(qproc->dev, qproc->reset_clks, 784 qproc->reset_clk_count); 785 disable_vdd: 786 q6v5_regulator_disable(qproc, qproc->active_regs, 787 qproc->active_reg_count); 788 disable_proxy_clk: 789 q6v5_clk_disable(qproc->dev, qproc->proxy_clks, 790 qproc->proxy_clk_count); 791 disable_proxy_reg: 792 q6v5_regulator_disable(qproc, qproc->proxy_regs, 793 qproc->proxy_reg_count); 794 disable_irqs: 795 qcom_q6v5_unprepare(&qproc->q6v5); 796 797 return ret; 798 } 799 800 static void q6v5_mba_reclaim(struct q6v5 *qproc) 801 { 802 int ret; 803 u32 val; 804 805 qproc->dump_mba_loaded = false; 806 807 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6); 808 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem); 809 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc); 810 if (qproc->version == MSS_MSM8996) { 811 /* 812 * To avoid high MX current during LPASS/MSS restart. 813 */ 814 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG); 815 val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL | 816 QDSP6v56_CLAMP_QMC_MEM; 817 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG); 818 } 819 820 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, 821 false, qproc->mpss_phys, 822 qproc->mpss_size); 823 WARN_ON(ret); 824 825 q6v5_reset_assert(qproc); 826 827 q6v5_clk_disable(qproc->dev, qproc->reset_clks, 828 qproc->reset_clk_count); 829 q6v5_clk_disable(qproc->dev, qproc->active_clks, 830 qproc->active_clk_count); 831 q6v5_regulator_disable(qproc, qproc->active_regs, 832 qproc->active_reg_count); 833 834 /* In case of failure or coredump scenario where reclaiming MBA memory 835 * could not happen reclaim it here. 836 */ 837 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, 838 qproc->mba_phys, 839 qproc->mba_size); 840 WARN_ON(ret); 841 842 ret = qcom_q6v5_unprepare(&qproc->q6v5); 843 if (ret) { 844 q6v5_clk_disable(qproc->dev, qproc->proxy_clks, 845 qproc->proxy_clk_count); 846 q6v5_regulator_disable(qproc, qproc->proxy_regs, 847 qproc->proxy_reg_count); 848 } 849 } 850 851 static int q6v5_mpss_load(struct q6v5 *qproc) 852 { 853 const struct elf32_phdr *phdrs; 854 const struct elf32_phdr *phdr; 855 const struct firmware *seg_fw; 856 const struct firmware *fw; 857 struct elf32_hdr *ehdr; 858 phys_addr_t mpss_reloc; 859 phys_addr_t boot_addr; 860 phys_addr_t min_addr = PHYS_ADDR_MAX; 861 phys_addr_t max_addr = 0; 862 bool relocate = false; 863 char seg_name[10]; 864 ssize_t offset; 865 size_t size = 0; 866 void *ptr; 867 int ret; 868 int i; 869 870 ret = request_firmware(&fw, "modem.mdt", qproc->dev); 871 if (ret < 0) { 872 dev_err(qproc->dev, "unable to load modem.mdt\n"); 873 return ret; 874 } 875 876 /* Initialize the RMB validator */ 877 writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); 878 879 ret = q6v5_mpss_init_image(qproc, fw); 880 if (ret) 881 goto release_firmware; 882 883 ehdr = (struct elf32_hdr *)fw->data; 884 phdrs = (struct elf32_phdr *)(ehdr + 1); 885 886 for (i = 0; i < ehdr->e_phnum; i++) { 887 phdr = &phdrs[i]; 888 889 if (!q6v5_phdr_valid(phdr)) 890 continue; 891 892 if (phdr->p_flags & QCOM_MDT_RELOCATABLE) 893 relocate = true; 894 895 if (phdr->p_paddr < min_addr) 896 min_addr = phdr->p_paddr; 897 898 if (phdr->p_paddr + phdr->p_memsz > max_addr) 899 max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K); 900 } 901 902 mpss_reloc = relocate ? min_addr : qproc->mpss_phys; 903 qproc->mpss_reloc = mpss_reloc; 904 /* Load firmware segments */ 905 for (i = 0; i < ehdr->e_phnum; i++) { 906 phdr = &phdrs[i]; 907 908 if (!q6v5_phdr_valid(phdr)) 909 continue; 910 911 offset = phdr->p_paddr - mpss_reloc; 912 if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) { 913 dev_err(qproc->dev, "segment outside memory range\n"); 914 ret = -EINVAL; 915 goto release_firmware; 916 } 917 918 ptr = qproc->mpss_region + offset; 919 920 if (phdr->p_filesz) { 921 snprintf(seg_name, sizeof(seg_name), "modem.b%02d", i); 922 ret = request_firmware(&seg_fw, seg_name, qproc->dev); 923 if (ret) { 924 dev_err(qproc->dev, "failed to load %s\n", seg_name); 925 goto release_firmware; 926 } 927 928 memcpy(ptr, seg_fw->data, seg_fw->size); 929 930 release_firmware(seg_fw); 931 } 932 933 if (phdr->p_memsz > phdr->p_filesz) { 934 memset(ptr + phdr->p_filesz, 0, 935 phdr->p_memsz - phdr->p_filesz); 936 } 937 size += phdr->p_memsz; 938 } 939 940 /* Transfer ownership of modem ddr region to q6 */ 941 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, 942 qproc->mpss_phys, qproc->mpss_size); 943 if (ret) { 944 dev_err(qproc->dev, 945 "assigning Q6 access to mpss memory failed: %d\n", ret); 946 ret = -EAGAIN; 947 goto release_firmware; 948 } 949 950 boot_addr = relocate ? qproc->mpss_phys : min_addr; 951 writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG); 952 writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG); 953 writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); 954 955 ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000); 956 if (ret == -ETIMEDOUT) 957 dev_err(qproc->dev, "MPSS authentication timed out\n"); 958 else if (ret < 0) 959 dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret); 960 961 release_firmware: 962 release_firmware(fw); 963 964 return ret < 0 ? ret : 0; 965 } 966 967 static void qcom_q6v5_dump_segment(struct rproc *rproc, 968 struct rproc_dump_segment *segment, 969 void *dest) 970 { 971 int ret = 0; 972 struct q6v5 *qproc = rproc->priv; 973 unsigned long mask = BIT((unsigned long)segment->priv); 974 void *ptr = rproc_da_to_va(rproc, segment->da, segment->size); 975 976 /* Unlock mba before copying segments */ 977 if (!qproc->dump_mba_loaded) 978 ret = q6v5_mba_load(qproc); 979 980 if (!ptr || ret) 981 memset(dest, 0xff, segment->size); 982 else 983 memcpy(dest, ptr, segment->size); 984 985 qproc->dump_segment_mask |= mask; 986 987 /* Reclaim mba after copying segments */ 988 if (qproc->dump_segment_mask == qproc->dump_complete_mask) { 989 if (qproc->dump_mba_loaded) 990 q6v5_mba_reclaim(qproc); 991 } 992 } 993 994 static int q6v5_start(struct rproc *rproc) 995 { 996 struct q6v5 *qproc = (struct q6v5 *)rproc->priv; 997 int xfermemop_ret; 998 int ret; 999 1000 ret = q6v5_mba_load(qproc); 1001 if (ret) 1002 return ret; 1003 1004 dev_info(qproc->dev, "MBA booted, loading mpss\n"); 1005 1006 ret = q6v5_mpss_load(qproc); 1007 if (ret) 1008 goto reclaim_mpss; 1009 1010 ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000)); 1011 if (ret == -ETIMEDOUT) { 1012 dev_err(qproc->dev, "start timed out\n"); 1013 goto reclaim_mpss; 1014 } 1015 1016 xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, 1017 qproc->mba_phys, 1018 qproc->mba_size); 1019 if (xfermemop_ret) 1020 dev_err(qproc->dev, 1021 "Failed to reclaim mba buffer system may become unstable\n"); 1022 1023 /* Reset Dump Segment Mask */ 1024 qproc->dump_segment_mask = 0; 1025 qproc->running = true; 1026 1027 return 0; 1028 1029 reclaim_mpss: 1030 xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, 1031 false, qproc->mpss_phys, 1032 qproc->mpss_size); 1033 WARN_ON(xfermemop_ret); 1034 q6v5_mba_reclaim(qproc); 1035 1036 return ret; 1037 } 1038 1039 static int q6v5_stop(struct rproc *rproc) 1040 { 1041 struct q6v5 *qproc = (struct q6v5 *)rproc->priv; 1042 int ret; 1043 1044 qproc->running = false; 1045 1046 ret = qcom_q6v5_request_stop(&qproc->q6v5); 1047 if (ret == -ETIMEDOUT) 1048 dev_err(qproc->dev, "timed out on wait\n"); 1049 1050 q6v5_mba_reclaim(qproc); 1051 1052 return 0; 1053 } 1054 1055 static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len) 1056 { 1057 struct q6v5 *qproc = rproc->priv; 1058 int offset; 1059 1060 offset = da - qproc->mpss_reloc; 1061 if (offset < 0 || offset + len > qproc->mpss_size) 1062 return NULL; 1063 1064 return qproc->mpss_region + offset; 1065 } 1066 1067 static const struct rproc_ops q6v5_ops = { 1068 .start = q6v5_start, 1069 .stop = q6v5_stop, 1070 .da_to_va = q6v5_da_to_va, 1071 .load = q6v5_load, 1072 }; 1073 1074 static void qcom_msa_handover(struct qcom_q6v5 *q6v5) 1075 { 1076 struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5); 1077 1078 q6v5_clk_disable(qproc->dev, qproc->proxy_clks, 1079 qproc->proxy_clk_count); 1080 q6v5_regulator_disable(qproc, qproc->proxy_regs, 1081 qproc->proxy_reg_count); 1082 } 1083 1084 static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev) 1085 { 1086 struct of_phandle_args args; 1087 struct resource *res; 1088 int ret; 1089 1090 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6"); 1091 qproc->reg_base = devm_ioremap_resource(&pdev->dev, res); 1092 if (IS_ERR(qproc->reg_base)) 1093 return PTR_ERR(qproc->reg_base); 1094 1095 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb"); 1096 qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res); 1097 if (IS_ERR(qproc->rmb_base)) 1098 return PTR_ERR(qproc->rmb_base); 1099 1100 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, 1101 "qcom,halt-regs", 3, 0, &args); 1102 if (ret < 0) { 1103 dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n"); 1104 return -EINVAL; 1105 } 1106 1107 qproc->halt_map = syscon_node_to_regmap(args.np); 1108 of_node_put(args.np); 1109 if (IS_ERR(qproc->halt_map)) 1110 return PTR_ERR(qproc->halt_map); 1111 1112 qproc->halt_q6 = args.args[0]; 1113 qproc->halt_modem = args.args[1]; 1114 qproc->halt_nc = args.args[2]; 1115 1116 return 0; 1117 } 1118 1119 static int q6v5_init_clocks(struct device *dev, struct clk **clks, 1120 char **clk_names) 1121 { 1122 int i; 1123 1124 if (!clk_names) 1125 return 0; 1126 1127 for (i = 0; clk_names[i]; i++) { 1128 clks[i] = devm_clk_get(dev, clk_names[i]); 1129 if (IS_ERR(clks[i])) { 1130 int rc = PTR_ERR(clks[i]); 1131 1132 if (rc != -EPROBE_DEFER) 1133 dev_err(dev, "Failed to get %s clock\n", 1134 clk_names[i]); 1135 return rc; 1136 } 1137 } 1138 1139 return i; 1140 } 1141 1142 static int q6v5_init_reset(struct q6v5 *qproc) 1143 { 1144 qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev, 1145 "mss_restart"); 1146 if (IS_ERR(qproc->mss_restart)) { 1147 dev_err(qproc->dev, "failed to acquire mss restart\n"); 1148 return PTR_ERR(qproc->mss_restart); 1149 } 1150 1151 if (qproc->has_alt_reset) { 1152 qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev, 1153 "pdc_reset"); 1154 if (IS_ERR(qproc->pdc_reset)) { 1155 dev_err(qproc->dev, "failed to acquire pdc reset\n"); 1156 return PTR_ERR(qproc->pdc_reset); 1157 } 1158 } 1159 1160 return 0; 1161 } 1162 1163 static int q6v5_alloc_memory_region(struct q6v5 *qproc) 1164 { 1165 struct device_node *child; 1166 struct device_node *node; 1167 struct resource r; 1168 int ret; 1169 1170 child = of_get_child_by_name(qproc->dev->of_node, "mba"); 1171 node = of_parse_phandle(child, "memory-region", 0); 1172 ret = of_address_to_resource(node, 0, &r); 1173 if (ret) { 1174 dev_err(qproc->dev, "unable to resolve mba region\n"); 1175 return ret; 1176 } 1177 of_node_put(node); 1178 1179 qproc->mba_phys = r.start; 1180 qproc->mba_size = resource_size(&r); 1181 qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size); 1182 if (!qproc->mba_region) { 1183 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", 1184 &r.start, qproc->mba_size); 1185 return -EBUSY; 1186 } 1187 1188 child = of_get_child_by_name(qproc->dev->of_node, "mpss"); 1189 node = of_parse_phandle(child, "memory-region", 0); 1190 ret = of_address_to_resource(node, 0, &r); 1191 if (ret) { 1192 dev_err(qproc->dev, "unable to resolve mpss region\n"); 1193 return ret; 1194 } 1195 of_node_put(node); 1196 1197 qproc->mpss_phys = qproc->mpss_reloc = r.start; 1198 qproc->mpss_size = resource_size(&r); 1199 qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size); 1200 if (!qproc->mpss_region) { 1201 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n", 1202 &r.start, qproc->mpss_size); 1203 return -EBUSY; 1204 } 1205 1206 return 0; 1207 } 1208 1209 static int q6v5_probe(struct platform_device *pdev) 1210 { 1211 const struct rproc_hexagon_res *desc; 1212 struct q6v5 *qproc; 1213 struct rproc *rproc; 1214 int ret; 1215 1216 desc = of_device_get_match_data(&pdev->dev); 1217 if (!desc) 1218 return -EINVAL; 1219 1220 if (desc->need_mem_protection && !qcom_scm_is_available()) 1221 return -EPROBE_DEFER; 1222 1223 rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops, 1224 desc->hexagon_mba_image, sizeof(*qproc)); 1225 if (!rproc) { 1226 dev_err(&pdev->dev, "failed to allocate rproc\n"); 1227 return -ENOMEM; 1228 } 1229 1230 qproc = (struct q6v5 *)rproc->priv; 1231 qproc->dev = &pdev->dev; 1232 qproc->rproc = rproc; 1233 platform_set_drvdata(pdev, qproc); 1234 1235 ret = q6v5_init_mem(qproc, pdev); 1236 if (ret) 1237 goto free_rproc; 1238 1239 ret = q6v5_alloc_memory_region(qproc); 1240 if (ret) 1241 goto free_rproc; 1242 1243 ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks, 1244 desc->proxy_clk_names); 1245 if (ret < 0) { 1246 dev_err(&pdev->dev, "Failed to get proxy clocks.\n"); 1247 goto free_rproc; 1248 } 1249 qproc->proxy_clk_count = ret; 1250 1251 ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks, 1252 desc->reset_clk_names); 1253 if (ret < 0) { 1254 dev_err(&pdev->dev, "Failed to get reset clocks.\n"); 1255 goto free_rproc; 1256 } 1257 qproc->reset_clk_count = ret; 1258 1259 ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks, 1260 desc->active_clk_names); 1261 if (ret < 0) { 1262 dev_err(&pdev->dev, "Failed to get active clocks.\n"); 1263 goto free_rproc; 1264 } 1265 qproc->active_clk_count = ret; 1266 1267 ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs, 1268 desc->proxy_supply); 1269 if (ret < 0) { 1270 dev_err(&pdev->dev, "Failed to get proxy regulators.\n"); 1271 goto free_rproc; 1272 } 1273 qproc->proxy_reg_count = ret; 1274 1275 ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs, 1276 desc->active_supply); 1277 if (ret < 0) { 1278 dev_err(&pdev->dev, "Failed to get active regulators.\n"); 1279 goto free_rproc; 1280 } 1281 qproc->active_reg_count = ret; 1282 1283 qproc->has_alt_reset = desc->has_alt_reset; 1284 ret = q6v5_init_reset(qproc); 1285 if (ret) 1286 goto free_rproc; 1287 1288 qproc->version = desc->version; 1289 qproc->need_mem_protection = desc->need_mem_protection; 1290 1291 ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM, 1292 qcom_msa_handover); 1293 if (ret) 1294 goto free_rproc; 1295 1296 qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS); 1297 qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS); 1298 qcom_add_glink_subdev(rproc, &qproc->glink_subdev); 1299 qcom_add_smd_subdev(rproc, &qproc->smd_subdev); 1300 qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss"); 1301 qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12); 1302 1303 ret = rproc_add(rproc); 1304 if (ret) 1305 goto free_rproc; 1306 1307 return 0; 1308 1309 free_rproc: 1310 rproc_free(rproc); 1311 1312 return ret; 1313 } 1314 1315 static int q6v5_remove(struct platform_device *pdev) 1316 { 1317 struct q6v5 *qproc = platform_get_drvdata(pdev); 1318 1319 rproc_del(qproc->rproc); 1320 1321 qcom_remove_sysmon_subdev(qproc->sysmon); 1322 qcom_remove_glink_subdev(qproc->rproc, &qproc->glink_subdev); 1323 qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev); 1324 qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev); 1325 rproc_free(qproc->rproc); 1326 1327 return 0; 1328 } 1329 1330 static const struct rproc_hexagon_res sdm845_mss = { 1331 .hexagon_mba_image = "mba.mbn", 1332 .proxy_clk_names = (char*[]){ 1333 "xo", 1334 "prng", 1335 NULL 1336 }, 1337 .reset_clk_names = (char*[]){ 1338 "iface", 1339 "snoc_axi", 1340 NULL 1341 }, 1342 .active_clk_names = (char*[]){ 1343 "bus", 1344 "mem", 1345 "gpll0_mss", 1346 "mnoc_axi", 1347 NULL 1348 }, 1349 .need_mem_protection = true, 1350 .has_alt_reset = true, 1351 .version = MSS_SDM845, 1352 }; 1353 1354 static const struct rproc_hexagon_res msm8996_mss = { 1355 .hexagon_mba_image = "mba.mbn", 1356 .proxy_clk_names = (char*[]){ 1357 "xo", 1358 "pnoc", 1359 NULL 1360 }, 1361 .active_clk_names = (char*[]){ 1362 "iface", 1363 "bus", 1364 "mem", 1365 "gpll0_mss_clk", 1366 NULL 1367 }, 1368 .need_mem_protection = true, 1369 .has_alt_reset = false, 1370 .version = MSS_MSM8996, 1371 }; 1372 1373 static const struct rproc_hexagon_res msm8916_mss = { 1374 .hexagon_mba_image = "mba.mbn", 1375 .proxy_supply = (struct qcom_mss_reg_res[]) { 1376 { 1377 .supply = "mx", 1378 .uV = 1050000, 1379 }, 1380 { 1381 .supply = "cx", 1382 .uA = 100000, 1383 }, 1384 { 1385 .supply = "pll", 1386 .uA = 100000, 1387 }, 1388 {} 1389 }, 1390 .proxy_clk_names = (char*[]){ 1391 "xo", 1392 NULL 1393 }, 1394 .active_clk_names = (char*[]){ 1395 "iface", 1396 "bus", 1397 "mem", 1398 NULL 1399 }, 1400 .need_mem_protection = false, 1401 .has_alt_reset = false, 1402 .version = MSS_MSM8916, 1403 }; 1404 1405 static const struct rproc_hexagon_res msm8974_mss = { 1406 .hexagon_mba_image = "mba.b00", 1407 .proxy_supply = (struct qcom_mss_reg_res[]) { 1408 { 1409 .supply = "mx", 1410 .uV = 1050000, 1411 }, 1412 { 1413 .supply = "cx", 1414 .uA = 100000, 1415 }, 1416 { 1417 .supply = "pll", 1418 .uA = 100000, 1419 }, 1420 {} 1421 }, 1422 .active_supply = (struct qcom_mss_reg_res[]) { 1423 { 1424 .supply = "mss", 1425 .uV = 1050000, 1426 .uA = 100000, 1427 }, 1428 {} 1429 }, 1430 .proxy_clk_names = (char*[]){ 1431 "xo", 1432 NULL 1433 }, 1434 .active_clk_names = (char*[]){ 1435 "iface", 1436 "bus", 1437 "mem", 1438 NULL 1439 }, 1440 .need_mem_protection = false, 1441 .has_alt_reset = false, 1442 .version = MSS_MSM8974, 1443 }; 1444 1445 static const struct of_device_id q6v5_of_match[] = { 1446 { .compatible = "qcom,q6v5-pil", .data = &msm8916_mss}, 1447 { .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss}, 1448 { .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss}, 1449 { .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss}, 1450 { .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss}, 1451 { }, 1452 }; 1453 MODULE_DEVICE_TABLE(of, q6v5_of_match); 1454 1455 static struct platform_driver q6v5_driver = { 1456 .probe = q6v5_probe, 1457 .remove = q6v5_remove, 1458 .driver = { 1459 .name = "qcom-q6v5-mss", 1460 .of_match_table = q6v5_of_match, 1461 }, 1462 }; 1463 module_platform_driver(q6v5_driver); 1464 1465 MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver"); 1466 MODULE_LICENSE("GPL v2"); 1467