// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/reset-controller.h>
#include <linux/arm-smccc.h>

#include "qcom_scm.h"

static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
module_param(download_mode, bool, 0);

#define SCM_HAS_CORE_CLK	BIT(0)
#define SCM_HAS_IFACE_CLK	BIT(1)
#define SCM_HAS_BUS_CLK		BIT(2)

struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct reset_controller_dev reset;

	u64 dload_mode_addr;
};

struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};

#define QCOM_SCM_FLAG_COLDBOOT_CPU0	0x00
#define QCOM_SCM_FLAG_COLDBOOT_CPU1	0x01
#define QCOM_SCM_FLAG_COLDBOOT_CPU2	0x08
#define QCOM_SCM_FLAG_COLDBOOT_CPU3	0x20

#define QCOM_SCM_FLAG_WARMBOOT_CPU0	0x04
#define QCOM_SCM_FLAG_WARMBOOT_CPU1	0x02
#define QCOM_SCM_FLAG_WARMBOOT_CPU2	0x10
#define QCOM_SCM_FLAG_WARMBOOT_CPU3	0x40

struct qcom_scm_wb_entry {
	int flag;
	void *entry;
};

static struct qcom_scm_wb_entry qcom_scm_wb[] = {
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
};

static const char * const qcom_scm_convention_names[] = {
	[SMC_CONVENTION_UNKNOWN] = "unknown",
	[SMC_CONVENTION_ARM_32] = "smc arm 32",
	[SMC_CONVENTION_ARM_64] = "smc arm 64",
	[SMC_CONVENTION_LEGACY] = "smc legacy",
};

static struct qcom_scm *__scm;

static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}

static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}

enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);
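
/*
 * Probe the convention used by the firmware once and cache the result:
 * try SMC64 first, then SMC32, then fall back to the legacy convention.
 * SC7180 firmware that lacks QCOM_SCM_INFO_IS_CALL_AVAIL is special-cased
 * to SMC64 inside __get_convention().
 */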
static enum qcom_scm_convention __get_convention(void)
{
	unsigned long flags;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
					QCOM_SCM_INFO_IS_CALL_AVAIL) |
			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	enum qcom_scm_convention probed_convention;
	int ret;
	bool forced = false;

	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
		return qcom_scm_convention;

	/*
	 * Device isn't required as there is only one argument - no device
	 * needed to dma_map_single to secure world
	 */
	probed_convention = SMC_CONVENTION_ARM_64;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	/*
	 * Some SC7180 firmwares didn't implement the
	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing ARM_64
	 * calling conventions on these firmwares. Luckily we don't make any
	 * early calls into the firmware on these SoCs so the device pointer
	 * will be valid here to check if the compatible matches.
	 */
	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
		forced = true;
		goto found;
	}

	probed_convention = SMC_CONVENTION_ARM_32;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	probed_convention = SMC_CONVENTION_LEGACY;
found:
	spin_lock_irqsave(&scm_query_lock, flags);
	if (probed_convention != qcom_scm_convention) {
		qcom_scm_convention = probed_convention;
		pr_info("qcom_scm: convention: %s%s\n",
			qcom_scm_convention_names[qcom_scm_convention],
			forced ? " (forced)" : "");
	}
	spin_unlock_irqrestore(&scm_query_lock, flags);

	return qcom_scm_convention;
}

/**
 * qcom_scm_call() - Invoke a syscall in the secure world
 * @dev: device
 * @desc: Descriptor structure containing arguments and return values
 * @res: Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This should *only* be called in pre-emptible context.
 */
static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
			 struct qcom_scm_res *res)
{
	might_sleep();
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, false);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

/**
 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 * @dev: device
 * @desc: Descriptor structure containing arguments and return values
 * @res: Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This can be called in atomic context.
 */
static int qcom_scm_call_atomic(struct device *dev,
				const struct qcom_scm_desc *desc,
				struct qcom_scm_res *res)
{
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, true);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call_atomic(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}
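
/*
 * Ask the firmware whether a given service/command pair is implemented.
 * Failure of the query call itself is reported as "not available".
 */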
static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
					 u32 cmd_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	desc.arginfo = QCOM_SCM_ARGS(1);
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
		break;
	case SMC_CONVENTION_LEGACY:
		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
		break;
	default:
		pr_err("Unknown SMC convention being used\n");
		/* Returning -EINVAL here would be truthy; report unavailable */
		return false;
	}

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? false : !!res.result[0];
}

/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
{
	int ret;
	int flags = 0;
	int cpu;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
	};

	/*
	 * Reassign only if we are switching from hotplug entry point
	 * to cpuidle entry point or vice versa.
	 */
	for_each_cpu(cpu, cpus) {
		if (entry == qcom_scm_wb[cpu].entry)
			continue;
		flags |= qcom_scm_wb[cpu].flag;
	}

	/* No change in entry function */
	if (!flags)
		return 0;

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	ret = qcom_scm_call(__scm->dev, &desc, NULL);
	if (!ret) {
		for_each_cpu(cpu, cpus)
			qcom_scm_wb[cpu].entry = entry;
	}

	return ret;
}
EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the cold boot address of the cpus. Any cpu outside the supported
 * range would be removed from the cpu present mask.
 */
int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
	int flags = 0;
	int cpu;
	int scm_cb_flags[] = {
		QCOM_SCM_FLAG_COLDBOOT_CPU0,
		QCOM_SCM_FLAG_COLDBOOT_CPU1,
		QCOM_SCM_FLAG_COLDBOOT_CPU2,
		QCOM_SCM_FLAG_COLDBOOT_CPU3,
	};
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	if (!cpus || cpumask_empty(cpus))
		return -EINVAL;

	for_each_cpu(cpu, cpus) {
		if (cpu < ARRAY_SIZE(scm_cb_flags))
			flags |= scm_cb_flags[cpu];
		else
			set_cpu_present(cpu, false);
	}

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);

/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags: Flags to flush cache
 *
 * This is an end point to power down cpu. If there was a pending interrupt,
 * the control would return from this function, otherwise, the cpu jumps to the
 * warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_cpu_power_down);
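
/**
 * qcom_scm_set_remote_state() - Set the state of a remote processor
 * @state: state to put the remote processor into
 * @id: identifier of the remote processor
 *
 * Returns the firmware's result code on success, a negative errno on failure.
 */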
int qcom_scm_set_remote_state(u32 state, u32 id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = state,
		.args[1] = id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_set_remote_state);
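
/*
 * Download mode makes the boot chain stop in a crash-dump state on the next
 * reboot. It is requested either through a dedicated SCM call or, on
 * firmware that lacks that call, by writing a magic value to the TCSR
 * register named by the "qcom,dload-mode" DT property.
 */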
static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}

static void qcom_scm_set_download_mode(bool enable)
{
	bool avail;
	int ret = 0;

	avail = __qcom_scm_is_call_available(__scm->dev,
					     QCOM_SCM_SVC_BOOT,
					     QCOM_SCM_BOOT_SET_DLOAD_MODE);
	if (avail) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
	} else if (__scm->dload_mode_addr) {
		ret = qcom_scm_io_writel(__scm->dload_mode_addr,
				enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
	} else {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}
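
/*
 * Peripheral Authentication Service (PAS) calls. A remoteproc driver
 * typically uses them in this order: qcom_scm_pas_init_image() with the
 * firmware metadata, qcom_scm_pas_mem_setup() for the carveout, then
 * qcom_scm_pas_auth_and_reset() to start the remote processor, and
 * eventually qcom_scm_pas_shutdown() to stop it.
 */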

/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @peripheral: peripheral id
 * @metadata: pointer to memory containing ELF header, program header table
 *	      and optional blob of data used for authenticating the metadata
 *	      and the rest of the firmware
 * @size: size of the metadata
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
{
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	/*
	 * During the scm call memory protection will be enabled for the meta
	 * data blob, so make sure it's physically contiguous, 4K aligned and
	 * non-cacheable to avoid XPU violations.
	 */
	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf) {
		dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
		return -ENOMEM;
	}
	memcpy(mdata_buf, metadata, size);

	ret = qcom_scm_clk_enable();
	if (ret)
		goto free_metadata;

	desc.args[1] = mdata_phys;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_clk_disable();

free_metadata:
	dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_init_image);

/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @peripheral: peripheral id
 * @addr: start address of memory area to prepare
 * @size: size of the memory area to prepare
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = peripheral,
		.args[1] = addr,
		.args[2] = size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_mem_setup);

/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);

/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_shutdown);

/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
 * @peripheral: peripheral id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
		return false;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? false : !!res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_supported);

static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = reset,
		.args[1] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}

static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
					unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};
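
/**
 * qcom_scm_io_readl() - Read a 32-bit value from the given physical address
 *			 through the secure world
 * @addr: physical address to read
 * @val: filled with the value read on success
 *
 * Can be called in atomic context. Returns 0 on success, a negative errno
 * otherwise.
 */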
int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_READ,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = addr,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
	if (ret >= 0)
		*val = res.result[0];

	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL(qcom_scm_io_readl);

int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_WRITE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = addr,
		.args[1] = val,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_io_writel);

/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 *					  supports restore security config
 *					  interface.
 *
 * Return true if restore-cfg interface is supported, false if not.
 */
bool qcom_scm_restore_sec_cfg_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_RESTORE_SEC_CFG);
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available);
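
/**
 * qcom_scm_restore_sec_cfg() - Ask the secure world to restore the secure
 *				configuration of the given device
 * @device_id: secure device identifier
 * @spare: spare argument
 *
 * Returns the firmware's result code on success, a negative errno on failure.
 */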
int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = device_id,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);

int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (size)
		*size = res.result[0];

	return ret ? : res.result[1];
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);

int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = addr,
		.args[1] = size,
		.args[2] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	/* the pg table has been initialized already, ignore the error */
	if (ret == -EPERM)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);

int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
				   u32 cp_nonpixel_start,
				   u32 cp_nonpixel_size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_VIDEO_VAR,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = cp_start,
		.args[1] = cp_size,
		.args[2] = cp_nonpixel_start,
		.args[3] = cp_nonpixel_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_mem_protect_video_var);

static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
				 size_t mem_sz, phys_addr_t src, size_t src_sz,
				 phys_addr_t dest, size_t dest_sz)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_ASSIGN,
		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = mem_region,
		.args[1] = mem_sz,
		.args[2] = src,
		.args[3] = src_sz,
		.args[4] = dest,
		.args[5] = dest_sz,
		.args[6] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? : res.result[0];
}

/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: mem region whose ownership needs to be reassigned
 * @mem_sz: size of the region.
 * @srcvm: vmid for the current set of owners, each set bit in
 *	   the flag indicates a unique owner
 * @newvm: array having new owners and corresponding permission
 *	   flags
 * @dest_cnt: number of owners in next set.
 *
 * Return negative errno on failure or 0 on success with @srcvm updated.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			unsigned int *srcvm,
			const struct qcom_scm_vmperm *newvm,
			unsigned int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	dma_addr_t ptr_phys;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	void *ptr;
	int ret, i, b;
	unsigned long srcvm_bits = *srcvm;

	src_sz = hweight_long(srcvm_bits) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
			ALIGN(dest_sz, SZ_64);

	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	/* Fill source vmid detail */
	src = ptr;
	i = 0;
	for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG)
		src[i++] = cpu_to_le32(b);

	/* Fill details of mem buff to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
	mem_to_map->mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
	/* Fill details of next vmid detail */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
		destvm->vmid = cpu_to_le32(newvm->vmid);
		destvm->perm = cpu_to_le32(newvm->perm);
		destvm->ctx = 0;
		destvm->ctx_size = 0;
		next_vm |= BIT(newvm->vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d\n", ret);
		return -EINVAL;
	}

	*srcvm = next_vm;
	return 0;
}
EXPORT_SYMBOL(qcom_scm_assign_mem);
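
/*
 * Example (illustrative sketch only; which VMIDs and permissions a platform
 * accepts is firmware specific):
 *
 *	unsigned int srcvm = BIT(QCOM_SCM_VMID_HLOS);
 *	struct qcom_scm_vmperm newvm[] = {
 *		{ QCOM_SCM_VMID_MSS_MSA, QCOM_SCM_PERM_RW },
 *	};
 *
 *	ret = qcom_scm_assign_mem(addr, size, &srcvm, newvm,
 *				  ARRAY_SIZE(newvm));
 */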

/**
 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
 */
bool qcom_scm_ocmem_lock_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
					    QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL(qcom_scm_ocmem_lock_available);

/**
 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 * region to the specified initiator
 *
 * @id: tz initiator id
 * @offset: OCMEM offset
 * @size: OCMEM size
 * @mode: access mode (WIDE/NARROW)
 */
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
			u32 mode)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.args[3] = mode,
		.arginfo = QCOM_SCM_ARGS(4),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_ocmem_lock);

/**
 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
 * region from the specified initiator
 *
 * @id: tz initiator id
 * @offset: OCMEM offset
 * @size: OCMEM size
 */
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.arginfo = QCOM_SCM_ARGS(3),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_ocmem_unlock);

/**
 * qcom_scm_ice_available() - Is the ICE key programming interface available?
 *
 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
 *	   qcom_scm_ice_set_key() are available.
 */
bool qcom_scm_ice_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
}
EXPORT_SYMBOL(qcom_scm_ice_available);

/**
 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
 * @index: the keyslot to invalidate
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_invalidate_key(u32 index)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = index,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_ice_invalidate_key);

/**
 * qcom_scm_ice_set_key() - Set an inline encryption key
 * @index: the keyslot into which to set the key
 * @key: the key to program
 * @key_size: the size of the key in bytes
 * @cipher: the encryption algorithm the key is for
 * @data_unit_size: the encryption data unit size, i.e. the size of each
 *		    individual plaintext and ciphertext. Given in 512-byte
 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
 *
 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where
 * it can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
					 QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = index,
		.args[2] = key_size,
		.args[3] = cipher,
		.args[4] = data_unit_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	void *keybuf;
	dma_addr_t key_phys;
	int ret;

	/*
	 * 'key' may point to vmalloc()'ed memory, but we need to pass a
	 * physical address that's been properly flushed. The sanctioned way to
	 * do this is by using the DMA API. But as is best practice for crypto
	 * keys, we also must wipe the key after use. This makes kmemdup() +
	 * dma_map_single() not clearly correct, since the DMA API can use
	 * bounce buffers. Instead, just use dma_alloc_coherent(). Programming
	 * keys is normally rare and thus not performance-critical.
	 */

	keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys,
				    GFP_KERNEL);
	if (!keybuf)
		return -ENOMEM;
	memcpy(keybuf, key, key_size);
	desc.args[1] = key_phys;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	memzero_explicit(keybuf, key_size);

	dma_free_coherent(__scm->dev, key_size, keybuf, key_phys);
	return ret;
}
EXPORT_SYMBOL(qcom_scm_ice_set_key);
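
/*
 * Example (illustrative sketch; a real caller such as a UFS host driver
 * derives the slot and key from the block layer's inline crypto support):
 *
 *	err = qcom_scm_ice_set_key(slot, key->raw, key->size,
 *				   QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
 */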

/**
 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
 *
 * Return true if HDCP is supported, false if not.
 */
bool qcom_scm_hdcp_available(void)
{
	bool avail;
	int ret = qcom_scm_clk_enable();

	if (ret)
		return false;

	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
					     QCOM_SCM_HDCP_INVOKE);

	qcom_scm_clk_disable();

	return avail;
}
EXPORT_SYMBOL(qcom_scm_hdcp_available);

/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req: HDCP request array
 * @req_cnt: HDCP request array count
 * @resp: response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 */
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_HDCP,
		.cmd = QCOM_SCM_HDCP_INVOKE,
		.arginfo = QCOM_SCM_ARGS(10),
		.args = {
			req[0].addr,
			req[0].val,
			req[1].addr,
			req[1].val,
			req[2].addr,
			req[2].val,
			req[3].addr,
			req[3].val,
			req[4].addr,
			req[4].val
		},
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
		return -ERANGE;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	*resp = res.result[0];

	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL(qcom_scm_hdcp_req);
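
/**
 * qcom_scm_qsmmu500_wait_safe_toggle() - Toggle the QSMMU-500 "wait-for-safe"
 *					  errata workaround for all clients
 * @en: true to enable the wait-for-safe sequence, false to disable it
 *
 * Returns 0 on success, a negative errno otherwise.
 */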
int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
		.args[1] = en,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);

static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}

/**
 * qcom_scm_is_available() - Checks if SCM is available
 */
bool qcom_scm_is_available(void)
{
	return !!__scm;
}
EXPORT_SYMBOL(qcom_scm_is_available);

static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_scm *scm;
	unsigned long clks;
	int ret;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
	if (ret < 0)
		return ret;

	clks = (unsigned long)of_device_get_match_data(&pdev->dev);

	scm->core_clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(scm->core_clk)) {
		if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->core_clk);

		if (clks & SCM_HAS_CORE_CLK) {
			dev_err(&pdev->dev, "failed to acquire core clk\n");
			return PTR_ERR(scm->core_clk);
		}

		scm->core_clk = NULL;
	}

	scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(scm->iface_clk)) {
		if (PTR_ERR(scm->iface_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->iface_clk);

		if (clks & SCM_HAS_IFACE_CLK) {
			dev_err(&pdev->dev, "failed to acquire iface clk\n");
			return PTR_ERR(scm->iface_clk);
		}

		scm->iface_clk = NULL;
	}

	scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (IS_ERR(scm->bus_clk)) {
		if (PTR_ERR(scm->bus_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->bus_clk);

		if (clks & SCM_HAS_BUS_CLK) {
			dev_err(&pdev->dev, "failed to acquire bus clk\n");
			return PTR_ERR(scm->bus_clk);
		}

		scm->bus_clk = NULL;
	}

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

	/* vote for max clk rate for highest performance */
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	__scm = scm;
	__scm->dev = &pdev->dev;

	__get_convention();

	/*
	 * If requested enable "download mode", from this point on warmboot
	 * will cause the boot stages to enter download mode, unless
	 * disabled below by a clean shutdown/reboot.
	 */
	if (download_mode)
		qcom_scm_set_download_mode(true);

	return 0;
}

static void qcom_scm_shutdown(struct platform_device *pdev)
{
	/* Clean shutdown, disable download mode to allow normal restart */
	if (download_mode)
		qcom_scm_set_download_mode(false);
}

static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm-apq8064",
	  /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
	},
	{ .compatible = "qcom,scm-apq8084", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-ipq4019" },
	{ .compatible = "qcom,scm-mdm9607", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK) },
	{ .compatible = "qcom,scm-msm8660", .data = (void *) SCM_HAS_CORE_CLK },
	{ .compatible = "qcom,scm-msm8960", .data = (void *) SCM_HAS_CORE_CLK },
	{ .compatible = "qcom,scm-msm8916", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8994" },
	{ .compatible = "qcom,scm-msm8996" },
	{ .compatible = "qcom,scm" },
	{}
};

static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name	= "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};

static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);