// SPDX-License-Identifier: GPL-2.0-only
/*
 * Qualcomm SCM driver
 *
 * Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/reset-controller.h>

#include "qcom_scm.h"

static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
module_param(download_mode, bool, 0);

#define SCM_HAS_CORE_CLK	BIT(0)
#define SCM_HAS_IFACE_CLK	BIT(1)
#define SCM_HAS_BUS_CLK		BIT(2)

struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct reset_controller_dev reset;

	u64 dload_mode_addr;
};

struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};

static struct qcom_scm *__scm;

static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}

static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the cold boot address of the cpus. Any cpu outside the supported
 * range would be removed from the cpu present mask.
 */
int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
	return __qcom_scm_set_cold_boot_addr(entry, cpus);
}
EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);

/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
{
	return __qcom_scm_set_warm_boot_addr(__scm->dev, entry, cpus);
}
EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
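
/*
 * Illustrative sketch, not part of this driver: a platform SMP boot path
 * would typically install the common entry point once before bringing
 * secondary CPUs online. The extern declaration stands in for the
 * architecture's real secondary-entry symbol, and the helper name below
 * is hypothetical.
 */
extern void secondary_startup(void);

static int __maybe_unused qcom_scm_example_install_entry(void)
{
	/* Point every possible CPU at the common cold boot entry. */
	return qcom_scm_set_cold_boot_addr(secondary_startup,
					   cpu_possible_mask);
}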

/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags: Flags to flush cache
 *
 * This is an end point to power down cpu. If there was a pending interrupt,
 * the control would return from this function, otherwise, the cpu jumps to the
 * warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	__qcom_scm_cpu_power_down(flags);
}
EXPORT_SYMBOL(qcom_scm_cpu_power_down);

/**
 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
 *
 * Return true if HDCP is supported, false if not.
 */
bool qcom_scm_hdcp_available(void)
{
	int ret = qcom_scm_clk_enable();

	if (ret)
		return false;

	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
					   QCOM_SCM_CMD_HDCP);

	qcom_scm_clk_disable();

	return ret > 0;
}
EXPORT_SYMBOL(qcom_scm_hdcp_available);

/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req: HDCP request array
 * @req_cnt: HDCP request array count
 * @resp: response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 */
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
	int ret = qcom_scm_clk_enable();

	if (ret)
		return ret;

	ret = __qcom_scm_hdcp_req(__scm->dev, req, req_cnt, resp);
	qcom_scm_clk_disable();
	return ret;
}
EXPORT_SYMBOL(qcom_scm_hdcp_req);

/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
 * @peripheral: peripheral id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 peripheral)
{
	int ret;

	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					   QCOM_SCM_PAS_IS_SUPPORTED_CMD);
	if (ret <= 0)
		return false;

	return __qcom_scm_pas_supported(__scm->dev, peripheral);
}
EXPORT_SYMBOL(qcom_scm_pas_supported);

/**
 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
 */
bool qcom_scm_ocmem_lock_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_OCMEM_SVC,
					    QCOM_SCM_OCMEM_LOCK_CMD) > 0;
}
EXPORT_SYMBOL(qcom_scm_ocmem_lock_available);

/**
 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 * region to the specified initiator
 *
 * @id: tz initiator id
 * @offset: OCMEM offset
 * @size: OCMEM size
 * @mode: access mode (WIDE/NARROW)
 */
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
			u32 mode)
{
	return __qcom_scm_ocmem_lock(__scm->dev, id, offset, size, mode);
}
EXPORT_SYMBOL(qcom_scm_ocmem_lock);

/**
 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
 * region from the specified initiator
 *
 * @id: tz initiator id
 * @offset: OCMEM offset
 * @size: OCMEM size
 */
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
	return __qcom_scm_ocmem_unlock(__scm->dev, id, offset, size);
}
EXPORT_SYMBOL(qcom_scm_ocmem_unlock);
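
/*
 * Illustrative sketch, not part of this driver: an HDCP client would
 * probe for support once and then batch register writes through the
 * secure world. The helper name and register address are placeholders;
 * the addr/val fields are from struct qcom_scm_hdcp_req as declared in
 * <linux/qcom_scm.h>.
 */
static int __maybe_unused qcom_scm_example_hdcp_write(u32 reg, u32 value)
{
	struct qcom_scm_hdcp_req req = { .addr = reg, .val = value };
	u32 resp;

	if (!qcom_scm_hdcp_available())
		return -EOPNOTSUPP;

	return qcom_scm_hdcp_req(&req, 1, &resp);
}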

/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @peripheral: peripheral id
 * @metadata: pointer to memory containing ELF header, program header table
 *	      and optional blob of data used for authenticating the metadata
 *	      and the rest of the firmware
 * @size: size of the metadata
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
{
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;

	/*
	 * During the scm call memory protection will be enabled for the meta
	 * data blob, so make sure it's physically contiguous, 4K aligned and
	 * non-cacheable to avoid XPU violations.
	 */
	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf) {
		dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
		return -ENOMEM;
	}
	memcpy(mdata_buf, metadata, size);

	ret = qcom_scm_clk_enable();
	if (ret)
		goto free_metadata;

	ret = __qcom_scm_pas_init_image(__scm->dev, peripheral, mdata_phys);

	qcom_scm_clk_disable();

free_metadata:
	dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);

	return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_init_image);

/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @peripheral: peripheral id
 * @addr: start address of memory area to prepare
 * @size: size of the memory area to prepare
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
	int ret;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = __qcom_scm_pas_mem_setup(__scm->dev, peripheral, addr, size);
	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_mem_setup);

/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
	int ret;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = __qcom_scm_pas_auth_and_reset(__scm->dev, peripheral);
	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);

/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 peripheral)
{
	int ret;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = __qcom_scm_pas_shutdown(__scm->dev, peripheral);
	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_shutdown);

static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
				       unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};
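
/*
 * Illustrative sketch, not part of this driver: a remoteproc-style loader
 * drives the PAS calls in this order. The helper name and parameters are
 * placeholders for data the caller already owns; copying the firmware
 * segments into the prepared region is elided.
 */
static int __maybe_unused qcom_scm_example_pas_boot(u32 peripheral,
						    const void *metadata,
						    size_t metadata_len,
						    phys_addr_t mem_phys,
						    size_t mem_size)
{
	int ret;

	/* Hand the ELF/MDT metadata to TZ to initialize the state machine. */
	ret = qcom_scm_pas_init_image(peripheral, metadata, metadata_len);
	if (ret)
		return ret;

	/* Donate the carveout that will hold the firmware segments. */
	ret = qcom_scm_pas_mem_setup(peripheral, mem_phys, mem_size);
	if (ret)
		return ret;

	/* ...load firmware segments into the carveout here... */

	/* Ask TZ to authenticate the image and release the processor. */
	return qcom_scm_pas_auth_and_reset(peripheral);
}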

/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 *					  supports restore security config interface.
 *
 * Return true if restore-cfg interface is supported, false if not.
 */
bool qcom_scm_restore_sec_cfg_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_RESTORE_SEC_CFG) > 0;
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available);

int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
	return __qcom_scm_restore_sec_cfg(__scm->dev, device_id, spare);
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);

int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
	return __qcom_scm_iommu_secure_ptbl_size(__scm->dev, spare, size);
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);

int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
	return __qcom_scm_iommu_secure_ptbl_init(__scm->dev, addr, size, spare);
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);

int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{
	return __qcom_scm_qsmmu500_wait_safe_toggle(__scm->dev, en);
}
EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);

int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	return __qcom_scm_io_readl(__scm->dev, addr, val);
}
EXPORT_SYMBOL(qcom_scm_io_readl);

int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	return __qcom_scm_io_writel(__scm->dev, addr, val);
}
EXPORT_SYMBOL(qcom_scm_io_writel);

static void qcom_scm_set_download_mode(bool enable)
{
	bool avail;
	int ret = 0;

	avail = __qcom_scm_is_call_available(__scm->dev,
					     QCOM_SCM_SVC_BOOT,
					     QCOM_SCM_SET_DLOAD_MODE);
	if (avail) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
	} else if (__scm->dload_mode_addr) {
		ret = __qcom_scm_io_writel(__scm->dev, __scm->dload_mode_addr,
					   enable ? QCOM_SCM_SET_DLOAD_MODE : 0);
	} else {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}

static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}

/**
 * qcom_scm_is_available() - Checks if SCM is available
 */
bool qcom_scm_is_available(void)
{
	return !!__scm;
}
EXPORT_SYMBOL(qcom_scm_is_available);

int qcom_scm_set_remote_state(u32 state, u32 id)
{
	return __qcom_scm_set_remote_state(__scm->dev, state, id);
}
EXPORT_SYMBOL(qcom_scm_set_remote_state);
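
/*
 * Illustrative sketch, not part of this driver: SCM consumers commonly
 * defer their own probe until this driver has bound, since __scm is only
 * set at the end of qcom_scm_probe(). The probe function below is
 * hypothetical.
 */
static int __maybe_unused qcom_scm_example_consumer_probe(struct platform_device *pdev)
{
	if (!qcom_scm_is_available())
		return -EPROBE_DEFER;

	/* Safe to issue SCM calls from here on. */
	return 0;
}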

/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: mem region whose ownership needs to be reassigned
 * @mem_sz: size of the region.
 * @srcvm: vmid for the current set of owners, each set bit in
 *	   flag indicates a unique owner
 * @newvm: array having new owners and corresponding permission
 *	   flags
 * @dest_cnt: number of owners in next set.
 *
 * Return negative errno on failure or 0 on success with @srcvm updated.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			unsigned int *srcvm,
			const struct qcom_scm_vmperm *newvm,
			unsigned int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	phys_addr_t ptr_phys;
	dma_addr_t ptr_dma;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	void *ptr;
	int ret, i, b;
	unsigned long srcvm_bits = *srcvm;

	src_sz = hweight_long(srcvm_bits) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
			ALIGN(dest_sz, SZ_64);

	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_dma, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;
	ptr_phys = dma_to_phys(__scm->dev, ptr_dma);

	/* Fill source vmid detail */
	src = ptr;
	i = 0;
	for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG)
		src[i++] = cpu_to_le32(b);

	/* Fill details of mem buff to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
	mem_to_map->mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
	/* Fill details of next vmid detail */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
		destvm->vmid = cpu_to_le32(newvm->vmid);
		destvm->perm = cpu_to_le32(newvm->perm);
		destvm->ctx = 0;
		destvm->ctx_size = 0;
		next_vm |= BIT(newvm->vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_dma);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d\n", ret);
		return -EINVAL;
	}

	*srcvm = next_vm;
	return 0;
}
EXPORT_SYMBOL(qcom_scm_assign_mem);
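
/*
 * Illustrative sketch, not part of this driver: reassigning a carveout
 * from Linux (HLOS) to the modem, mirroring how the mss remoteproc driver
 * uses this call. The VMID and permission macros come from
 * <linux/qcom_scm.h>; the helper name and region parameters are
 * placeholders.
 */
static int __maybe_unused qcom_scm_example_assign_to_mss(phys_addr_t addr,
							 size_t size)
{
	struct qcom_scm_vmperm next = {
		.vmid = QCOM_SCM_VMID_MSS_MSA,
		.perm = QCOM_SCM_PERM_RW,
	};
	unsigned int srcvm = BIT(QCOM_SCM_VMID_HLOS);

	/* On success srcvm is updated to the new owner bitmap. */
	return qcom_scm_assign_mem(addr, size, &srcvm, &next, 1);
}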

static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_scm *scm;
	unsigned long clks;
	int ret;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
	if (ret < 0)
		return ret;

	clks = (unsigned long)of_device_get_match_data(&pdev->dev);

	scm->core_clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(scm->core_clk)) {
		if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->core_clk);

		if (clks & SCM_HAS_CORE_CLK) {
			dev_err(&pdev->dev, "failed to acquire core clk\n");
			return PTR_ERR(scm->core_clk);
		}

		scm->core_clk = NULL;
	}

	scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(scm->iface_clk)) {
		if (PTR_ERR(scm->iface_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->iface_clk);

		if (clks & SCM_HAS_IFACE_CLK) {
			dev_err(&pdev->dev, "failed to acquire iface clk\n");
			return PTR_ERR(scm->iface_clk);
		}

		scm->iface_clk = NULL;
	}

	scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (IS_ERR(scm->bus_clk)) {
		if (PTR_ERR(scm->bus_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->bus_clk);

		if (clks & SCM_HAS_BUS_CLK) {
			dev_err(&pdev->dev, "failed to acquire bus clk\n");
			return PTR_ERR(scm->bus_clk);
		}

		scm->bus_clk = NULL;
	}

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

	/* vote for max clk rate for highest performance */
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	__scm = scm;
	__scm->dev = &pdev->dev;

	__qcom_scm_init();

	/*
	 * If requested enable "download mode", from this point on warmboot
	 * will cause the boot stages to enter download mode, unless
	 * disabled below by a clean shutdown/reboot.
	 */
	if (download_mode)
		qcom_scm_set_download_mode(true);

	return 0;
}

static void qcom_scm_shutdown(struct platform_device *pdev)
{
	/* Clean shutdown, disable download mode to allow normal restart */
	if (download_mode)
		qcom_scm_set_download_mode(false);
}

static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm-apq8064",
	  /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
	},
	{ .compatible = "qcom,scm-apq8084", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-ipq4019" },
	{ .compatible = "qcom,scm-msm8660", .data = (void *) SCM_HAS_CORE_CLK },
	{ .compatible = "qcom,scm-msm8960", .data = (void *) SCM_HAS_CORE_CLK },
	{ .compatible = "qcom,scm-msm8916", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8996" },
	{ .compatible = "qcom,scm" },
	{}
};

static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name	= "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};

static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);
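
/*
 * Illustrative device tree snippet (clock specifiers and the TCSR offset
 * are placeholders, not taken from a real board file): the
 * "qcom,dload-mode" property is the phandle-plus-offset pair parsed by
 * qcom_scm_find_dload_address(), and clock-names matches the devm_clk_get()
 * lookups in qcom_scm_probe().
 *
 *	firmware {
 *		scm {
 *			compatible = "qcom,scm-msm8974", "qcom,scm";
 *			clocks = <&gcc 0>, <&gcc 1>, <&gcc 2>;
 *			clock-names = "core", "iface", "bus";
 *			qcom,dload-mode = <&tcsr 0x13000>;
 *		};
 *	};
 */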