// SPDX-License-Identifier: GPL-2.0-only
/*
 * Qualcomm SCM driver
 *
 * Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/reset-controller.h>

#include "qcom_scm.h"

static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
module_param(download_mode, bool, 0);

#define SCM_HAS_CORE_CLK	BIT(0)
#define SCM_HAS_IFACE_CLK	BIT(1)
#define SCM_HAS_BUS_CLK		BIT(2)

struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct reset_controller_dev reset;

	u64 dload_mode_addr;
};

struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};

static struct qcom_scm *__scm;

static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}

static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the cold boot address of the cpus. Any cpu outside the supported
 * range would be removed from the cpu present mask.
 */
int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
	return __qcom_scm_set_cold_boot_addr(entry, cpus);
}
EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);

/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
{
	return __qcom_scm_set_warm_boot_addr(__scm->dev, entry, cpus);
}
EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags: Flags to flush cache
 *
 * This is an end point to power down cpu. If there was a pending interrupt,
 * the control would return from this function, otherwise, the cpu jumps to the
 * warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	__qcom_scm_cpu_power_down(flags);
}
EXPORT_SYMBOL(qcom_scm_cpu_power_down);

/**
 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
 *
 * Return true if HDCP is supported, false if not.
 */
bool qcom_scm_hdcp_available(void)
{
	int ret = qcom_scm_clk_enable();

	if (ret)
		return false;

	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
					   QCOM_SCM_CMD_HDCP);

	qcom_scm_clk_disable();

	return ret > 0;
}
EXPORT_SYMBOL(qcom_scm_hdcp_available);

/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req: HDCP request array
 * @req_cnt: HDCP request array count
 * @resp: response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 */
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
	int ret = qcom_scm_clk_enable();

	if (ret)
		return ret;

	ret = __qcom_scm_hdcp_req(__scm->dev, req, req_cnt, resp);
	qcom_scm_clk_disable();
	return ret;
}
EXPORT_SYMBOL(qcom_scm_hdcp_req);

/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
 * @peripheral: peripheral id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 peripheral)
{
	int ret;

	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					   QCOM_SCM_PAS_IS_SUPPORTED_CMD);
	if (ret <= 0)
		return false;

	return __qcom_scm_pas_supported(__scm->dev, peripheral);
}
EXPORT_SYMBOL(qcom_scm_pas_supported);

/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @peripheral: peripheral id
 * @metadata: pointer to memory containing ELF header, program header table
 *	      and optional blob of data used for authenticating the metadata
 *	      and the rest of the firmware
 * @size: size of the metadata
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
{
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;

	/*
	 * During the scm call memory protection will be enabled for the meta
	 * data blob, so make sure it's physically contiguous, 4K aligned and
	 * non-cacheable to avoid XPU violations.
	 */
	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf) {
		dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
		return -ENOMEM;
	}
	memcpy(mdata_buf, metadata, size);

	ret = qcom_scm_clk_enable();
	if (ret)
		goto free_metadata;

	ret = __qcom_scm_pas_init_image(__scm->dev, peripheral, mdata_phys);

	qcom_scm_clk_disable();

free_metadata:
	dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);

	return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_init_image);
/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @peripheral: peripheral id
 * @addr: start address of memory area to prepare
 * @size: size of the memory area to prepare
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
	int ret;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = __qcom_scm_pas_mem_setup(__scm->dev, peripheral, addr, size);
	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_mem_setup);

/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @peripheral: peripheral id
 *
 * Return 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
	int ret;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = __qcom_scm_pas_auth_and_reset(__scm->dev, peripheral);
	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);

/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 peripheral)
{
	int ret;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = __qcom_scm_pas_shutdown(__scm->dev, peripheral);
	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_shutdown);

static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
				       unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};

int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
	return __qcom_scm_restore_sec_cfg(__scm->dev, device_id, spare);
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);

int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
	return __qcom_scm_iommu_secure_ptbl_size(__scm->dev, spare, size);
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);

int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
	return __qcom_scm_iommu_secure_ptbl_init(__scm->dev, addr, size, spare);
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);

int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	return __qcom_scm_io_readl(__scm->dev, addr, val);
}
EXPORT_SYMBOL(qcom_scm_io_readl);

int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	return __qcom_scm_io_writel(__scm->dev, addr, val);
}
EXPORT_SYMBOL(qcom_scm_io_writel);
static void qcom_scm_set_download_mode(bool enable)
{
	bool avail;
	int ret = 0;

	avail = __qcom_scm_is_call_available(__scm->dev,
					     QCOM_SCM_SVC_BOOT,
					     QCOM_SCM_SET_DLOAD_MODE);
	if (avail) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
	} else if (__scm->dload_mode_addr) {
		ret = __qcom_scm_io_writel(__scm->dev, __scm->dload_mode_addr,
					   enable ? QCOM_SCM_SET_DLOAD_MODE : 0);
	} else {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}

static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}

/**
 * qcom_scm_is_available() - Checks if SCM is available
 */
bool qcom_scm_is_available(void)
{
	return !!__scm;
}
EXPORT_SYMBOL(qcom_scm_is_available);

int qcom_scm_set_remote_state(u32 state, u32 id)
{
	return __qcom_scm_set_remote_state(__scm->dev, state, id);
}
EXPORT_SYMBOL(qcom_scm_set_remote_state);
/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: mem region whose ownership needs to be reassigned
 * @mem_sz: size of the region.
 * @srcvm: vmid for the current set of owners, each set bit in
 *	   the flag indicates a unique owner
 * @newvm: array having new owners and corresponding permission
 *	   flags
 * @dest_cnt: number of owners in next set.
 *
 * Return negative errno on failure, 0 on success, with @srcvm updated.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			unsigned int *srcvm,
			struct qcom_scm_vmperm *newvm, int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	phys_addr_t ptr_phys;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	void *ptr;
	int ret;
	int len;
	int i;

	src_sz = hweight_long(*srcvm) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
		 ALIGN(dest_sz, SZ_64);

	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	/* Fill source vmid detail */
	src = ptr;
	len = hweight_long(*srcvm);
	for (i = 0; i < len; i++) {
		src[i] = cpu_to_le32(ffs(*srcvm) - 1);
		*srcvm ^= 1 << (ffs(*srcvm) - 1);
	}

	/* Fill details of mem buff to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map[0].mem_addr = cpu_to_le64(mem_addr);
	mem_to_map[0].mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
	/* Fill details of next vmid detail */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++) {
		destvm[i].vmid = cpu_to_le32(newvm[i].vmid);
		destvm[i].perm = cpu_to_le32(newvm[i].perm);
		destvm[i].ctx = 0;
		destvm[i].ctx_size = 0;
		next_vm |= BIT(newvm[i].vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d.\n", ret);
		return -EINVAL;
	}

	*srcvm = next_vm;
	return 0;
}
EXPORT_SYMBOL(qcom_scm_assign_mem);
static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_scm *scm;
	unsigned long clks;
	int ret;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
	if (ret < 0)
		return ret;

	clks = (unsigned long)of_device_get_match_data(&pdev->dev);

	scm->core_clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(scm->core_clk)) {
		if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->core_clk);

		if (clks & SCM_HAS_CORE_CLK) {
			dev_err(&pdev->dev, "failed to acquire core clk\n");
			return PTR_ERR(scm->core_clk);
		}

		scm->core_clk = NULL;
	}

	scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(scm->iface_clk)) {
		if (PTR_ERR(scm->iface_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->iface_clk);

		if (clks & SCM_HAS_IFACE_CLK) {
			dev_err(&pdev->dev, "failed to acquire iface clk\n");
			return PTR_ERR(scm->iface_clk);
		}

		scm->iface_clk = NULL;
	}

	scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (IS_ERR(scm->bus_clk)) {
		if (PTR_ERR(scm->bus_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->bus_clk);

		if (clks & SCM_HAS_BUS_CLK) {
			dev_err(&pdev->dev, "failed to acquire bus clk\n");
			return PTR_ERR(scm->bus_clk);
		}

		scm->bus_clk = NULL;
	}

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

	/* vote for max clk rate for highest performance */
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	__scm = scm;
	__scm->dev = &pdev->dev;

	__qcom_scm_init();

	/*
	 * If requested enable "download mode", from this point on warmboot
	 * will cause the boot stages to enter download mode, unless
	 * disabled below by a clean shutdown/reboot.
	 */
	if (download_mode)
		qcom_scm_set_download_mode(true);

	return 0;
}

static void qcom_scm_shutdown(struct platform_device *pdev)
{
	/* Clean shutdown, disable download mode to allow normal restart */
	if (download_mode)
		qcom_scm_set_download_mode(false);
}

static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm-apq8064",
	  /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
	},
	{ .compatible = "qcom,scm-apq8084", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-ipq4019" },
	{ .compatible = "qcom,scm-msm8660", .data = (void *) SCM_HAS_CORE_CLK },
	{ .compatible = "qcom,scm-msm8960", .data = (void *) SCM_HAS_CORE_CLK },
	{ .compatible = "qcom,scm-msm8916", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8996" },
	{ .compatible = "qcom,scm" },
	{}
};

static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name	= "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};

static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);