/*
 * Qualcomm SCM driver
 *
 * Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/reset-controller.h>

#include "qcom_scm.h"

static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
module_param(download_mode, bool, 0);

#define SCM_HAS_CORE_CLK	BIT(0)
#define SCM_HAS_IFACE_CLK	BIT(1)
#define SCM_HAS_BUS_CLK		BIT(2)

struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct reset_controller_dev reset;

	u64 dload_mode_addr;
};

struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};

static struct qcom_scm *__scm;

static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}

static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the cold boot address of the cpus. Any cpu outside the supported
 * range would be removed from the cpu present mask.
 */
int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
	return __qcom_scm_set_cold_boot_addr(entry, cpus);
}
EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
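
/*
 * A minimal usage sketch for the boot-address call above, not part of this
 * driver: platform SMP code would typically point all possible cpus at its
 * secondary entry point before bringing them online. The entry symbol
 * example_secondary_startup below is hypothetical.
 *
 *	static int __init example_set_boot_addr(void)
 *	{
 *		return qcom_scm_set_cold_boot_addr(example_secondary_startup,
 *						   cpu_possible_mask);
 *	}
 */
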
/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
{
	return __qcom_scm_set_warm_boot_addr(__scm->dev, entry, cpus);
}
EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);

/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags: Flags to flush cache
 *
 * This is an end point to power down the cpu. If there was a pending
 * interrupt, the control would return from this function, otherwise, the cpu
 * jumps to the warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	__qcom_scm_cpu_power_down(flags);
}
EXPORT_SYMBOL(qcom_scm_cpu_power_down);

/**
 * qcom_scm_hdcp_available() - Check if the secure environment supports HDCP.
 *
 * Return true if HDCP is supported, false if not.
 */
bool qcom_scm_hdcp_available(void)
{
	int ret = qcom_scm_clk_enable();

	if (ret)
		return false;

	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
					   QCOM_SCM_CMD_HDCP);

	qcom_scm_clk_disable();

	return ret > 0;
}
EXPORT_SYMBOL(qcom_scm_hdcp_available);

/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req: HDCP request array
 * @req_cnt: HDCP request array count
 * @resp: response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 */
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
	int ret = qcom_scm_clk_enable();

	if (ret)
		return ret;

	ret = __qcom_scm_hdcp_req(__scm->dev, req, req_cnt, resp);
	qcom_scm_clk_disable();
	return ret;
}
EXPORT_SYMBOL(qcom_scm_hdcp_req);

/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
 * @peripheral: peripheral id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 peripheral)
{
	int ret;

	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					   QCOM_SCM_PAS_IS_SUPPORTED_CMD);
	if (ret <= 0)
		return false;

	return __qcom_scm_pas_supported(__scm->dev, peripheral);
}
EXPORT_SYMBOL(qcom_scm_pas_supported);
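
/*
 * Illustrative only: a consumer such as a remoteproc driver would typically
 * gate its probe on PAS support for its peripheral before attempting the
 * secure boot flow below. The peripheral id EXAMPLE_PAS_ID is hypothetical;
 * real ids live in the consumer drivers.
 *
 *	if (!qcom_scm_pas_supported(EXAMPLE_PAS_ID))
 *		return -ENXIO;
 */
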
/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @peripheral: peripheral id
 * @metadata: pointer to memory containing ELF header, program header table
 *	      and optional blob of data used for authenticating the metadata
 *	      and the rest of the firmware
 * @size: size of the metadata
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
{
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;

	/*
	 * During the scm call memory protection will be enabled for the meta
	 * data blob, so make sure it's physically contiguous, 4K aligned and
	 * non-cacheable to avoid XPU violations.
	 */
	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf) {
		dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
		return -ENOMEM;
	}
	memcpy(mdata_buf, metadata, size);

	ret = qcom_scm_clk_enable();
	if (ret)
		goto free_metadata;

	ret = __qcom_scm_pas_init_image(__scm->dev, peripheral, mdata_phys);

	qcom_scm_clk_disable();

free_metadata:
	dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);

	return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_init_image);

/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @peripheral: peripheral id
 * @addr: start address of memory area to prepare
 * @size: size of the memory area to prepare
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
	int ret;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = __qcom_scm_pas_mem_setup(__scm->dev, peripheral, addr, size);
	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_mem_setup);

/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @peripheral: peripheral id
 *
 * Return 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
	int ret;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = __qcom_scm_pas_auth_and_reset(__scm->dev, peripheral);
	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
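
/*
 * The PAS calls above are meant to be used in sequence. A hypothetical
 * sketch of the boot flow a consumer might implement; EXAMPLE_PAS_ID, fw,
 * fw_addr and fw_size stand in for the caller's peripheral id, firmware
 * image and memory carveout:
 *
 *	ret = qcom_scm_pas_init_image(EXAMPLE_PAS_ID, fw->data, fw->size);
 *	if (ret)
 *		return ret;
 *	ret = qcom_scm_pas_mem_setup(EXAMPLE_PAS_ID, fw_addr, fw_size);
 *	if (ret)
 *		return ret;
 *	(copy the firmware segments into the prepared region here)
 *	return qcom_scm_pas_auth_and_reset(EXAMPLE_PAS_ID);
 */
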
/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 peripheral)
{
	int ret;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = __qcom_scm_pas_shutdown(__scm->dev, peripheral);
	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_shutdown);

static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
					unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};

int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
	return __qcom_scm_restore_sec_cfg(__scm->dev, device_id, spare);
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);

int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
	return __qcom_scm_iommu_secure_ptbl_size(__scm->dev, spare, size);
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);

int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
	return __qcom_scm_iommu_secure_ptbl_init(__scm->dev, addr, size, spare);
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);

int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	return __qcom_scm_io_readl(__scm->dev, addr, val);
}
EXPORT_SYMBOL(qcom_scm_io_readl);

int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	return __qcom_scm_io_writel(__scm->dev, addr, val);
}
EXPORT_SYMBOL(qcom_scm_io_writel);
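
/*
 * Illustrative read-modify-write through the secure IO helpers above. The
 * address EXAMPLE_SECURE_REG is hypothetical and would normally come from a
 * device tree resource:
 *
 *	unsigned int val;
 *
 *	if (!qcom_scm_io_readl(EXAMPLE_SECURE_REG, &val))
 *		qcom_scm_io_writel(EXAMPLE_SECURE_REG, val | BIT(0));
 */
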
static void qcom_scm_set_download_mode(bool enable)
{
	bool avail;
	int ret = 0;

	avail = __qcom_scm_is_call_available(__scm->dev,
					     QCOM_SCM_SVC_BOOT,
					     QCOM_SCM_SET_DLOAD_MODE);
	if (avail) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
	} else if (__scm->dload_mode_addr) {
		ret = __qcom_scm_io_writel(__scm->dev, __scm->dload_mode_addr,
					   enable ? QCOM_SCM_SET_DLOAD_MODE : 0);
	} else {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}

static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}
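
/*
 * The "qcom,dload-mode" parsing above expects a <phandle offset> pair. A
 * hypothetical device tree fragment; the 0x13000 offset is illustrative
 * only, real values are board specific:
 *
 *	firmware {
 *		scm {
 *			compatible = "qcom,scm";
 *			qcom,dload-mode = <&tcsr 0x13000>;
 *		};
 *	};
 */
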
/**
 * qcom_scm_is_available() - Checks if SCM is available
 */
bool qcom_scm_is_available(void)
{
	return !!__scm;
}
EXPORT_SYMBOL(qcom_scm_is_available);

int qcom_scm_set_remote_state(u32 state, u32 id)
{
	return __qcom_scm_set_remote_state(__scm->dev, state, id);
}
EXPORT_SYMBOL(qcom_scm_set_remote_state);

/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: mem region whose ownership needs to be reassigned
 * @mem_sz: size of the region.
 * @srcvm: vmid for the current set of owners, each set bit in
 *	   the flag indicates a unique owner
 * @newvm: array having new owners and corresponding permission
 *	   flags
 * @dest_cnt: number of owners in the next set.
 *
 * Return negative errno on failure, 0 on success, with @srcvm updated.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			unsigned int *srcvm,
			struct qcom_scm_vmperm *newvm, int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	phys_addr_t ptr_phys;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	void *ptr;
	int ret;
	int len;
	int i;

	src_sz = hweight_long(*srcvm) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
			ALIGN(dest_sz, SZ_64);

	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	/* Fill source vmid detail */
	src = ptr;
	len = hweight_long(*srcvm);
	for (i = 0; i < len; i++) {
		src[i] = cpu_to_le32(ffs(*srcvm) - 1);
		*srcvm ^= 1 << (ffs(*srcvm) - 1);
	}

	/* Fill details of mem buff to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map[0].mem_addr = cpu_to_le64(mem_addr);
	mem_to_map[0].mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
	/* Fill details of the next vmid set */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++) {
		destvm[i].vmid = cpu_to_le32(newvm[i].vmid);
		destvm[i].perm = cpu_to_le32(newvm[i].perm);
		destvm[i].ctx = 0;
		destvm[i].ctx_size = 0;
		next_vm |= BIT(newvm[i].vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	/* Free with the same size the buffer was allocated with */
	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d.\n", ret);
		return -EINVAL;
	}

	*srcvm = next_vm;
	return 0;
}
EXPORT_SYMBOL(qcom_scm_assign_mem);
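
/*
 * A usage sketch for qcom_scm_assign_mem() above: hand a carveout from
 * Linux (HLOS) to the modem subsystem VM and keep the updated owner mask
 * for the reverse operation. The mem_phys/mem_size range is hypothetical;
 * the VMID and permission constants are assumed to be the ones defined
 * alongside this API in <linux/qcom_scm.h>:
 *
 *	struct qcom_scm_vmperm perm = {
 *		.vmid = QCOM_SCM_VMID_MSS_MSA,
 *		.perm = QCOM_SCM_PERM_RW,
 *	};
 *	unsigned int owners = BIT(QCOM_SCM_VMID_HLOS);
 *	int ret;
 *
 *	ret = qcom_scm_assign_mem(mem_phys, mem_size, &owners, &perm, 1);
 */
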
static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_scm *scm;
	unsigned long clks;
	int ret;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
	if (ret < 0)
		return ret;

	clks = (unsigned long)of_device_get_match_data(&pdev->dev);

	scm->core_clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(scm->core_clk)) {
		if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->core_clk);

		if (clks & SCM_HAS_CORE_CLK) {
			dev_err(&pdev->dev, "failed to acquire core clk\n");
			return PTR_ERR(scm->core_clk);
		}

		scm->core_clk = NULL;
	}

	scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(scm->iface_clk)) {
		if (PTR_ERR(scm->iface_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->iface_clk);

		if (clks & SCM_HAS_IFACE_CLK) {
			dev_err(&pdev->dev, "failed to acquire iface clk\n");
			return PTR_ERR(scm->iface_clk);
		}

		scm->iface_clk = NULL;
	}

	scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (IS_ERR(scm->bus_clk)) {
		if (PTR_ERR(scm->bus_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->bus_clk);

		if (clks & SCM_HAS_BUS_CLK) {
			dev_err(&pdev->dev, "failed to acquire bus clk\n");
			return PTR_ERR(scm->bus_clk);
		}

		scm->bus_clk = NULL;
	}

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

	/* vote for max clk rate for highest performance */
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	__scm = scm;
	__scm->dev = &pdev->dev;

	__qcom_scm_init();

	/*
	 * If requested enable "download mode", from this point on warmboot
	 * will cause the boot stages to enter download mode, unless
	 * disabled below by a clean shutdown/reboot.
	 */
	if (download_mode)
		qcom_scm_set_download_mode(true);

	return 0;
}

static void qcom_scm_shutdown(struct platform_device *pdev)
{
	/* Clean shutdown, disable download mode to allow normal restart */
	if (download_mode)
		qcom_scm_set_download_mode(false);
}

static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm-apq8064",
	  /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
	},
	{ .compatible = "qcom,scm-apq8084", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-ipq4019" },
	{ .compatible = "qcom,scm-msm8660", .data = (void *) SCM_HAS_CORE_CLK },
	{ .compatible = "qcom,scm-msm8960", .data = (void *) SCM_HAS_CORE_CLK },
	{ .compatible = "qcom,scm-msm8916", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8996" },
	{ .compatible = "qcom,scm" },
	{}
};

static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name = "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};

static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);
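
/*
 * For reference, a hypothetical device tree node matching one of the
 * clocked variants in the table above. The clock specifiers are
 * illustrative, but the clock-names must match the "core", "iface" and
 * "bus" names requested in qcom_scm_probe():
 *
 *	firmware {
 *		scm {
 *			compatible = "qcom,scm-msm8916";
 *			clocks = <&gcc GCC_CRYPTO_CLK>,
 *				 <&gcc GCC_CRYPTO_AXI_CLK>,
 *				 <&gcc GCC_CRYPTO_AHB_CLK>;
 *			clock-names = "core", "bus", "iface";
 *		};
 *	};
 */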