// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/reset-controller.h>
#include <linux/arm-smccc.h>

#include "qcom_scm.h"

static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
module_param(download_mode, bool, 0);

#define SCM_HAS_CORE_CLK	BIT(0)
#define SCM_HAS_IFACE_CLK	BIT(1)
#define SCM_HAS_BUS_CLK		BIT(2)

struct qcom_scm {
        struct device *dev;
        struct clk *core_clk;
        struct clk *iface_clk;
        struct clk *bus_clk;
        struct reset_controller_dev reset;

        u64 dload_mode_addr;
};

struct qcom_scm_current_perm_info {
        __le32 vmid;
        __le32 perm;
        __le64 ctx;
        __le32 ctx_size;
        __le32 unused;
};

struct qcom_scm_mem_map_info {
        __le64 mem_addr;
        __le64 mem_size;
};

#define QCOM_SCM_FLAG_COLDBOOT_CPU0	0x00
#define QCOM_SCM_FLAG_COLDBOOT_CPU1	0x01
#define QCOM_SCM_FLAG_COLDBOOT_CPU2	0x08
#define QCOM_SCM_FLAG_COLDBOOT_CPU3	0x20

#define QCOM_SCM_FLAG_WARMBOOT_CPU0	0x04
#define QCOM_SCM_FLAG_WARMBOOT_CPU1	0x02
#define QCOM_SCM_FLAG_WARMBOOT_CPU2	0x10
#define QCOM_SCM_FLAG_WARMBOOT_CPU3	0x40

struct qcom_scm_wb_entry {
        int flag;
        void *entry;
};

static struct qcom_scm_wb_entry qcom_scm_wb[] = {
        { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
        { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
        { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
        { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
};

static const char *qcom_scm_convention_names[] = {
        [SMC_CONVENTION_UNKNOWN] = "unknown",
        [SMC_CONVENTION_ARM_32] = "smc arm 32",
        [SMC_CONVENTION_ARM_64] = "smc arm 64",
        [SMC_CONVENTION_LEGACY] = "smc legacy",
};

static struct qcom_scm *__scm;

static int qcom_scm_clk_enable(void)
{
        int ret;

        ret = clk_prepare_enable(__scm->core_clk);
        if (ret)
                goto bail;

        ret = clk_prepare_enable(__scm->iface_clk);
        if (ret)
                goto disable_core;

        ret = clk_prepare_enable(__scm->bus_clk);
        if (ret)
                goto disable_iface;

        return 0;

disable_iface:
        clk_disable_unprepare(__scm->iface_clk);
disable_core:
        clk_disable_unprepare(__scm->core_clk);
bail:
        return ret;
}

static void qcom_scm_clk_disable(void)
{
        clk_disable_unprepare(__scm->core_clk);
        clk_disable_unprepare(__scm->iface_clk);
        clk_disable_unprepare(__scm->bus_clk);
}

static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
                                        u32 cmd_id);

enum qcom_scm_convention qcom_scm_convention;
static bool has_queried __read_mostly;
static DEFINE_SPINLOCK(query_lock);

static void __query_convention(void)
{
        unsigned long flags;
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_INFO,
                .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
                .args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
                                        QCOM_SCM_INFO_IS_CALL_AVAIL) |
                           (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
                .arginfo = QCOM_SCM_ARGS(1),
                .owner = ARM_SMCCC_OWNER_SIP,
        };
        struct qcom_scm_res res;
        int ret;

        spin_lock_irqsave(&query_lock, flags);
        if (has_queried)
                goto out;

        qcom_scm_convention = SMC_CONVENTION_ARM_64;
        // Device isn't required as there is only one argument - no device
        // needed to dma_map_single to secure world
        ret = scm_smc_call(NULL, &desc, &res, true);
        if (!ret && res.result[0] == 1)
                goto out;

        qcom_scm_convention = SMC_CONVENTION_ARM_32;
        ret = scm_smc_call(NULL, &desc, &res, true);
        if (!ret && res.result[0] == 1)
                goto out;

        qcom_scm_convention = SMC_CONVENTION_LEGACY;
out:
        has_queried = true;
        spin_unlock_irqrestore(&query_lock, flags);
        pr_info("qcom_scm: convention: %s\n",
                qcom_scm_convention_names[qcom_scm_convention]);
}

static inline enum qcom_scm_convention __get_convention(void)
{
        if (unlikely(!has_queried))
                __query_convention();
        return qcom_scm_convention;
}
/**
 * qcom_scm_call() - Invoke a syscall in the secure world
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This should *only* be called in pre-emptible context.
 */
static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
                         struct qcom_scm_res *res)
{
        might_sleep();
        switch (__get_convention()) {
        case SMC_CONVENTION_ARM_32:
        case SMC_CONVENTION_ARM_64:
                return scm_smc_call(dev, desc, res, false);
        case SMC_CONVENTION_LEGACY:
                return scm_legacy_call(dev, desc, res);
        default:
                pr_err("Unknown current SCM calling convention.\n");
                return -EINVAL;
        }
}

/**
 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This can be called in atomic context.
 */
static int qcom_scm_call_atomic(struct device *dev,
                                const struct qcom_scm_desc *desc,
                                struct qcom_scm_res *res)
{
        switch (__get_convention()) {
        case SMC_CONVENTION_ARM_32:
        case SMC_CONVENTION_ARM_64:
                return scm_smc_call(dev, desc, res, true);
        case SMC_CONVENTION_LEGACY:
                return scm_legacy_call_atomic(dev, desc, res);
        default:
                pr_err("Unknown current SCM calling convention.\n");
                return -EINVAL;
        }
}

static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
                                        u32 cmd_id)
{
        int ret;
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_INFO,
                .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
                .owner = ARM_SMCCC_OWNER_SIP,
        };
        struct qcom_scm_res res;

        desc.arginfo = QCOM_SCM_ARGS(1);
        switch (__get_convention()) {
        case SMC_CONVENTION_ARM_32:
        case SMC_CONVENTION_ARM_64:
                desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
                                (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
                break;
        case SMC_CONVENTION_LEGACY:
                desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
                break;
        default:
                pr_err("Unknown SMC convention being used\n");
                return -EINVAL;
        }

        ret = qcom_scm_call(dev, &desc, &res);

        return ret ? : res.result[0];
}
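/*
 * Illustrative sketch (not compiled): every wrapper below follows the same
 * shape - build a qcom_scm_desc, dispatch it through the probed convention,
 * and fold the firmware's first result word into the return value, e.g.
 * mirroring qcom_scm_set_remote_state() further down:
 *
 *	struct qcom_scm_desc desc = {
 *		.svc = QCOM_SCM_SVC_BOOT,
 *		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
 *		.arginfo = QCOM_SCM_ARGS(2),
 *		.args[0] = state,
 *		.args[1] = id,
 *		.owner = ARM_SMCCC_OWNER_SIP,
 *	};
 *	struct qcom_scm_res res;
 *
 *	ret = qcom_scm_call(__scm->dev, &desc, &res);
 *	return ret ? : res.result[0];
 */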
/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
{
        int ret;
        int flags = 0;
        int cpu;
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_BOOT,
                .cmd = QCOM_SCM_BOOT_SET_ADDR,
                .arginfo = QCOM_SCM_ARGS(2),
        };

        /*
         * Reassign only if we are switching from hotplug entry point
         * to cpuidle entry point or vice versa.
         */
        for_each_cpu(cpu, cpus) {
                if (entry == qcom_scm_wb[cpu].entry)
                        continue;
                flags |= qcom_scm_wb[cpu].flag;
        }

        /* No change in entry function */
        if (!flags)
                return 0;

        desc.args[0] = flags;
        desc.args[1] = virt_to_phys(entry);

        ret = qcom_scm_call(__scm->dev, &desc, NULL);
        if (!ret) {
                for_each_cpu(cpu, cpus)
                        qcom_scm_wb[cpu].entry = entry;
        }

        return ret;
}
EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the cold boot address of the cpus. Any cpu outside the supported
 * range would be removed from the cpu present mask.
 */
int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
        int flags = 0;
        int cpu;
        int scm_cb_flags[] = {
                QCOM_SCM_FLAG_COLDBOOT_CPU0,
                QCOM_SCM_FLAG_COLDBOOT_CPU1,
                QCOM_SCM_FLAG_COLDBOOT_CPU2,
                QCOM_SCM_FLAG_COLDBOOT_CPU3,
        };
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_BOOT,
                .cmd = QCOM_SCM_BOOT_SET_ADDR,
                .arginfo = QCOM_SCM_ARGS(2),
                .owner = ARM_SMCCC_OWNER_SIP,
        };

        if (!cpus || cpumask_empty(cpus))
                return -EINVAL;

        for_each_cpu(cpu, cpus) {
                if (cpu < ARRAY_SIZE(scm_cb_flags))
                        flags |= scm_cb_flags[cpu];
                else
                        set_cpu_present(cpu, false);
        }

        desc.args[0] = flags;
        desc.args[1] = virt_to_phys(entry);

        return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);

/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags: Flags to flush cache
 *
 * This is an end point to power down cpu. If there was a pending interrupt,
 * the control would return from this function, otherwise, the cpu jumps to the
 * warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_BOOT,
                .cmd = QCOM_SCM_BOOT_TERMINATE_PC,
                .args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
                .arginfo = QCOM_SCM_ARGS(1),
                .owner = ARM_SMCCC_OWNER_SIP,
        };

        qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_cpu_power_down);
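/*
 * Illustrative sketch (not compiled): 32-bit Qualcomm SMP code would
 * typically install its secondary-CPU entry point once during boot; the
 * names secondary_startup_arm and cpu_present_mask are assumptions borrowed
 * from the generic ARM boot path:
 *
 *	if (qcom_scm_is_available())
 *		qcom_scm_set_cold_boot_addr(secondary_startup_arm,
 *					    cpu_present_mask);
 */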
int qcom_scm_set_remote_state(u32 state, u32 id)
{
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_BOOT,
                .cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
                .arginfo = QCOM_SCM_ARGS(2),
                .args[0] = state,
                .args[1] = id,
                .owner = ARM_SMCCC_OWNER_SIP,
        };
        struct qcom_scm_res res;
        int ret;

        ret = qcom_scm_call(__scm->dev, &desc, &res);

        return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_set_remote_state);

static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_BOOT,
                .cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
                .arginfo = QCOM_SCM_ARGS(2),
                .args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
                .owner = ARM_SMCCC_OWNER_SIP,
        };

        desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;

        return qcom_scm_call(__scm->dev, &desc, NULL);
}

static void qcom_scm_set_download_mode(bool enable)
{
        bool avail;
        int ret = 0;

        avail = __qcom_scm_is_call_available(__scm->dev,
                                             QCOM_SCM_SVC_BOOT,
                                             QCOM_SCM_BOOT_SET_DLOAD_MODE);
        if (avail) {
                ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
        } else if (__scm->dload_mode_addr) {
                ret = qcom_scm_io_writel(__scm->dload_mode_addr,
                                enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
        } else {
                dev_err(__scm->dev,
                        "No available mechanism for setting download mode\n");
        }

        if (ret)
                dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}

/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @peripheral: peripheral id
 * @metadata:	pointer to memory containing ELF header, program header table
 *		and optional blob of data used for authenticating the metadata
 *		and the rest of the firmware
 * @size:	size of the metadata
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
{
        dma_addr_t mdata_phys;
        void *mdata_buf;
        int ret;
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_PIL,
                .cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
                .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
                .args[0] = peripheral,
                .owner = ARM_SMCCC_OWNER_SIP,
        };
        struct qcom_scm_res res;

        /*
         * During the scm call memory protection will be enabled for the meta
         * data blob, so make sure it's physically contiguous, 4K aligned and
         * non-cacheable to avoid XPU violations.
         */
        mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
                                       GFP_KERNEL);
        if (!mdata_buf) {
                dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
                return -ENOMEM;
        }
        memcpy(mdata_buf, metadata, size);

        ret = qcom_scm_clk_enable();
        if (ret)
                goto free_metadata;

        desc.args[1] = mdata_phys;

        ret = qcom_scm_call(__scm->dev, &desc, &res);

        qcom_scm_clk_disable();

free_metadata:
        dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);

        return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_init_image);
/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @peripheral:	peripheral id
 * @addr:	start address of memory area to prepare
 * @size:	size of the memory area to prepare
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
        int ret;
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_PIL,
                .cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
                .arginfo = QCOM_SCM_ARGS(3),
                .args[0] = peripheral,
                .args[1] = addr,
                .args[2] = size,
                .owner = ARM_SMCCC_OWNER_SIP,
        };
        struct qcom_scm_res res;

        ret = qcom_scm_clk_enable();
        if (ret)
                return ret;

        ret = qcom_scm_call(__scm->dev, &desc, &res);
        qcom_scm_clk_disable();

        return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_mem_setup);

/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @peripheral: peripheral id
 *
 * Return 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
        int ret;
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_PIL,
                .cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
                .arginfo = QCOM_SCM_ARGS(1),
                .args[0] = peripheral,
                .owner = ARM_SMCCC_OWNER_SIP,
        };
        struct qcom_scm_res res;

        ret = qcom_scm_clk_enable();
        if (ret)
                return ret;

        ret = qcom_scm_call(__scm->dev, &desc, &res);
        qcom_scm_clk_disable();

        return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);

/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 peripheral)
{
        int ret;
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_PIL,
                .cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
                .arginfo = QCOM_SCM_ARGS(1),
                .args[0] = peripheral,
                .owner = ARM_SMCCC_OWNER_SIP,
        };
        struct qcom_scm_res res;

        ret = qcom_scm_clk_enable();
        if (ret)
                return ret;

        ret = qcom_scm_call(__scm->dev, &desc, &res);

        qcom_scm_clk_disable();

        return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_shutdown);

/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
 * @peripheral:	peripheral id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 peripheral)
{
        int ret;
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_PIL,
                .cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
                .arginfo = QCOM_SCM_ARGS(1),
                .args[0] = peripheral,
                .owner = ARM_SMCCC_OWNER_SIP,
        };
        struct qcom_scm_res res;

        ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
                                           QCOM_SCM_PIL_PAS_IS_SUPPORTED);
        if (ret <= 0)
                return false;

        ret = qcom_scm_call(__scm->dev, &desc, &res);

        return ret ? false : !!res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_supported);
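/*
 * Illustrative sketch (not compiled): a remoteproc driver would chain the
 * PAS calls above to boot a peripheral, assuming pas_id, the metadata blob
 * and the carveout region (mem_phys/mem_size) come from the device tree and
 * the firmware loader:
 *
 *	if (!qcom_scm_pas_supported(pas_id))
 *		return -ENXIO;
 *
 *	ret = qcom_scm_pas_init_image(pas_id, metadata, size);
 *	if (ret)
 *		return ret;
 *
 *	ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, mem_size);
 *	if (ret)
 *		return ret;
 *
 *	ret = qcom_scm_pas_auth_and_reset(pas_id);
 */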
static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_PIL,
                .cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
                .arginfo = QCOM_SCM_ARGS(2),
                .args[0] = reset,
                .args[1] = 0,
                .owner = ARM_SMCCC_OWNER_SIP,
        };
        struct qcom_scm_res res;
        int ret;

        ret = qcom_scm_call(__scm->dev, &desc, &res);

        return ret ? : res.result[0];
}

static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
                                     unsigned long idx)
{
        if (idx != 0)
                return -EINVAL;

        return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
                                       unsigned long idx)
{
        if (idx != 0)
                return -EINVAL;

        return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
        .assert = qcom_scm_pas_reset_assert,
        .deassert = qcom_scm_pas_reset_deassert,
};

int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_IO,
                .cmd = QCOM_SCM_IO_READ,
                .arginfo = QCOM_SCM_ARGS(1),
                .args[0] = addr,
                .owner = ARM_SMCCC_OWNER_SIP,
        };
        struct qcom_scm_res res;
        int ret;

        ret = qcom_scm_call(__scm->dev, &desc, &res);
        if (ret >= 0)
                *val = res.result[0];

        return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL(qcom_scm_io_readl);

int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_IO,
                .cmd = QCOM_SCM_IO_WRITE,
                .arginfo = QCOM_SCM_ARGS(2),
                .args[0] = addr,
                .args[1] = val,
                .owner = ARM_SMCCC_OWNER_SIP,
        };

        return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_io_writel);
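/*
 * Illustrative sketch (not compiled): read-modify-write of a secure-only
 * register through the IO service; tcsr_addr is an assumed physical address
 * taken from the device tree:
 *
 *	unsigned int val;
 *
 *	ret = qcom_scm_io_readl(tcsr_addr, &val);
 *	if (!ret)
 *		ret = qcom_scm_io_writel(tcsr_addr, val | BIT(0));
 */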
/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 *					  supports restore security config interface.
 *
 * Return true if restore-cfg interface is supported, false if not.
 */
bool qcom_scm_restore_sec_cfg_available(void)
{
        return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
                                            QCOM_SCM_MP_RESTORE_SEC_CFG);
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available);

int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_MP,
                .cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
                .arginfo = QCOM_SCM_ARGS(2),
                .args[0] = device_id,
                .args[1] = spare,
                .owner = ARM_SMCCC_OWNER_SIP,
        };
        struct qcom_scm_res res;
        int ret;

        ret = qcom_scm_call(__scm->dev, &desc, &res);

        return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);

int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_MP,
                .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
                .arginfo = QCOM_SCM_ARGS(1),
                .args[0] = spare,
                .owner = ARM_SMCCC_OWNER_SIP,
        };
        struct qcom_scm_res res;
        int ret;

        ret = qcom_scm_call(__scm->dev, &desc, &res);

        if (size)
                *size = res.result[0];

        return ret ? : res.result[1];
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);

int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_MP,
                .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
                .arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
                                         QCOM_SCM_VAL),
                .args[0] = addr,
                .args[1] = size,
                .args[2] = spare,
                .owner = ARM_SMCCC_OWNER_SIP,
        };
        int ret;

        ret = qcom_scm_call(__scm->dev, &desc, NULL);

        /* the pg table has been initialized already, ignore the error */
        if (ret == -EPERM)
                ret = 0;

        return ret;
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);

static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
                                 size_t mem_sz, phys_addr_t src, size_t src_sz,
                                 phys_addr_t dest, size_t dest_sz)
{
        int ret;
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_MP,
                .cmd = QCOM_SCM_MP_ASSIGN,
                .arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
                                         QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
                                         QCOM_SCM_VAL, QCOM_SCM_VAL),
                .args[0] = mem_region,
                .args[1] = mem_sz,
                .args[2] = src,
                .args[3] = src_sz,
                .args[4] = dest,
                .args[5] = dest_sz,
                .args[6] = 0,
                .owner = ARM_SMCCC_OWNER_SIP,
        };
        struct qcom_scm_res res;

        ret = qcom_scm_call(dev, &desc, &res);

        return ret ? : res.result[0];
}

/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: mem region whose ownership needs to be reassigned
 * @mem_sz:   size of the region.
 * @srcvm:    vmid for the current set of owners, each set bit in
 *            the flag indicates a unique owner
 * @newvm:    array having new owners and corresponding permission
 *            flags
 * @dest_cnt: number of owners in next set.
 *
 * Return negative errno on failure or 0 on success with @srcvm updated.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
                        unsigned int *srcvm,
                        const struct qcom_scm_vmperm *newvm,
                        unsigned int dest_cnt)
{
        struct qcom_scm_current_perm_info *destvm;
        struct qcom_scm_mem_map_info *mem_to_map;
        phys_addr_t mem_to_map_phys;
        phys_addr_t dest_phys;
        phys_addr_t ptr_phys;
        dma_addr_t ptr_dma;
        size_t mem_to_map_sz;
        size_t dest_sz;
        size_t src_sz;
        size_t ptr_sz;
        int next_vm;
        __le32 *src;
        void *ptr;
        int ret, i, b;
        unsigned long srcvm_bits = *srcvm;

        src_sz = hweight_long(srcvm_bits) * sizeof(*src);
        mem_to_map_sz = sizeof(*mem_to_map);
        dest_sz = dest_cnt * sizeof(*destvm);
        ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
                        ALIGN(dest_sz, SZ_64);

        ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_dma, GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;
        ptr_phys = dma_to_phys(__scm->dev, ptr_dma);

        /* Fill source vmid detail */
        src = ptr;
        i = 0;
        for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG)
                src[i++] = cpu_to_le32(b);

        /* Fill details of mem buff to map */
        mem_to_map = ptr + ALIGN(src_sz, SZ_64);
        mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
        mem_to_map->mem_addr = cpu_to_le64(mem_addr);
        mem_to_map->mem_size = cpu_to_le64(mem_sz);

        next_vm = 0;
        /* Fill details of next vmid detail */
        destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
        dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
        for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
                destvm->vmid = cpu_to_le32(newvm->vmid);
                destvm->perm = cpu_to_le32(newvm->perm);
                destvm->ctx = 0;
                destvm->ctx_size = 0;
                next_vm |= BIT(newvm->vmid);
        }

        ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
                                    ptr_phys, src_sz, dest_phys, dest_sz);
        dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_dma);
        if (ret) {
                dev_err(__scm->dev,
                        "Assign memory protection call failed %d\n", ret);
                return -EINVAL;
        }

        *srcvm = next_vm;
        return 0;
}
EXPORT_SYMBOL(qcom_scm_assign_mem);
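/*
 * Illustrative sketch (not compiled): handing a physically contiguous
 * buffer to the modem VM and recording the new owner set, using the
 * QCOM_SCM_VMID_* and QCOM_SCM_PERM_* values from <linux/qcom_scm.h>;
 * region and size are assumed to describe the buffer:
 *
 *	unsigned int perms = BIT(QCOM_SCM_VMID_HLOS);
 *	struct qcom_scm_vmperm newvm = {
 *		.vmid = QCOM_SCM_VMID_MSS_MSA,
 *		.perm = QCOM_SCM_PERM_RW,
 *	};
 *
 *	ret = qcom_scm_assign_mem(region, size, &perms, &newvm, 1);
 */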
/**
 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
 */
bool qcom_scm_ocmem_lock_available(void)
{
        return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
                                            QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL(qcom_scm_ocmem_lock_available);

/**
 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 * region to the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 * @mode:   access mode (WIDE/NARROW)
 */
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
                        u32 mode)
{
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_OCMEM,
                .cmd = QCOM_SCM_OCMEM_LOCK_CMD,
                .args[0] = id,
                .args[1] = offset,
                .args[2] = size,
                .args[3] = mode,
                .arginfo = QCOM_SCM_ARGS(4),
        };

        return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_ocmem_lock);

/**
 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
 * region from the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 */
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_OCMEM,
                .cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
                .args[0] = id,
                .args[1] = offset,
                .args[2] = size,
                .arginfo = QCOM_SCM_ARGS(3),
        };

        return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_ocmem_unlock);
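/*
 * Illustrative sketch (not compiled): the OCMEM driver grants a client
 * access to a carved-out region; offset, size and mode are assumed to come
 * from its allocator:
 *
 *	if (qcom_scm_ocmem_lock_available())
 *		ret = qcom_scm_ocmem_lock(QCOM_SCM_OCMEM_GRAPHICS_ID,
 *					  offset, size, mode);
 */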
/**
 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
 *
 * Return true if HDCP is supported, false if not.
 */
bool qcom_scm_hdcp_available(void)
{
        int ret = qcom_scm_clk_enable();

        if (ret)
                return false;

        ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
                                           QCOM_SCM_HDCP_INVOKE);

        qcom_scm_clk_disable();

        return ret > 0;
}
EXPORT_SYMBOL(qcom_scm_hdcp_available);

/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req:     HDCP request array
 * @req_cnt: HDCP request array count
 * @resp:    response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 */
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
        int ret;
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_HDCP,
                .cmd = QCOM_SCM_HDCP_INVOKE,
                .arginfo = QCOM_SCM_ARGS(10),
                .args = {
                        req[0].addr,
                        req[0].val,
                        req[1].addr,
                        req[1].val,
                        req[2].addr,
                        req[2].val,
                        req[3].addr,
                        req[3].val,
                        req[4].addr,
                        req[4].val
                },
                .owner = ARM_SMCCC_OWNER_SIP,
        };
        struct qcom_scm_res res;

        if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
                return -ERANGE;

        ret = qcom_scm_clk_enable();
        if (ret)
                return ret;

        ret = qcom_scm_call(__scm->dev, &desc, &res);
        *resp = res.result[0];

        qcom_scm_clk_disable();

        return ret;
}
EXPORT_SYMBOL(qcom_scm_hdcp_req);
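/*
 * Illustrative sketch (not compiled): an HDCP client batches up to
 * QCOM_SCM_HDCP_MAX_REQ_CNT register writes into one invocation; reg and
 * val are assumed to come from the display driver:
 *
 *	struct qcom_scm_hdcp_req req[QCOM_SCM_HDCP_MAX_REQ_CNT];
 *	u32 resp;
 *
 *	req[0].addr = reg;
 *	req[0].val = val;
 *	ret = qcom_scm_hdcp_req(req, 1, &resp);
 */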
int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_SMMU_PROGRAM,
                .cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
                .arginfo = QCOM_SCM_ARGS(2),
                .args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
                .args[1] = en,
                .owner = ARM_SMCCC_OWNER_SIP,
        };

        return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);

static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
        struct device_node *tcsr;
        struct device_node *np = dev->of_node;
        struct resource res;
        u32 offset;
        int ret;

        tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
        if (!tcsr)
                return 0;

        ret = of_address_to_resource(tcsr, 0, &res);
        of_node_put(tcsr);
        if (ret)
                return ret;

        ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
        if (ret < 0)
                return ret;

        *addr = res.start + offset;

        return 0;
}

/**
 * qcom_scm_is_available() - Checks if SCM is available
 */
bool qcom_scm_is_available(void)
{
        return !!__scm;
}
EXPORT_SYMBOL(qcom_scm_is_available);

static int qcom_scm_probe(struct platform_device *pdev)
{
        struct qcom_scm *scm;
        unsigned long clks;
        int ret;

        scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
        if (!scm)
                return -ENOMEM;

        ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
        if (ret < 0)
                return ret;

        clks = (unsigned long)of_device_get_match_data(&pdev->dev);

        scm->core_clk = devm_clk_get(&pdev->dev, "core");
        if (IS_ERR(scm->core_clk)) {
                if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
                        return PTR_ERR(scm->core_clk);

                if (clks & SCM_HAS_CORE_CLK) {
                        dev_err(&pdev->dev, "failed to acquire core clk\n");
                        return PTR_ERR(scm->core_clk);
                }

                scm->core_clk = NULL;
        }

        scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
        if (IS_ERR(scm->iface_clk)) {
                if (PTR_ERR(scm->iface_clk) == -EPROBE_DEFER)
                        return PTR_ERR(scm->iface_clk);

                if (clks & SCM_HAS_IFACE_CLK) {
                        dev_err(&pdev->dev, "failed to acquire iface clk\n");
                        return PTR_ERR(scm->iface_clk);
                }

                scm->iface_clk = NULL;
        }

        scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
        if (IS_ERR(scm->bus_clk)) {
                if (PTR_ERR(scm->bus_clk) == -EPROBE_DEFER)
                        return PTR_ERR(scm->bus_clk);

                if (clks & SCM_HAS_BUS_CLK) {
                        dev_err(&pdev->dev, "failed to acquire bus clk\n");
                        return PTR_ERR(scm->bus_clk);
                }

                scm->bus_clk = NULL;
        }

        scm->reset.ops = &qcom_scm_pas_reset_ops;
        scm->reset.nr_resets = 1;
        scm->reset.of_node = pdev->dev.of_node;
        ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
        if (ret)
                return ret;

        /* vote for max clk rate for highest performance */
        ret = clk_set_rate(scm->core_clk, INT_MAX);
        if (ret)
                return ret;

        __scm = scm;
        __scm->dev = &pdev->dev;

        __query_convention();

        /*
         * If requested, enable "download mode". From this point on, warmboot
         * will cause the boot stages to enter download mode, unless
         * disabled below by a clean shutdown/reboot.
         */
        if (download_mode)
                qcom_scm_set_download_mode(true);

        return 0;
}

static void qcom_scm_shutdown(struct platform_device *pdev)
{
        /* Clean shutdown, disable download mode to allow normal restart */
        if (download_mode)
                qcom_scm_set_download_mode(false);
}

static const struct of_device_id qcom_scm_dt_match[] = {
        { .compatible = "qcom,scm-apq8064",
          /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
        },
        { .compatible = "qcom,scm-apq8084", .data = (void *)(SCM_HAS_CORE_CLK |
                                                             SCM_HAS_IFACE_CLK |
                                                             SCM_HAS_BUS_CLK)
        },
        { .compatible = "qcom,scm-ipq4019" },
        { .compatible = "qcom,scm-msm8660", .data = (void *) SCM_HAS_CORE_CLK },
        { .compatible = "qcom,scm-msm8960", .data = (void *) SCM_HAS_CORE_CLK },
        { .compatible = "qcom,scm-msm8916", .data = (void *)(SCM_HAS_CORE_CLK |
                                                             SCM_HAS_IFACE_CLK |
                                                             SCM_HAS_BUS_CLK)
        },
        { .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK |
                                                             SCM_HAS_IFACE_CLK |
                                                             SCM_HAS_BUS_CLK)
        },
        { .compatible = "qcom,scm-msm8996" },
        { .compatible = "qcom,scm" },
        {}
};

static struct platform_driver qcom_scm_driver = {
        .driver = {
                .name	= "qcom_scm",
                .of_match_table = qcom_scm_dt_match,
        },
        .probe = qcom_scm_probe,
        .shutdown = qcom_scm_shutdown,
};

static int __init qcom_scm_init(void)
{
        return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);