// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/reset-controller.h>
#include <linux/arm-smccc.h>

#include "qcom_scm.h"

static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
module_param(download_mode, bool, 0);

#define SCM_HAS_CORE_CLK	BIT(0)
#define SCM_HAS_IFACE_CLK	BIT(1)
#define SCM_HAS_BUS_CLK		BIT(2)

struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct reset_controller_dev reset;

	u64 dload_mode_addr;
};

struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};

/* Each bit configures cold/warm boot address for one of the 4 CPUs */
static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	0, BIT(0), BIT(3), BIT(5)
};
static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	BIT(2), BIT(1), BIT(4), BIT(6)
};

static const char * const qcom_scm_convention_names[] = {
	[SMC_CONVENTION_UNKNOWN] = "unknown",
	[SMC_CONVENTION_ARM_32] = "smc arm 32",
	[SMC_CONVENTION_ARM_64] = "smc arm 64",
	[SMC_CONVENTION_LEGACY] = "smc legacy",
};

static struct qcom_scm *__scm;

static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}

static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}

enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);

static enum qcom_scm_convention __get_convention(void)
{
	unsigned long flags;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
					QCOM_SCM_INFO_IS_CALL_AVAIL) |
			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	enum qcom_scm_convention probed_convention;
	int ret;
	bool forced = false;

	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
		return qcom_scm_convention;

	/*
	 * Device isn't required as there is only one argument - no device
	 * needed to dma_map_single to secure world
	 */
	probed_convention = SMC_CONVENTION_ARM_64;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	/*
	 * Some SC7180 firmwares didn't implement the
	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing ARM_64
	 * calling conventions on these firmwares. Luckily we don't make any
	 * early calls into the firmware on these SoCs so the device pointer
	 * will be valid here to check if the compatible matches.
	 */
	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
		forced = true;
		goto found;
	}

	probed_convention = SMC_CONVENTION_ARM_32;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	probed_convention = SMC_CONVENTION_LEGACY;
found:
	spin_lock_irqsave(&scm_query_lock, flags);
	if (probed_convention != qcom_scm_convention) {
		qcom_scm_convention = probed_convention;
		pr_info("qcom_scm: convention: %s%s\n",
			qcom_scm_convention_names[qcom_scm_convention],
			forced ? " (forced)" : "");
	}
	spin_unlock_irqrestore(&scm_query_lock, flags);

	return qcom_scm_convention;
}

/**
 * qcom_scm_call() - Invoke a syscall in the secure world
 * @dev: device
 * @desc: Descriptor structure containing arguments and return values
 * @res: Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This should *only* be called in pre-emptible context.
 */
static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
			 struct qcom_scm_res *res)
{
	might_sleep();
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, false);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

/**
 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 * @dev: device
 * @desc: Descriptor structure containing arguments and return values
 * @res: Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This can be called in atomic context.
 */
static int qcom_scm_call_atomic(struct device *dev,
				const struct qcom_scm_desc *desc,
				struct qcom_scm_res *res)
{
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, true);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call_atomic(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

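/*
 * Editorial usage sketch (not driver code): the wrappers below all follow the
 * same pattern - fill in a qcom_scm_desc describing the service, command and
 * argument layout, invoke qcom_scm_call() (or qcom_scm_call_atomic() from
 * atomic context) and, where the call returns data, pick the result out of
 * qcom_scm_res. The wrapper name and command value here are hypothetical;
 * only the QCOM_SCM_ and ARM_SMCCC_ identifiers are real.
 *
 *	static int example_scm_wrapper(u32 arg)
 *	{
 *		struct qcom_scm_desc desc = {
 *			.svc = QCOM_SCM_SVC_BOOT,
 *			.cmd = 0x1,
 *			.arginfo = QCOM_SCM_ARGS(1),
 *			.args[0] = arg,
 *			.owner = ARM_SMCCC_OWNER_SIP,
 *		};
 *		struct qcom_scm_res res;
 *		int ret;
 *
 *		ret = qcom_scm_call(__scm->dev, &desc, &res);
 *		return ret ? : res.result[0];
 *	}
 */
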
static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
					 u32 cmd_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	desc.arginfo = QCOM_SCM_ARGS(1);
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
		break;
	case SMC_CONVENTION_LEGACY:
		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
		break;
	default:
		pr_err("Unknown SMC convention being used\n");
		return false;
	}

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? false : !!res.result[0];
}

static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
{
	int cpu;
	unsigned int flags = 0;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	for_each_present_cpu(cpu) {
		if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
			return -EINVAL;
		flags |= cpu_bits[cpu];
	}

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}

static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
		.owner = ARM_SMCCC_OWNER_SIP,
		.arginfo = QCOM_SCM_ARGS(6),
		.args = {
			virt_to_phys(entry),
			/* Apply to all CPUs in all affinity levels */
			~0ULL, ~0ULL, ~0ULL, ~0ULL,
			flags,
		},
	};

	/* Need a device for DMA of the additional arguments */
	if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
		return -EOPNOTSUPP;

	return qcom_scm_call(__scm->dev, &desc, NULL);
}

/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
 * @entry: Entry point function for the cpus
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
		/* Fall back to the old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
	return 0;
}
EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
 * @entry: Entry point function for the cpus
 */
int qcom_scm_set_cold_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
		/* Fall back to the old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
	return 0;
}
EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);

/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags: Flags to flush cache
 *
 * This is an end point to power down the cpu. If there was a pending
 * interrupt, control returns from this function; otherwise, the cpu jumps to
 * the warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_cpu_power_down);

int qcom_scm_set_remote_state(u32 state, u32 id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = state,
		.args[1] = id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_set_remote_state);

static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}

static void qcom_scm_set_download_mode(bool enable)
{
	bool avail;
	int ret = 0;

	avail = __qcom_scm_is_call_available(__scm->dev,
					     QCOM_SCM_SVC_BOOT,
					     QCOM_SCM_BOOT_SET_DLOAD_MODE);
	if (avail) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
	} else if (__scm->dload_mode_addr) {
		ret = qcom_scm_io_writel(__scm->dload_mode_addr,
				enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
	} else {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}

/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @peripheral: peripheral id
 * @metadata: pointer to memory containing ELF header, program header table
 *	      and optional blob of data used for authenticating the metadata
 *	      and the rest of the firmware
 * @size: size of the metadata
 * @ctx: optional metadata context
 *
 * Return: 0 on success.
 *
 * Upon successful return, the PAS metadata context (@ctx) is used to track
 * the metadata allocation; the caller must release it by invoking
 * qcom_scm_pas_metadata_release().
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
			    struct qcom_scm_pas_metadata *ctx)
{
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	/*
	 * During the scm call memory protection will be enabled for the meta
	 * data blob, so make sure it's physically contiguous, 4K aligned and
	 * non-cacheable to avoid XPU violations.
	 */
	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf) {
		dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
		return -ENOMEM;
	}
	memcpy(mdata_buf, metadata, size);

	ret = qcom_scm_clk_enable();
	if (ret)
		goto out;

	desc.args[1] = mdata_phys;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_clk_disable();

out:
	if (ret < 0 || !ctx) {
		dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
	} else if (ctx) {
		ctx->ptr = mdata_buf;
		ctx->phys = mdata_phys;
		ctx->size = size;
	}

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_init_image);

/**
 * qcom_scm_pas_metadata_release() - release metadata context
 * @ctx: metadata context
 */
void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx)
{
	if (!ctx->ptr)
		return;

	dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);

	ctx->ptr = NULL;
	ctx->phys = 0;
	ctx->size = 0;
}
EXPORT_SYMBOL(qcom_scm_pas_metadata_release);

/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @peripheral: peripheral id
 * @addr: start address of memory area to prepare
 * @size: size of the memory area to prepare
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = peripheral,
		.args[1] = addr,
		.args[2] = size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_mem_setup);

/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @peripheral: peripheral id
 *
 * Return 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);

/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_shutdown);

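/*
 * Editorial usage sketch (not driver code): a PAS-based remoteproc loader is
 * expected to drive the helpers above roughly in this order. Error handling
 * is omitted and the identifiers pas_id, fw_metadata, metadata_len, mem_phys
 * and mem_size are placeholders.
 *
 *	struct qcom_scm_pas_metadata pas_ctx = {};
 *
 *	qcom_scm_pas_init_image(pas_id, fw_metadata, metadata_len, &pas_ctx);
 *	qcom_scm_pas_mem_setup(pas_id, mem_phys, mem_size);
 *	... copy the firmware segments into the prepared region ...
 *	qcom_scm_pas_auth_and_reset(pas_id);
 *	qcom_scm_pas_metadata_release(&pas_ctx);
 *
 * and later, when stopping the remote processor:
 *
 *	qcom_scm_pas_shutdown(pas_id);
 */
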
/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
 * @peripheral: peripheral id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
		return false;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? false : !!res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_supported);

static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = reset,
		.args[1] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}

static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
					unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};

int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_READ,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = addr,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
	if (ret >= 0)
		*val = res.result[0];

	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL(qcom_scm_io_readl);

int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_WRITE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = addr,
		.args[1] = val,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_io_writel);

/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 * supports restore security config interface.
 *
 * Return true if restore-cfg interface is supported, false if not.
 */
bool qcom_scm_restore_sec_cfg_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_RESTORE_SEC_CFG);
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available);

int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = device_id,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);

int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (size)
		*size = res.result[0];

	return ret ? : res.result[1];
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);

int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = addr,
		.args[1] = size,
		.args[2] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	/* the pg table has been initialized already, ignore the error */
	if (ret == -EPERM)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);

int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = size,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_iommu_set_cp_pool_size);

int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
				   u32 cp_nonpixel_start,
				   u32 cp_nonpixel_size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_VIDEO_VAR,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = cp_start,
		.args[1] = cp_size,
		.args[2] = cp_nonpixel_start,
		.args[3] = cp_nonpixel_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_mem_protect_video_var);

static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
				 size_t mem_sz, phys_addr_t src, size_t src_sz,
				 phys_addr_t dest, size_t dest_sz)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_ASSIGN,
		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = mem_region,
		.args[1] = mem_sz,
		.args[2] = src,
		.args[3] = src_sz,
		.args[4] = dest,
		.args[5] = dest_sz,
		.args[6] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? : res.result[0];
}

/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: mem region whose ownership needs to be reassigned
 * @mem_sz: size of the region.
 * @srcvm: vmid for the current set of owners, each set bit in
 *	   the flag indicates a unique owner
 * @newvm: array having new owners and corresponding permission
 *	   flags
 * @dest_cnt: number of owners in next set.
 *
 * Return negative errno on failure or 0 on success with @srcvm updated.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			unsigned int *srcvm,
			const struct qcom_scm_vmperm *newvm,
			unsigned int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	dma_addr_t ptr_phys;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	void *ptr;
	int ret, i, b;
	unsigned long srcvm_bits = *srcvm;

	src_sz = hweight_long(srcvm_bits) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
			ALIGN(dest_sz, SZ_64);

	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	/* Fill source vmid detail */
	src = ptr;
	i = 0;
	for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG)
		src[i++] = cpu_to_le32(b);

	/* Fill details of mem buff to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
	mem_to_map->mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
	/* Fill details of the destination vmids */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
		destvm->vmid = cpu_to_le32(newvm->vmid);
		destvm->perm = cpu_to_le32(newvm->perm);
		destvm->ctx = 0;
		destvm->ctx_size = 0;
		next_vm |= BIT(newvm->vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d\n", ret);
		return -EINVAL;
	}

	*srcvm = next_vm;
	return 0;
}
EXPORT_SYMBOL(qcom_scm_assign_mem);

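/*
 * Editorial usage sketch (not driver code): handing a physically contiguous
 * buffer from HLOS to a remote VM. The phys/size variables are placeholders;
 * the QCOM_SCM_VMID_* and QCOM_SCM_PERM_* values come from
 * include/linux/qcom_scm.h. On success @perms is updated to describe the new
 * owners, so the same variable can later be passed back in to return the
 * memory.
 *
 *	unsigned int perms = BIT(QCOM_SCM_VMID_HLOS);
 *	const struct qcom_scm_vmperm next[] = {
 *		{ QCOM_SCM_VMID_MSS_MSA, QCOM_SCM_PERM_RW },
 *	};
 *
 *	ret = qcom_scm_assign_mem(phys, size, &perms, next, ARRAY_SIZE(next));
 */
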
/**
 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
 */
bool qcom_scm_ocmem_lock_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
					    QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL(qcom_scm_ocmem_lock_available);

/**
 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 * region to the specified initiator
 *
 * @id: tz initiator id
 * @offset: OCMEM offset
 * @size: OCMEM size
 * @mode: access mode (WIDE/NARROW)
 */
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
			u32 mode)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.args[3] = mode,
		.arginfo = QCOM_SCM_ARGS(4),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_ocmem_lock);

/**
 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
 * region from the specified initiator
 *
 * @id: tz initiator id
 * @offset: OCMEM offset
 * @size: OCMEM size
 */
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.arginfo = QCOM_SCM_ARGS(3),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_ocmem_unlock);

/**
 * qcom_scm_ice_available() - Is the ICE key programming interface available?
 *
 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
 *	   qcom_scm_ice_set_key() are available.
 */
bool qcom_scm_ice_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
}
EXPORT_SYMBOL(qcom_scm_ice_available);

/**
 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
 * @index: the keyslot to invalidate
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_invalidate_key(u32 index)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = index,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_ice_invalidate_key);

/**
 * qcom_scm_ice_set_key() - Set an inline encryption key
 * @index: the keyslot into which to set the key
 * @key: the key to program
 * @key_size: the size of the key in bytes
 * @cipher: the encryption algorithm the key is for
 * @data_unit_size: the encryption data unit size, i.e. the size of each
 *		    individual plaintext and ciphertext. Given in 512-byte
 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
 *
 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
					 QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = index,
		.args[2] = key_size,
		.args[3] = cipher,
		.args[4] = data_unit_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	void *keybuf;
	dma_addr_t key_phys;
	int ret;

	/*
	 * 'key' may point to vmalloc()'ed memory, but we need to pass a
	 * physical address that's been properly flushed. The sanctioned way to
	 * do this is by using the DMA API. But as is best practice for crypto
	 * keys, we also must wipe the key after use. This makes kmemdup() +
	 * dma_map_single() not clearly correct, since the DMA API can use
	 * bounce buffers. Instead, just use dma_alloc_coherent(). Programming
	 * keys is normally rare and thus not performance-critical.
	 */

	keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys,
				    GFP_KERNEL);
	if (!keybuf)
		return -ENOMEM;
	memcpy(keybuf, key, key_size);
	desc.args[1] = key_phys;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	memzero_explicit(keybuf, key_size);

	dma_free_coherent(__scm->dev, key_size, keybuf, key_phys);
	return ret;
}
EXPORT_SYMBOL(qcom_scm_ice_set_key);

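/*
 * Editorial usage sketch (not driver code): programming and later evicting an
 * AES-256-XTS key in keyslot 0, with 4096-byte data units (8 * 512 bytes).
 * The 64-byte raw_key buffer is a placeholder supplied by the storage driver.
 *
 *	err = qcom_scm_ice_set_key(0, raw_key, 64,
 *				   QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
 *	...
 *	err = qcom_scm_ice_invalidate_key(0);
 */
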
/**
 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
 *
 * Return true if HDCP is supported, false if not.
 */
bool qcom_scm_hdcp_available(void)
{
	bool avail;
	int ret = qcom_scm_clk_enable();

	if (ret)
		return false;

	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
					     QCOM_SCM_HDCP_INVOKE);

	qcom_scm_clk_disable();

	return avail;
}
EXPORT_SYMBOL(qcom_scm_hdcp_available);

/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req: HDCP request array
 * @req_cnt: HDCP request array count
 * @resp: response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 */
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_HDCP,
		.cmd = QCOM_SCM_HDCP_INVOKE,
		.arginfo = QCOM_SCM_ARGS(10),
		.args = {
			req[0].addr,
			req[0].val,
			req[1].addr,
			req[1].val,
			req[2].addr,
			req[2].val,
			req[3].addr,
			req[3].val,
			req[4].addr,
			req[4].val
		},
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
		return -ERANGE;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	*resp = res.result[0];

	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL(qcom_scm_hdcp_req);

int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_PT_FORMAT,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = sec_id,
		.args[1] = ctx_num,
		.args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_iommu_set_pt_format);

int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
		.args[1] = en,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);

bool qcom_scm_lmh_dcvsh_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
}
EXPORT_SYMBOL(qcom_scm_lmh_dcvsh_available);

int qcom_scm_lmh_profile_change(u32 profile_id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
		.args[0] = profile_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_lmh_profile_change);

int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
		       u64 limit_node, u32 node_id, u64 version)
{
	dma_addr_t payload_phys;
	u32 *payload_buf;
	int ret, payload_size = 5 * sizeof(u32);

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[1] = payload_size,
		.args[2] = limit_node,
		.args[3] = node_id,
		.args[4] = version,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL);
	if (!payload_buf)
		return -ENOMEM;

	payload_buf[0] = payload_fn;
	payload_buf[1] = 0;
	payload_buf[2] = payload_reg;
	payload_buf[3] = 1;
	payload_buf[4] = payload_val;

	desc.args[0] = payload_phys;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	dma_free_coherent(__scm->dev, payload_size, payload_buf, payload_phys);
	return ret;
}
EXPORT_SYMBOL(qcom_scm_lmh_dcvsh);

static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}

/**
 * qcom_scm_is_available() - Checks if SCM is available
 */
bool qcom_scm_is_available(void)
{
	return !!__scm;
}
EXPORT_SYMBOL(qcom_scm_is_available);

static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_scm *scm;
	unsigned long clks;
	int ret;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
	if (ret < 0)
		return ret;

	clks = (unsigned long)of_device_get_match_data(&pdev->dev);

	scm->core_clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(scm->core_clk)) {
		if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->core_clk);

		if (clks & SCM_HAS_CORE_CLK) {
			dev_err(&pdev->dev, "failed to acquire core clk\n");
			return PTR_ERR(scm->core_clk);
		}

		scm->core_clk = NULL;
	}

	scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(scm->iface_clk)) {
		if (PTR_ERR(scm->iface_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->iface_clk);

		if (clks & SCM_HAS_IFACE_CLK) {
			dev_err(&pdev->dev, "failed to acquire iface clk\n");
			return PTR_ERR(scm->iface_clk);
		}

		scm->iface_clk = NULL;
	}

	scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (IS_ERR(scm->bus_clk)) {
		if (PTR_ERR(scm->bus_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->bus_clk);

		if (clks & SCM_HAS_BUS_CLK) {
			dev_err(&pdev->dev, "failed to acquire bus clk\n");
			return PTR_ERR(scm->bus_clk);
		}

		scm->bus_clk = NULL;
	}

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

	/* vote for max clk rate for highest performance */
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	__scm = scm;
	__scm->dev = &pdev->dev;

	__get_convention();

	/*
	 * If requested enable "download mode", from this point on warmboot
	 * will cause the boot stages to enter download mode, unless
	 * disabled below by a clean shutdown/reboot.
	 */
	if (download_mode)
		qcom_scm_set_download_mode(true);

	return 0;
}

static void qcom_scm_shutdown(struct platform_device *pdev)
{
	/* Clean shutdown, disable download mode to allow normal restart */
	if (download_mode)
		qcom_scm_set_download_mode(false);
}

static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm-apq8064",
	  /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
	},
	{ .compatible = "qcom,scm-apq8084", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-ipq4019" },
	{ .compatible = "qcom,scm-mdm9607", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK) },
	{ .compatible = "qcom,scm-msm8660", .data = (void *) SCM_HAS_CORE_CLK },
	{ .compatible = "qcom,scm-msm8960", .data = (void *) SCM_HAS_CORE_CLK },
	{ .compatible = "qcom,scm-msm8916", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8953", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8994" },
	{ .compatible = "qcom,scm-msm8996" },
	{ .compatible = "qcom,scm" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);

static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name	= "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};

static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
MODULE_LICENSE("GPL v2");