// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/reset-controller.h>
#include <linux/arm-smccc.h>

#include "qcom_scm.h"

static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
module_param(download_mode, bool, 0);

struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct icc_path *path;
	struct completion waitq_comp;
	struct reset_controller_dev reset;

	/* control access to the interconnect path */
	struct mutex scm_bw_lock;
	int scm_vote_count;

	u64 dload_mode_addr;
};

struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};

/* Each bit configures cold/warm boot address for one of the 4 CPUs */
static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	0, BIT(0), BIT(3), BIT(5)
};
static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	BIT(2), BIT(1), BIT(4), BIT(6)
};

#define QCOM_SMC_WAITQ_FLAG_WAKE_ONE	BIT(0)
#define QCOM_SMC_WAITQ_FLAG_WAKE_ALL	BIT(1)

static const char * const qcom_scm_convention_names[] = {
	[SMC_CONVENTION_UNKNOWN] = "unknown",
	[SMC_CONVENTION_ARM_32] = "smc arm 32",
	[SMC_CONVENTION_ARM_64] = "smc arm 64",
	[SMC_CONVENTION_LEGACY] = "smc legacy",
};

static struct qcom_scm *__scm;

static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}

static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}

static int qcom_scm_bw_enable(void)
{
	int ret = 0;

	if (!__scm->path)
		return 0;

	if (IS_ERR(__scm->path))
		return -EINVAL;

	mutex_lock(&__scm->scm_bw_lock);
	if (!__scm->scm_vote_count) {
		ret = icc_set_bw(__scm->path, 0, UINT_MAX);
		if (ret < 0) {
			dev_err(__scm->dev, "failed to set bandwidth request\n");
			goto err_bw;
		}
	}
	__scm->scm_vote_count++;
err_bw:
	mutex_unlock(&__scm->scm_bw_lock);

	return ret;
}

static void qcom_scm_bw_disable(void)
{
	if (IS_ERR_OR_NULL(__scm->path))
		return;

	mutex_lock(&__scm->scm_bw_lock);
	if (__scm->scm_vote_count-- == 1)
		icc_set_bw(__scm->path, 0, 0);
	mutex_unlock(&__scm->scm_bw_lock);
}
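
/*
 * The clock and interconnect votes above are always taken in pairs around a
 * firmware call. A minimal sketch of the pattern the PAS helpers below follow
 * (illustrative only, not a new API):
 *
 *	ret = qcom_scm_clk_enable();
 *	if (ret)
 *		return ret;
 *	ret = qcom_scm_bw_enable();
 *	if (ret)
 *		goto disable_clk;
 *	ret = qcom_scm_call(__scm->dev, &desc, &res);
 *	qcom_scm_bw_disable();
 * disable_clk:
 *	qcom_scm_clk_disable();
 */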

enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);

static enum qcom_scm_convention __get_convention(void)
{
	unsigned long flags;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
					QCOM_SCM_INFO_IS_CALL_AVAIL) |
			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	enum qcom_scm_convention probed_convention;
	int ret;
	bool forced = false;

	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
		return qcom_scm_convention;

	/*
	 * Per the "SMC calling convention specification", the 64-bit calling
	 * convention can only be used when the client is 64-bit; otherwise
	 * the system will encounter undefined behaviour.
	 */
#if IS_ENABLED(CONFIG_ARM64)
	/*
	 * Device isn't required as there is only one argument - no device
	 * needed to dma_map_single to secure world
	 */
	probed_convention = SMC_CONVENTION_ARM_64;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	/*
	 * Some SC7180 firmwares didn't implement the
	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing ARM_64
	 * calling conventions on these firmwares. Luckily we don't make any
	 * early calls into the firmware on these SoCs so the device pointer
	 * will be valid here to check if the compatible matches.
	 */
	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
		forced = true;
		goto found;
	}
#endif

	probed_convention = SMC_CONVENTION_ARM_32;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	probed_convention = SMC_CONVENTION_LEGACY;
found:
	spin_lock_irqsave(&scm_query_lock, flags);
	if (probed_convention != qcom_scm_convention) {
		qcom_scm_convention = probed_convention;
		pr_info("qcom_scm: convention: %s%s\n",
			qcom_scm_convention_names[qcom_scm_convention],
			forced ? " (forced)" : "");
	}
	spin_unlock_irqrestore(&scm_query_lock, flags);

	return qcom_scm_convention;
}

/**
 * qcom_scm_call() - Invoke a syscall in the secure world
 * @dev: device
 * @desc: Descriptor structure containing arguments and return values
 * @res: Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This should *only* be called in pre-emptible context.
 */
static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
			 struct qcom_scm_res *res)
{
	might_sleep();
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, false);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}
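
/*
 * Every call below boils down to filling a struct qcom_scm_desc and handing
 * it to one of the dispatchers above. A minimal sketch (illustrative values):
 *
 *	struct qcom_scm_desc desc = {
 *		.svc = QCOM_SCM_SVC_BOOT,
 *		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
 *		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_VAL),
 *		.args = { 0, 0 },
 *		.owner = ARM_SMCCC_OWNER_SIP,
 *	};
 *	struct qcom_scm_res res;
 *	int ret = qcom_scm_call(__scm->dev, &desc, &res);
 *
 * QCOM_SCM_ARGS() encodes the argument count and, optionally, whether each
 * argument is a plain value or a buffer the secure world may read or write
 * (QCOM_SCM_VAL, QCOM_SCM_RO, QCOM_SCM_RW).
 */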

/**
 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 * @dev: device
 * @desc: Descriptor structure containing arguments and return values
 * @res: Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This can be called in atomic context.
 */
static int qcom_scm_call_atomic(struct device *dev,
				const struct qcom_scm_desc *desc,
				struct qcom_scm_res *res)
{
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, true);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call_atomic(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
					 u32 cmd_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	desc.arginfo = QCOM_SCM_ARGS(1);
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
		break;
	case SMC_CONVENTION_LEGACY:
		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
		break;
	default:
		pr_err("Unknown SMC convention being used\n");
		return false;
	}

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? false : !!res.result[0];
}

static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
{
	int cpu;
	unsigned int flags = 0;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	for_each_present_cpu(cpu) {
		if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
			return -EINVAL;
		flags |= cpu_bits[cpu];
	}

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}

static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
		.owner = ARM_SMCCC_OWNER_SIP,
		.arginfo = QCOM_SCM_ARGS(6),
		.args = {
			virt_to_phys(entry),
			/* Apply to all CPUs in all affinity levels */
			~0ULL, ~0ULL, ~0ULL, ~0ULL,
			flags,
		},
	};

	/* Need a device for DMA of the additional arguments */
	if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
		return -EOPNOTSUPP;

	return qcom_scm_call(__scm->dev, &desc, NULL);
}

/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
 * @entry: Entry point function for the cpus
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
		/* Fallback to old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_warm_boot_addr);

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
 * @entry: Entry point function for the cpus
 */
int qcom_scm_set_cold_boot_addr(void *entry)
{
	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
		/* Fallback to old SCM call */
		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_set_cold_boot_addr);
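
/*
 * A minimal usage sketch from a platform's cpuidle/hotplug path (assuming the
 * generic ARM cpu_resume entry point; illustrative only):
 *
 *	ret = qcom_scm_set_warm_boot_addr(cpu_resume);
 *	if (ret)
 *		pr_err("failed to set warm boot address\n");
 *
 * The multi-cluster call is preferred; the legacy per-CPU bitmap variant is
 * only used as a fallback on older firmware.
 */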

/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags: Flags to flush cache
 *
 * This is an end point to power down cpu. If there was a pending interrupt,
 * the control would return from this function, otherwise, the cpu jumps to the
 * warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_cpu_power_down);

int qcom_scm_set_remote_state(u32 state, u32 id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = state,
		.args[1] = id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_set_remote_state);

static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}

static void qcom_scm_set_download_mode(bool enable)
{
	bool avail;
	int ret = 0;

	avail = __qcom_scm_is_call_available(__scm->dev,
					     QCOM_SCM_SVC_BOOT,
					     QCOM_SCM_BOOT_SET_DLOAD_MODE);
	if (avail) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
	} else if (__scm->dload_mode_addr) {
		ret = qcom_scm_io_writel(__scm->dload_mode_addr,
				enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
	} else {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}
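
/*
 * Download mode is controlled by the "download_mode" module parameter, so it
 * can be flipped from the kernel command line:
 *
 *	qcom_scm.download_mode=1
 *
 * When enabled, a warm reboot drops the boot stages into the RAM-dump
 * collection mode; a clean shutdown clears the flag again (see
 * qcom_scm_shutdown() below).
 */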

/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @peripheral: peripheral id
 * @metadata: pointer to memory containing ELF header, program header table
 *	      and optional blob of data used for authenticating the metadata
 *	      and the rest of the firmware
 * @size: size of the metadata
 * @ctx: optional metadata context
 *
 * Return: 0 on success.
 *
 * Upon successful return, the PAS metadata context (@ctx) will be used to
 * track the metadata allocation, this needs to be released by invoking
 * qcom_scm_pas_metadata_release() by the caller.
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
			    struct qcom_scm_pas_metadata *ctx)
{
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	/*
	 * During the scm call memory protection will be enabled for the meta
	 * data blob, so make sure it's physically contiguous, 4K aligned and
	 * non-cacheable to avoid XPU violations.
	 */
	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf) {
		dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
		return -ENOMEM;
	}
	memcpy(mdata_buf, metadata, size);

	ret = qcom_scm_clk_enable();
	if (ret)
		goto out;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	desc.args[1] = mdata_phys;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

out:
	if (ret < 0 || !ctx) {
		dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
	} else {
		ctx->ptr = mdata_buf;
		ctx->phys = mdata_phys;
		ctx->size = size;
	}

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_init_image);

/**
 * qcom_scm_pas_metadata_release() - release metadata context
 * @ctx: metadata context
 */
void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx)
{
	if (!ctx->ptr)
		return;

	dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);

	ctx->ptr = NULL;
	ctx->phys = 0;
	ctx->size = 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_metadata_release);

/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @peripheral: peripheral id
 * @addr: start address of memory area to prepare
 * @size: size of the memory area to prepare
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = peripheral,
		.args[1] = addr,
		.args[2] = size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_mem_setup);
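
/*
 * The PAS helpers above and below are typically driven by a remoteproc driver
 * in this order (sketch only; "pas_id", "fw" and the memory region are
 * caller-provided assumptions):
 *
 *	struct qcom_scm_pas_metadata ctx = {};
 *
 *	ret = qcom_scm_pas_init_image(pas_id, fw->data, fw->size, &ctx);
 *	ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, mem_size);
 *	... copy firmware segments into the region ...
 *	ret = qcom_scm_pas_auth_and_reset(pas_id);
 *	qcom_scm_pas_metadata_release(&ctx);
 *	...
 *	ret = qcom_scm_pas_shutdown(pas_id);
 */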

/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @peripheral: peripheral id
 *
 * Return 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_auth_and_reset);

/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();

disable_clk:
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_shutdown);

/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
 * @peripheral: peripheral id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
		return false;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? false : !!res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_pas_supported);

static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = reset,
		.args[1] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}

static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
				       unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};
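
/*
 * The MSS reset is exposed through the reset-controller framework, so a
 * consumer drives it with the generic reset API rather than calling
 * __qcom_scm_pas_mss_reset() directly. A sketch, assuming a "mss_restart"
 * reset line wired to this controller in the device tree:
 *
 *	struct reset_control *rst =
 *		devm_reset_control_get_exclusive(dev, "mss_restart");
 *
 *	reset_control_assert(rst);
 *	...
 *	reset_control_deassert(rst);
 */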

int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_READ,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = addr,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
	if (ret >= 0)
		*val = res.result[0];

	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_io_readl);

int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_WRITE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = addr,
		.args[1] = val,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_io_writel);
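
/*
 * These accessors let the non-secure side read/modify registers that only the
 * secure world may touch directly. A read-modify-write sketch (TCSR_ADDR is
 * an assumed, platform-specific physical address):
 *
 *	unsigned int v;
 *
 *	ret = qcom_scm_io_readl(TCSR_ADDR, &v);
 *	if (!ret)
 *		ret = qcom_scm_io_writel(TCSR_ADDR, v | BIT(0));
 */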

/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 * supports restore security config interface.
 *
 * Return true if restore-cfg interface is supported, false if not.
 */
bool qcom_scm_restore_sec_cfg_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_RESTORE_SEC_CFG);
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg_available);

int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = device_id,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_restore_sec_cfg);

int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (size)
		*size = res.result[0];

	return ret ? : res.result[1];
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_size);

int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = addr,
		.args[1] = size,
		.args[2] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	/* the pg table has been initialized already, ignore the error */
	if (ret == -EPERM)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_secure_ptbl_init);

int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = size,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_cp_pool_size);

int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
				   u32 cp_nonpixel_start,
				   u32 cp_nonpixel_size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_VIDEO_VAR,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = cp_start,
		.args[1] = cp_size,
		.args[2] = cp_nonpixel_start,
		.args[3] = cp_nonpixel_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL_GPL(qcom_scm_mem_protect_video_var);

static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
				 size_t mem_sz, phys_addr_t src, size_t src_sz,
				 phys_addr_t dest, size_t dest_sz)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_ASSIGN,
		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = mem_region,
		.args[1] = mem_sz,
		.args[2] = src,
		.args[3] = src_sz,
		.args[4] = dest,
		.args[5] = dest_sz,
		.args[6] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? : res.result[0];
}

/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: mem region whose ownership needs to be reassigned
 * @mem_sz: size of the region.
 * @srcvm: vmid bitmap for the current set of owners; each set bit
 *	   indicates a unique owner
 * @newvm: array having new owners and corresponding permission
 *	   flags
 * @dest_cnt: number of owners in next set.
 *
 * Return negative errno on failure or 0 on success with @srcvm updated.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			u64 *srcvm,
			const struct qcom_scm_vmperm *newvm,
			unsigned int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	dma_addr_t ptr_phys;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	void *ptr;
	int ret, i, b;
	u64 srcvm_bits = *srcvm;

	src_sz = hweight64(srcvm_bits) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
			ALIGN(dest_sz, SZ_64);

	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	/* Fill source vmid detail */
	src = ptr;
	i = 0;
	for (b = 0; b < BITS_PER_TYPE(u64); b++) {
		if (srcvm_bits & BIT(b))
			src[i++] = cpu_to_le32(b);
	}

	/* Fill details of mem buff to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
	mem_to_map->mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
	/* Fill details of next vmid detail */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
		destvm->vmid = cpu_to_le32(newvm->vmid);
		destvm->perm = cpu_to_le32(newvm->perm);
		destvm->ctx = 0;
		destvm->ctx_size = 0;
		next_vm |= BIT(newvm->vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d\n", ret);
		return -EINVAL;
	}

	*srcvm = next_vm;
	return 0;
}
EXPORT_SYMBOL_GPL(qcom_scm_assign_mem);
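
/*
 * A typical qcom_scm_assign_mem() transition hands a carved-out region from
 * HLOS to a remote VM and back. A sketch (VMID and permission constants from
 * <linux/firmware/qcom/qcom_scm.h>; the region itself is an assumption):
 *
 *	u64 perms = BIT(QCOM_SCM_VMID_HLOS);
 *	struct qcom_scm_vmperm newvm = {
 *		.vmid = QCOM_SCM_VMID_MSS_MSA,
 *		.perm = QCOM_SCM_PERM_RW,
 *	};
 *
 *	ret = qcom_scm_assign_mem(region_phys, region_size, &perms, &newvm, 1);
 *
 * On success @perms holds the new owner bitmap, ready to be passed to a later
 * call that reassigns the region back to HLOS.
 */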

/**
 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
 */
bool qcom_scm_ocmem_lock_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
					    QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock_available);

/**
 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 * region to the specified initiator
 *
 * @id: tz initiator id
 * @offset: OCMEM offset
 * @size: OCMEM size
 * @mode: access mode (WIDE/NARROW)
 */
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
			u32 mode)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.args[3] = mode,
		.arginfo = QCOM_SCM_ARGS(4),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_lock);

/**
 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
 * region from the specified initiator
 *
 * @id: tz initiator id
 * @offset: OCMEM offset
 * @size: OCMEM size
 */
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.arginfo = QCOM_SCM_ARGS(3),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ocmem_unlock);

/**
 * qcom_scm_ice_available() - Is the ICE key programming interface available?
 *
 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
 *	   qcom_scm_ice_set_key() are available.
 */
bool qcom_scm_ice_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_available);

/**
 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
 * @index: the keyslot to invalidate
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_invalidate_key(u32 index)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = index,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_invalidate_key);

/**
 * qcom_scm_ice_set_key() - Set an inline encryption key
 * @index: the keyslot into which to set the key
 * @key: the key to program
 * @key_size: the size of the key in bytes
 * @cipher: the encryption algorithm the key is for
 * @data_unit_size: the encryption data unit size, i.e. the size of each
 *		    individual plaintext and ciphertext. Given in 512-byte
 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
 *
 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
					 QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = index,
		.args[2] = key_size,
		.args[3] = cipher,
		.args[4] = data_unit_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	void *keybuf;
	dma_addr_t key_phys;
	int ret;

	/*
	 * 'key' may point to vmalloc()'ed memory, but we need to pass a
	 * physical address that's been properly flushed. The sanctioned way to
	 * do this is by using the DMA API. But as is best practice for crypto
	 * keys, we also must wipe the key after use. This makes kmemdup() +
	 * dma_map_single() not clearly correct, since the DMA API can use
	 * bounce buffers. Instead, just use dma_alloc_coherent(). Programming
	 * keys is normally rare and thus not performance-critical.
	 */

	keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys,
				    GFP_KERNEL);
	if (!keybuf)
		return -ENOMEM;
	memcpy(keybuf, key, key_size);
	desc.args[1] = key_phys;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	memzero_explicit(keybuf, key_size);

	dma_free_coherent(__scm->dev, key_size, keybuf, key_phys);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_ice_set_key);
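
/*
 * A sketch of how a storage driver programs and later evicts a key ("slot"
 * and "key" are illustrative; the real callers are the UFS/eMMC crypto glue):
 *
 *	ret = qcom_scm_ice_set_key(slot, key->raw, key->size,
 *				   QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
 *	...
 *	ret = qcom_scm_ice_invalidate_key(slot);
 *
 * The key buffer is copied and wiped inside qcom_scm_ice_set_key(), so the
 * caller only has to manage its own copy.
 */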

/**
 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
 *
 * Return true if HDCP is supported, false if not.
 */
bool qcom_scm_hdcp_available(void)
{
	bool avail;
	int ret = qcom_scm_clk_enable();

	if (ret)
		return false;

	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
					     QCOM_SCM_HDCP_INVOKE);

	qcom_scm_clk_disable();

	return avail;
}
EXPORT_SYMBOL_GPL(qcom_scm_hdcp_available);

/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req: HDCP request array
 * @req_cnt: HDCP request array count
 * @resp: response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 */
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_HDCP,
		.cmd = QCOM_SCM_HDCP_INVOKE,
		.arginfo = QCOM_SCM_ARGS(10),
		.args = {
			req[0].addr,
			req[0].val,
			req[1].addr,
			req[1].val,
			req[2].addr,
			req[2].val,
			req[3].addr,
			req[3].val,
			req[4].addr,
			req[4].val
		},
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
		return -ERANGE;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	*resp = res.result[0];

	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_hdcp_req);

int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_PT_FORMAT,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = sec_id,
		.args[1] = ctx_num,
		.args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_iommu_set_pt_format);

int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
		.args[1] = en,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_qsmmu500_wait_safe_toggle);

bool qcom_scm_lmh_dcvsh_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH,
					    QCOM_SCM_LMH_LIMIT_DCVSH);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh_available);

int qcom_scm_lmh_profile_change(u32 profile_id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
		.args[0] = profile_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_profile_change);

int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
		       u64 limit_node, u32 node_id, u64 version)
{
	dma_addr_t payload_phys;
	u32 *payload_buf;
	int ret, payload_size = 5 * sizeof(u32);

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[1] = payload_size,
		.args[2] = limit_node,
		.args[3] = node_id,
		.args[4] = version,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL);
	if (!payload_buf)
		return -ENOMEM;

	payload_buf[0] = payload_fn;
	payload_buf[1] = 0;
	payload_buf[2] = payload_reg;
	payload_buf[3] = 1;
	payload_buf[4] = payload_val;

	desc.args[0] = payload_phys;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	dma_free_coherent(__scm->dev, payload_size, payload_buf, payload_phys);
	return ret;
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh);

static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}

/**
 * qcom_scm_is_available() - Checks if SCM is available
 */
bool qcom_scm_is_available(void)
{
	return !!READ_ONCE(__scm);
}
EXPORT_SYMBOL_GPL(qcom_scm_is_available);
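
/*
 * Consumer drivers are expected to gate their probe on this check, since the
 * SCM device may come up after them. A common pattern (sketch):
 *
 *	if (!qcom_scm_is_available())
 *		return -EPROBE_DEFER;
 *
 * The "qcom,dload-mode" parsing above expects a <phandle, offset> pair
 * pointing into the TCSR block, e.g. in the device tree (offset is
 * illustrative and platform-specific):
 *
 *	qcom,dload-mode = <&tcsr 0x13000>;
 */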

static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx)
{
	/*
	 * FW currently only supports a single wq_ctx (zero).
	 * TODO: Update this logic to include dynamic allocation and lookup of
	 * completion structs when FW supports more wq_ctx values.
	 */
	if (wq_ctx != 0) {
		dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n");
		return -EINVAL;
	}

	return 0;
}

int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
{
	int ret;

	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
	if (ret)
		return ret;

	wait_for_completion(&__scm->waitq_comp);

	return 0;
}

static int qcom_scm_waitq_wakeup(struct qcom_scm *scm, unsigned int wq_ctx)
{
	int ret;

	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
	if (ret)
		return ret;

	complete(&__scm->waitq_comp);

	return 0;
}

static irqreturn_t qcom_scm_irq_handler(int irq, void *data)
{
	int ret;
	struct qcom_scm *scm = data;
	u32 wq_ctx, flags, more_pending = 0;

	do {
		ret = scm_get_wq_ctx(&wq_ctx, &flags, &more_pending);
		if (ret) {
			dev_err(scm->dev, "GET_WQ_CTX SMC call failed: %d\n", ret);
			goto out;
		}

		if (flags != QCOM_SMC_WAITQ_FLAG_WAKE_ONE &&
		    flags != QCOM_SMC_WAITQ_FLAG_WAKE_ALL) {
			dev_err(scm->dev, "Invalid flags found for wq_ctx: %u\n", flags);
			goto out;
		}

		ret = qcom_scm_waitq_wakeup(scm, wq_ctx);
		if (ret)
			goto out;
	} while (more_pending);

out:
	return IRQ_HANDLED;
}
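
/*
 * The wait-queue handshake above, in sketch form: a blocking SMC returns a
 * "wait" code, the caller parks in qcom_scm_wait_for_wq_completion(), and when
 * firmware is ready it raises the wq interrupt; the handler queries GET_WQ_CTX
 * and completes the matching waiter:
 *
 *	caller:   SMC returns wait -> qcom_scm_wait_for_wq_completion(wq_ctx)
 *	firmware: raises IRQ
 *	handler:  scm_get_wq_ctx() -> qcom_scm_waitq_wakeup() -> complete()
 */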

static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_scm *scm;
	int irq, ret;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	scm->dev = &pdev->dev;
	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
	if (ret < 0)
		return ret;

	init_completion(&scm->waitq_comp);
	mutex_init(&scm->scm_bw_lock);

	scm->path = devm_of_icc_get(&pdev->dev, NULL);
	if (IS_ERR(scm->path))
		return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
				     "failed to acquire interconnect path\n");

	scm->core_clk = devm_clk_get_optional(&pdev->dev, "core");
	if (IS_ERR(scm->core_clk))
		return PTR_ERR(scm->core_clk);

	scm->iface_clk = devm_clk_get_optional(&pdev->dev, "iface");
	if (IS_ERR(scm->iface_clk))
		return PTR_ERR(scm->iface_clk);

	scm->bus_clk = devm_clk_get_optional(&pdev->dev, "bus");
	if (IS_ERR(scm->bus_clk))
		return PTR_ERR(scm->bus_clk);

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

	/* vote for max clk rate for highest performance */
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	/* Let all above stores be available after this */
	smp_store_release(&__scm, scm);

	irq = platform_get_irq_optional(pdev, 0);
	if (irq < 0) {
		if (irq != -ENXIO)
			return irq;
	} else {
		ret = devm_request_threaded_irq(__scm->dev, irq, NULL, qcom_scm_irq_handler,
						IRQF_ONESHOT, "qcom-scm", __scm);
		if (ret < 0)
			return dev_err_probe(scm->dev, ret, "Failed to request qcom-scm irq\n");
	}

	__get_convention();

	/*
	 * If requested, enable "download mode". From this point on a warm boot
	 * will cause the boot stages to enter download mode, unless disabled
	 * below by a clean shutdown/reboot.
	 */
	if (download_mode)
		qcom_scm_set_download_mode(true);

	return 0;
}

static void qcom_scm_shutdown(struct platform_device *pdev)
{
	/* Clean shutdown, disable download mode to allow normal restart */
	qcom_scm_set_download_mode(false);
}

static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm" },

	/* Legacy entries kept for backwards compatibility */
	{ .compatible = "qcom,scm-apq8064" },
	{ .compatible = "qcom,scm-apq8084" },
	{ .compatible = "qcom,scm-ipq4019" },
	{ .compatible = "qcom,scm-msm8953" },
	{ .compatible = "qcom,scm-msm8974" },
	{ .compatible = "qcom,scm-msm8996" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);

static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name = "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};

static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
MODULE_LICENSE("GPL v2");