/*
 * Qualcomm SCM driver
 *
 * Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/reset-controller.h>

#include "qcom_scm.h"

#define SCM_HAS_CORE_CLK	BIT(0)
#define SCM_HAS_IFACE_CLK	BIT(1)
#define SCM_HAS_BUS_CLK		BIT(2)

struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct reset_controller_dev reset;
};

static struct qcom_scm *__scm;

static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}

static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the cold boot address of the cpus. Any cpu outside the supported
 * range will be removed from the cpu present mask.
 */
int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
	return __qcom_scm_set_cold_boot_addr(entry, cpus);
}
EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);

/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
{
	return __qcom_scm_set_warm_boot_addr(__scm->dev, entry, cpus);
}
EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);

/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags: Flags to flush cache
 *
 * This is an end point to power down a cpu. If there was a pending interrupt,
 * control will return from this function; otherwise the cpu jumps to the
 * warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	__qcom_scm_cpu_power_down(flags);
}
EXPORT_SYMBOL(qcom_scm_cpu_power_down);

/**
 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
 *
 * Return true if HDCP is supported, false if not.
 */
bool qcom_scm_hdcp_available(void)
{
	int ret = qcom_scm_clk_enable();

	if (ret)
		return false;

	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
					   QCOM_SCM_CMD_HDCP);

	qcom_scm_clk_disable();

	return ret > 0;
}
EXPORT_SYMBOL(qcom_scm_hdcp_available);

/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req: HDCP request array
 * @req_cnt: HDCP request array count
 * @resp: response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 */
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
	int ret = qcom_scm_clk_enable();

	if (ret)
		return ret;

	ret = __qcom_scm_hdcp_req(__scm->dev, req, req_cnt, resp);
	qcom_scm_clk_disable();
	return ret;
}
EXPORT_SYMBOL(qcom_scm_hdcp_req);

/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
 * @peripheral: peripheral id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 peripheral)
{
	int ret;

	ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					   QCOM_SCM_PAS_IS_SUPPORTED_CMD);
	if (ret <= 0)
		return false;

	return __qcom_scm_pas_supported(__scm->dev, peripheral);
}
EXPORT_SYMBOL(qcom_scm_pas_supported);

/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @peripheral: peripheral id
 * @metadata: pointer to memory containing ELF header, program header table
 *	      and optional blob of data used for authenticating the metadata
 *	      and the rest of the firmware
 * @size: size of the metadata
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
{
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;

	/*
	 * During the scm call memory protection will be enabled for the
	 * metadata blob, so make sure it's physically contiguous, 4K aligned
	 * and non-cacheable to avoid XPU violations.
	 */
	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf) {
		dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
		return -ENOMEM;
	}
	memcpy(mdata_buf, metadata, size);

	ret = qcom_scm_clk_enable();
	if (ret)
		goto free_metadata;

	ret = __qcom_scm_pas_init_image(__scm->dev, peripheral, mdata_phys);

	qcom_scm_clk_disable();

free_metadata:
	dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);

	return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_init_image);

/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @peripheral: peripheral id
 * @addr: start address of memory area to prepare
 * @size: size of the memory area to prepare
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
	int ret;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = __qcom_scm_pas_mem_setup(__scm->dev, peripheral, addr, size);
	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_mem_setup);

/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @peripheral: peripheral id
 *
 * Return 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
	int ret;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = __qcom_scm_pas_auth_and_reset(__scm->dev, peripheral);
	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);

/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 peripheral)
{
	int ret;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = __qcom_scm_pas_shutdown(__scm->dev, peripheral);
	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_shutdown);

static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
					unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};

/**
 * qcom_scm_is_available() - Checks if SCM is available
 */
bool qcom_scm_is_available(void)
{
	return !!__scm;
}
EXPORT_SYMBOL(qcom_scm_is_available);

static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_scm *scm;
	unsigned long clks;
	int ret;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	clks = (unsigned long)of_device_get_match_data(&pdev->dev);
	if (clks & SCM_HAS_CORE_CLK) {
		scm->core_clk = devm_clk_get(&pdev->dev, "core");
		if (IS_ERR(scm->core_clk)) {
			if (PTR_ERR(scm->core_clk) != -EPROBE_DEFER)
				dev_err(&pdev->dev,
					"failed to acquire core clk\n");
			return PTR_ERR(scm->core_clk);
		}
	}

	if (clks & SCM_HAS_IFACE_CLK) {
		scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
		if (IS_ERR(scm->iface_clk)) {
			if (PTR_ERR(scm->iface_clk) != -EPROBE_DEFER)
				dev_err(&pdev->dev,
					"failed to acquire iface clk\n");
			return PTR_ERR(scm->iface_clk);
		}
	}

	if (clks & SCM_HAS_BUS_CLK) {
		scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
		if (IS_ERR(scm->bus_clk)) {
			if (PTR_ERR(scm->bus_clk) != -EPROBE_DEFER)
				dev_err(&pdev->dev,
					"failed to acquire bus clk\n");
			return PTR_ERR(scm->bus_clk);
		}
	}

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

	/* vote for max clk rate for highest performance */
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	__scm = scm;
	__scm->dev = &pdev->dev;

	__qcom_scm_init();

	return 0;
}

static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm-apq8064",
	  .data = (void *) SCM_HAS_CORE_CLK,
	},
	{ .compatible = "qcom,scm-msm8660",
	  .data = (void *) SCM_HAS_CORE_CLK,
	},
	{ .compatible = "qcom,scm-msm8960",
	  .data = (void *) SCM_HAS_CORE_CLK,
	},
	{ .compatible = "qcom,scm-msm8996",
	  .data = NULL, /* no clocks */
	},
	{ .compatible = "qcom,scm",
	  .data = (void *)(SCM_HAS_CORE_CLK
			   | SCM_HAS_IFACE_CLK
			   | SCM_HAS_BUS_CLK),
	},
	{}
};

static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name	= "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
	},
	.probe = qcom_scm_probe,
};

static int __init qcom_scm_init(void)
{
	struct device_node *np, *fw_np;
	int ret;

	fw_np = of_find_node_by_name(NULL, "firmware");
	if (!fw_np)
		return -ENODEV;

	np = of_find_matching_node(fw_np, qcom_scm_dt_match);
	if (!np) {
		of_node_put(fw_np);
		return -ENODEV;
	}

	of_node_put(np);

	ret = of_platform_populate(fw_np, qcom_scm_dt_match, NULL, NULL);
	of_node_put(fw_np);
	if (ret)
		return ret;

	return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);
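
/*
 * Illustrative sketch only, not used by this driver: the order in which a
 * peripheral loader (for example a remoteproc driver) would typically call
 * the PAS helpers exported above. The function name and all parameters here
 * (pas_id, the metadata buffer, the firmware memory region) are hypothetical
 * placeholders that a real caller derives from its firmware image and device
 * tree.
 */
static int __maybe_unused qcom_scm_example_pas_boot(u32 pas_id,
						    const void *metadata,
						    size_t metadata_size,
						    phys_addr_t mem_phys,
						    phys_addr_t mem_size)
{
	int ret;

	/* Bail out if the secure monitor or PAS itself is unavailable. */
	if (!qcom_scm_is_available() || !qcom_scm_pas_supported(pas_id))
		return -ENODEV;

	/* Hand the firmware metadata (ELF/program headers) to the secure world. */
	ret = qcom_scm_pas_init_image(pas_id, metadata, metadata_size);
	if (ret)
		return ret;

	/*
	 * Describe the physically contiguous region reserved for the firmware.
	 * A real loader copies the firmware segments into this region before
	 * the auth_and_reset call below.
	 */
	ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, mem_size);
	if (ret)
		return ret;

	/* Authenticate the loaded segments and take the processor out of reset. */
	return qcom_scm_pas_auth_and_reset(pas_id);
}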