xref: /openbmc/linux/drivers/firmware/qcom_scm.c (revision 52beb1fc)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/reset-controller.h>
#include <linux/arm-smccc.h>

#include "qcom_scm.h"

static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
module_param(download_mode, bool, 0);

#define SCM_HAS_CORE_CLK	BIT(0)
#define SCM_HAS_IFACE_CLK	BIT(1)
#define SCM_HAS_BUS_CLK		BIT(2)

struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct reset_controller_dev reset;

	u64 dload_mode_addr;
};

struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};

/* Each bit configures cold/warm boot address for one of the 4 CPUs */
static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	0, BIT(0), BIT(3), BIT(5)
};
static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
	BIT(2), BIT(1), BIT(4), BIT(6)
};

static const char * const qcom_scm_convention_names[] = {
	[SMC_CONVENTION_UNKNOWN] = "unknown",
	[SMC_CONVENTION_ARM_32] = "smc arm 32",
	[SMC_CONVENTION_ARM_64] = "smc arm 64",
	[SMC_CONVENTION_LEGACY] = "smc legacy",
};

static struct qcom_scm *__scm;

static int qcom_scm_clk_enable(void)
{
	int ret;

	ret = clk_prepare_enable(__scm->core_clk);
	if (ret)
		goto bail;

	ret = clk_prepare_enable(__scm->iface_clk);
	if (ret)
		goto disable_core;

	ret = clk_prepare_enable(__scm->bus_clk);
	if (ret)
		goto disable_iface;

	return 0;

disable_iface:
	clk_disable_unprepare(__scm->iface_clk);
disable_core:
	clk_disable_unprepare(__scm->core_clk);
bail:
	return ret;
}

static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}

enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
static DEFINE_SPINLOCK(scm_query_lock);

static enum qcom_scm_convention __get_convention(void)
{
	unsigned long flags;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
					QCOM_SCM_INFO_IS_CALL_AVAIL) |
			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	enum qcom_scm_convention probed_convention;
	int ret;
	bool forced = false;

	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
		return qcom_scm_convention;

	/*
	 * Device isn't required as there is only one argument - no device
	 * needed to dma_map_single to secure world
	 */
	probed_convention = SMC_CONVENTION_ARM_64;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	/*
	 * Some SC7180 firmware did not implement the
	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing the
	 * ARM_64 calling convention on that firmware. Luckily we don't make
	 * any early calls into the firmware on these SoCs, so the device
	 * pointer will be valid here to check if the compatible matches.
	 */
	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
		forced = true;
		goto found;
	}

	probed_convention = SMC_CONVENTION_ARM_32;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	probed_convention = SMC_CONVENTION_LEGACY;
found:
	spin_lock_irqsave(&scm_query_lock, flags);
	if (probed_convention != qcom_scm_convention) {
		qcom_scm_convention = probed_convention;
		pr_info("qcom_scm: convention: %s%s\n",
			qcom_scm_convention_names[qcom_scm_convention],
			forced ? " (forced)" : "");
	}
	spin_unlock_irqrestore(&scm_query_lock, flags);

	return qcom_scm_convention;
}

/**
 * qcom_scm_call() - Invoke a syscall in the secure world
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This should *only* be called in pre-emptible context.
 */
static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
			 struct qcom_scm_res *res)
{
	might_sleep();
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, false);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

/**
 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This can be called in atomic context.
 */
static int qcom_scm_call_atomic(struct device *dev,
				const struct qcom_scm_desc *desc,
				struct qcom_scm_res *res)
{
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, true);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call_atomic(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}

static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
					 u32 cmd_id)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	desc.arginfo = QCOM_SCM_ARGS(1);
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
		break;
	case SMC_CONVENTION_LEGACY:
		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
		break;
	default:
		pr_err("Unknown SMC convention being used\n");
		return false;
	}

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? false : !!res.result[0];
}

static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
{
	int cpu;
	unsigned int flags = 0;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR,
		.arginfo = QCOM_SCM_ARGS(2),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	for_each_present_cpu(cpu) {
		if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
			return -EINVAL;
		flags |= cpu_bits[cpu];
	}

	desc.args[0] = flags;
	desc.args[1] = virt_to_phys(entry);

	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}

/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
 * @entry: Entry point function for the cpus
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry)
{
	return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
}
EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
 * @entry: Entry point function for the cpus
 */
int qcom_scm_set_cold_boot_addr(void *entry)
{
	return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
}
EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
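
/*
 * Example (illustrative sketch only; the entry symbols are hypothetical):
 * platform PM/SMP code would typically install its entry points once during
 * init, e.g.:
 *
 *	ret = qcom_scm_set_warm_boot_addr(cpu_resume_entry);
 *	if (ret)
 *		return ret;
 *	ret = qcom_scm_set_cold_boot_addr(cpu_cold_entry);
 */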

/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags:	Flags to flush cache
 *
 * This is the endpoint for powering down a CPU. If there was a pending
 * interrupt, control returns from this function; otherwise, the CPU jumps to
 * the warm boot entry point set for this CPU upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_cpu_power_down);
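
/*
 * Example (illustrative sketch): a cpuidle driver's deepest state handler
 * might flush the L2 and terminate the power context; control returns from
 * qcom_scm_cpu_power_down() only if a pending interrupt aborts the power
 * down:
 *
 *	qcom_scm_cpu_power_down(QCOM_SCM_CPU_PWR_DOWN_L2_OFF);
 */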

int qcom_scm_set_remote_state(u32 state, u32 id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = state,
		.args[1] = id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_set_remote_state);

static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}

static void qcom_scm_set_download_mode(bool enable)
{
	bool avail;
	int ret = 0;

	avail = __qcom_scm_is_call_available(__scm->dev,
					     QCOM_SCM_SVC_BOOT,
					     QCOM_SCM_BOOT_SET_DLOAD_MODE);
	if (avail) {
		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
	} else if (__scm->dload_mode_addr) {
		ret = qcom_scm_io_writel(__scm->dload_mode_addr,
				enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
	} else {
		dev_err(__scm->dev,
			"No available mechanism for setting download mode\n");
	}

	if (ret)
		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
}

/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @peripheral: peripheral id
 * @metadata:	pointer to memory containing ELF header, program header table
 *		and optional blob of data used for authenticating the metadata
 *		and the rest of the firmware
 * @size:	size of the metadata
 * @ctx:	optional metadata context
 *
 * Return: 0 on success.
 *
 * Upon successful return, the PAS metadata context (@ctx) will be used to
 * track the metadata allocation; the caller must release it by invoking
 * qcom_scm_pas_metadata_release().
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
			    struct qcom_scm_pas_metadata *ctx)
{
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	/*
	 * During the SCM call, memory protection will be enabled for the
	 * metadata blob, so make sure it's physically contiguous, 4K-aligned
	 * and non-cacheable to avoid XPU violations.
	 */
	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf) {
		dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
		return -ENOMEM;
	}
	memcpy(mdata_buf, metadata, size);

	ret = qcom_scm_clk_enable();
	if (ret)
		goto out;

	desc.args[1] = mdata_phys;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_clk_disable();

out:
	if (ret < 0 || !ctx) {
		dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
	} else if (ctx) {
		ctx->ptr = mdata_buf;
		ctx->phys = mdata_phys;
		ctx->size = size;
	}

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_init_image);

/**
 * qcom_scm_pas_metadata_release() - release metadata context
 * @ctx:	metadata context
 */
void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx)
{
	if (!ctx->ptr)
		return;

	dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);

	ctx->ptr = NULL;
	ctx->phys = 0;
	ctx->size = 0;
}
EXPORT_SYMBOL(qcom_scm_pas_metadata_release);

/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *			      for firmware loading
 * @peripheral:	peripheral id
 * @addr:	start address of memory area to prepare
 * @size:	size of the memory area to prepare
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = peripheral,
		.args[1] = addr,
		.args[2] = size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_mem_setup);

/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *				   and reset the remote processor
 * @peripheral:	peripheral id
 *
 * Return 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);

/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_clk_disable();

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_shutdown);
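
/*
 * Example (illustrative sketch of the PAS boot sequence as a remoteproc
 * driver might issue it; pas_id, mem_phys and mem_size are hypothetical):
 *
 *	struct qcom_scm_pas_metadata ctx = {};
 *
 *	ret = qcom_scm_pas_init_image(pas_id, fw->data, fw->size, &ctx);
 *	if (ret)
 *		return ret;
 *	ret = qcom_scm_pas_mem_setup(pas_id, mem_phys, mem_size);
 *	if (!ret) {
 *		(load the firmware segments into the prepared region here)
 *		ret = qcom_scm_pas_auth_and_reset(pas_id);
 *	}
 *	qcom_scm_pas_metadata_release(&ctx);
 *
 * and on teardown the peripheral is stopped with qcom_scm_pas_shutdown(pas_id).
 */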

/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
 * @peripheral:	peripheral id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 peripheral)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
		return false;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? false : !!res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_supported);

static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = reset,
		.args[1] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}

static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
				     unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
				       unsigned long idx)
{
	if (idx != 0)
		return -EINVAL;

	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};

int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_READ,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = addr,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
	if (ret >= 0)
		*val = res.result[0];

	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL(qcom_scm_io_readl);

int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_IO,
		.cmd = QCOM_SCM_IO_WRITE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = addr,
		.args[1] = val,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_io_writel);
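
/*
 * Example (illustrative sketch; the register address and bit are
 * hypothetical): a secure register can be updated with a read-modify-write
 * sequence through the secure world:
 *
 *	unsigned int val;
 *
 *	ret = qcom_scm_io_readl(tcsr_reg_phys, &val);
 *	if (ret)
 *		return ret;
 *	ret = qcom_scm_io_writel(tcsr_reg_phys, val | enable_bit);
 */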

/**
 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
 * supports restore security config interface.
 *
 * Return true if restore-cfg interface is supported, false if not.
 */
bool qcom_scm_restore_sec_cfg_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
					    QCOM_SCM_MP_RESTORE_SEC_CFG);
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available);

int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = device_id,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);

int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	if (size)
		*size = res.result[0];

	return ret ? : res.result[1];
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);

int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = addr,
		.args[1] = size,
		.args[2] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	int ret;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	/* the pg table has been initialized already, ignore the error */
	if (ret == -EPERM)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
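
/*
 * Example (illustrative sketch, loosely modelled on how a secure SMMU driver
 * might hand the secure world its page-table pool; the allocation details
 * are hypothetical):
 *
 *	size_t psize;
 *	void *ptbl;
 *	dma_addr_t paddr;
 *
 *	ret = qcom_scm_iommu_secure_ptbl_size(0, &psize);
 *	if (ret)
 *		return ret;
 *	ptbl = dma_alloc_coherent(dev, psize, &paddr, GFP_KERNEL);
 *	if (!ptbl)
 *		return -ENOMEM;
 *	ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize, 0);
 */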

int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = size,
		.args[1] = spare,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_iommu_set_cp_pool_size);

int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
				   u32 cp_nonpixel_start,
				   u32 cp_nonpixel_size)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_VIDEO_VAR,
		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = cp_start,
		.args[1] = cp_size,
		.args[2] = cp_nonpixel_start,
		.args[3] = cp_nonpixel_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_mem_protect_video_var);

static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
				 size_t mem_sz, phys_addr_t src, size_t src_sz,
				 phys_addr_t dest, size_t dest_sz)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_MP,
		.cmd = QCOM_SCM_MP_ASSIGN,
		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
					 QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[0] = mem_region,
		.args[1] = mem_sz,
		.args[2] = src,
		.args[3] = src_sz,
		.args[4] = dest,
		.args[5] = dest_sz,
		.args[6] = 0,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	ret = qcom_scm_call(dev, &desc, &res);

	return ret ? : res.result[0];
}

/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: mem region whose ownership needs to be reassigned
 * @mem_sz:   size of the region.
 * @srcvm:    vmid bitmap for the current set of owners; each set bit
 *            indicates a unique owner
 * @newvm:    array of new owners and their corresponding permission
 *            flags
 * @dest_cnt: number of owners in the next set.
 *
 * Return negative errno on failure or 0 on success with @srcvm updated.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			unsigned int *srcvm,
			const struct qcom_scm_vmperm *newvm,
			unsigned int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	dma_addr_t ptr_phys;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	void *ptr;
	int ret, i, b;
	unsigned long srcvm_bits = *srcvm;

	src_sz = hweight_long(srcvm_bits) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
			ALIGN(dest_sz, SZ_64);

	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	/* Fill source vmid detail */
	src = ptr;
	i = 0;
	for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG)
		src[i++] = cpu_to_le32(b);

	/* Fill details of mem buff to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
	mem_to_map->mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
	/* Fill details of the next set of vmids */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
		destvm->vmid = cpu_to_le32(newvm->vmid);
		destvm->perm = cpu_to_le32(newvm->perm);
		destvm->ctx = 0;
		destvm->ctx_size = 0;
		next_vm |= BIT(newvm->vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d\n", ret);
		return -EINVAL;
	}

	*srcvm = next_vm;
	return 0;
}
EXPORT_SYMBOL(qcom_scm_assign_mem);
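
/*
 * Example (illustrative sketch; the VMIDs are chosen for illustration):
 * handing a carveout from HLOS to the modem. @srcvm is updated in place, so
 * the same variable can be passed back in to return the memory later:
 *
 *	struct qcom_scm_vmperm newvm[] = {
 *		{ QCOM_SCM_VMID_MSS_MSA, QCOM_SCM_PERM_RW },
 *	};
 *	unsigned int srcvm = BIT(QCOM_SCM_VMID_HLOS);
 *
 *	ret = qcom_scm_assign_mem(mem_phys, mem_size, &srcvm,
 *				  newvm, ARRAY_SIZE(newvm));
 */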

/**
 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
 */
bool qcom_scm_ocmem_lock_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
					    QCOM_SCM_OCMEM_LOCK_CMD);
}
EXPORT_SYMBOL(qcom_scm_ocmem_lock_available);

/**
 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
 * region to the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 * @mode:   access mode (WIDE/NARROW)
 */
int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
			u32 mode)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.args[3] = mode,
		.arginfo = QCOM_SCM_ARGS(4),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_ocmem_lock);

/**
 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
 * region from the specified initiator
 *
 * @id:     tz initiator id
 * @offset: OCMEM offset
 * @size:   OCMEM size
 */
int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_OCMEM,
		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
		.args[0] = id,
		.args[1] = offset,
		.args[2] = size,
		.arginfo = QCOM_SCM_ARGS(3),
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_ocmem_unlock);
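
/*
 * Example (illustrative sketch; offset, len and mode are hypothetical): an
 * OCMEM slice is granted to the GPU and later released:
 *
 *	ret = qcom_scm_ocmem_lock(QCOM_SCM_OCMEM_GRAPHICS_ID, offset, len, mode);
 *	...
 *	ret = qcom_scm_ocmem_unlock(QCOM_SCM_OCMEM_GRAPHICS_ID, offset, len);
 */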

/**
 * qcom_scm_ice_available() - Is the ICE key programming interface available?
 *
 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
 *	   qcom_scm_ice_set_key() are available.
 */
bool qcom_scm_ice_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
}
EXPORT_SYMBOL(qcom_scm_ice_available);

/**
 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
 * @index: the keyslot to invalidate
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_invalidate_key(u32 index)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(1),
		.args[0] = index,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_ice_invalidate_key);

/**
 * qcom_scm_ice_set_key() - Set an inline encryption key
 * @index: the keyslot into which to set the key
 * @key: the key to program
 * @key_size: the size of the key in bytes
 * @cipher: the encryption algorithm the key is for
 * @data_unit_size: the encryption data unit size, i.e. the size of each
 *		    individual plaintext and ciphertext.  Given in 512-byte
 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
 *
 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
					 QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = index,
		.args[2] = key_size,
		.args[3] = cipher,
		.args[4] = data_unit_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	void *keybuf;
	dma_addr_t key_phys;
	int ret;

	/*
	 * 'key' may point to vmalloc()'ed memory, but we need to pass a
	 * physical address that's been properly flushed.  The sanctioned way to
	 * do this is by using the DMA API.  But as is best practice for crypto
	 * keys, we also must wipe the key after use.  This makes kmemdup() +
	 * dma_map_single() not clearly correct, since the DMA API can use
	 * bounce buffers.  Instead, just use dma_alloc_coherent().  Programming
	 * keys is normally rare and thus not performance-critical.
	 */

	keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys,
				    GFP_KERNEL);
	if (!keybuf)
		return -ENOMEM;
	memcpy(keybuf, key, key_size);
	desc.args[1] = key_phys;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	memzero_explicit(keybuf, key_size);

	dma_free_coherent(__scm->dev, key_size, keybuf, key_phys);
	return ret;
}
EXPORT_SYMBOL(qcom_scm_ice_set_key);
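
/*
 * Example (illustrative sketch): programming a 64-byte AES-256-XTS key for
 * 4096-byte crypto data units (8 * 512) into keyslot "slot", and wiping it
 * again on teardown:
 *
 *	err = qcom_scm_ice_set_key(slot, key, 64,
 *				   QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
 *	...
 *	err = qcom_scm_ice_invalidate_key(slot);
 */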

/**
 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
 *
 * Return true if HDCP is supported, false if not.
 */
bool qcom_scm_hdcp_available(void)
{
	bool avail;
	int ret = qcom_scm_clk_enable();

	if (ret)
		return false;

	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
						QCOM_SCM_HDCP_INVOKE);

	qcom_scm_clk_disable();

	return avail;
}
EXPORT_SYMBOL(qcom_scm_hdcp_available);

/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req: HDCP request array
 * @req_cnt: HDCP request array count
 * @resp: response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 */
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_HDCP,
		.cmd = QCOM_SCM_HDCP_INVOKE,
		.arginfo = QCOM_SCM_ARGS(10),
		.args = {
			req[0].addr,
			req[0].val,
			req[1].addr,
			req[1].val,
			req[2].addr,
			req[2].val,
			req[3].addr,
			req[3].val,
			req[4].addr,
			req[4].val
		},
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
		return -ERANGE;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	*resp = res.result[0];

	qcom_scm_clk_disable();

	return ret;
}
EXPORT_SYMBOL(qcom_scm_hdcp_req);
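
/*
 * Example (illustrative sketch; the register address and value are
 * hypothetical): the descriptor always consumes five address/value pairs,
 * so callers pass a zero-filled array of QCOM_SCM_HDCP_MAX_REQ_CNT entries
 * even when fewer are used:
 *
 *	struct qcom_scm_hdcp_req req[QCOM_SCM_HDCP_MAX_REQ_CNT] = {
 *		{ .addr = hdcp_ctrl_reg, .val = ctrl_bits },
 *	};
 *	u32 resp;
 *
 *	ret = qcom_scm_hdcp_req(req, 1, &resp);
 */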

int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_PT_FORMAT,
		.arginfo = QCOM_SCM_ARGS(3),
		.args[0] = sec_id,
		.args[1] = ctx_num,
		.args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_iommu_set_pt_format);

int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
		.arginfo = QCOM_SCM_ARGS(2),
		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
		.args[1] = en,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);

bool qcom_scm_lmh_dcvsh_available(void)
{
	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH,
					    QCOM_SCM_LMH_LIMIT_DCVSH);
}
EXPORT_SYMBOL(qcom_scm_lmh_dcvsh_available);

int qcom_scm_lmh_profile_change(u32 profile_id)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
		.args[0] = profile_id,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL(qcom_scm_lmh_profile_change);

int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
		       u64 limit_node, u32 node_id, u64 version)
{
	dma_addr_t payload_phys;
	u32 *payload_buf;
	int ret, payload_size = 5 * sizeof(u32);

	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_LMH,
		.cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
					QCOM_SCM_VAL, QCOM_SCM_VAL),
		.args[1] = payload_size,
		.args[2] = limit_node,
		.args[3] = node_id,
		.args[4] = version,
		.owner = ARM_SMCCC_OWNER_SIP,
	};

	payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL);
	if (!payload_buf)
		return -ENOMEM;

	payload_buf[0] = payload_fn;
	payload_buf[1] = 0;
	payload_buf[2] = payload_reg;
	payload_buf[3] = 1;
	payload_buf[4] = payload_val;

	desc.args[0] = payload_phys;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	dma_free_coherent(__scm->dev, payload_size, payload_buf, payload_phys);
	return ret;
}
EXPORT_SYMBOL(qcom_scm_lmh_dcvsh);
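
/*
 * Example (illustrative sketch; the algorithm/sub-function identifiers are
 * firmware specific and hypothetical here): the payload written above is
 * the quintuple { payload_fn, 0, payload_reg, 1, payload_val }, so a
 * thermal driver enabling one LMH algorithm might issue:
 *
 *	ret = qcom_scm_lmh_dcvsh(lmh_fn_id, lmh_enable_reg, 1,
 *				 limit_node, node_id, version);
 */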

static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
	struct device_node *tcsr;
	struct device_node *np = dev->of_node;
	struct resource res;
	u32 offset;
	int ret;

	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
	if (!tcsr)
		return 0;

	ret = of_address_to_resource(tcsr, 0, &res);
	of_node_put(tcsr);
	if (ret)
		return ret;

	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
	if (ret < 0)
		return ret;

	*addr = res.start + offset;

	return 0;
}

/**
 * qcom_scm_is_available() - Checks if SCM is available
 */
bool qcom_scm_is_available(void)
{
	return !!__scm;
}
EXPORT_SYMBOL(qcom_scm_is_available);

static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_scm *scm;
	unsigned long clks;
	int ret;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
	if (ret < 0)
		return ret;

	clks = (unsigned long)of_device_get_match_data(&pdev->dev);

	scm->core_clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(scm->core_clk)) {
		if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->core_clk);

		if (clks & SCM_HAS_CORE_CLK) {
			dev_err(&pdev->dev, "failed to acquire core clk\n");
			return PTR_ERR(scm->core_clk);
		}

		scm->core_clk = NULL;
	}

	scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(scm->iface_clk)) {
		if (PTR_ERR(scm->iface_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->iface_clk);

		if (clks & SCM_HAS_IFACE_CLK) {
			dev_err(&pdev->dev, "failed to acquire iface clk\n");
			return PTR_ERR(scm->iface_clk);
		}

		scm->iface_clk = NULL;
	}

	scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (IS_ERR(scm->bus_clk)) {
		if (PTR_ERR(scm->bus_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->bus_clk);

		if (clks & SCM_HAS_BUS_CLK) {
			dev_err(&pdev->dev, "failed to acquire bus clk\n");
			return PTR_ERR(scm->bus_clk);
		}

		scm->bus_clk = NULL;
	}

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

	/* vote for max clk rate for highest performance */
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	__scm = scm;
	__scm->dev = &pdev->dev;

	__get_convention();

	/*
	 * If requested, enable "download mode". From this point on, a warm
	 * boot will cause the boot stages to enter download mode, unless
	 * this is disabled again by a clean shutdown/reboot below.
	 */
	if (download_mode)
		qcom_scm_set_download_mode(true);

	return 0;
}

static void qcom_scm_shutdown(struct platform_device *pdev)
{
	/* Clean shutdown, disable download mode to allow normal restart */
	if (download_mode)
		qcom_scm_set_download_mode(false);
}

static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm-apq8064",
	  /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
	},
	{ .compatible = "qcom,scm-apq8084", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-ipq4019" },
	{ .compatible = "qcom,scm-mdm9607", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK) },
	{ .compatible = "qcom,scm-msm8660", .data = (void *) SCM_HAS_CORE_CLK },
	{ .compatible = "qcom,scm-msm8960", .data = (void *) SCM_HAS_CORE_CLK },
	{ .compatible = "qcom,scm-msm8916", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8953", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8994" },
	{ .compatible = "qcom,scm-msm8996" },
	{ .compatible = "qcom,scm" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);

static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name	= "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
		.suppress_bind_attrs = true,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};

static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
MODULE_LICENSE("GPL v2");