xref: /openbmc/linux/drivers/firmware/qcom_scm.c (revision 87922aec)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
3  * Copyright (C) 2015 Linaro Ltd.
4  */
5 #include <linux/platform_device.h>
6 #include <linux/init.h>
7 #include <linux/cpumask.h>
8 #include <linux/export.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/module.h>
11 #include <linux/types.h>
12 #include <linux/qcom_scm.h>
13 #include <linux/of.h>
14 #include <linux/of_address.h>
15 #include <linux/of_platform.h>
16 #include <linux/clk.h>
17 #include <linux/reset-controller.h>
18 #include <linux/arm-smccc.h>
19 
20 #include <asm/smp_plat.h>
21 
22 #include "qcom_scm.h"
23 
/* Whether to leave the SoC in "download" (crash-dump) mode on reboot;
 * defaults to the Kconfig choice, overridable on the kernel command line.
 */
static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
module_param(download_mode, bool, 0);
26 
/* Bitmask of clocks a given SoC's SCM interface requires (per-compatible). */
#define SCM_HAS_CORE_CLK	BIT(0)
#define SCM_HAS_IFACE_CLK	BIT(1)
#define SCM_HAS_BUS_CLK		BIT(2)

/* Driver-instance state; a single instance is published through __scm. */
struct qcom_scm {
	struct device *dev;
	struct clk *core_clk;	/* may be NULL when the SoC needs no clocks */
	struct clk *iface_clk;
	struct clk *bus_clk;
	struct reset_controller_dev reset;	/* exposes the MSS PAS reset */

	/* I/O address poked to enter download mode when no SCM call exists */
	u64 dload_mode_addr;
};
40 
/*
 * Destination-VM descriptor consumed by the QCOM_SCM_MP_ASSIGN call;
 * all fields are little-endian as required by the secure world.
 */
struct qcom_scm_current_perm_info {
	__le32 vmid;
	__le32 perm;
	__le64 ctx;
	__le32 ctx_size;
	__le32 unused;
};

/* Little-endian description of one memory region for QCOM_SCM_MP_ASSIGN. */
struct qcom_scm_mem_map_info {
	__le64 mem_addr;
	__le64 mem_size;
};
53 
/* Per-CPU flag bits understood by the legacy BOOT_SET_ADDR call.
 * Note the bit assignments are firmware-defined and not sequential.
 */
#define QCOM_SCM_FLAG_COLDBOOT_CPU0	0x00
#define QCOM_SCM_FLAG_COLDBOOT_CPU1	0x01
#define QCOM_SCM_FLAG_COLDBOOT_CPU2	0x08
#define QCOM_SCM_FLAG_COLDBOOT_CPU3	0x20

#define QCOM_SCM_FLAG_WARMBOOT_CPU0	0x04
#define QCOM_SCM_FLAG_WARMBOOT_CPU1	0x02
#define QCOM_SCM_FLAG_WARMBOOT_CPU2	0x10
#define QCOM_SCM_FLAG_WARMBOOT_CPU3	0x40

/* Tracks the warm-boot entry currently programmed for one CPU. */
struct qcom_scm_wb_entry {
	int flag;	/* QCOM_SCM_FLAG_WARMBOOT_CPUn bit for this CPU */
	void *entry;	/* last entry point handed to firmware, or NULL */
};

/* Only four CPUs are addressable via the legacy flag encoding above. */
static struct qcom_scm_wb_entry qcom_scm_wb[] = {
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
};
75 
/* Human-readable names for the probed calling convention (log output). */
static const char * const qcom_scm_convention_names[] = {
	[SMC_CONVENTION_UNKNOWN] = "unknown",
	[SMC_CONVENTION_ARM_32] = "smc arm 32",
	[SMC_CONVENTION_ARM_64] = "smc arm 64",
	[SMC_CONVENTION_LEGACY] = "smc legacy",
};

/* Singleton driver instance, set at probe time; NULL until then. */
static struct qcom_scm *__scm;
84 
85 static int qcom_scm_clk_enable(void)
86 {
87 	int ret;
88 
89 	ret = clk_prepare_enable(__scm->core_clk);
90 	if (ret)
91 		goto bail;
92 
93 	ret = clk_prepare_enable(__scm->iface_clk);
94 	if (ret)
95 		goto disable_core;
96 
97 	ret = clk_prepare_enable(__scm->bus_clk);
98 	if (ret)
99 		goto disable_iface;
100 
101 	return 0;
102 
103 disable_iface:
104 	clk_disable_unprepare(__scm->iface_clk);
105 disable_core:
106 	clk_disable_unprepare(__scm->core_clk);
107 bail:
108 	return ret;
109 }
110 
/*
 * Disable all SCM clocks. NOTE(review): this is not the reverse of the
 * enable order (core is dropped first) — preserved as-is since the three
 * clocks are independent here; confirm against clock-tree expectations.
 */
static void qcom_scm_clk_disable(void)
{
	clk_disable_unprepare(__scm->core_clk);
	clk_disable_unprepare(__scm->iface_clk);
	clk_disable_unprepare(__scm->bus_clk);
}
117 
/* Cached calling convention; probed lazily by __get_convention(). */
enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
/* Serializes updates to qcom_scm_convention during probing. */
static DEFINE_SPINLOCK(scm_query_lock);
120 
/*
 * Determine (and cache) which SMC calling convention the firmware speaks:
 * try ARM_64, then ARM_32, and fall back to the legacy convention.  The
 * probe itself issues an IS_CALL_AVAIL query in the convention under test;
 * a successful call returning 1 means that convention is supported.
 */
static enum qcom_scm_convention __get_convention(void)
{
	unsigned long flags;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
					   QCOM_SCM_INFO_IS_CALL_AVAIL) |
			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;
	enum qcom_scm_convention probed_convention;
	int ret;
	bool forced = false;

	/* Fast path: already probed. */
	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
		return qcom_scm_convention;

	/*
	 * Device isn't required as there is only one argument - no device
	 * needed to dma_map_single to secure world
	 */
	probed_convention = SMC_CONVENTION_ARM_64;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	/*
	 * Some SC7180 firmwares didn't implement the
	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fallback to forcing ARM_64
	 * calling conventions on these firmwares. Luckily we don't make any
	 * early calls into the firmware on these SoCs so the device pointer
	 * will be valid here to check if the compatible matches.
	 */
	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
		forced = true;
		goto found;
	}

	probed_convention = SMC_CONVENTION_ARM_32;
	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
	if (!ret && res.result[0] == 1)
		goto found;

	probed_convention = SMC_CONVENTION_LEGACY;
found:
	/* Publish the result once, under the lock; log only on first set. */
	spin_lock_irqsave(&scm_query_lock, flags);
	if (probed_convention != qcom_scm_convention) {
		qcom_scm_convention = probed_convention;
		pr_info("qcom_scm: convention: %s%s\n",
			qcom_scm_convention_names[qcom_scm_convention],
			forced ? " (forced)" : "");
	}
	spin_unlock_irqrestore(&scm_query_lock, flags);

	return qcom_scm_convention;
}
180 
/**
 * qcom_scm_call() - Invoke a syscall in the secure world
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call, may be NULL
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This should *only* be called in pre-emptible context.
 */
static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
			 struct qcom_scm_res *res)
{
	might_sleep();
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, false);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}
206 
/**
 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
 * @dev:	device
 * @desc:	Descriptor structure containing arguments and return values
 * @res:	Structure containing results from SMC/HVC call, may be NULL
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This can be called in atomic context.
 */
static int qcom_scm_call_atomic(struct device *dev,
				const struct qcom_scm_desc *desc,
				struct qcom_scm_res *res)
{
	switch (__get_convention()) {
	case SMC_CONVENTION_ARM_32:
	case SMC_CONVENTION_ARM_64:
		return scm_smc_call(dev, desc, res, true);
	case SMC_CONVENTION_LEGACY:
		return scm_legacy_call_atomic(dev, desc, res);
	default:
		pr_err("Unknown current SCM calling convention.\n");
		return -EINVAL;
	}
}
233 
234 static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
235 					 u32 cmd_id)
236 {
237 	int ret;
238 	struct qcom_scm_desc desc = {
239 		.svc = QCOM_SCM_SVC_INFO,
240 		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
241 		.owner = ARM_SMCCC_OWNER_SIP,
242 	};
243 	struct qcom_scm_res res;
244 
245 	desc.arginfo = QCOM_SCM_ARGS(1);
246 	switch (__get_convention()) {
247 	case SMC_CONVENTION_ARM_32:
248 	case SMC_CONVENTION_ARM_64:
249 		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
250 				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
251 		break;
252 	case SMC_CONVENTION_LEGACY:
253 		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
254 		break;
255 	default:
256 		pr_err("Unknown SMC convention being used\n");
257 		return false;
258 	}
259 
260 	ret = qcom_scm_call(dev, &desc, &res);
261 
262 	return ret ? false : !!res.result[0];
263 }
264 
/*
 * Program a boot entry point using the multi-cluster BOOT_SET_ADDR_MC call.
 * Each selected CPU contributes one bit per MPIDR affinity level to the
 * aff0/aff1/aff2 masks passed in args[1..3].
 *
 * Returns -EOPNOTSUPP when the call cannot be made (no probed device yet,
 * or legacy convention, which cannot DMA-map the extra arguments).
 */
static int __qcom_scm_set_boot_addr_mc(void *entry, const cpumask_t *cpus,
				       unsigned int flags)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_BOOT,
		.cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
		.owner = ARM_SMCCC_OWNER_SIP,
		.arginfo = QCOM_SCM_ARGS(6),
	};
	unsigned int cpu;
	u64 map;

	/* Need a device for DMA of the additional arguments */
	if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
		return -EOPNOTSUPP;

	desc.args[0] = virt_to_phys(entry);
	for_each_cpu(cpu, cpus) {
		map = cpu_logical_map(cpu);
		desc.args[1] |= BIT(MPIDR_AFFINITY_LEVEL(map, 0));
		desc.args[2] |= BIT(MPIDR_AFFINITY_LEVEL(map, 1));
		desc.args[3] |= BIT(MPIDR_AFFINITY_LEVEL(map, 2));
	}
	desc.args[4] = ~0ULL; /* Reserved for affinity level 3 */
	desc.args[5] = flags;

	return qcom_scm_call(__scm->dev, &desc, NULL);
}
293 
294 static int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
295 {
296 	int ret;
297 	int flags = 0;
298 	int cpu;
299 	struct qcom_scm_desc desc = {
300 		.svc = QCOM_SCM_SVC_BOOT,
301 		.cmd = QCOM_SCM_BOOT_SET_ADDR,
302 		.arginfo = QCOM_SCM_ARGS(2),
303 	};
304 
305 	/*
306 	 * Reassign only if we are switching from hotplug entry point
307 	 * to cpuidle entry point or vice versa.
308 	 */
309 	for_each_cpu(cpu, cpus) {
310 		if (entry == qcom_scm_wb[cpu].entry)
311 			continue;
312 		flags |= qcom_scm_wb[cpu].flag;
313 	}
314 
315 	/* No change in entry function */
316 	if (!flags)
317 		return 0;
318 
319 	desc.args[0] = flags;
320 	desc.args[1] = virt_to_phys(entry);
321 
322 	ret = qcom_scm_call(__scm->dev, &desc, NULL);
323 	if (!ret) {
324 		for_each_cpu(cpu, cpus)
325 			qcom_scm_wb[cpu].entry = entry;
326 	}
327 
328 	return ret;
329 }
330 
331 /**
332  * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
333  * @entry: Entry point function for the cpus
334  * @cpus: The cpumask of cpus that will use the entry point
335  *
336  * Set the Linux entry point for the SCM to transfer control to when coming
337  * out of a power down. CPU power down may be executed on cpuidle or hotplug.
338  */
339 int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
340 {
341 	if (!cpus || cpumask_empty(cpus))
342 		return -EINVAL;
343 
344 	if (__qcom_scm_set_boot_addr_mc(entry, cpus, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
345 		/* Fallback to old SCM call */
346 		return __qcom_scm_set_warm_boot_addr(entry, cpus);
347 	return 0;
348 }
349 EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
350 
351 static int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
352 {
353 	int flags = 0;
354 	int cpu;
355 	int scm_cb_flags[] = {
356 		QCOM_SCM_FLAG_COLDBOOT_CPU0,
357 		QCOM_SCM_FLAG_COLDBOOT_CPU1,
358 		QCOM_SCM_FLAG_COLDBOOT_CPU2,
359 		QCOM_SCM_FLAG_COLDBOOT_CPU3,
360 	};
361 	struct qcom_scm_desc desc = {
362 		.svc = QCOM_SCM_SVC_BOOT,
363 		.cmd = QCOM_SCM_BOOT_SET_ADDR,
364 		.arginfo = QCOM_SCM_ARGS(2),
365 		.owner = ARM_SMCCC_OWNER_SIP,
366 	};
367 
368 	for_each_cpu(cpu, cpus) {
369 		if (cpu < ARRAY_SIZE(scm_cb_flags))
370 			flags |= scm_cb_flags[cpu];
371 		else
372 			set_cpu_present(cpu, false);
373 	}
374 
375 	desc.args[0] = flags;
376 	desc.args[1] = virt_to_phys(entry);
377 
378 	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
379 }
380 
381 /**
382  * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
383  * @entry: Entry point function for the cpus
384  * @cpus: The cpumask of cpus that will use the entry point
385  *
386  * Set the cold boot address of the cpus. Any cpu outside the supported
387  * range would be removed from the cpu present mask.
388  */
389 int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
390 {
391 	if (!cpus || cpumask_empty(cpus))
392 		return -EINVAL;
393 
394 	if (__qcom_scm_set_boot_addr_mc(entry, cpus, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
395 		/* Fallback to old SCM call */
396 		return __qcom_scm_set_cold_boot_addr(entry, cpus);
397 	return 0;
398 }
399 EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
400 
401 /**
402  * qcom_scm_cpu_power_down() - Power down the cpu
403  * @flags - Flags to flush cache
404  *
405  * This is an end point to power down cpu. If there was a pending interrupt,
406  * the control would return from this function, otherwise, the cpu jumps to the
407  * warm boot entry point set for this cpu upon reset.
408  */
409 void qcom_scm_cpu_power_down(u32 flags)
410 {
411 	struct qcom_scm_desc desc = {
412 		.svc = QCOM_SCM_SVC_BOOT,
413 		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
414 		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
415 		.arginfo = QCOM_SCM_ARGS(1),
416 		.owner = ARM_SMCCC_OWNER_SIP,
417 	};
418 
419 	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
420 }
421 EXPORT_SYMBOL(qcom_scm_cpu_power_down);
422 
423 int qcom_scm_set_remote_state(u32 state, u32 id)
424 {
425 	struct qcom_scm_desc desc = {
426 		.svc = QCOM_SCM_SVC_BOOT,
427 		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
428 		.arginfo = QCOM_SCM_ARGS(2),
429 		.args[0] = state,
430 		.args[1] = id,
431 		.owner = ARM_SMCCC_OWNER_SIP,
432 	};
433 	struct qcom_scm_res res;
434 	int ret;
435 
436 	ret = qcom_scm_call(__scm->dev, &desc, &res);
437 
438 	return ret ? : res.result[0];
439 }
440 EXPORT_SYMBOL(qcom_scm_set_remote_state);
441 
442 static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
443 {
444 	struct qcom_scm_desc desc = {
445 		.svc = QCOM_SCM_SVC_BOOT,
446 		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
447 		.arginfo = QCOM_SCM_ARGS(2),
448 		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
449 		.owner = ARM_SMCCC_OWNER_SIP,
450 	};
451 
452 	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;
453 
454 	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
455 }
456 
457 static void qcom_scm_set_download_mode(bool enable)
458 {
459 	bool avail;
460 	int ret = 0;
461 
462 	avail = __qcom_scm_is_call_available(__scm->dev,
463 					     QCOM_SCM_SVC_BOOT,
464 					     QCOM_SCM_BOOT_SET_DLOAD_MODE);
465 	if (avail) {
466 		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
467 	} else if (__scm->dload_mode_addr) {
468 		ret = qcom_scm_io_writel(__scm->dload_mode_addr,
469 				enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
470 	} else {
471 		dev_err(__scm->dev,
472 			"No available mechanism for setting download mode\n");
473 	}
474 
475 	if (ret)
476 		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
477 }
478 
/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *			       state machine for a given peripheral, using the
 *			       metadata
 * @peripheral: peripheral id
 * @metadata:	pointer to memory containing ELF header, program header table
 *		and optional blob of data used for authenticating the metadata
 *		and the rest of the firmware
 * @size:	size of the metadata
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
{
	dma_addr_t mdata_phys;
	void *mdata_buf;
	int ret;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_PIL,
		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
		.args[0] = peripheral,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	struct qcom_scm_res res;

	/*
	 * During the scm call memory protection will be enabled for the meta
	 * data blob, so make sure it's physically contiguous, 4K aligned and
	 * non-cachable to avoid XPU violations.
	 */
	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
				       GFP_KERNEL);
	if (!mdata_buf) {
		dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
		return -ENOMEM;
	}
	memcpy(mdata_buf, metadata, size);

	ret = qcom_scm_clk_enable();
	if (ret)
		goto free_metadata;

	/* args[1] is declared QCOM_SCM_RW above: pass the DMA address. */
	desc.args[1] = mdata_phys;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_clk_disable();

free_metadata:
	/* Safe to free once the call has returned; firmware keeps a copy. */
	dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);

	/* res is only read when ret == 0, i.e. after a successful call. */
	return ret ? : res.result[0];
}
EXPORT_SYMBOL(qcom_scm_pas_init_image);
534 
535 /**
536  * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
537  *			      for firmware loading
538  * @peripheral:	peripheral id
539  * @addr:	start address of memory area to prepare
540  * @size:	size of the memory area to prepare
541  *
542  * Returns 0 on success.
543  */
544 int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
545 {
546 	int ret;
547 	struct qcom_scm_desc desc = {
548 		.svc = QCOM_SCM_SVC_PIL,
549 		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
550 		.arginfo = QCOM_SCM_ARGS(3),
551 		.args[0] = peripheral,
552 		.args[1] = addr,
553 		.args[2] = size,
554 		.owner = ARM_SMCCC_OWNER_SIP,
555 	};
556 	struct qcom_scm_res res;
557 
558 	ret = qcom_scm_clk_enable();
559 	if (ret)
560 		return ret;
561 
562 	ret = qcom_scm_call(__scm->dev, &desc, &res);
563 	qcom_scm_clk_disable();
564 
565 	return ret ? : res.result[0];
566 }
567 EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
568 
569 /**
570  * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
571  *				   and reset the remote processor
572  * @peripheral:	peripheral id
573  *
574  * Return 0 on success.
575  */
576 int qcom_scm_pas_auth_and_reset(u32 peripheral)
577 {
578 	int ret;
579 	struct qcom_scm_desc desc = {
580 		.svc = QCOM_SCM_SVC_PIL,
581 		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
582 		.arginfo = QCOM_SCM_ARGS(1),
583 		.args[0] = peripheral,
584 		.owner = ARM_SMCCC_OWNER_SIP,
585 	};
586 	struct qcom_scm_res res;
587 
588 	ret = qcom_scm_clk_enable();
589 	if (ret)
590 		return ret;
591 
592 	ret = qcom_scm_call(__scm->dev, &desc, &res);
593 	qcom_scm_clk_disable();
594 
595 	return ret ? : res.result[0];
596 }
597 EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
598 
599 /**
600  * qcom_scm_pas_shutdown() - Shut down the remote processor
601  * @peripheral: peripheral id
602  *
603  * Returns 0 on success.
604  */
605 int qcom_scm_pas_shutdown(u32 peripheral)
606 {
607 	int ret;
608 	struct qcom_scm_desc desc = {
609 		.svc = QCOM_SCM_SVC_PIL,
610 		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
611 		.arginfo = QCOM_SCM_ARGS(1),
612 		.args[0] = peripheral,
613 		.owner = ARM_SMCCC_OWNER_SIP,
614 	};
615 	struct qcom_scm_res res;
616 
617 	ret = qcom_scm_clk_enable();
618 	if (ret)
619 		return ret;
620 
621 	ret = qcom_scm_call(__scm->dev, &desc, &res);
622 
623 	qcom_scm_clk_disable();
624 
625 	return ret ? : res.result[0];
626 }
627 EXPORT_SYMBOL(qcom_scm_pas_shutdown);
628 
629 /**
630  * qcom_scm_pas_supported() - Check if the peripheral authentication service is
631  *			      available for the given peripherial
632  * @peripheral:	peripheral id
633  *
634  * Returns true if PAS is supported for this peripheral, otherwise false.
635  */
636 bool qcom_scm_pas_supported(u32 peripheral)
637 {
638 	int ret;
639 	struct qcom_scm_desc desc = {
640 		.svc = QCOM_SCM_SVC_PIL,
641 		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
642 		.arginfo = QCOM_SCM_ARGS(1),
643 		.args[0] = peripheral,
644 		.owner = ARM_SMCCC_OWNER_SIP,
645 	};
646 	struct qcom_scm_res res;
647 
648 	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
649 					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
650 		return false;
651 
652 	ret = qcom_scm_call(__scm->dev, &desc, &res);
653 
654 	return ret ? false : !!res.result[0];
655 }
656 EXPORT_SYMBOL(qcom_scm_pas_supported);
657 
658 static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
659 {
660 	struct qcom_scm_desc desc = {
661 		.svc = QCOM_SCM_SVC_PIL,
662 		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
663 		.arginfo = QCOM_SCM_ARGS(2),
664 		.args[0] = reset,
665 		.args[1] = 0,
666 		.owner = ARM_SMCCC_OWNER_SIP,
667 	};
668 	struct qcom_scm_res res;
669 	int ret;
670 
671 	ret = qcom_scm_call(__scm->dev, &desc, &res);
672 
673 	return ret ? : res.result[0];
674 }
675 
676 static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
677 				     unsigned long idx)
678 {
679 	if (idx != 0)
680 		return -EINVAL;
681 
682 	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
683 }
684 
685 static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
686 				       unsigned long idx)
687 {
688 	if (idx != 0)
689 		return -EINVAL;
690 
691 	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
692 }
693 
/* Reset-controller ops backing the single MSS PAS reset line. */
static const struct reset_control_ops qcom_scm_pas_reset_ops = {
	.assert = qcom_scm_pas_reset_assert,
	.deassert = qcom_scm_pas_reset_deassert,
};
698 
699 int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
700 {
701 	struct qcom_scm_desc desc = {
702 		.svc = QCOM_SCM_SVC_IO,
703 		.cmd = QCOM_SCM_IO_READ,
704 		.arginfo = QCOM_SCM_ARGS(1),
705 		.args[0] = addr,
706 		.owner = ARM_SMCCC_OWNER_SIP,
707 	};
708 	struct qcom_scm_res res;
709 	int ret;
710 
711 
712 	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
713 	if (ret >= 0)
714 		*val = res.result[0];
715 
716 	return ret < 0 ? ret : 0;
717 }
718 EXPORT_SYMBOL(qcom_scm_io_readl);
719 
720 int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
721 {
722 	struct qcom_scm_desc desc = {
723 		.svc = QCOM_SCM_SVC_IO,
724 		.cmd = QCOM_SCM_IO_WRITE,
725 		.arginfo = QCOM_SCM_ARGS(2),
726 		.args[0] = addr,
727 		.args[1] = val,
728 		.owner = ARM_SMCCC_OWNER_SIP,
729 	};
730 
731 	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
732 }
733 EXPORT_SYMBOL(qcom_scm_io_writel);
734 
735 /**
736  * qcom_scm_restore_sec_cfg_available() - Check if secure environment
737  * supports restore security config interface.
738  *
739  * Return true if restore-cfg interface is supported, false if not.
740  */
741 bool qcom_scm_restore_sec_cfg_available(void)
742 {
743 	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
744 					    QCOM_SCM_MP_RESTORE_SEC_CFG);
745 }
746 EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available);
747 
748 int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
749 {
750 	struct qcom_scm_desc desc = {
751 		.svc = QCOM_SCM_SVC_MP,
752 		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
753 		.arginfo = QCOM_SCM_ARGS(2),
754 		.args[0] = device_id,
755 		.args[1] = spare,
756 		.owner = ARM_SMCCC_OWNER_SIP,
757 	};
758 	struct qcom_scm_res res;
759 	int ret;
760 
761 	ret = qcom_scm_call(__scm->dev, &desc, &res);
762 
763 	return ret ? : res.result[0];
764 }
765 EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);
766 
767 int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
768 {
769 	struct qcom_scm_desc desc = {
770 		.svc = QCOM_SCM_SVC_MP,
771 		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
772 		.arginfo = QCOM_SCM_ARGS(1),
773 		.args[0] = spare,
774 		.owner = ARM_SMCCC_OWNER_SIP,
775 	};
776 	struct qcom_scm_res res;
777 	int ret;
778 
779 	ret = qcom_scm_call(__scm->dev, &desc, &res);
780 
781 	if (size)
782 		*size = res.result[0];
783 
784 	return ret ? : res.result[1];
785 }
786 EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);
787 
788 int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
789 {
790 	struct qcom_scm_desc desc = {
791 		.svc = QCOM_SCM_SVC_MP,
792 		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
793 		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
794 					 QCOM_SCM_VAL),
795 		.args[0] = addr,
796 		.args[1] = size,
797 		.args[2] = spare,
798 		.owner = ARM_SMCCC_OWNER_SIP,
799 	};
800 	int ret;
801 
802 	desc.args[0] = addr;
803 	desc.args[1] = size;
804 	desc.args[2] = spare;
805 	desc.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
806 				     QCOM_SCM_VAL);
807 
808 	ret = qcom_scm_call(__scm->dev, &desc, NULL);
809 
810 	/* the pg table has been initialized already, ignore the error */
811 	if (ret == -EPERM)
812 		ret = 0;
813 
814 	return ret;
815 }
816 EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
817 
818 int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
819 				   u32 cp_nonpixel_start,
820 				   u32 cp_nonpixel_size)
821 {
822 	int ret;
823 	struct qcom_scm_desc desc = {
824 		.svc = QCOM_SCM_SVC_MP,
825 		.cmd = QCOM_SCM_MP_VIDEO_VAR,
826 		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
827 					 QCOM_SCM_VAL, QCOM_SCM_VAL),
828 		.args[0] = cp_start,
829 		.args[1] = cp_size,
830 		.args[2] = cp_nonpixel_start,
831 		.args[3] = cp_nonpixel_size,
832 		.owner = ARM_SMCCC_OWNER_SIP,
833 	};
834 	struct qcom_scm_res res;
835 
836 	ret = qcom_scm_call(__scm->dev, &desc, &res);
837 
838 	return ret ? : res.result[0];
839 }
840 EXPORT_SYMBOL(qcom_scm_mem_protect_video_var);
841 
842 static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
843 				 size_t mem_sz, phys_addr_t src, size_t src_sz,
844 				 phys_addr_t dest, size_t dest_sz)
845 {
846 	int ret;
847 	struct qcom_scm_desc desc = {
848 		.svc = QCOM_SCM_SVC_MP,
849 		.cmd = QCOM_SCM_MP_ASSIGN,
850 		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
851 					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
852 					 QCOM_SCM_VAL, QCOM_SCM_VAL),
853 		.args[0] = mem_region,
854 		.args[1] = mem_sz,
855 		.args[2] = src,
856 		.args[3] = src_sz,
857 		.args[4] = dest,
858 		.args[5] = dest_sz,
859 		.args[6] = 0,
860 		.owner = ARM_SMCCC_OWNER_SIP,
861 	};
862 	struct qcom_scm_res res;
863 
864 	ret = qcom_scm_call(dev, &desc, &res);
865 
866 	return ret ? : res.result[0];
867 }
868 
/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: mem region whose ownership need to be reassigned
 * @mem_sz:   size of the region.
 * @srcvm:    vmid for current set of owners, each set bit in
 *            flag indicate a unique owner
 * @newvm:    array having new owners and corresponding permission
 *            flags
 * @dest_cnt: number of owners in next set.
 *
 * Return negative errno on failure or 0 on success with @srcvm updated.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			unsigned int *srcvm,
			const struct qcom_scm_vmperm *newvm,
			unsigned int dest_cnt)
{
	struct qcom_scm_current_perm_info *destvm;
	struct qcom_scm_mem_map_info *mem_to_map;
	phys_addr_t mem_to_map_phys;
	phys_addr_t dest_phys;
	dma_addr_t ptr_phys;
	size_t mem_to_map_sz;
	size_t dest_sz;
	size_t src_sz;
	size_t ptr_sz;
	int next_vm;
	__le32 *src;
	void *ptr;
	int ret, i, b;
	unsigned long srcvm_bits = *srcvm;

	/*
	 * One DMA buffer holds three firmware-visible tables, each aligned
	 * to 64 bytes: source vmids, the mem-map entry, destination perms.
	 */
	src_sz = hweight_long(srcvm_bits) * sizeof(*src);
	mem_to_map_sz = sizeof(*mem_to_map);
	dest_sz = dest_cnt * sizeof(*destvm);
	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
			ALIGN(dest_sz, SZ_64);

	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	/* Fill source vmid detail */
	src = ptr;
	i = 0;
	for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG)
		src[i++] = cpu_to_le32(b);

	/* Fill details of mem buff to map */
	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
	mem_to_map->mem_size = cpu_to_le64(mem_sz);

	next_vm = 0;
	/* Fill details of next vmid detail */
	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
		destvm->vmid = cpu_to_le32(newvm->vmid);
		destvm->perm = cpu_to_le32(newvm->perm);
		destvm->ctx = 0;
		destvm->ctx_size = 0;
		/* NOTE(review): assumes vmid < BITS_PER_LONG — confirm */
		next_vm |= BIT(newvm->vmid);
	}

	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
				    ptr_phys, src_sz, dest_phys, dest_sz);
	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
	if (ret) {
		dev_err(__scm->dev,
			"Assign memory protection call failed %d\n", ret);
		return -EINVAL;
	}

	/* Report the new owner set back to the caller only on success. */
	*srcvm = next_vm;
	return 0;
}
EXPORT_SYMBOL(qcom_scm_assign_mem);
948 
949 /**
950  * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
951  */
952 bool qcom_scm_ocmem_lock_available(void)
953 {
954 	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
955 					    QCOM_SCM_OCMEM_LOCK_CMD);
956 }
957 EXPORT_SYMBOL(qcom_scm_ocmem_lock_available);
958 
959 /**
960  * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
961  * region to the specified initiator
962  *
963  * @id:     tz initiator id
964  * @offset: OCMEM offset
965  * @size:   OCMEM size
966  * @mode:   access mode (WIDE/NARROW)
967  */
968 int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
969 			u32 mode)
970 {
971 	struct qcom_scm_desc desc = {
972 		.svc = QCOM_SCM_SVC_OCMEM,
973 		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
974 		.args[0] = id,
975 		.args[1] = offset,
976 		.args[2] = size,
977 		.args[3] = mode,
978 		.arginfo = QCOM_SCM_ARGS(4),
979 	};
980 
981 	return qcom_scm_call(__scm->dev, &desc, NULL);
982 }
983 EXPORT_SYMBOL(qcom_scm_ocmem_lock);
984 
985 /**
986  * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
987  * region from the specified initiator
988  *
989  * @id:     tz initiator id
990  * @offset: OCMEM offset
991  * @size:   OCMEM size
992  */
993 int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
994 {
995 	struct qcom_scm_desc desc = {
996 		.svc = QCOM_SCM_SVC_OCMEM,
997 		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
998 		.args[0] = id,
999 		.args[1] = offset,
1000 		.args[2] = size,
1001 		.arginfo = QCOM_SCM_ARGS(3),
1002 	};
1003 
1004 	return qcom_scm_call(__scm->dev, &desc, NULL);
1005 }
1006 EXPORT_SYMBOL(qcom_scm_ocmem_unlock);
1007 
1008 /**
1009  * qcom_scm_ice_available() - Is the ICE key programming interface available?
1010  *
1011  * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
1012  *	   qcom_scm_ice_set_key() are available.
1013  */
1014 bool qcom_scm_ice_available(void)
1015 {
1016 	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
1017 					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
1018 		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
1019 					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
1020 }
1021 EXPORT_SYMBOL(qcom_scm_ice_available);
1022 
1023 /**
1024  * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
1025  * @index: the keyslot to invalidate
1026  *
1027  * The UFSHCI and eMMC standards define a standard way to do this, but it
1028  * doesn't work on these SoCs; only this SCM call does.
1029  *
1030  * It is assumed that the SoC has only one ICE instance being used, as this SCM
1031  * call doesn't specify which ICE instance the keyslot belongs to.
1032  *
1033  * Return: 0 on success; -errno on failure.
1034  */
1035 int qcom_scm_ice_invalidate_key(u32 index)
1036 {
1037 	struct qcom_scm_desc desc = {
1038 		.svc = QCOM_SCM_SVC_ES,
1039 		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
1040 		.arginfo = QCOM_SCM_ARGS(1),
1041 		.args[0] = index,
1042 		.owner = ARM_SMCCC_OWNER_SIP,
1043 	};
1044 
1045 	return qcom_scm_call(__scm->dev, &desc, NULL);
1046 }
1047 EXPORT_SYMBOL(qcom_scm_ice_invalidate_key);
1048 
/**
 * qcom_scm_ice_set_key() - Set an inline encryption key
 * @index: the keyslot into which to set the key
 * @key: the key to program
 * @key_size: the size of the key in bytes
 * @cipher: the encryption algorithm the key is for
 * @data_unit_size: the encryption data unit size, i.e. the size of each
 *		    individual plaintext and ciphertext.  Given in 512-byte
 *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
 *
 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
 * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
 *
 * The UFSHCI and eMMC standards define a standard way to do this, but it
 * doesn't work on these SoCs; only this SCM call does.
 *
 * It is assumed that the SoC has only one ICE instance being used, as this SCM
 * call doesn't specify which ICE instance the keyslot belongs to.
 *
 * Return: 0 on success; -errno on failure.
 */
int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_ES,
		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
					 QCOM_SCM_VAL, QCOM_SCM_VAL,
					 QCOM_SCM_VAL),
		.args[0] = index,
		.args[2] = key_size,
		.args[3] = cipher,
		.args[4] = data_unit_size,
		.owner = ARM_SMCCC_OWNER_SIP,
	};
	void *keybuf;
	dma_addr_t key_phys;
	int ret;

	/*
	 * 'key' may point to vmalloc()'ed memory, but we need to pass a
	 * physical address that's been properly flushed.  The sanctioned way to
	 * do this is by using the DMA API.  But as is best practice for crypto
	 * keys, we also must wipe the key after use.  This makes kmemdup() +
	 * dma_map_single() not clearly correct, since the DMA API can use
	 * bounce buffers.  Instead, just use dma_alloc_coherent().  Programming
	 * keys is normally rare and thus not performance-critical.
	 */

	keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys,
				    GFP_KERNEL);
	if (!keybuf)
		return -ENOMEM;
	memcpy(keybuf, key, key_size);
	/* args[1] is the RW buffer (see arginfo): the key's DMA address. */
	desc.args[1] = key_phys;

	ret = qcom_scm_call(__scm->dev, &desc, NULL);

	/* Wipe the key copy before the buffer is returned to the allocator. */
	memzero_explicit(keybuf, key_size);

	dma_free_coherent(__scm->dev, key_size, keybuf, key_phys);
	return ret;
}
EXPORT_SYMBOL(qcom_scm_ice_set_key);
1114 
1115 /**
1116  * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
1117  *
1118  * Return true if HDCP is supported, false if not.
1119  */
1120 bool qcom_scm_hdcp_available(void)
1121 {
1122 	bool avail;
1123 	int ret = qcom_scm_clk_enable();
1124 
1125 	if (ret)
1126 		return ret;
1127 
1128 	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
1129 						QCOM_SCM_HDCP_INVOKE);
1130 
1131 	qcom_scm_clk_disable();
1132 
1133 	return avail;
1134 }
1135 EXPORT_SYMBOL(qcom_scm_hdcp_available);
1136 
1137 /**
1138  * qcom_scm_hdcp_req() - Send HDCP request.
1139  * @req: HDCP request array
1140  * @req_cnt: HDCP request array count
1141  * @resp: response buffer passed to SCM
1142  *
1143  * Write HDCP register(s) through SCM.
1144  */
1145 int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
1146 {
1147 	int ret;
1148 	struct qcom_scm_desc desc = {
1149 		.svc = QCOM_SCM_SVC_HDCP,
1150 		.cmd = QCOM_SCM_HDCP_INVOKE,
1151 		.arginfo = QCOM_SCM_ARGS(10),
1152 		.args = {
1153 			req[0].addr,
1154 			req[0].val,
1155 			req[1].addr,
1156 			req[1].val,
1157 			req[2].addr,
1158 			req[2].val,
1159 			req[3].addr,
1160 			req[3].val,
1161 			req[4].addr,
1162 			req[4].val
1163 		},
1164 		.owner = ARM_SMCCC_OWNER_SIP,
1165 	};
1166 	struct qcom_scm_res res;
1167 
1168 	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
1169 		return -ERANGE;
1170 
1171 	ret = qcom_scm_clk_enable();
1172 	if (ret)
1173 		return ret;
1174 
1175 	ret = qcom_scm_call(__scm->dev, &desc, &res);
1176 	*resp = res.result[0];
1177 
1178 	qcom_scm_clk_disable();
1179 
1180 	return ret;
1181 }
1182 EXPORT_SYMBOL(qcom_scm_hdcp_req);
1183 
1184 int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
1185 {
1186 	struct qcom_scm_desc desc = {
1187 		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
1188 		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
1189 		.arginfo = QCOM_SCM_ARGS(2),
1190 		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
1191 		.args[1] = en,
1192 		.owner = ARM_SMCCC_OWNER_SIP,
1193 	};
1194 
1195 
1196 	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
1197 }
1198 EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);
1199 
1200 bool qcom_scm_lmh_dcvsh_available(void)
1201 {
1202 	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
1203 }
1204 EXPORT_SYMBOL(qcom_scm_lmh_dcvsh_available);
1205 
1206 int qcom_scm_lmh_profile_change(u32 profile_id)
1207 {
1208 	struct qcom_scm_desc desc = {
1209 		.svc = QCOM_SCM_SVC_LMH,
1210 		.cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
1211 		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
1212 		.args[0] = profile_id,
1213 		.owner = ARM_SMCCC_OWNER_SIP,
1214 	};
1215 
1216 	return qcom_scm_call(__scm->dev, &desc, NULL);
1217 }
1218 EXPORT_SYMBOL(qcom_scm_lmh_profile_change);
1219 
1220 int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
1221 		       u64 limit_node, u32 node_id, u64 version)
1222 {
1223 	dma_addr_t payload_phys;
1224 	u32 *payload_buf;
1225 	int ret, payload_size = 5 * sizeof(u32);
1226 
1227 	struct qcom_scm_desc desc = {
1228 		.svc = QCOM_SCM_SVC_LMH,
1229 		.cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
1230 		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
1231 					QCOM_SCM_VAL, QCOM_SCM_VAL),
1232 		.args[1] = payload_size,
1233 		.args[2] = limit_node,
1234 		.args[3] = node_id,
1235 		.args[4] = version,
1236 		.owner = ARM_SMCCC_OWNER_SIP,
1237 	};
1238 
1239 	payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL);
1240 	if (!payload_buf)
1241 		return -ENOMEM;
1242 
1243 	payload_buf[0] = payload_fn;
1244 	payload_buf[1] = 0;
1245 	payload_buf[2] = payload_reg;
1246 	payload_buf[3] = 1;
1247 	payload_buf[4] = payload_val;
1248 
1249 	desc.args[0] = payload_phys;
1250 
1251 	ret = qcom_scm_call(__scm->dev, &desc, NULL);
1252 
1253 	dma_free_coherent(__scm->dev, payload_size, payload_buf, payload_phys);
1254 	return ret;
1255 }
1256 EXPORT_SYMBOL(qcom_scm_lmh_dcvsh);
1257 
1258 static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
1259 {
1260 	struct device_node *tcsr;
1261 	struct device_node *np = dev->of_node;
1262 	struct resource res;
1263 	u32 offset;
1264 	int ret;
1265 
1266 	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
1267 	if (!tcsr)
1268 		return 0;
1269 
1270 	ret = of_address_to_resource(tcsr, 0, &res);
1271 	of_node_put(tcsr);
1272 	if (ret)
1273 		return ret;
1274 
1275 	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
1276 	if (ret < 0)
1277 		return ret;
1278 
1279 	*addr = res.start + offset;
1280 
1281 	return 0;
1282 }
1283 
1284 /**
1285  * qcom_scm_is_available() - Checks if SCM is available
1286  */
1287 bool qcom_scm_is_available(void)
1288 {
1289 	return !!__scm;
1290 }
1291 EXPORT_SYMBOL(qcom_scm_is_available);
1292 
/* Bind the SCM device: resolve clocks/reset, then publish the global __scm. */
static int qcom_scm_probe(struct platform_device *pdev)
{
	struct qcom_scm *scm;
	unsigned long clks;	/* SCM_HAS_*_CLK bitmask from OF match data */
	int ret;

	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
	if (!scm)
		return -ENOMEM;

	/* Optional; only set when DT carries a "qcom,dload-mode" property. */
	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
	if (ret < 0)
		return ret;

	clks = (unsigned long)of_device_get_match_data(&pdev->dev);

	/*
	 * For each clock: probe deferral is always propagated; a plain lookup
	 * failure is fatal only if the match data marks the clock mandatory,
	 * otherwise the pointer is cleared (clk_* APIs treat NULL as no-op).
	 */
	scm->core_clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(scm->core_clk)) {
		if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->core_clk);

		if (clks & SCM_HAS_CORE_CLK) {
			dev_err(&pdev->dev, "failed to acquire core clk\n");
			return PTR_ERR(scm->core_clk);
		}

		scm->core_clk = NULL;
	}

	scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(scm->iface_clk)) {
		if (PTR_ERR(scm->iface_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->iface_clk);

		if (clks & SCM_HAS_IFACE_CLK) {
			dev_err(&pdev->dev, "failed to acquire iface clk\n");
			return PTR_ERR(scm->iface_clk);
		}

		scm->iface_clk = NULL;
	}

	scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (IS_ERR(scm->bus_clk)) {
		if (PTR_ERR(scm->bus_clk) == -EPROBE_DEFER)
			return PTR_ERR(scm->bus_clk);

		if (clks & SCM_HAS_BUS_CLK) {
			dev_err(&pdev->dev, "failed to acquire bus clk\n");
			return PTR_ERR(scm->bus_clk);
		}

		scm->bus_clk = NULL;
	}

	scm->reset.ops = &qcom_scm_pas_reset_ops;
	scm->reset.nr_resets = 1;
	scm->reset.of_node = pdev->dev.of_node;
	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
	if (ret)
		return ret;

	/* vote for max clk rate for highest performance */
	ret = clk_set_rate(scm->core_clk, INT_MAX);
	if (ret)
		return ret;

	__scm = scm;
	__scm->dev = &pdev->dev;

	/*
	 * NOTE(review): called only after __scm is set — presumably
	 * __get_convention() needs __scm->dev to probe the calling convention.
	 */
	__get_convention();

	/*
	 * If requested enable "download mode", from this point on warmboot
	 * will cause the boot stages to enter download mode, unless
	 * disabled below by a clean shutdown/reboot.
	 */
	if (download_mode)
		qcom_scm_set_download_mode(true);

	return 0;
}
1375 
1376 static void qcom_scm_shutdown(struct platform_device *pdev)
1377 {
1378 	/* Clean shutdown, disable download mode to allow normal restart */
1379 	if (download_mode)
1380 		qcom_scm_set_download_mode(false);
1381 }
1382 
/*
 * Per-SoC match data is a bitmask of SCM_HAS_{CORE,IFACE,BUS}_CLK telling
 * qcom_scm_probe() which clocks are mandatory on that platform; entries
 * without .data require no clocks.
 */
static const struct of_device_id qcom_scm_dt_match[] = {
	{ .compatible = "qcom,scm-apq8064",
	  /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
	},
	{ .compatible = "qcom,scm-apq8084", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-ipq4019" },
	{ .compatible = "qcom,scm-mdm9607", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK) },
	{ .compatible = "qcom,scm-msm8660", .data = (void *) SCM_HAS_CORE_CLK },
	{ .compatible = "qcom,scm-msm8960", .data = (void *) SCM_HAS_CORE_CLK },
	{ .compatible = "qcom,scm-msm8916", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8953", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK |
							     SCM_HAS_IFACE_CLK |
							     SCM_HAS_BUS_CLK)
	},
	{ .compatible = "qcom,scm-msm8994" },
	{ .compatible = "qcom,scm-msm8996" },
	{ .compatible = "qcom,scm" },	/* generic fallback, no clocks */
	{}
};
MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);
1415 
static struct platform_driver qcom_scm_driver = {
	.driver = {
		.name	= "qcom_scm",
		.of_match_table = qcom_scm_dt_match,
		/*
		 * No .remove callback and probe publishes the global __scm;
		 * forbid manual unbind via sysfs.
		 */
		.suppress_bind_attrs = true,
	},
	.probe = qcom_scm_probe,
	.shutdown = qcom_scm_shutdown,
};
1425 
/* Register the SCM platform driver. */
static int __init qcom_scm_init(void)
{
	return platform_driver_register(&qcom_scm_driver);
}
/* subsys_initcall: registered early so consumers of SCM can probe later. */
subsys_initcall(qcom_scm_init);
1431 
1432 MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
1433 MODULE_LICENSE("GPL v2");
1434