1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
3  * Copyright (C) 2015 Linaro Ltd.
4  */
5 #include <linux/platform_device.h>
6 #include <linux/init.h>
7 #include <linux/cpumask.h>
8 #include <linux/export.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/interconnect.h>
11 #include <linux/module.h>
12 #include <linux/types.h>
13 #include <linux/qcom_scm.h>
14 #include <linux/of.h>
15 #include <linux/of_address.h>
16 #include <linux/of_platform.h>
17 #include <linux/clk.h>
18 #include <linux/reset-controller.h>
19 #include <linux/arm-smccc.h>
20 
21 #include "qcom_scm.h"
22 
23 static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
24 module_param(download_mode, bool, 0);
25 
26 #define SCM_HAS_CORE_CLK	BIT(0)
27 #define SCM_HAS_IFACE_CLK	BIT(1)
28 #define SCM_HAS_BUS_CLK		BIT(2)
29 
30 struct qcom_scm {
31 	struct device *dev;
32 	struct clk *core_clk;
33 	struct clk *iface_clk;
34 	struct clk *bus_clk;
35 	struct icc_path *path;
36 	struct reset_controller_dev reset;
37 
38 	/* control access to the interconnect path */
39 	struct mutex scm_bw_lock;
40 	int scm_vote_count;
41 
42 	u64 dload_mode_addr;
43 };
44 
45 struct qcom_scm_current_perm_info {
46 	__le32 vmid;
47 	__le32 perm;
48 	__le64 ctx;
49 	__le32 ctx_size;
50 	__le32 unused;
51 };
52 
53 struct qcom_scm_mem_map_info {
54 	__le64 mem_addr;
55 	__le64 mem_size;
56 };
57 
58 /* Each bit configures cold/warm boot address for one of the 4 CPUs */
59 static const u8 qcom_scm_cpu_cold_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
60 	0, BIT(0), BIT(3), BIT(5)
61 };
62 static const u8 qcom_scm_cpu_warm_bits[QCOM_SCM_BOOT_MAX_CPUS] = {
63 	BIT(2), BIT(1), BIT(4), BIT(6)
64 };
65 
66 static const char * const qcom_scm_convention_names[] = {
67 	[SMC_CONVENTION_UNKNOWN] = "unknown",
68 	[SMC_CONVENTION_ARM_32] = "smc arm 32",
69 	[SMC_CONVENTION_ARM_64] = "smc arm 64",
70 	[SMC_CONVENTION_LEGACY] = "smc legacy",
71 };
72 
73 static struct qcom_scm *__scm;
74 
75 static int qcom_scm_clk_enable(void)
76 {
77 	int ret;
78 
79 	ret = clk_prepare_enable(__scm->core_clk);
80 	if (ret)
81 		goto bail;
82 
83 	ret = clk_prepare_enable(__scm->iface_clk);
84 	if (ret)
85 		goto disable_core;
86 
87 	ret = clk_prepare_enable(__scm->bus_clk);
88 	if (ret)
89 		goto disable_iface;
90 
91 	return 0;
92 
93 disable_iface:
94 	clk_disable_unprepare(__scm->iface_clk);
95 disable_core:
96 	clk_disable_unprepare(__scm->core_clk);
97 bail:
98 	return ret;
99 }
100 
101 static void qcom_scm_clk_disable(void)
102 {
103 	clk_disable_unprepare(__scm->core_clk);
104 	clk_disable_unprepare(__scm->iface_clk);
105 	clk_disable_unprepare(__scm->bus_clk);
106 }
107 
108 static int qcom_scm_bw_enable(void)
109 {
110 	int ret = 0;
111 
112 	if (!__scm->path)
113 		return 0;
114 
115 	if (IS_ERR(__scm->path))
116 		return -EINVAL;
117 
118 	mutex_lock(&__scm->scm_bw_lock);
119 	if (!__scm->scm_vote_count) {
120 		ret = icc_set_bw(__scm->path, 0, UINT_MAX);
121 		if (ret < 0) {
122 			dev_err(__scm->dev, "failed to set bandwidth request\n");
123 			goto err_bw;
124 		}
125 	}
126 	__scm->scm_vote_count++;
127 err_bw:
128 	mutex_unlock(&__scm->scm_bw_lock);
129 
130 	return ret;
131 }
132 
133 static void qcom_scm_bw_disable(void)
134 {
135 	if (IS_ERR_OR_NULL(__scm->path))
136 		return;
137 
138 	mutex_lock(&__scm->scm_bw_lock);
139 	if (__scm->scm_vote_count-- == 1)
140 		icc_set_bw(__scm->path, 0, 0);
141 	mutex_unlock(&__scm->scm_bw_lock);
142 }
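
/*
 * Illustrative sketch (not part of the driver): the clock and bandwidth
 * helpers above are meant to bracket an SCM call, with qcom_scm_bw_enable()
 * refcounting the interconnect vote so that nested callers share a single
 * bandwidth request. A caller inside this file would follow this pattern:
 */
static int __maybe_unused example_scm_call_bracket(void)
{
	int ret;

	ret = qcom_scm_clk_enable();
	if (ret)
		return ret;

	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	/* ... issue the qcom_scm_call() here ... */

	qcom_scm_bw_disable();
disable_clk:
	qcom_scm_clk_disable();
	return ret;
}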
143 
144 enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
145 static DEFINE_SPINLOCK(scm_query_lock);
146 
147 static enum qcom_scm_convention __get_convention(void)
148 {
149 	unsigned long flags;
150 	struct qcom_scm_desc desc = {
151 		.svc = QCOM_SCM_SVC_INFO,
152 		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
153 		.args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
154 					   QCOM_SCM_INFO_IS_CALL_AVAIL) |
155 			   (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
156 		.arginfo = QCOM_SCM_ARGS(1),
157 		.owner = ARM_SMCCC_OWNER_SIP,
158 	};
159 	struct qcom_scm_res res;
160 	enum qcom_scm_convention probed_convention;
161 	int ret;
162 	bool forced = false;
163 
164 	if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
165 		return qcom_scm_convention;
166 
	/*
	 * No device is required here: the call has a single value argument,
	 * so there is nothing to dma_map_single() over to the secure world.
	 */
171 	probed_convention = SMC_CONVENTION_ARM_64;
172 	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
173 	if (!ret && res.result[0] == 1)
174 		goto found;
175 
	/*
	 * Some SC7180 firmware versions didn't implement the
	 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so fall back to forcing the
	 * ARM_64 calling convention on them. Luckily we don't make any early
	 * calls into the firmware on these SoCs, so the device pointer will
	 * be valid here to check whether the compatible matches.
	 */
183 	if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
184 		forced = true;
185 		goto found;
186 	}
187 
188 	probed_convention = SMC_CONVENTION_ARM_32;
189 	ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
190 	if (!ret && res.result[0] == 1)
191 		goto found;
192 
193 	probed_convention = SMC_CONVENTION_LEGACY;
194 found:
195 	spin_lock_irqsave(&scm_query_lock, flags);
196 	if (probed_convention != qcom_scm_convention) {
197 		qcom_scm_convention = probed_convention;
198 		pr_info("qcom_scm: convention: %s%s\n",
199 			qcom_scm_convention_names[qcom_scm_convention],
200 			forced ? " (forced)" : "");
201 	}
202 	spin_unlock_irqrestore(&scm_query_lock, flags);
203 
204 	return qcom_scm_convention;
205 }
206 
207 /**
208  * qcom_scm_call() - Invoke a syscall in the secure world
209  * @dev:	device
210  * @desc:	Descriptor structure containing arguments and return values
211  * @res:        Structure containing results from SMC/HVC call
212  *
213  * Sends a command to the SCM and waits for the command to finish processing.
214  * This should *only* be called in pre-emptible context.
215  */
216 static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
217 			 struct qcom_scm_res *res)
218 {
219 	might_sleep();
220 	switch (__get_convention()) {
221 	case SMC_CONVENTION_ARM_32:
222 	case SMC_CONVENTION_ARM_64:
223 		return scm_smc_call(dev, desc, res, false);
224 	case SMC_CONVENTION_LEGACY:
225 		return scm_legacy_call(dev, desc, res);
226 	default:
227 		pr_err("Unknown current SCM calling convention.\n");
228 		return -EINVAL;
229 	}
230 }
231 
232 /**
233  * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
234  * @dev:	device
235  * @desc:	Descriptor structure containing arguments and return values
236  * @res:	Structure containing results from SMC/HVC call
237  *
238  * Sends a command to the SCM and waits for the command to finish processing.
239  * This can be called in atomic context.
240  */
241 static int qcom_scm_call_atomic(struct device *dev,
242 				const struct qcom_scm_desc *desc,
243 				struct qcom_scm_res *res)
244 {
245 	switch (__get_convention()) {
246 	case SMC_CONVENTION_ARM_32:
247 	case SMC_CONVENTION_ARM_64:
248 		return scm_smc_call(dev, desc, res, true);
249 	case SMC_CONVENTION_LEGACY:
250 		return scm_legacy_call_atomic(dev, desc, res);
251 	default:
252 		pr_err("Unknown current SCM calling convention.\n");
253 		return -EINVAL;
254 	}
255 }
256 
257 static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
258 					 u32 cmd_id)
259 {
260 	int ret;
261 	struct qcom_scm_desc desc = {
262 		.svc = QCOM_SCM_SVC_INFO,
263 		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
264 		.owner = ARM_SMCCC_OWNER_SIP,
265 	};
266 	struct qcom_scm_res res;
267 
268 	desc.arginfo = QCOM_SCM_ARGS(1);
269 	switch (__get_convention()) {
270 	case SMC_CONVENTION_ARM_32:
271 	case SMC_CONVENTION_ARM_64:
272 		desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
273 				(ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
274 		break;
275 	case SMC_CONVENTION_LEGACY:
276 		desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
277 		break;
278 	default:
279 		pr_err("Unknown SMC convention being used\n");
280 		return false;
281 	}
282 
283 	ret = qcom_scm_call(dev, &desc, &res);
284 
285 	return ret ? false : !!res.result[0];
286 }
287 
288 static int qcom_scm_set_boot_addr(void *entry, const u8 *cpu_bits)
289 {
290 	int cpu;
291 	unsigned int flags = 0;
292 	struct qcom_scm_desc desc = {
293 		.svc = QCOM_SCM_SVC_BOOT,
294 		.cmd = QCOM_SCM_BOOT_SET_ADDR,
295 		.arginfo = QCOM_SCM_ARGS(2),
296 		.owner = ARM_SMCCC_OWNER_SIP,
297 	};
298 
299 	for_each_present_cpu(cpu) {
300 		if (cpu >= QCOM_SCM_BOOT_MAX_CPUS)
301 			return -EINVAL;
302 		flags |= cpu_bits[cpu];
303 	}
304 
305 	desc.args[0] = flags;
306 	desc.args[1] = virt_to_phys(entry);
307 
308 	return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
309 }
310 
311 static int qcom_scm_set_boot_addr_mc(void *entry, unsigned int flags)
312 {
313 	struct qcom_scm_desc desc = {
314 		.svc = QCOM_SCM_SVC_BOOT,
315 		.cmd = QCOM_SCM_BOOT_SET_ADDR_MC,
316 		.owner = ARM_SMCCC_OWNER_SIP,
317 		.arginfo = QCOM_SCM_ARGS(6),
318 		.args = {
319 			virt_to_phys(entry),
320 			/* Apply to all CPUs in all affinity levels */
321 			~0ULL, ~0ULL, ~0ULL, ~0ULL,
322 			flags,
323 		},
324 	};
325 
326 	/* Need a device for DMA of the additional arguments */
327 	if (!__scm || __get_convention() == SMC_CONVENTION_LEGACY)
328 		return -EOPNOTSUPP;
329 
330 	return qcom_scm_call(__scm->dev, &desc, NULL);
331 }
332 
333 /**
334  * qcom_scm_set_warm_boot_addr() - Set the warm boot address for all cpus
335  * @entry: Entry point function for the cpus
336  *
337  * Set the Linux entry point for the SCM to transfer control to when coming
338  * out of a power down. CPU power down may be executed on cpuidle or hotplug.
339  */
340 int qcom_scm_set_warm_boot_addr(void *entry)
341 {
342 	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_WARMBOOT))
		/* Fall back to the old SCM call */
344 		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_warm_bits);
345 	return 0;
346 }
347 EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
348 
349 /**
350  * qcom_scm_set_cold_boot_addr() - Set the cold boot address for all cpus
351  * @entry: Entry point function for the cpus
352  */
353 int qcom_scm_set_cold_boot_addr(void *entry)
354 {
355 	if (qcom_scm_set_boot_addr_mc(entry, QCOM_SCM_BOOT_MC_FLAG_COLDBOOT))
		/* Fall back to the old SCM call */
357 		return qcom_scm_set_boot_addr(entry, qcom_scm_cpu_cold_bits);
358 	return 0;
359 }
360 EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
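
/*
 * Illustrative sketch: an SMP or cpuidle platform would register its entry
 * points once during boot. The entry point argument is a hypothetical
 * assembly trampoline, not something this driver provides.
 */
static void __maybe_unused example_register_boot_addrs(void *startup_entry)
{
	if (qcom_scm_set_cold_boot_addr(startup_entry))
		pr_warn("qcom_scm: failed to set cold boot address\n");

	if (qcom_scm_set_warm_boot_addr(startup_entry))
		pr_warn("qcom_scm: failed to set warm boot address\n");
}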
361 
362 /**
363  * qcom_scm_cpu_power_down() - Power down the cpu
364  * @flags:	Flags to flush cache
365  *
 * This is the terminal call for powering down a CPU. If an interrupt is
 * pending, control returns from this function; otherwise the CPU jumps to
 * the warm boot entry point set for this CPU upon reset.
369  */
370 void qcom_scm_cpu_power_down(u32 flags)
371 {
372 	struct qcom_scm_desc desc = {
373 		.svc = QCOM_SCM_SVC_BOOT,
374 		.cmd = QCOM_SCM_BOOT_TERMINATE_PC,
375 		.args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
376 		.arginfo = QCOM_SCM_ARGS(1),
377 		.owner = ARM_SMCCC_OWNER_SIP,
378 	};
379 
380 	qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
381 }
382 EXPORT_SYMBOL(qcom_scm_cpu_power_down);
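
/*
 * Illustrative sketch: a cpuidle back end would invoke this as the final
 * step of entering a power-collapse state, after the warm boot address has
 * been set. QCOM_SCM_CPU_PWR_DOWN_L2_ON comes from <linux/qcom_scm.h>.
 */
static void __maybe_unused example_enter_power_collapse(void)
{
	/* Does not return unless an interrupt was already pending. */
	qcom_scm_cpu_power_down(QCOM_SCM_CPU_PWR_DOWN_L2_ON);
}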
383 
384 int qcom_scm_set_remote_state(u32 state, u32 id)
385 {
386 	struct qcom_scm_desc desc = {
387 		.svc = QCOM_SCM_SVC_BOOT,
388 		.cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
389 		.arginfo = QCOM_SCM_ARGS(2),
390 		.args[0] = state,
391 		.args[1] = id,
392 		.owner = ARM_SMCCC_OWNER_SIP,
393 	};
394 	struct qcom_scm_res res;
395 	int ret;
396 
397 	ret = qcom_scm_call(__scm->dev, &desc, &res);
398 
399 	return ret ? : res.result[0];
400 }
401 EXPORT_SYMBOL(qcom_scm_set_remote_state);
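
/*
 * Illustrative sketch: consumers such as the Venus video driver use this to
 * move a remote core between firmware-defined states. The state value here
 * is a hypothetical placeholder for the firmware-defined constant.
 */
static int __maybe_unused example_set_remote_state(u32 fw_state)
{
	return qcom_scm_set_remote_state(fw_state, 0);
}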
402 
403 static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
404 {
405 	struct qcom_scm_desc desc = {
406 		.svc = QCOM_SCM_SVC_BOOT,
407 		.cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
408 		.arginfo = QCOM_SCM_ARGS(2),
409 		.args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
410 		.owner = ARM_SMCCC_OWNER_SIP,
411 	};
412 
413 	desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;
414 
415 	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
416 }
417 
418 static void qcom_scm_set_download_mode(bool enable)
419 {
420 	bool avail;
421 	int ret = 0;
422 
423 	avail = __qcom_scm_is_call_available(__scm->dev,
424 					     QCOM_SCM_SVC_BOOT,
425 					     QCOM_SCM_BOOT_SET_DLOAD_MODE);
426 	if (avail) {
427 		ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
428 	} else if (__scm->dload_mode_addr) {
429 		ret = qcom_scm_io_writel(__scm->dload_mode_addr,
430 				enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
431 	} else {
432 		dev_err(__scm->dev,
433 			"No available mechanism for setting download mode\n");
434 	}
435 
436 	if (ret)
437 		dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
438 }
439 
440 /**
441  * qcom_scm_pas_init_image() - Initialize peripheral authentication service
442  *			       state machine for a given peripheral, using the
443  *			       metadata
444  * @peripheral: peripheral id
445  * @metadata:	pointer to memory containing ELF header, program header table
446  *		and optional blob of data used for authenticating the metadata
447  *		and the rest of the firmware
448  * @size:	size of the metadata
449  * @ctx:	optional metadata context
450  *
451  * Return: 0 on success.
452  *
453  * Upon successful return, the PAS metadata context (@ctx) will be used to
454  * track the metadata allocation, this needs to be released by invoking
455  * qcom_scm_pas_metadata_release() by the caller.
456  */
457 int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
458 			    struct qcom_scm_pas_metadata *ctx)
459 {
460 	dma_addr_t mdata_phys;
461 	void *mdata_buf;
462 	int ret;
463 	struct qcom_scm_desc desc = {
464 		.svc = QCOM_SCM_SVC_PIL,
465 		.cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
466 		.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
467 		.args[0] = peripheral,
468 		.owner = ARM_SMCCC_OWNER_SIP,
469 	};
470 	struct qcom_scm_res res;
471 
	/*
	 * During the SCM call, memory protection will be enabled for the
	 * metadata blob, so make sure it's physically contiguous, 4K-aligned
	 * and non-cacheable to avoid XPU violations.
	 */
477 	mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
478 				       GFP_KERNEL);
479 	if (!mdata_buf) {
480 		dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
481 		return -ENOMEM;
482 	}
483 	memcpy(mdata_buf, metadata, size);
484 
485 	ret = qcom_scm_clk_enable();
486 	if (ret)
487 		goto out;
488 
	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	desc.args[1] = mdata_phys;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_bw_disable();
disable_clk:
	qcom_scm_clk_disable();
499 
500 out:
	if (ret < 0 || !ctx) {
		dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
	} else {
504 		ctx->ptr = mdata_buf;
505 		ctx->phys = mdata_phys;
506 		ctx->size = size;
507 	}
508 
509 	return ret ? : res.result[0];
510 }
511 EXPORT_SYMBOL(qcom_scm_pas_init_image);
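
/*
 * Illustrative sketch: a remoteproc driver would hand the firmware metadata
 * (ELF headers plus hash segment) to PAS and release the context once the
 * whole image has been authenticated. The PAS id and metadata buffer are
 * hypothetical.
 */
static int __maybe_unused example_pas_init(u32 pas_id, const void *mdt,
					   size_t mdt_size)
{
	struct qcom_scm_pas_metadata ctx = {};
	int ret;

	ret = qcom_scm_pas_init_image(pas_id, mdt, mdt_size, &ctx);
	if (ret)
		return ret;

	/* ... load and authenticate the remaining segments ... */

	qcom_scm_pas_metadata_release(&ctx);
	return 0;
}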
512 
513 /**
514  * qcom_scm_pas_metadata_release() - release metadata context
515  * @ctx:	metadata context
516  */
517 void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx)
518 {
519 	if (!ctx->ptr)
520 		return;
521 
522 	dma_free_coherent(__scm->dev, ctx->size, ctx->ptr, ctx->phys);
523 
524 	ctx->ptr = NULL;
525 	ctx->phys = 0;
526 	ctx->size = 0;
527 }
528 EXPORT_SYMBOL(qcom_scm_pas_metadata_release);
529 
530 /**
531  * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
532  *			      for firmware loading
533  * @peripheral:	peripheral id
534  * @addr:	start address of memory area to prepare
535  * @size:	size of the memory area to prepare
536  *
537  * Returns 0 on success.
538  */
539 int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
540 {
541 	int ret;
542 	struct qcom_scm_desc desc = {
543 		.svc = QCOM_SCM_SVC_PIL,
544 		.cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
545 		.arginfo = QCOM_SCM_ARGS(3),
546 		.args[0] = peripheral,
547 		.args[1] = addr,
548 		.args[2] = size,
549 		.owner = ARM_SMCCC_OWNER_SIP,
550 	};
551 	struct qcom_scm_res res;
552 
553 	ret = qcom_scm_clk_enable();
554 	if (ret)
555 		return ret;
556 
	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();
disable_clk:
	qcom_scm_clk_disable();
564 
565 	return ret ? : res.result[0];
566 }
567 EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
568 
569 /**
570  * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
571  *				   and reset the remote processor
572  * @peripheral:	peripheral id
573  *
574  * Return 0 on success.
575  */
576 int qcom_scm_pas_auth_and_reset(u32 peripheral)
577 {
578 	int ret;
579 	struct qcom_scm_desc desc = {
580 		.svc = QCOM_SCM_SVC_PIL,
581 		.cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
582 		.arginfo = QCOM_SCM_ARGS(1),
583 		.args[0] = peripheral,
584 		.owner = ARM_SMCCC_OWNER_SIP,
585 	};
586 	struct qcom_scm_res res;
587 
588 	ret = qcom_scm_clk_enable();
589 	if (ret)
590 		return ret;
591 
	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);
	qcom_scm_bw_disable();
disable_clk:
	qcom_scm_clk_disable();
599 
600 	return ret ? : res.result[0];
601 }
602 EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
603 
604 /**
605  * qcom_scm_pas_shutdown() - Shut down the remote processor
606  * @peripheral: peripheral id
607  *
608  * Returns 0 on success.
609  */
610 int qcom_scm_pas_shutdown(u32 peripheral)
611 {
612 	int ret;
613 	struct qcom_scm_desc desc = {
614 		.svc = QCOM_SCM_SVC_PIL,
615 		.cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
616 		.arginfo = QCOM_SCM_ARGS(1),
617 		.args[0] = peripheral,
618 		.owner = ARM_SMCCC_OWNER_SIP,
619 	};
620 	struct qcom_scm_res res;
621 
622 	ret = qcom_scm_clk_enable();
623 	if (ret)
624 		return ret;
625 
	ret = qcom_scm_bw_enable();
	if (ret)
		goto disable_clk;

	ret = qcom_scm_call(__scm->dev, &desc, &res);

	qcom_scm_bw_disable();
disable_clk:
	qcom_scm_clk_disable();
634 
635 	return ret ? : res.result[0];
636 }
637 EXPORT_SYMBOL(qcom_scm_pas_shutdown);
638 
639 /**
640  * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *			      available for the given peripheral
642  * @peripheral:	peripheral id
643  *
644  * Returns true if PAS is supported for this peripheral, otherwise false.
645  */
646 bool qcom_scm_pas_supported(u32 peripheral)
647 {
648 	int ret;
649 	struct qcom_scm_desc desc = {
650 		.svc = QCOM_SCM_SVC_PIL,
651 		.cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
652 		.arginfo = QCOM_SCM_ARGS(1),
653 		.args[0] = peripheral,
654 		.owner = ARM_SMCCC_OWNER_SIP,
655 	};
656 	struct qcom_scm_res res;
657 
658 	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
659 					  QCOM_SCM_PIL_PAS_IS_SUPPORTED))
660 		return false;
661 
662 	ret = qcom_scm_call(__scm->dev, &desc, &res);
663 
664 	return ret ? false : !!res.result[0];
665 }
666 EXPORT_SYMBOL(qcom_scm_pas_supported);
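
/*
 * Illustrative sketch of the overall PAS boot sequence a remoteproc driver
 * follows; the peripheral id and carveout address/size are hypothetical.
 */
static int __maybe_unused example_pas_boot(u32 pas_id, phys_addr_t mem,
					   phys_addr_t mem_size)
{
	int ret;

	if (!qcom_scm_pas_supported(pas_id))
		return -EOPNOTSUPP;

	ret = qcom_scm_pas_mem_setup(pas_id, mem, mem_size);
	if (ret)
		return ret;

	/* ... metadata init and firmware segment loading happen here ... */

	return qcom_scm_pas_auth_and_reset(pas_id);
}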
667 
668 static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
669 {
670 	struct qcom_scm_desc desc = {
671 		.svc = QCOM_SCM_SVC_PIL,
672 		.cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
673 		.arginfo = QCOM_SCM_ARGS(2),
674 		.args[0] = reset,
675 		.args[1] = 0,
676 		.owner = ARM_SMCCC_OWNER_SIP,
677 	};
678 	struct qcom_scm_res res;
679 	int ret;
680 
681 	ret = qcom_scm_call(__scm->dev, &desc, &res);
682 
683 	return ret ? : res.result[0];
684 }
685 
686 static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
687 				     unsigned long idx)
688 {
689 	if (idx != 0)
690 		return -EINVAL;
691 
692 	return __qcom_scm_pas_mss_reset(__scm->dev, 1);
693 }
694 
695 static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
696 				       unsigned long idx)
697 {
698 	if (idx != 0)
699 		return -EINVAL;
700 
701 	return __qcom_scm_pas_mss_reset(__scm->dev, 0);
702 }
703 
704 static const struct reset_control_ops qcom_scm_pas_reset_ops = {
705 	.assert = qcom_scm_pas_reset_assert,
706 	.deassert = qcom_scm_pas_reset_deassert,
707 };
708 
709 int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
710 {
711 	struct qcom_scm_desc desc = {
712 		.svc = QCOM_SCM_SVC_IO,
713 		.cmd = QCOM_SCM_IO_READ,
714 		.arginfo = QCOM_SCM_ARGS(1),
715 		.args[0] = addr,
716 		.owner = ARM_SMCCC_OWNER_SIP,
717 	};
718 	struct qcom_scm_res res;
	int ret;

722 	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
723 	if (ret >= 0)
724 		*val = res.result[0];
725 
726 	return ret < 0 ? ret : 0;
727 }
728 EXPORT_SYMBOL(qcom_scm_io_readl);
729 
730 int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
731 {
732 	struct qcom_scm_desc desc = {
733 		.svc = QCOM_SCM_SVC_IO,
734 		.cmd = QCOM_SCM_IO_WRITE,
735 		.arginfo = QCOM_SCM_ARGS(2),
736 		.args[0] = addr,
737 		.args[1] = val,
738 		.owner = ARM_SMCCC_OWNER_SIP,
739 	};
740 
741 	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
742 }
743 EXPORT_SYMBOL(qcom_scm_io_writel);
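
/*
 * Illustrative sketch: read-modify-write of a register that only the secure
 * world may access; the register address and bit are hypothetical.
 */
static int __maybe_unused example_secure_rmw(phys_addr_t reg, unsigned int bit)
{
	unsigned int val;
	int ret;

	ret = qcom_scm_io_readl(reg, &val);
	if (ret)
		return ret;

	return qcom_scm_io_writel(reg, val | bit);
}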
744 
745 /**
 * qcom_scm_restore_sec_cfg_available() - Check if the secure environment
 * supports the restore security config interface.
 *
 * Return true if the restore-cfg interface is supported, false if not.
750  */
751 bool qcom_scm_restore_sec_cfg_available(void)
752 {
753 	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
754 					    QCOM_SCM_MP_RESTORE_SEC_CFG);
755 }
756 EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available);
757 
758 int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
759 {
760 	struct qcom_scm_desc desc = {
761 		.svc = QCOM_SCM_SVC_MP,
762 		.cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
763 		.arginfo = QCOM_SCM_ARGS(2),
764 		.args[0] = device_id,
765 		.args[1] = spare,
766 		.owner = ARM_SMCCC_OWNER_SIP,
767 	};
768 	struct qcom_scm_res res;
769 	int ret;
770 
771 	ret = qcom_scm_call(__scm->dev, &desc, &res);
772 
773 	return ret ? : res.result[0];
774 }
775 EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);
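
/*
 * Illustrative sketch: callers (e.g. the Qualcomm SMMU implementation) check
 * availability before restoring a device's security configuration; the
 * device id is hypothetical.
 */
static int __maybe_unused example_restore_sec_cfg(u32 sec_device_id)
{
	if (!qcom_scm_restore_sec_cfg_available())
		return -EOPNOTSUPP;

	return qcom_scm_restore_sec_cfg(sec_device_id, 0);
}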
776 
777 int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
778 {
779 	struct qcom_scm_desc desc = {
780 		.svc = QCOM_SCM_SVC_MP,
781 		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
782 		.arginfo = QCOM_SCM_ARGS(1),
783 		.args[0] = spare,
784 		.owner = ARM_SMCCC_OWNER_SIP,
785 	};
786 	struct qcom_scm_res res;
787 	int ret;
788 
789 	ret = qcom_scm_call(__scm->dev, &desc, &res);
790 
791 	if (size)
792 		*size = res.result[0];
793 
794 	return ret ? : res.result[1];
795 }
796 EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);
797 
798 int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
799 {
800 	struct qcom_scm_desc desc = {
801 		.svc = QCOM_SCM_SVC_MP,
802 		.cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
803 		.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
804 					 QCOM_SCM_VAL),
805 		.args[0] = addr,
806 		.args[1] = size,
807 		.args[2] = spare,
808 		.owner = ARM_SMCCC_OWNER_SIP,
809 	};
810 	int ret;
811 
812 	ret = qcom_scm_call(__scm->dev, &desc, NULL);
813 
	/* The page table has already been initialized, so ignore the error */
815 	if (ret == -EPERM)
816 		ret = 0;
817 
818 	return ret;
819 }
820 EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
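
/*
 * Illustrative sketch of the secure pagetable handshake: ask the secure
 * world how much memory it needs, hand it a suitably sized allocation, then
 * initialize. Real callers use their own allocator; dma_alloc_coherent() is
 * used here purely for illustration.
 */
static int __maybe_unused example_secure_ptbl_setup(struct device *dev)
{
	dma_addr_t phys;
	void *cpu_addr;
	size_t size;
	int ret;

	ret = qcom_scm_iommu_secure_ptbl_size(0, &size);
	if (ret)
		return ret;

	cpu_addr = dma_alloc_coherent(dev, size, &phys, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	ret = qcom_scm_iommu_secure_ptbl_init(phys, size, 0);
	if (ret)
		dma_free_coherent(dev, size, cpu_addr, phys);

	return ret;
}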
821 
822 int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size)
823 {
824 	struct qcom_scm_desc desc = {
825 		.svc = QCOM_SCM_SVC_MP,
826 		.cmd = QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE,
827 		.arginfo = QCOM_SCM_ARGS(2),
828 		.args[0] = size,
829 		.args[1] = spare,
830 		.owner = ARM_SMCCC_OWNER_SIP,
831 	};
832 
833 	return qcom_scm_call(__scm->dev, &desc, NULL);
834 }
835 EXPORT_SYMBOL(qcom_scm_iommu_set_cp_pool_size);
836 
837 int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
838 				   u32 cp_nonpixel_start,
839 				   u32 cp_nonpixel_size)
840 {
841 	int ret;
842 	struct qcom_scm_desc desc = {
843 		.svc = QCOM_SCM_SVC_MP,
844 		.cmd = QCOM_SCM_MP_VIDEO_VAR,
845 		.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
846 					 QCOM_SCM_VAL, QCOM_SCM_VAL),
847 		.args[0] = cp_start,
848 		.args[1] = cp_size,
849 		.args[2] = cp_nonpixel_start,
850 		.args[3] = cp_nonpixel_size,
851 		.owner = ARM_SMCCC_OWNER_SIP,
852 	};
853 	struct qcom_scm_res res;
854 
855 	ret = qcom_scm_call(__scm->dev, &desc, &res);
856 
857 	return ret ? : res.result[0];
858 }
859 EXPORT_SYMBOL(qcom_scm_mem_protect_video_var);
860 
861 static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
862 				 size_t mem_sz, phys_addr_t src, size_t src_sz,
863 				 phys_addr_t dest, size_t dest_sz)
864 {
865 	int ret;
866 	struct qcom_scm_desc desc = {
867 		.svc = QCOM_SCM_SVC_MP,
868 		.cmd = QCOM_SCM_MP_ASSIGN,
869 		.arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
870 					 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
871 					 QCOM_SCM_VAL, QCOM_SCM_VAL),
872 		.args[0] = mem_region,
873 		.args[1] = mem_sz,
874 		.args[2] = src,
875 		.args[3] = src_sz,
876 		.args[4] = dest,
877 		.args[5] = dest_sz,
878 		.args[6] = 0,
879 		.owner = ARM_SMCCC_OWNER_SIP,
880 	};
881 	struct qcom_scm_res res;
882 
883 	ret = qcom_scm_call(dev, &desc, &res);
884 
885 	return ret ? : res.result[0];
886 }
887 
888 /**
889  * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: memory region whose ownership needs to be reassigned
 * @mem_sz:   size of the region
 * @srcvm:    vmid bitmap of the current set of owners, where each set bit
 *            indicates a unique owner
 * @newvm:    array of new owners and their corresponding permission flags
 * @dest_cnt: number of owners in the next set
897  *
898  * Return negative errno on failure or 0 on success with @srcvm updated.
899  */
900 int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
901 			unsigned int *srcvm,
902 			const struct qcom_scm_vmperm *newvm,
903 			unsigned int dest_cnt)
904 {
905 	struct qcom_scm_current_perm_info *destvm;
906 	struct qcom_scm_mem_map_info *mem_to_map;
907 	phys_addr_t mem_to_map_phys;
908 	phys_addr_t dest_phys;
909 	dma_addr_t ptr_phys;
910 	size_t mem_to_map_sz;
911 	size_t dest_sz;
912 	size_t src_sz;
913 	size_t ptr_sz;
914 	int next_vm;
915 	__le32 *src;
916 	void *ptr;
917 	int ret, i, b;
918 	unsigned long srcvm_bits = *srcvm;
919 
920 	src_sz = hweight_long(srcvm_bits) * sizeof(*src);
921 	mem_to_map_sz = sizeof(*mem_to_map);
922 	dest_sz = dest_cnt * sizeof(*destvm);
923 	ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
924 			ALIGN(dest_sz, SZ_64);
925 
926 	ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
927 	if (!ptr)
928 		return -ENOMEM;
929 
	/* Fill in the source vmids */
931 	src = ptr;
932 	i = 0;
933 	for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG)
934 		src[i++] = cpu_to_le32(b);
935 
	/* Fill in details of the memory buffer to map */
937 	mem_to_map = ptr + ALIGN(src_sz, SZ_64);
938 	mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
939 	mem_to_map->mem_addr = cpu_to_le64(mem_addr);
940 	mem_to_map->mem_size = cpu_to_le64(mem_sz);
941 
942 	next_vm = 0;
	/* Fill in the next set of vmids and their permissions */
944 	destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
945 	dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
946 	for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
947 		destvm->vmid = cpu_to_le32(newvm->vmid);
948 		destvm->perm = cpu_to_le32(newvm->perm);
949 		destvm->ctx = 0;
950 		destvm->ctx_size = 0;
951 		next_vm |= BIT(newvm->vmid);
952 	}
953 
954 	ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
955 				    ptr_phys, src_sz, dest_phys, dest_sz);
956 	dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
957 	if (ret) {
958 		dev_err(__scm->dev,
959 			"Assign memory protection call failed %d\n", ret);
960 		return -EINVAL;
961 	}
962 
963 	*srcvm = next_vm;
964 	return 0;
965 }
966 EXPORT_SYMBOL(qcom_scm_assign_mem);
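
/*
 * Illustrative sketch: hand a physically contiguous region from HLOS to a
 * remote VM and record the new owner set. The VMID and permission constants
 * come from <linux/qcom_scm.h>; the region itself is hypothetical.
 */
static int __maybe_unused example_assign_to_remote(phys_addr_t addr,
						   size_t size)
{
	struct qcom_scm_vmperm remote_perm = {
		.vmid = QCOM_SCM_VMID_MSS_MSA,
		.perm = QCOM_SCM_PERM_RW,
	};
	unsigned int srcvm = BIT(QCOM_SCM_VMID_HLOS);

	/* On success, srcvm is updated to describe the new owner set. */
	return qcom_scm_assign_mem(addr, size, &srcvm, &remote_perm, 1);
}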
967 
968 /**
969  * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
970  */
971 bool qcom_scm_ocmem_lock_available(void)
972 {
973 	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
974 					    QCOM_SCM_OCMEM_LOCK_CMD);
975 }
976 EXPORT_SYMBOL(qcom_scm_ocmem_lock_available);
977 
978 /**
979  * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
980  * region to the specified initiator
981  *
982  * @id:     tz initiator id
983  * @offset: OCMEM offset
984  * @size:   OCMEM size
985  * @mode:   access mode (WIDE/NARROW)
986  */
987 int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
988 			u32 mode)
989 {
990 	struct qcom_scm_desc desc = {
991 		.svc = QCOM_SCM_SVC_OCMEM,
992 		.cmd = QCOM_SCM_OCMEM_LOCK_CMD,
993 		.args[0] = id,
994 		.args[1] = offset,
995 		.args[2] = size,
996 		.args[3] = mode,
997 		.arginfo = QCOM_SCM_ARGS(4),
998 	};
999 
1000 	return qcom_scm_call(__scm->dev, &desc, NULL);
1001 }
1002 EXPORT_SYMBOL(qcom_scm_ocmem_lock);
1003 
1004 /**
1005  * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
1006  * region from the specified initiator
1007  *
1008  * @id:     tz initiator id
1009  * @offset: OCMEM offset
1010  * @size:   OCMEM size
1011  */
1012 int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
1013 {
1014 	struct qcom_scm_desc desc = {
1015 		.svc = QCOM_SCM_SVC_OCMEM,
1016 		.cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
1017 		.args[0] = id,
1018 		.args[1] = offset,
1019 		.args[2] = size,
1020 		.arginfo = QCOM_SCM_ARGS(3),
1021 	};
1022 
1023 	return qcom_scm_call(__scm->dev, &desc, NULL);
1024 }
1025 EXPORT_SYMBOL(qcom_scm_ocmem_unlock);
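
/*
 * Illustrative sketch: the OCMEM driver brackets a client's use of a region
 * with lock/unlock. The offset, size and access mode are hypothetical.
 */
static int __maybe_unused example_ocmem_use(u32 offset, u32 size, u32 mode)
{
	int ret;

	if (!qcom_scm_ocmem_lock_available())
		return -EOPNOTSUPP;

	ret = qcom_scm_ocmem_lock(QCOM_SCM_OCMEM_GRAPHICS_ID, offset, size,
				  mode);
	if (ret)
		return ret;

	/* ... the client uses the region here ... */

	return qcom_scm_ocmem_unlock(QCOM_SCM_OCMEM_GRAPHICS_ID, offset, size);
}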
1026 
1027 /**
1028  * qcom_scm_ice_available() - Is the ICE key programming interface available?
1029  *
1030  * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
1031  *	   qcom_scm_ice_set_key() are available.
1032  */
1033 bool qcom_scm_ice_available(void)
1034 {
1035 	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
1036 					    QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
1037 		__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
1038 					     QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
1039 }
1040 EXPORT_SYMBOL(qcom_scm_ice_available);
1041 
1042 /**
1043  * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
1044  * @index: the keyslot to invalidate
1045  *
1046  * The UFSHCI and eMMC standards define a standard way to do this, but it
1047  * doesn't work on these SoCs; only this SCM call does.
1048  *
1049  * It is assumed that the SoC has only one ICE instance being used, as this SCM
1050  * call doesn't specify which ICE instance the keyslot belongs to.
1051  *
1052  * Return: 0 on success; -errno on failure.
1053  */
1054 int qcom_scm_ice_invalidate_key(u32 index)
1055 {
1056 	struct qcom_scm_desc desc = {
1057 		.svc = QCOM_SCM_SVC_ES,
1058 		.cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
1059 		.arginfo = QCOM_SCM_ARGS(1),
1060 		.args[0] = index,
1061 		.owner = ARM_SMCCC_OWNER_SIP,
1062 	};
1063 
1064 	return qcom_scm_call(__scm->dev, &desc, NULL);
1065 }
1066 EXPORT_SYMBOL(qcom_scm_ice_invalidate_key);
1067 
1068 /**
1069  * qcom_scm_ice_set_key() - Set an inline encryption key
1070  * @index: the keyslot into which to set the key
1071  * @key: the key to program
1072  * @key_size: the size of the key in bytes
1073  * @cipher: the encryption algorithm the key is for
1074  * @data_unit_size: the encryption data unit size, i.e. the size of each
1075  *		    individual plaintext and ciphertext.  Given in 512-byte
1076  *		    units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
1077  *
1078  * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
1079  * can then be used to encrypt/decrypt UFS or eMMC I/O requests inline.
1080  *
1081  * The UFSHCI and eMMC standards define a standard way to do this, but it
1082  * doesn't work on these SoCs; only this SCM call does.
1083  *
1084  * It is assumed that the SoC has only one ICE instance being used, as this SCM
1085  * call doesn't specify which ICE instance the keyslot belongs to.
1086  *
1087  * Return: 0 on success; -errno on failure.
1088  */
1089 int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
1090 			 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
1091 {
1092 	struct qcom_scm_desc desc = {
1093 		.svc = QCOM_SCM_SVC_ES,
1094 		.cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
1095 		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
1096 					 QCOM_SCM_VAL, QCOM_SCM_VAL,
1097 					 QCOM_SCM_VAL),
1098 		.args[0] = index,
1099 		.args[2] = key_size,
1100 		.args[3] = cipher,
1101 		.args[4] = data_unit_size,
1102 		.owner = ARM_SMCCC_OWNER_SIP,
1103 	};
1104 	void *keybuf;
1105 	dma_addr_t key_phys;
1106 	int ret;
1107 
1108 	/*
1109 	 * 'key' may point to vmalloc()'ed memory, but we need to pass a
1110 	 * physical address that's been properly flushed.  The sanctioned way to
1111 	 * do this is by using the DMA API.  But as is best practice for crypto
1112 	 * keys, we also must wipe the key after use.  This makes kmemdup() +
1113 	 * dma_map_single() not clearly correct, since the DMA API can use
1114 	 * bounce buffers.  Instead, just use dma_alloc_coherent().  Programming
1115 	 * keys is normally rare and thus not performance-critical.
1116 	 */
1117 
1118 	keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys,
1119 				    GFP_KERNEL);
1120 	if (!keybuf)
1121 		return -ENOMEM;
1122 	memcpy(keybuf, key, key_size);
1123 	desc.args[1] = key_phys;
1124 
1125 	ret = qcom_scm_call(__scm->dev, &desc, NULL);
1126 
1127 	memzero_explicit(keybuf, key_size);
1128 
1129 	dma_free_coherent(__scm->dev, key_size, keybuf, key_phys);
1130 	return ret;
1131 }
1132 EXPORT_SYMBOL(qcom_scm_ice_set_key);
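
/*
 * Illustrative sketch: program an AES-256-XTS key for 4096-byte crypto data
 * units into a keyslot, guarded by the availability check; the keyslot and
 * key blob come from the storage driver.
 */
static int __maybe_unused example_ice_program_key(u32 slot, const u8 *key,
						  u32 key_size)
{
	if (!qcom_scm_ice_available())
		return -EOPNOTSUPP;

	/* data_unit_size is in 512-byte units, so 8 means 4096 bytes */
	return qcom_scm_ice_set_key(slot, key, key_size,
				    QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
}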
1133 
1134 /**
1135  * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
1136  *
1137  * Return true if HDCP is supported, false if not.
1138  */
1139 bool qcom_scm_hdcp_available(void)
1140 {
1141 	bool avail;
1142 	int ret = qcom_scm_clk_enable();
1143 
	if (ret)
		return false;
1146 
1147 	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
1148 						QCOM_SCM_HDCP_INVOKE);
1149 
1150 	qcom_scm_clk_disable();
1151 
1152 	return avail;
1153 }
1154 EXPORT_SYMBOL(qcom_scm_hdcp_available);
1155 
1156 /**
1157  * qcom_scm_hdcp_req() - Send HDCP request.
1158  * @req: HDCP request array
1159  * @req_cnt: HDCP request array count
1160  * @resp: response buffer passed to SCM
1161  *
1162  * Write HDCP register(s) through SCM.
1163  */
1164 int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
1165 {
1166 	int ret;
1167 	struct qcom_scm_desc desc = {
1168 		.svc = QCOM_SCM_SVC_HDCP,
1169 		.cmd = QCOM_SCM_HDCP_INVOKE,
1170 		.arginfo = QCOM_SCM_ARGS(10),
1171 		.args = {
1172 			req[0].addr,
1173 			req[0].val,
1174 			req[1].addr,
1175 			req[1].val,
1176 			req[2].addr,
1177 			req[2].val,
1178 			req[3].addr,
1179 			req[3].val,
1180 			req[4].addr,
1181 			req[4].val
1182 		},
1183 		.owner = ARM_SMCCC_OWNER_SIP,
1184 	};
1185 	struct qcom_scm_res res;
1186 
1187 	if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
1188 		return -ERANGE;
1189 
1190 	ret = qcom_scm_clk_enable();
1191 	if (ret)
1192 		return ret;
1193 
1194 	ret = qcom_scm_call(__scm->dev, &desc, &res);
1195 	*resp = res.result[0];
1196 
1197 	qcom_scm_clk_disable();
1198 
1199 	return ret;
1200 }
1201 EXPORT_SYMBOL(qcom_scm_hdcp_req);
1202 
1203 int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt)
1204 {
1205 	struct qcom_scm_desc desc = {
1206 		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
1207 		.cmd = QCOM_SCM_SMMU_PT_FORMAT,
1208 		.arginfo = QCOM_SCM_ARGS(3),
1209 		.args[0] = sec_id,
1210 		.args[1] = ctx_num,
1211 		.args[2] = pt_fmt, /* 0: LPAE AArch32 - 1: AArch64 */
1212 		.owner = ARM_SMCCC_OWNER_SIP,
1213 	};
1214 
1215 	return qcom_scm_call(__scm->dev, &desc, NULL);
1216 }
1217 EXPORT_SYMBOL(qcom_scm_iommu_set_pt_format);
1218 
1219 int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
1220 {
1221 	struct qcom_scm_desc desc = {
1222 		.svc = QCOM_SCM_SVC_SMMU_PROGRAM,
1223 		.cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
1224 		.arginfo = QCOM_SCM_ARGS(2),
1225 		.args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
1226 		.args[1] = en,
1227 		.owner = ARM_SMCCC_OWNER_SIP,
	};

1231 	return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
1232 }
1233 EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);
1234 
1235 bool qcom_scm_lmh_dcvsh_available(void)
1236 {
1237 	return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_LMH, QCOM_SCM_LMH_LIMIT_DCVSH);
1238 }
1239 EXPORT_SYMBOL(qcom_scm_lmh_dcvsh_available);
1240 
1241 int qcom_scm_lmh_profile_change(u32 profile_id)
1242 {
1243 	struct qcom_scm_desc desc = {
1244 		.svc = QCOM_SCM_SVC_LMH,
1245 		.cmd = QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE,
1246 		.arginfo = QCOM_SCM_ARGS(1, QCOM_SCM_VAL),
1247 		.args[0] = profile_id,
1248 		.owner = ARM_SMCCC_OWNER_SIP,
1249 	};
1250 
1251 	return qcom_scm_call(__scm->dev, &desc, NULL);
1252 }
1253 EXPORT_SYMBOL(qcom_scm_lmh_profile_change);
1254 
1255 int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
1256 		       u64 limit_node, u32 node_id, u64 version)
1257 {
1258 	dma_addr_t payload_phys;
1259 	u32 *payload_buf;
1260 	int ret, payload_size = 5 * sizeof(u32);
1261 
1262 	struct qcom_scm_desc desc = {
1263 		.svc = QCOM_SCM_SVC_LMH,
1264 		.cmd = QCOM_SCM_LMH_LIMIT_DCVSH,
1265 		.arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_VAL,
1266 					QCOM_SCM_VAL, QCOM_SCM_VAL),
1267 		.args[1] = payload_size,
1268 		.args[2] = limit_node,
1269 		.args[3] = node_id,
1270 		.args[4] = version,
1271 		.owner = ARM_SMCCC_OWNER_SIP,
1272 	};
1273 
1274 	payload_buf = dma_alloc_coherent(__scm->dev, payload_size, &payload_phys, GFP_KERNEL);
1275 	if (!payload_buf)
1276 		return -ENOMEM;
1277 
1278 	payload_buf[0] = payload_fn;
1279 	payload_buf[1] = 0;
1280 	payload_buf[2] = payload_reg;
1281 	payload_buf[3] = 1;
1282 	payload_buf[4] = payload_val;
1283 
1284 	desc.args[0] = payload_phys;
1285 
1286 	ret = qcom_scm_call(__scm->dev, &desc, NULL);
1287 
1288 	dma_free_coherent(__scm->dev, payload_size, payload_buf, payload_phys);
1289 	return ret;
1290 }
1291 EXPORT_SYMBOL(qcom_scm_lmh_dcvsh);
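
/*
 * Illustrative sketch: the LMH thermal driver checks availability, then
 * programs a node through the dcvsh payload interface. The function,
 * register and node values are hypothetical placeholders for the
 * firmware-defined constants.
 */
static int __maybe_unused example_lmh_setup(u32 fn, u32 reg, u32 val,
					    u32 node_id)
{
	if (!qcom_scm_lmh_dcvsh_available())
		return -EOPNOTSUPP;

	return qcom_scm_lmh_dcvsh(fn, reg, val, 0, node_id, 0);
}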
1292 
1293 static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
1294 {
1295 	struct device_node *tcsr;
1296 	struct device_node *np = dev->of_node;
1297 	struct resource res;
1298 	u32 offset;
1299 	int ret;
1300 
1301 	tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
1302 	if (!tcsr)
1303 		return 0;
1304 
1305 	ret = of_address_to_resource(tcsr, 0, &res);
1306 	of_node_put(tcsr);
1307 	if (ret)
1308 		return ret;
1309 
1310 	ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
1311 	if (ret < 0)
1312 		return ret;
1313 
1314 	*addr = res.start + offset;
1315 
1316 	return 0;
1317 }
1318 
1319 /**
1320  * qcom_scm_is_available() - Checks if SCM is available
1321  */
1322 bool qcom_scm_is_available(void)
1323 {
1324 	return !!__scm;
1325 }
1326 EXPORT_SYMBOL(qcom_scm_is_available);
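
/*
 * Illustrative sketch: a consumer probing before this driver has bound
 * typically defers rather than failing hard.
 */
static int __maybe_unused example_consumer_probe_guard(void)
{
	if (!qcom_scm_is_available())
		return -EPROBE_DEFER;

	return 0;
}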
1327 
1328 static int qcom_scm_probe(struct platform_device *pdev)
1329 {
1330 	struct qcom_scm *scm;
1331 	unsigned long clks;
1332 	int ret;
1333 
1334 	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
1335 	if (!scm)
1336 		return -ENOMEM;
1337 
1338 	ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
1339 	if (ret < 0)
1340 		return ret;
1341 
1342 	mutex_init(&scm->scm_bw_lock);
1343 
1344 	clks = (unsigned long)of_device_get_match_data(&pdev->dev);
1345 
1346 	scm->path = devm_of_icc_get(&pdev->dev, NULL);
1347 	if (IS_ERR(scm->path))
1348 		return dev_err_probe(&pdev->dev, PTR_ERR(scm->path),
1349 				     "failed to acquire interconnect path\n");
1350 
1351 	scm->core_clk = devm_clk_get(&pdev->dev, "core");
1352 	if (IS_ERR(scm->core_clk)) {
1353 		if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
1354 			return PTR_ERR(scm->core_clk);
1355 
1356 		if (clks & SCM_HAS_CORE_CLK) {
1357 			dev_err(&pdev->dev, "failed to acquire core clk\n");
1358 			return PTR_ERR(scm->core_clk);
1359 		}
1360 
1361 		scm->core_clk = NULL;
1362 	}
1363 
1364 	scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
1365 	if (IS_ERR(scm->iface_clk)) {
1366 		if (PTR_ERR(scm->iface_clk) == -EPROBE_DEFER)
1367 			return PTR_ERR(scm->iface_clk);
1368 
1369 		if (clks & SCM_HAS_IFACE_CLK) {
1370 			dev_err(&pdev->dev, "failed to acquire iface clk\n");
1371 			return PTR_ERR(scm->iface_clk);
1372 		}
1373 
1374 		scm->iface_clk = NULL;
1375 	}
1376 
1377 	scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
1378 	if (IS_ERR(scm->bus_clk)) {
1379 		if (PTR_ERR(scm->bus_clk) == -EPROBE_DEFER)
1380 			return PTR_ERR(scm->bus_clk);
1381 
1382 		if (clks & SCM_HAS_BUS_CLK) {
1383 			dev_err(&pdev->dev, "failed to acquire bus clk\n");
1384 			return PTR_ERR(scm->bus_clk);
1385 		}
1386 
1387 		scm->bus_clk = NULL;
1388 	}
1389 
1390 	scm->reset.ops = &qcom_scm_pas_reset_ops;
1391 	scm->reset.nr_resets = 1;
1392 	scm->reset.of_node = pdev->dev.of_node;
1393 	ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
1394 	if (ret)
1395 		return ret;
1396 
1397 	/* vote for max clk rate for highest performance */
1398 	ret = clk_set_rate(scm->core_clk, INT_MAX);
1399 	if (ret)
1400 		return ret;
1401 
1402 	__scm = scm;
1403 	__scm->dev = &pdev->dev;
1404 
1405 	__get_convention();
1406 
	/*
	 * If requested, enable "download mode"; from this point on, a warm
	 * boot will cause the boot stages to enter download mode, unless it
	 * is disabled below by a clean shutdown/reboot.
	 */
1412 	if (download_mode)
1413 		qcom_scm_set_download_mode(true);
1414 
1415 	return 0;
1416 }
1417 
1418 static void qcom_scm_shutdown(struct platform_device *pdev)
1419 {
1420 	/* Clean shutdown, disable download mode to allow normal restart */
1421 	if (download_mode)
1422 		qcom_scm_set_download_mode(false);
1423 }
1424 
1425 static const struct of_device_id qcom_scm_dt_match[] = {
1426 	{ .compatible = "qcom,scm-apq8064",
1427 	  /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
1428 	},
1429 	{ .compatible = "qcom,scm-apq8084", .data = (void *)(SCM_HAS_CORE_CLK |
1430 							     SCM_HAS_IFACE_CLK |
1431 							     SCM_HAS_BUS_CLK)
1432 	},
1433 	{ .compatible = "qcom,scm-ipq4019" },
1434 	{ .compatible = "qcom,scm-mdm9607", .data = (void *)(SCM_HAS_CORE_CLK |
1435 							     SCM_HAS_IFACE_CLK |
1436 							     SCM_HAS_BUS_CLK) },
1437 	{ .compatible = "qcom,scm-msm8660", .data = (void *) SCM_HAS_CORE_CLK },
1438 	{ .compatible = "qcom,scm-msm8960", .data = (void *) SCM_HAS_CORE_CLK },
1439 	{ .compatible = "qcom,scm-msm8916", .data = (void *)(SCM_HAS_CORE_CLK |
1440 							     SCM_HAS_IFACE_CLK |
1441 							     SCM_HAS_BUS_CLK)
1442 	},
1443 	{ .compatible = "qcom,scm-msm8953", .data = (void *)(SCM_HAS_CORE_CLK |
1444 							     SCM_HAS_IFACE_CLK |
1445 							     SCM_HAS_BUS_CLK)
1446 	},
1447 	{ .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK |
1448 							     SCM_HAS_IFACE_CLK |
1449 							     SCM_HAS_BUS_CLK)
1450 	},
1451 	{ .compatible = "qcom,scm-msm8976", .data = (void *)(SCM_HAS_CORE_CLK |
1452 							     SCM_HAS_IFACE_CLK |
1453 							     SCM_HAS_BUS_CLK)
1454 	},
1455 	{ .compatible = "qcom,scm-msm8994" },
1456 	{ .compatible = "qcom,scm-msm8996" },
1457 	{ .compatible = "qcom,scm" },
1458 	{}
1459 };
1460 MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);
1461 
1462 static struct platform_driver qcom_scm_driver = {
1463 	.driver = {
1464 		.name	= "qcom_scm",
1465 		.of_match_table = qcom_scm_dt_match,
1466 		.suppress_bind_attrs = true,
1467 	},
1468 	.probe = qcom_scm_probe,
1469 	.shutdown = qcom_scm_shutdown,
1470 };
1471 
1472 static int __init qcom_scm_init(void)
1473 {
1474 	return platform_driver_register(&qcom_scm_driver);
1475 }
1476 subsys_initcall(qcom_scm_init);
1477 
1478 MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
1479 MODULE_LICENSE("GPL v2");
1480