xref: /openbmc/linux/drivers/tee/optee/core.c (revision 852a53a0)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Linaro Limited
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include "optee_private.h"
#include "optee_smc.h"
#include "shm_pool.h"

#define DRIVER_NAME "optee"

#define OPTEE_SHM_NUM_PRIV_PAGES	CONFIG_OPTEE_SHM_NUM_PRIV_PAGES

/**
 * optee_from_msg_param() - convert from OPTEE_MSG parameters to
 *			    struct tee_param
 * @params:	subsystem internal parameter representation
 * @num_params:	number of elements in the parameter arrays
 * @msg_params:	OPTEE_MSG parameters
 * Returns 0 on success or <0 on failure
 */
int optee_from_msg_param(struct tee_param *params, size_t num_params,
			 const struct optee_msg_param *msg_params)
{
	int rc;
	size_t n;
	struct tee_shm *shm;
	phys_addr_t pa;

	for (n = 0; n < num_params; n++) {
		struct tee_param *p = params + n;
		const struct optee_msg_param *mp = msg_params + n;
		u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;

		switch (attr) {
		case OPTEE_MSG_ATTR_TYPE_NONE:
			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
			memset(&p->u, 0, sizeof(p->u));
			break;
		case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT +
				  attr - OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
			p->u.value.a = mp->u.value.a;
			p->u.value.b = mp->u.value.b;
			p->u.value.c = mp->u.value.c;
			break;
		case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
				  attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
			p->u.memref.size = mp->u.tmem.size;
			shm = (struct tee_shm *)(unsigned long)
				mp->u.tmem.shm_ref;
			if (!shm) {
				p->u.memref.shm_offs = 0;
				p->u.memref.shm = NULL;
				break;
			}
			rc = tee_shm_get_pa(shm, 0, &pa);
			if (rc)
				return rc;
			p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
			p->u.memref.shm = shm;

			/* Check that the memref is covered by the shm object */
			if (p->u.memref.size) {
				size_t o = p->u.memref.shm_offs +
					   p->u.memref.size - 1;

				rc = tee_shm_get_pa(shm, o, NULL);
				if (rc)
					return rc;
			}
			break;
		case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
				  attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
			p->u.memref.size = mp->u.rmem.size;
			shm = (struct tee_shm *)(unsigned long)
				mp->u.rmem.shm_ref;

			if (!shm) {
				p->u.memref.shm_offs = 0;
				p->u.memref.shm = NULL;
				break;
			}
			p->u.memref.shm_offs = mp->u.rmem.offs;
			p->u.memref.shm = shm;

			break;

		default:
			return -EINVAL;
		}
	}
	return 0;
}

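/*
 * Fill in an OPTEE_MSG temporary memory reference from a tee_param memref.
 * The shm cookie and size are always passed; for a non-NULL shm the buffer
 * physical address is resolved and the predefined cache attributes are set.
 */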
static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
				const struct tee_param *p)
{
	int rc;
	phys_addr_t pa;

	mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;

	mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
	mp->u.tmem.size = p->u.memref.size;

	if (!p->u.memref.shm) {
		mp->u.tmem.buf_ptr = 0;
		return 0;
	}

	rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
	if (rc)
		return rc;

	mp->u.tmem.buf_ptr = pa;
	mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
		    OPTEE_MSG_ATTR_CACHE_SHIFT;

	return 0;
}

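/*
 * Fill in an OPTEE_MSG registered memory reference from a tee_param memref.
 * Registered shared memory is identified by the shm cookie plus size and
 * offset, so no physical address lookup is needed.
 */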
static int to_msg_param_reg_mem(struct optee_msg_param *mp,
				const struct tee_param *p)
{
	mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;

	mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
	mp->u.rmem.size = p->u.memref.size;
	mp->u.rmem.offs = p->u.memref.shm_offs;
	return 0;
}

/**
 * optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters
 * @msg_params:	OPTEE_MSG parameters
 * @num_params:	number of elements in the parameter arrays
 * @params:	subsystem internal parameter representation
 * Returns 0 on success or <0 on failure
 */
int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
		       const struct tee_param *params)
{
	int rc;
	size_t n;

	for (n = 0; n < num_params; n++) {
		const struct tee_param *p = params + n;
		struct optee_msg_param *mp = msg_params + n;

		switch (p->attr) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
			mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
			memset(&mp->u, 0, sizeof(mp->u));
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			mp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + p->attr -
				   TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
			mp->u.value.a = p->u.value.a;
			mp->u.value.b = p->u.value.b;
			mp->u.value.c = p->u.value.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			if (tee_shm_is_registered(p->u.memref.shm))
				rc = to_msg_param_reg_mem(mp, p);
			else
				rc = to_msg_param_tmp_mem(mp, p);
			if (rc)
				return rc;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

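/*
 * Report the implementation ID and capabilities to the TEE subsystem;
 * dynamic shared memory support is advertised only when secure world
 * exposes it.
 */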
static void optee_get_version(struct tee_device *teedev,
			      struct tee_ioctl_version_data *vers)
{
	struct tee_ioctl_version_data v = {
		.impl_id = TEE_IMPL_ID_OPTEE,
		.impl_caps = TEE_OPTEE_CAP_TZ,
		.gen_caps = TEE_GEN_CAP_GP,
	};
	struct optee *optee = tee_get_drvdata(teedev);

	if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		v.gen_caps |= TEE_GEN_CAP_REG_MEM;
	*vers = v;
}

static void optee_bus_scan(struct work_struct *work)
{
	WARN_ON(optee_enumerate_devices(PTA_CMD_GET_DEVICES_SUPP));
}

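/*
 * Set up per-context data. Only one context may hold the supplicant device
 * open at a time; the first supplicant open also queues the deferred bus
 * scan for devices that depend on tee-supplicant.
 */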
static int optee_open(struct tee_context *ctx)
{
	struct optee_context_data *ctxdata;
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);

	ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL);
	if (!ctxdata)
		return -ENOMEM;

	if (teedev == optee->supp_teedev) {
		bool busy = true;

		mutex_lock(&optee->supp.mutex);
		if (!optee->supp.ctx) {
			busy = false;
			optee->supp.ctx = ctx;
		}
		mutex_unlock(&optee->supp.mutex);
		if (busy) {
			kfree(ctxdata);
			return -EBUSY;
		}

		if (!optee->scan_bus_done) {
			INIT_WORK(&optee->scan_bus_work, optee_bus_scan);
			optee->scan_bus_wq = create_workqueue("optee_bus_scan");
			if (!optee->scan_bus_wq) {
				kfree(ctxdata);
				return -ECHILD;
			}
			queue_work(optee->scan_bus_wq, &optee->scan_bus_work);
			optee->scan_bus_done = true;
		}
	}
	mutex_init(&ctxdata->mutex);
	INIT_LIST_HEAD(&ctxdata->sess_list);

	ctx->data = ctxdata;
	return 0;
}

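/*
 * Tear down a context: close any sessions still open in secure world
 * (best effort, using a temporary message buffer), free the context data
 * and, for the supplicant device, release the bus-scan workqueue and the
 * supplicant state.
 */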
static void optee_release(struct tee_context *ctx)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct tee_shm *shm;
	struct optee_msg_arg *arg = NULL;
	phys_addr_t parg;
	struct optee_session *sess;
	struct optee_session *sess_tmp;

	if (!ctxdata)
		return;

	shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg), TEE_SHM_MAPPED);
	if (!IS_ERR(shm)) {
		arg = tee_shm_get_va(shm, 0);
		/*
		 * If va2pa fails for some reason, we can't call into
		 * secure world, only free the memory. Secure OS will leak
		 * sessions and finally refuse more sessions, but we will
		 * at least let normal world reclaim its memory.
		 */
		if (!IS_ERR(arg))
			if (tee_shm_va2pa(shm, arg, &parg))
				arg = NULL; /* prevent usage of parg below */
	}

	list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list,
				 list_node) {
		list_del(&sess->list_node);
		if (!IS_ERR_OR_NULL(arg)) {
			memset(arg, 0, sizeof(*arg));
			arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
			arg->session = sess->session_id;
			optee_do_call_with_arg(ctx, parg);
		}
		kfree(sess);
	}
	kfree(ctxdata);

	if (!IS_ERR(shm))
		tee_shm_free(shm);

	ctx->data = NULL;

	if (teedev == optee->supp_teedev) {
		if (optee->scan_bus_wq) {
			destroy_workqueue(optee->scan_bus_wq);
			optee->scan_bus_wq = NULL;
		}
		optee_supp_release(&optee->supp);
	}
}

static const struct tee_driver_ops optee_ops = {
	.get_version = optee_get_version,
	.open = optee_open,
	.release = optee_release,
	.open_session = optee_open_session,
	.close_session = optee_close_session,
	.invoke_func = optee_invoke_func,
	.cancel_req = optee_cancel_req,
	.shm_register = optee_shm_register,
	.shm_unregister = optee_shm_unregister,
};

static const struct tee_desc optee_desc = {
	.name = DRIVER_NAME "-clnt",
	.ops = &optee_ops,
	.owner = THIS_MODULE,
};

static const struct tee_driver_ops optee_supp_ops = {
	.get_version = optee_get_version,
	.open = optee_open,
	.release = optee_release,
	.supp_recv = optee_supp_recv,
	.supp_send = optee_supp_send,
	.shm_register = optee_shm_register_supp,
	.shm_unregister = optee_shm_unregister_supp,
};

static const struct tee_desc optee_supp_desc = {
	.name = DRIVER_NAME "-supp",
	.ops = &optee_supp_ops,
	.owner = THIS_MODULE,
	.flags = TEE_DESC_PRIVILEGED,
};

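/*
 * Check that the firmware at the other end of the SMC/HVC conduit
 * implements the OP-TEE API by comparing the reported UID.
 */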
static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
{
	struct arm_smccc_res res;

	invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);

	if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
	    res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
		return true;
	return false;
}

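/* Query and log the OP-TEE OS revision reported by secure world. */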
static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_call_get_os_revision_result result;
	} res = {
		.result = {
			.build_id = 0
		}
	};

	invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0,
		  &res.smccc);

	if (res.result.build_id)
		pr_info("revision %lu.%lu (%08lx)\n", res.result.major,
			res.result.minor, res.result.build_id);
	else
		pr_info("revision %lu.%lu\n", res.result.major, res.result.minor);
}

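/*
 * The driver is compatible when the major revision matches and the minor
 * revision reported by secure world is at least the one the driver was
 * built against.
 */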
static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_calls_revision_result result;
	} res;

	invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);

	if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
	    (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
		return true;
	return false;
}

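/*
 * Exchange capabilities with secure world: advertise whether normal world
 * runs on a single CPU and retrieve the secure world capability mask.
 */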
static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
					    u32 *sec_caps)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_exchange_capabilities_result result;
	} res;
	u32 a1 = 0;

	/*
	 * TODO This isn't enough to tell if it's a UP system (from the
	 * kernel point of view) or not; is_smp() returns the information
	 * needed, but can't be called directly from here.
	 */
	if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
		a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;

	invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
		  &res.smccc);

	if (res.result.status != OPTEE_SMC_RETURN_OK)
		return false;

	*sec_caps = res.result.capabilities;
	return true;
}

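/*
 * Set up a shared memory pool backed by dynamically allocated pages:
 * one manager for driver-private allocations and one for dma-buf backed
 * allocations handed out to user space.
 */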
static struct tee_shm_pool *optee_config_dyn_shm(void)
{
	struct tee_shm_pool_mgr *priv_mgr;
	struct tee_shm_pool_mgr *dmabuf_mgr;
	void *rc;

	rc = optee_shm_pool_alloc_pages();
	if (IS_ERR(rc))
		return rc;
	priv_mgr = rc;

	rc = optee_shm_pool_alloc_pages();
	if (IS_ERR(rc)) {
		tee_shm_pool_mgr_destroy(priv_mgr);
		return rc;
	}
	dmabuf_mgr = rc;

	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
	if (IS_ERR(rc)) {
		tee_shm_pool_mgr_destroy(priv_mgr);
		tee_shm_pool_mgr_destroy(dmabuf_mgr);
	}

	return rc;
}

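/*
 * Set up a shared memory pool on top of the static (reserved) shared
 * memory region exported by secure world: the first
 * OPTEE_SHM_NUM_PRIV_PAGES pages are kept for driver-private allocations,
 * the remainder backs dma-buf allocations for user space.
 */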
static struct tee_shm_pool *
optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_get_shm_config_result result;
	} res;
	unsigned long vaddr;
	phys_addr_t paddr;
	size_t size;
	phys_addr_t begin;
	phys_addr_t end;
	void *va;
	struct tee_shm_pool_mgr *priv_mgr;
	struct tee_shm_pool_mgr *dmabuf_mgr;
	void *rc;
	const int sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;

	invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
	if (res.result.status != OPTEE_SMC_RETURN_OK) {
		pr_err("static shm service not available\n");
		return ERR_PTR(-ENOENT);
	}

	if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
		pr_err("only normal cached shared memory supported\n");
		return ERR_PTR(-EINVAL);
	}

	begin = roundup(res.result.start, PAGE_SIZE);
	end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
	paddr = begin;
	size = end - begin;

	if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) {
		pr_err("too small shared memory area\n");
		return ERR_PTR(-EINVAL);
	}

	va = memremap(paddr, size, MEMREMAP_WB);
	if (!va) {
		pr_err("shared memory ioremap failed\n");
		return ERR_PTR(-EINVAL);
	}
	vaddr = (unsigned long)va;

	rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
					    3 /* 8 bytes aligned */);
	if (IS_ERR(rc))
		goto err_memunmap;
	priv_mgr = rc;

	vaddr += sz;
	paddr += sz;
	size -= sz;

	rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
	if (IS_ERR(rc))
		goto err_free_priv_mgr;
	dmabuf_mgr = rc;

	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
	if (IS_ERR(rc))
		goto err_free_dmabuf_mgr;

	*memremaped_shm = va;

	return rc;

err_free_dmabuf_mgr:
	tee_shm_pool_mgr_destroy(dmabuf_mgr);
err_free_priv_mgr:
	tee_shm_pool_mgr_destroy(priv_mgr);
err_memunmap:
	memunmap(va);
	return rc;
}

/* Simple wrapper functions to be able to use a function pointer */
static void optee_smccc_smc(unsigned long a0, unsigned long a1,
			    unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5,
			    unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
{
	arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}

static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
			    unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5,
			    unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
{
	arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}

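/*
 * Pick the SMCCC conduit (SMC or HVC) based on the "method" device
 * property of the firmware node.
 */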
static optee_invoke_fn *get_invoke_func(struct device *dev)
{
	const char *method;

	pr_info("probing for conduit method.\n");

	if (device_property_read_string(dev, "method", &method)) {
		pr_warn("missing \"method\" property\n");
		return ERR_PTR(-ENXIO);
	}

	if (!strcmp("hvc", method))
		return optee_smccc_hvc;
	else if (!strcmp("smc", method))
		return optee_smccc_smc;

	pr_warn("invalid \"method\" property: %s\n", method);
	return ERR_PTR(-EINVAL);
}

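/*
 * Undo everything optee_probe() set up: flush the shared memory cache in
 * secure world, unregister both TEE devices and free the remaining driver
 * resources.
 */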
static int optee_remove(struct platform_device *pdev)
{
	struct optee *optee = platform_get_drvdata(pdev);

	/*
	 * Ask OP-TEE to free all cached shared memory objects to decrease
	 * reference counters and also avoid wild pointers in secure world
	 * into the old shared memory range.
	 */
	optee_disable_shm_cache(optee);

	/*
	 * The two devices have to be unregistered before we can free the
	 * other resources.
	 */
	tee_device_unregister(optee->supp_teedev);
	tee_device_unregister(optee->teedev);

	tee_shm_pool_free(optee->pool);
	if (optee->memremaped_shm)
		memunmap(optee->memremaped_shm);
	optee_wait_queue_exit(&optee->wait_queue);
	optee_supp_uninit(&optee->supp);
	mutex_destroy(&optee->call_queue.mutex);

	kfree(optee);

	return 0;
}

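/*
 * Probe sequence: resolve the SMCCC conduit, verify the OP-TEE API UID,
 * revision and capabilities, configure a shared memory pool (dynamic if
 * available, otherwise the reserved region), register the client and
 * supplicant TEE devices and finally enumerate OP-TEE based devices.
 */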
static int optee_probe(struct platform_device *pdev)
{
	optee_invoke_fn *invoke_fn;
	struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
	struct optee *optee = NULL;
	void *memremaped_shm = NULL;
	struct tee_device *teedev;
	u32 sec_caps;
	int rc;

	invoke_fn = get_invoke_func(&pdev->dev);
	if (IS_ERR(invoke_fn))
		return PTR_ERR(invoke_fn);

	if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
		pr_warn("api uid mismatch\n");
		return -EINVAL;
	}

	optee_msg_get_os_revision(invoke_fn);

	if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
		pr_warn("api revision mismatch\n");
		return -EINVAL;
	}

	if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
		pr_warn("capabilities mismatch\n");
		return -EINVAL;
	}

	/*
	 * Try to use dynamic shared memory if possible
	 */
	if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		pool = optee_config_dyn_shm();

	/*
	 * If dynamic shared memory is not available or failed - try static one
	 */
	if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
		pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);

	if (IS_ERR(pool))
		return PTR_ERR(pool);

	optee = kzalloc(sizeof(*optee), GFP_KERNEL);
	if (!optee) {
		rc = -ENOMEM;
		goto err;
	}

	optee->invoke_fn = invoke_fn;
	optee->sec_caps = sec_caps;

	teedev = tee_device_alloc(&optee_desc, NULL, pool, optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
		goto err;
	}
	optee->teedev = teedev;

	teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
		goto err;
	}
	optee->supp_teedev = teedev;

	rc = tee_device_register(optee->teedev);
	if (rc)
		goto err;

	rc = tee_device_register(optee->supp_teedev);
	if (rc)
		goto err;

	mutex_init(&optee->call_queue.mutex);
	INIT_LIST_HEAD(&optee->call_queue.waiters);
	optee_wait_queue_init(&optee->wait_queue);
	optee_supp_init(&optee->supp);
	optee->memremaped_shm = memremaped_shm;
	optee->pool = pool;

	optee_enable_shm_cache(optee);

	if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		pr_info("dynamic shared memory is enabled\n");

	platform_set_drvdata(pdev, optee);

	rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
	if (rc) {
		optee_remove(pdev);
		return rc;
	}

	pr_info("initialized driver\n");
	return 0;
err:
	if (optee) {
		/*
		 * tee_device_unregister() is safe to call even if the
		 * devices haven't been registered with
		 * tee_device_register() yet.
		 */
		tee_device_unregister(optee->supp_teedev);
		tee_device_unregister(optee->teedev);
		kfree(optee);
	}
	if (pool)
		tee_shm_pool_free(pool);
	if (memremaped_shm)
		memunmap(memremaped_shm);
	return rc;
}

static const struct of_device_id optee_dt_match[] = {
	{ .compatible = "linaro,optee-tz" },
	{},
};
MODULE_DEVICE_TABLE(of, optee_dt_match);

static struct platform_driver optee_driver = {
	.probe  = optee_probe,
	.remove = optee_remove,
	.driver = {
		.name = "optee",
		.of_match_table = optee_dt_match,
	},
};
module_platform_driver(optee_driver);

MODULE_AUTHOR("Linaro");
MODULE_DESCRIPTION("OP-TEE driver");
MODULE_SUPPORTED_DEVICE("");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:optee");