// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2021, Linaro Limited
 * Copyright (c) 2016, EPAM Systems
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include "optee_private.h"
#include "optee_smc.h"
#include "optee_rpc_cmd.h"
#define CREATE_TRACE_POINTS
#include "optee_trace.h"

/*
 * This file implements the SMC ABI used when communicating with secure world
 * OP-TEE OS via raw SMCs.
 * This file is divided into the following sections:
 * 1. Convert between struct tee_param and struct optee_msg_param
 * 2. Low level support functions to register shared memory in secure world
 * 3. Dynamic shared memory pool based on alloc_pages()
 * 4. Do a normal scheduled call into secure world
 * 5. Driver initialization.
 */

#define OPTEE_SHM_NUM_PRIV_PAGES	CONFIG_OPTEE_SHM_NUM_PRIV_PAGES

/*
 * 1. Convert between struct tee_param and struct optee_msg_param
 *
 * optee_from_msg_param() and optee_to_msg_param() are the main
 * functions.
 */

static int from_msg_param_tmp_mem(struct tee_param *p, u32 attr,
				  const struct optee_msg_param *mp)
{
	struct tee_shm *shm;
	phys_addr_t pa;
	int rc;

	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
		  attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
	p->u.memref.size = mp->u.tmem.size;
	shm = (struct tee_shm *)(unsigned long)mp->u.tmem.shm_ref;
	if (!shm) {
		p->u.memref.shm_offs = 0;
		p->u.memref.shm = NULL;
		return 0;
	}

	rc = tee_shm_get_pa(shm, 0, &pa);
	if (rc)
		return rc;

	p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
	p->u.memref.shm = shm;

	/* Check that the memref is covered by the shm object */
	if (p->u.memref.size) {
		size_t o = p->u.memref.shm_offs +
			   p->u.memref.size - 1;

		rc = tee_shm_get_pa(shm, o, NULL);
		if (rc)
			return rc;
	}

	return 0;
}
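
/*
 * Illustrative sketch, not part of the driver: the attribute arithmetic
 * in from_msg_param_tmp_mem() above (and its to_msg_param counterparts
 * below) relies on the INPUT/OUTPUT/INOUT variants being consecutive and
 * identically ordered in both attribute spaces. A hypothetical compile
 * time check of that assumption could look like this.
 */
static inline void example_attr_ordering_check(void)
{
	BUILD_BUG_ON(OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT !=
		     OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + 1);
	BUILD_BUG_ON(OPTEE_MSG_ATTR_TYPE_TMEM_INOUT !=
		     OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + 2);
	BUILD_BUG_ON(TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT !=
		     TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT + 1);
	BUILD_BUG_ON(TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT !=
		     TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT + 2);
}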

static void from_msg_param_reg_mem(struct tee_param *p, u32 attr,
				   const struct optee_msg_param *mp)
{
	struct tee_shm *shm;

	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
		  attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	p->u.memref.size = mp->u.rmem.size;
	shm = (struct tee_shm *)(unsigned long)mp->u.rmem.shm_ref;

	if (shm) {
		p->u.memref.shm_offs = mp->u.rmem.offs;
		p->u.memref.shm = shm;
	} else {
		p->u.memref.shm_offs = 0;
		p->u.memref.shm = NULL;
	}
}

/**
 * optee_from_msg_param() - convert from OPTEE_MSG parameters to
 *			    struct tee_param
 * @optee:	main service struct
 * @params:	subsystem internal parameter representation
 * @num_params:	number of elements in the parameter arrays
 * @msg_params:	OPTEE_MSG parameters
 * Returns 0 on success or <0 on failure
 */
static int optee_from_msg_param(struct optee *optee, struct tee_param *params,
				size_t num_params,
				const struct optee_msg_param *msg_params)
{
	int rc;
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_param *p = params + n;
		const struct optee_msg_param *mp = msg_params + n;
		u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;

		switch (attr) {
		case OPTEE_MSG_ATTR_TYPE_NONE:
			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
			memset(&p->u, 0, sizeof(p->u));
			break;
		case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
			optee_from_msg_param_value(p, attr, mp);
			break;
		case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
			rc = from_msg_param_tmp_mem(p, attr, mp);
			if (rc)
				return rc;
			break;
		case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
			from_msg_param_reg_mem(p, attr, mp);
			break;

		default:
			return -EINVAL;
		}
	}
	return 0;
}

static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
				const struct tee_param *p)
{
	int rc;
	phys_addr_t pa;

	mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;

	mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
	mp->u.tmem.size = p->u.memref.size;

	if (!p->u.memref.shm) {
		mp->u.tmem.buf_ptr = 0;
		return 0;
	}

	rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
	if (rc)
		return rc;

	mp->u.tmem.buf_ptr = pa;
	mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
		    OPTEE_MSG_ATTR_CACHE_SHIFT;

	return 0;
}

static int to_msg_param_reg_mem(struct optee_msg_param *mp,
				const struct tee_param *p)
{
	mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;

	mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
	mp->u.rmem.size = p->u.memref.size;
	mp->u.rmem.offs = p->u.memref.shm_offs;
	return 0;
}

/**
 * optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters
 * @optee:	main service struct
 * @msg_params:	OPTEE_MSG parameters
 * @num_params:	number of elements in the parameter arrays
 * @params:	subsystem internal parameter representation
 * Returns 0 on success or <0 on failure
 */
static int optee_to_msg_param(struct optee *optee,
			      struct optee_msg_param *msg_params,
			      size_t num_params, const struct tee_param *params)
{
	int rc;
	size_t n;

	for (n = 0; n < num_params; n++) {
		const struct tee_param *p = params + n;
		struct optee_msg_param *mp = msg_params + n;

		switch (p->attr) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
			mp->attr = OPTEE_MSG_ATTR_TYPE_NONE;
			memset(&mp->u, 0, sizeof(mp->u));
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			optee_to_msg_param_value(mp, p);
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			if (tee_shm_is_registered(p->u.memref.shm))
				rc = to_msg_param_reg_mem(mp, p);
			else
				rc = to_msg_param_tmp_mem(mp, p);
			if (rc)
				return rc;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

/*
 * 2. Low level support functions to register shared memory in secure world
 *
 * Functions to enable/disable shared memory caching in secure world, that
 * is, lazy freeing of previously allocated shared memory. Freeing is
 * performed when a request has been completed.
 *
 * Functions to register and unregister shared memory both for normal
 * clients and for tee-supplicant.
 */

/**
 * optee_enable_shm_cache() - Enables caching of some shared memory allocation
 *			      in OP-TEE
 * @optee:	main service struct
 */
static void optee_enable_shm_cache(struct optee *optee)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		optee->smc.invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE,
				     0, 0, 0, 0, 0, 0, 0, &res);
		if (res.a0 == OPTEE_SMC_RETURN_OK)
			break;
		optee_cq_wait_for_completion(&optee->call_queue, &w);
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}

/**
 * __optee_disable_shm_cache() - Disables caching of some shared memory
 *				 allocation in OP-TEE
 * @optee:	main service struct
 * @is_mapped:	true if the cached shared memory addresses were mapped by this
 *		kernel, are safe to dereference, and should be freed
 */
static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		union {
			struct arm_smccc_res smccc;
			struct optee_smc_disable_shm_cache_result result;
		} res;

		optee->smc.invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE,
				     0, 0, 0, 0, 0, 0, 0, &res.smccc);
		if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
			break; /* All shm's freed */
		if (res.result.status == OPTEE_SMC_RETURN_OK) {
			struct tee_shm *shm;

			/*
			 * Shared memory references that were not mapped by
			 * this kernel must be ignored to prevent a crash.
			 */
			if (!is_mapped)
				continue;

			shm = reg_pair_to_ptr(res.result.shm_upper32,
					      res.result.shm_lower32);
			tee_shm_free(shm);
		} else {
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		}
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}

/**
 * optee_disable_shm_cache() - Disables caching of mapped shared memory
 *			       allocations in OP-TEE
 * @optee:	main service struct
 */
static void optee_disable_shm_cache(struct optee *optee)
{
	return __optee_disable_shm_cache(optee, true);
}

/**
 * optee_disable_unmapped_shm_cache() - Disables caching of shared memory
 *					allocations in OP-TEE which are not
 *					currently mapped
 * @optee:	main service struct
 */
static void optee_disable_unmapped_shm_cache(struct optee *optee)
{
	return __optee_disable_shm_cache(optee, false);
}

#define PAGELIST_ENTRIES_PER_PAGE				\
	((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)

/*
 * The final entry in each pagelist page is a pointer to the next
 * pagelist page.
 */
static size_t get_pages_list_size(size_t num_entries)
{
	int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);

	return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
}
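
/*
 * Illustrative worked example, not part of the driver: with 4k pagelist
 * pages and 8-byte entries, each page holds 511 entries plus one link to
 * the next page. A buffer needing 1000 entries therefore takes
 * DIV_ROUND_UP(1000, 511) == 2 pagelist pages, i.e. 8192 bytes.
 */
static inline size_t example_pages_list_size(void)
{
	return get_pages_list_size(1000);	/* == 8192 */
}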

static u64 *optee_allocate_pages_list(size_t num_entries)
{
	return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
}

static void optee_free_pages_list(void *list, size_t num_entries)
{
	free_pages_exact(list, get_pages_list_size(num_entries));
}

/**
 * optee_fill_pages_list() - write a list of user pages to the given shared
 * buffer.
 *
 * @dst: page-aligned buffer where the list of pages will be stored
 * @pages: array of pages that represents the shared buffer
 * @num_pages: number of entries in @pages
 * @page_offset: offset of the user buffer from the page start
 *
 * @dst should be big enough to hold the list of user page addresses and
 *	links to the next pages of the buffer
 */
static void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
				  size_t page_offset)
{
	int n = 0;
	phys_addr_t optee_page;
	/*
	 * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
	 * for details.
	 */
	struct {
		u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
		u64 next_page_data;
	} *pages_data;

	/*
	 * Currently OP-TEE uses 4k page size and it does not look
	 * like this will change in the future. On the other hand, there are
	 * no known ARM architectures with page size < 4k.
	 * Thus the build assert below looks redundant. But the following
	 * code heavily relies on this assumption, so it is better to be
	 * safe than sorry.
	 */
	BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	pages_data = (void *)dst;
	/*
	 * If a Linux page is bigger than 4k and the user buffer offset is
	 * larger than 4k/8k/12k/etc, this skips the first 4k pages,
	 * because they carry no data of value to OP-TEE.
	 */
	optee_page = page_to_phys(*pages) +
		round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	while (true) {
		pages_data->pages_list[n++] = optee_page;

		if (n == PAGELIST_ENTRIES_PER_PAGE) {
			pages_data->next_page_data =
				virt_to_phys(pages_data + 1);
			pages_data++;
			n = 0;
		}

		optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
		if (!(optee_page & ~PAGE_MASK)) {
			if (!--num_pages)
				break;
			pages++;
			optee_page = page_to_phys(*pages);
		}
	}
}

static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
			      struct page **pages, size_t num_pages,
			      unsigned long start)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_msg_arg *msg_arg;
	struct tee_shm *shm_arg;
	u64 *pages_list;
	int rc;

	if (!num_pages)
		return -EINVAL;

	rc = optee_check_mem_type(start, num_pages);
	if (rc)
		return rc;

	pages_list = optee_allocate_pages_list(num_pages);
	if (!pages_list)
		return -ENOMEM;

	shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
	if (IS_ERR(shm_arg)) {
		rc = PTR_ERR(shm_arg);
		goto out;
	}

	optee_fill_pages_list(pages_list, pages, num_pages,
			      tee_shm_get_page_offset(shm));

	msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
	msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				OPTEE_MSG_ATTR_NONCONTIG;
	msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
	msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
	/*
	 * In the lowest bits of msg_arg->params->u.tmem.buf_ptr we
	 * store the buffer offset from the 4k page, as described in the
	 * OP-TEE ABI.
	 */
	msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
	  (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));

	if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
	    msg_arg->ret != TEEC_SUCCESS)
		rc = -EINVAL;

	tee_shm_free(shm_arg);
out:
	optee_free_pages_list(pages_list, num_pages);
	return rc;
}
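
/*
 * Illustrative sketch, not part of the driver: how a NONCONTIG buf_ptr
 * as built above can be taken apart again. The low 12 bits carry the
 * offset of the buffer start within its first 4k page, the remaining
 * bits are the physical address of the page list itself.
 */
static inline void example_decode_noncontig_buf_ptr(u64 buf_ptr)
{
	u64 offs = buf_ptr & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1);
	u64 list_pa = buf_ptr & ~(u64)(OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1);

	pr_debug("page list at %llx, buffer starts %llu bytes into first page\n",
		 list_pa, offs);
}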

static int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_msg_arg *msg_arg;
	struct tee_shm *shm_arg;
	int rc = 0;

	shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
	if (IS_ERR(shm_arg))
		return PTR_ERR(shm_arg);

	msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;

	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;

	if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
	    msg_arg->ret != TEEC_SUCCESS)
		rc = -EINVAL;
	tee_shm_free(shm_arg);
	return rc;
}

static int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
				   struct page **pages, size_t num_pages,
				   unsigned long start)
{
	/*
	 * We don't want to register supplicant memory in OP-TEE.
	 * Instead information about it will be passed in RPC code.
	 */
	return optee_check_mem_type(start, num_pages);
}

static int optee_shm_unregister_supp(struct tee_context *ctx,
				     struct tee_shm *shm)
{
	return 0;
}

/*
 * 3. Dynamic shared memory pool based on alloc_pages()
 *
 * Implements an OP-TEE specific shared memory pool which is used
 * when dynamic shared memory is supported by secure world.
 *
 * The main function is optee_shm_pool_alloc_pages().
 */

static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
			 struct tee_shm *shm, size_t size)
{
	/*
	 * Shared memory private to the OP-TEE driver doesn't need
	 * to be registered with OP-TEE.
	 */
	if (shm->flags & TEE_SHM_PRIV)
		return optee_pool_op_alloc_helper(poolm, shm, size, NULL);

	return optee_pool_op_alloc_helper(poolm, shm, size, optee_shm_register);
}

static void pool_op_free(struct tee_shm_pool_mgr *poolm,
			 struct tee_shm *shm)
{
	if (!(shm->flags & TEE_SHM_PRIV))
		optee_shm_unregister(shm->ctx, shm);

	free_pages((unsigned long)shm->kaddr, get_order(shm->size));
	shm->kaddr = NULL;
}

static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
{
	kfree(poolm);
}

static const struct tee_shm_pool_mgr_ops pool_ops = {
	.alloc = pool_op_alloc,
	.free = pool_op_free,
	.destroy_poolmgr = pool_op_destroy_poolmgr,
};

/**
 * optee_shm_pool_alloc_pages() - create page-based allocator pool
 *
 * This pool is used when OP-TEE supports dynamic SHM. In this case
 * command buffers and such are allocated from the kernel's own memory.
 */
static struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void)
{
	struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);

	if (!mgr)
		return ERR_PTR(-ENOMEM);

	mgr->ops = &pool_ops;

	return mgr;
}

/*
 * 4. Do a normal scheduled call into secure world
 *
 * The function optee_smc_do_call_with_arg() performs a normal scheduled
 * call into secure world. During this call secure world may request help
 * from normal world using RPCs, Remote Procedure Calls. This includes
 * delivery of non-secure interrupts to, for instance, allow rescheduling of
 * the current task.
 */

static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
					 struct optee_msg_arg *arg)
{
	struct tee_shm *shm;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	if (arg->num_params != 1 ||
	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
	switch (arg->params[0].u.value.a) {
	case OPTEE_RPC_SHM_TYPE_APPL:
		optee_rpc_cmd_free_suppl(ctx, shm);
		break;
	case OPTEE_RPC_SHM_TYPE_KERNEL:
		tee_shm_free(shm);
		break;
	default:
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		/* Return so the error isn't clobbered by the success below */
		return;
	}
	arg->ret = TEEC_SUCCESS;
}

static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
					  struct optee_msg_arg *arg,
					  struct optee_call_ctx *call_ctx)
{
	phys_addr_t pa;
	struct tee_shm *shm;
	size_t sz;
	size_t n;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	if (!arg->num_params ||
	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	for (n = 1; n < arg->num_params; n++) {
		if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
			arg->ret = TEEC_ERROR_BAD_PARAMETERS;
			return;
		}
	}

	sz = arg->params[0].u.value.b;
	switch (arg->params[0].u.value.a) {
	case OPTEE_RPC_SHM_TYPE_APPL:
		shm = optee_rpc_cmd_alloc_suppl(ctx, sz);
		break;
	case OPTEE_RPC_SHM_TYPE_KERNEL:
		shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
		break;
	default:
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	if (IS_ERR(shm)) {
		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
		return;
	}

	if (tee_shm_get_pa(shm, 0, &pa)) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		goto bad;
	}

	sz = tee_shm_get_size(shm);

	if (tee_shm_is_registered(shm)) {
		struct page **pages;
		u64 *pages_list;
		size_t page_num;

		pages = tee_shm_get_pages(shm, &page_num);
		if (!pages || !page_num) {
			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
			goto bad;
		}

		pages_list = optee_allocate_pages_list(page_num);
		if (!pages_list) {
			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
			goto bad;
		}

		call_ctx->pages_list = pages_list;
		call_ctx->num_entries = page_num;

		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				      OPTEE_MSG_ATTR_NONCONTIG;
		/*
		 * In the lowest bits of u.tmem.buf_ptr we store the buffer
		 * offset from the 4k page, as described in the OP-TEE ABI.
		 */
		arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
			(tee_shm_get_page_offset(shm) &
			 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
		arg->params[0].u.tmem.size = tee_shm_get_size(shm);
		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;

		optee_fill_pages_list(pages_list, pages, page_num,
				      tee_shm_get_page_offset(shm));
	} else {
		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
		arg->params[0].u.tmem.buf_ptr = pa;
		arg->params[0].u.tmem.size = sz;
		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
	}

	arg->ret = TEEC_SUCCESS;
	return;
bad:
	tee_shm_free(shm);
}

static void free_pages_list(struct optee_call_ctx *call_ctx)
{
	if (call_ctx->pages_list) {
		optee_free_pages_list(call_ctx->pages_list,
				      call_ctx->num_entries);
		call_ctx->pages_list = NULL;
		call_ctx->num_entries = 0;
	}
}

static void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
{
	free_pages_list(call_ctx);
}

static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
				struct tee_shm *shm,
				struct optee_call_ctx *call_ctx)
{
	struct optee_msg_arg *arg;

	arg = tee_shm_get_va(shm, 0);
	if (IS_ERR(arg)) {
		pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm);
		return;
	}

	switch (arg->cmd) {
	case OPTEE_RPC_CMD_SHM_ALLOC:
		free_pages_list(call_ctx);
		handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
		break;
	case OPTEE_RPC_CMD_SHM_FREE:
		handle_rpc_func_cmd_shm_free(ctx, arg);
		break;
	default:
		optee_rpc_cmd(ctx, optee, arg);
	}
}

/**
 * optee_handle_rpc() - handle RPC from secure world
 * @ctx:	context doing the RPC
 * @param:	value of registers for the RPC
 * @call_ctx:	call context. Preserved during one OP-TEE invocation
 *
 * Result of RPC is written back into @param.
 */
static void optee_handle_rpc(struct tee_context *ctx,
			     struct optee_rpc_param *param,
			     struct optee_call_ctx *call_ctx)
{
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct tee_shm *shm;
	phys_addr_t pa;

	switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
	case OPTEE_SMC_RPC_FUNC_ALLOC:
		shm = tee_shm_alloc(ctx, param->a1,
				    TEE_SHM_MAPPED | TEE_SHM_PRIV);
		if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
			reg_pair_from_64(&param->a1, &param->a2, pa);
			reg_pair_from_64(&param->a4, &param->a5,
					 (unsigned long)shm);
		} else {
			param->a1 = 0;
			param->a2 = 0;
			param->a4 = 0;
			param->a5 = 0;
		}
		break;
	case OPTEE_SMC_RPC_FUNC_FREE:
		shm = reg_pair_to_ptr(param->a1, param->a2);
		tee_shm_free(shm);
		break;
	case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
		/*
		 * A foreign interrupt was raised while secure world was
		 * executing. Since such interrupts are handled in Linux,
		 * a dummy RPC is performed to let Linux take the interrupt
		 * through the normal vector.
		 */
		break;
	case OPTEE_SMC_RPC_FUNC_CMD:
		shm = reg_pair_to_ptr(param->a1, param->a2);
		handle_rpc_func_cmd(ctx, optee, shm, call_ctx);
		break;
	default:
		pr_warn("Unknown RPC func 0x%x\n",
			(u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
		break;
	}

	param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
}
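
/*
 * Illustrative sketch, not part of the driver: pointers and physical
 * addresses cross the SMC boundary as two 32-bit register values, upper
 * half first, using the reg_pair helpers from optee_private.h seen
 * above. A round trip of a hypothetical cookie looks like this.
 */
static inline void example_reg_pair_roundtrip(struct tee_shm *shm)
{
	u32 upper, lower;

	reg_pair_from_64(&upper, &lower, (unsigned long)shm);
	WARN_ON(reg_pair_to_ptr(upper, lower) != (void *)shm);
}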

/**
 * optee_smc_do_call_with_arg() - Do an SMC to OP-TEE in secure world
 * @ctx:	calling context
 * @arg:	shared memory holding the message to pass to secure world
 *
 * Does an SMC to OP-TEE in secure world and handles any resulting
 * Remote Procedure Calls (RPC) from OP-TEE.
 *
 * Returns the return code from secure world; 0 is OK
 */
static int optee_smc_do_call_with_arg(struct tee_context *ctx,
				      struct tee_shm *arg)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_call_waiter w;
	struct optee_rpc_param param = { };
	struct optee_call_ctx call_ctx = { };
	phys_addr_t parg;
	int rc;

	rc = tee_shm_get_pa(arg, 0, &parg);
	if (rc)
		return rc;

	param.a0 = OPTEE_SMC_CALL_WITH_ARG;
	reg_pair_from_64(&param.a1, &param.a2, parg);
	/* Initialize waiter */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		trace_optee_invoke_fn_begin(&param);
		optee->smc.invoke_fn(param.a0, param.a1, param.a2, param.a3,
				     param.a4, param.a5, param.a6, param.a7,
				     &res);
		trace_optee_invoke_fn_end(&param, &res);

		if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
			/*
			 * Out of threads in secure world, wait for a
			 * thread to become available.
			 */
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
			cond_resched();
			param.a0 = res.a0;
			param.a1 = res.a1;
			param.a2 = res.a2;
			param.a3 = res.a3;
			optee_handle_rpc(ctx, &param, &call_ctx);
		} else {
			rc = res.a0;
			break;
		}
	}

	optee_rpc_finalize_call(&call_ctx);
	/*
	 * We're done with our thread in secure world; if there are any
	 * thread waiters, wake up one.
	 */
	optee_cq_wait_final(&optee->call_queue, &w);

	return rc;
}
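
/*
 * Illustrative sketch, not part of the driver: the three classes of SMC
 * return value that the loop in optee_smc_do_call_with_arg() reacts to.
 */
static inline const char *example_classify_smc_return(unsigned long a0)
{
	if (a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT)
		return "secure world out of threads, wait and retry";
	if (OPTEE_SMC_RETURN_IS_RPC(a0))
		return "service the RPC, then resume the call";
	return "final return code from secure world";
}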

/*
 * 5. Driver initialization
 *
 * During driver initialization secure world is probed to find out which
 * features it supports so the driver can be initialized with a matching
 * configuration. This involves for instance support for dynamic shared
 * memory instead of a static memory carveout.
 */

static void optee_get_version(struct tee_device *teedev,
			      struct tee_ioctl_version_data *vers)
{
	struct tee_ioctl_version_data v = {
		.impl_id = TEE_IMPL_ID_OPTEE,
		.impl_caps = TEE_OPTEE_CAP_TZ,
		.gen_caps = TEE_GEN_CAP_GP,
	};
	struct optee *optee = tee_get_drvdata(teedev);

	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		v.gen_caps |= TEE_GEN_CAP_REG_MEM;
	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL)
		v.gen_caps |= TEE_GEN_CAP_MEMREF_NULL;
	*vers = v;
}

static int optee_smc_open(struct tee_context *ctx)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	u32 sec_caps = optee->smc.sec_caps;

	return optee_open(ctx, sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL);
}

static const struct tee_driver_ops optee_clnt_ops = {
	.get_version = optee_get_version,
	.open = optee_smc_open,
	.release = optee_release,
	.open_session = optee_open_session,
	.close_session = optee_close_session,
	.invoke_func = optee_invoke_func,
	.cancel_req = optee_cancel_req,
	.shm_register = optee_shm_register,
	.shm_unregister = optee_shm_unregister,
};

static const struct tee_desc optee_clnt_desc = {
	.name = DRIVER_NAME "-clnt",
	.ops = &optee_clnt_ops,
	.owner = THIS_MODULE,
};

static const struct tee_driver_ops optee_supp_ops = {
	.get_version = optee_get_version,
	.open = optee_smc_open,
	.release = optee_release_supp,
	.supp_recv = optee_supp_recv,
	.supp_send = optee_supp_send,
	.shm_register = optee_shm_register_supp,
	.shm_unregister = optee_shm_unregister_supp,
};

static const struct tee_desc optee_supp_desc = {
	.name = DRIVER_NAME "-supp",
	.ops = &optee_supp_ops,
	.owner = THIS_MODULE,
	.flags = TEE_DESC_PRIVILEGED,
};

static const struct optee_ops optee_ops = {
	.do_call_with_arg = optee_smc_do_call_with_arg,
	.to_msg_param = optee_to_msg_param,
	.from_msg_param = optee_from_msg_param,
};

static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
{
	struct arm_smccc_res res;

	invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);

	if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
	    res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
		return true;
	return false;
}

static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_call_get_os_revision_result result;
	} res = {
		.result = {
			.build_id = 0
		}
	};

	invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0,
		  &res.smccc);

	if (res.result.build_id)
		pr_info("revision %lu.%lu (%08lx)", res.result.major,
			res.result.minor, res.result.build_id);
	else
		pr_info("revision %lu.%lu", res.result.major, res.result.minor);
}

static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_calls_revision_result result;
	} res;

	invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);

	if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
	    (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
		return true;
	return false;
}

static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
					    u32 *sec_caps)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_exchange_capabilities_result result;
	} res;
	u32 a1 = 0;

	/*
	 * TODO This isn't enough to tell if it's a UP system (from the
	 * kernel's point of view) or not, is_smp() returns the information
	 * needed, but can't be called directly from here.
	 */
	if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
		a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;

	invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
		  &res.smccc);

	if (res.result.status != OPTEE_SMC_RETURN_OK)
		return false;

	*sec_caps = res.result.capabilities;
	return true;
}

static struct tee_shm_pool *optee_config_dyn_shm(void)
{
	struct tee_shm_pool_mgr *priv_mgr;
	struct tee_shm_pool_mgr *dmabuf_mgr;
	void *rc;

	rc = optee_shm_pool_alloc_pages();
	if (IS_ERR(rc))
		return rc;
	priv_mgr = rc;

	rc = optee_shm_pool_alloc_pages();
	if (IS_ERR(rc)) {
		tee_shm_pool_mgr_destroy(priv_mgr);
		return rc;
	}
	dmabuf_mgr = rc;

	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
	if (IS_ERR(rc)) {
		tee_shm_pool_mgr_destroy(priv_mgr);
		tee_shm_pool_mgr_destroy(dmabuf_mgr);
	}

	return rc;
}

static struct tee_shm_pool *
optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_get_shm_config_result result;
	} res;
	unsigned long vaddr;
	phys_addr_t paddr;
	size_t size;
	phys_addr_t begin;
	phys_addr_t end;
	void *va;
	struct tee_shm_pool_mgr *priv_mgr;
	struct tee_shm_pool_mgr *dmabuf_mgr;
	void *rc;
	const int sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;

	invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
	if (res.result.status != OPTEE_SMC_RETURN_OK) {
		pr_err("static shm service not available\n");
		return ERR_PTR(-ENOENT);
	}

	if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
		pr_err("only normal cached shared memory supported\n");
		return ERR_PTR(-EINVAL);
	}

	begin = roundup(res.result.start, PAGE_SIZE);
	end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
	paddr = begin;
	size = end - begin;

	if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) {
		pr_err("too small shared memory area\n");
		return ERR_PTR(-EINVAL);
	}

	va = memremap(paddr, size, MEMREMAP_WB);
	if (!va) {
		pr_err("shared memory ioremap failed\n");
		return ERR_PTR(-EINVAL);
	}
	vaddr = (unsigned long)va;

	rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
					    3 /* 8 bytes aligned */);
	if (IS_ERR(rc))
		goto err_memunmap;
	priv_mgr = rc;

	vaddr += sz;
	paddr += sz;
	size -= sz;

	rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
	if (IS_ERR(rc))
		goto err_free_priv_mgr;
	dmabuf_mgr = rc;

	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
	if (IS_ERR(rc))
		goto err_free_dmabuf_mgr;

	*memremaped_shm = va;

	return rc;

err_free_dmabuf_mgr:
	tee_shm_pool_mgr_destroy(dmabuf_mgr);
err_free_priv_mgr:
	tee_shm_pool_mgr_destroy(priv_mgr);
err_memunmap:
	memunmap(va);
	return rc;
}
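
/*
 * Illustrative worked example, hypothetical numbers: with a 2 MiB static
 * carveout, 4 KiB kernel pages and OPTEE_SHM_NUM_PRIV_PAGES == 1, the
 * split above gives the driver-private pool the first 4 KiB (8-byte
 * aligned allocations) and the dma-buf pool the remaining 2 MiB - 4 KiB
 * (page aligned allocations).
 */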

/* Simple wrapper functions to be able to use a function pointer */
static void optee_smccc_smc(unsigned long a0, unsigned long a1,
			    unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5,
			    unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
{
	arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}

static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
			    unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5,
			    unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
{
	arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}

static optee_invoke_fn *get_invoke_func(struct device *dev)
{
	const char *method;

	pr_info("probing for conduit method.\n");

	if (device_property_read_string(dev, "method", &method)) {
		pr_warn("missing \"method\" property\n");
		return ERR_PTR(-ENXIO);
	}

	if (!strcmp("hvc", method))
		return optee_smccc_hvc;
	else if (!strcmp("smc", method))
		return optee_smccc_smc;

	pr_warn("invalid \"method\" property: %s\n", method);
	return ERR_PTR(-EINVAL);
}
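
/*
 * Illustrative device tree fragment, not part of the driver: a node
 * matched by optee_dt_match below, with the "method" property that
 * get_invoke_func() reads to pick the SMC or HVC conduit:
 *
 *	firmware {
 *		optee {
 *			compatible = "linaro,optee-tz";
 *			method = "smc";
 *		};
 *	};
 */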

/* optee_smc_remove - Device Removal Routine
 * @pdev: platform device information struct
 *
 * optee_smc_remove is called by the platform subsystem to alert the driver
 * that it should release the device
 */
static int optee_smc_remove(struct platform_device *pdev)
{
	struct optee *optee = platform_get_drvdata(pdev);

	/*
	 * Ask OP-TEE to free all cached shared memory objects to decrease
	 * reference counters and also avoid wild pointers in secure world
	 * into the old shared memory range.
	 */
	optee_disable_shm_cache(optee);

	optee_remove_common(optee);

	if (optee->smc.memremaped_shm)
		memunmap(optee->smc.memremaped_shm);

	kfree(optee);

	return 0;
}

/* optee_shutdown - Device Shutdown Routine
 * @pdev: platform device information struct
 *
 * optee_shutdown is called by the platform subsystem to alert
 * the driver that a shutdown, reboot, or kexec is happening and
 * the device must be disabled.
 */
static void optee_shutdown(struct platform_device *pdev)
{
	optee_disable_shm_cache(platform_get_drvdata(pdev));
}

static int optee_probe(struct platform_device *pdev)
{
	optee_invoke_fn *invoke_fn;
	struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
	struct optee *optee = NULL;
	void *memremaped_shm = NULL;
	struct tee_device *teedev;
	u32 sec_caps;
	int rc;

	invoke_fn = get_invoke_func(&pdev->dev);
	if (IS_ERR(invoke_fn))
		return PTR_ERR(invoke_fn);

	if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
		pr_warn("api uid mismatch\n");
		return -EINVAL;
	}

	optee_msg_get_os_revision(invoke_fn);

	if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
		pr_warn("api revision mismatch\n");
		return -EINVAL;
	}

	if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
		pr_warn("capabilities mismatch\n");
		return -EINVAL;
	}

	/*
	 * Try to use dynamic shared memory if possible
	 */
	if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		pool = optee_config_dyn_shm();

	/*
	 * If dynamic shared memory is not available or failed - try static one
	 */
	if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
		pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);

	if (IS_ERR(pool))
		return PTR_ERR(pool);

	optee = kzalloc(sizeof(*optee), GFP_KERNEL);
	if (!optee) {
		rc = -ENOMEM;
		goto err;
	}

	optee->ops = &optee_ops;
	optee->smc.invoke_fn = invoke_fn;
	optee->smc.sec_caps = sec_caps;

	teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
		goto err;
	}
	optee->teedev = teedev;

	teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
		goto err;
	}
	optee->supp_teedev = teedev;

	rc = tee_device_register(optee->teedev);
	if (rc)
		goto err;

	rc = tee_device_register(optee->supp_teedev);
	if (rc)
		goto err;

	mutex_init(&optee->call_queue.mutex);
	INIT_LIST_HEAD(&optee->call_queue.waiters);
	optee_wait_queue_init(&optee->wait_queue);
	optee_supp_init(&optee->supp);
	optee->smc.memremaped_shm = memremaped_shm;
	optee->pool = pool;

	/*
	 * Ensure that there are no pre-existing shm objects before enabling
	 * the shm cache so that there's no chance of receiving an invalid
	 * address during shutdown. This could occur, for example, if we're
	 * kexec booting from an older kernel that did not properly cleanup the
	 * shm cache.
	 */
	optee_disable_unmapped_shm_cache(optee);

	optee_enable_shm_cache(optee);

	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		pr_info("dynamic shared memory is enabled\n");

	platform_set_drvdata(pdev, optee);

	rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
	if (rc) {
		optee_smc_remove(pdev);
		return rc;
	}

	pr_info("initialized driver\n");
	return 0;
err:
	if (optee) {
		/*
		 * tee_device_unregister() is safe to call even if the
		 * devices haven't been registered with
		 * tee_device_register() yet.
		 */
		tee_device_unregister(optee->supp_teedev);
		tee_device_unregister(optee->teedev);
		kfree(optee);
	}
	if (pool)
		tee_shm_pool_free(pool);
	if (memremaped_shm)
		memunmap(memremaped_shm);
	return rc;
}

static const struct of_device_id optee_dt_match[] = {
	{ .compatible = "linaro,optee-tz" },
	{},
};
MODULE_DEVICE_TABLE(of, optee_dt_match);

static struct platform_driver optee_driver = {
	.probe  = optee_probe,
	.remove = optee_smc_remove,
	.shutdown = optee_shutdown,
	.driver = {
		.name = "optee",
		.of_match_table = optee_dt_match,
	},
};

int optee_smc_abi_register(void)
{
	return platform_driver_register(&optee_driver);
}

void optee_smc_abi_unregister(void)
{
	platform_driver_unregister(&optee_driver);
}