xref: /openbmc/linux/drivers/tee/optee/smc_abi.c (revision 53e8558837be58c1d44d50ad87247a8c56c95c13)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2015-2021, Linaro Limited
4  * Copyright (c) 2016, EPAM Systems
5  */
6 
7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8 
9 #include <linux/arm-smccc.h>
10 #include <linux/errno.h>
11 #include <linux/io.h>
12 #include <linux/sched.h>
13 #include <linux/mm.h>
14 #include <linux/module.h>
15 #include <linux/of.h>
16 #include <linux/of_platform.h>
17 #include <linux/platform_device.h>
18 #include <linux/slab.h>
19 #include <linux/string.h>
20 #include <linux/tee_drv.h>
21 #include <linux/types.h>
22 #include <linux/workqueue.h>
23 #include "optee_private.h"
24 #include "optee_smc.h"
25 #include "optee_rpc_cmd.h"
26 #include <linux/kmemleak.h>
27 #define CREATE_TRACE_POINTS
28 #include "optee_trace.h"
29 
/*
 * This file implements the SMC ABI used when communicating with secure world
 * OP-TEE OS via raw SMCs.
 * This file is divided into the following sections:
 * 1. Convert between struct tee_param and struct optee_msg_param
 * 2. Low level support functions to register shared memory in secure world
 * 3. Dynamic shared memory pool based on alloc_pages()
 * 4. Do a normal scheduled call into secure world
 * 5. Driver initialization.
 */
40 
41 #define OPTEE_SHM_NUM_PRIV_PAGES	CONFIG_OPTEE_SHM_NUM_PRIV_PAGES
42 
43 /*
44  * 1. Convert between struct tee_param and struct optee_msg_param
45  *
46  * optee_from_msg_param() and optee_to_msg_param() are the main
47  * functions.
48  */
49 
50 static int from_msg_param_tmp_mem(struct tee_param *p, u32 attr,
51 				  const struct optee_msg_param *mp)
52 {
53 	struct tee_shm *shm;
54 	phys_addr_t pa;
55 	int rc;
56 
57 	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
58 		  attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
59 	p->u.memref.size = mp->u.tmem.size;
60 	shm = (struct tee_shm *)(unsigned long)mp->u.tmem.shm_ref;
61 	if (!shm) {
62 		p->u.memref.shm_offs = 0;
63 		p->u.memref.shm = NULL;
64 		return 0;
65 	}
66 
67 	rc = tee_shm_get_pa(shm, 0, &pa);
68 	if (rc)
69 		return rc;
70 
71 	p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
72 	p->u.memref.shm = shm;
73 
74 	/* Check that the memref is covered by the shm object */
75 	if (p->u.memref.size) {
76 		size_t o = p->u.memref.shm_offs +
77 			   p->u.memref.size - 1;
78 
79 		rc = tee_shm_get_pa(shm, o, NULL);
80 		if (rc)
81 			return rc;
82 	}
83 
84 	return 0;
85 }
86 
87 static void from_msg_param_reg_mem(struct tee_param *p, u32 attr,
88 				   const struct optee_msg_param *mp)
89 {
90 	struct tee_shm *shm;
91 
92 	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
93 		  attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
94 	p->u.memref.size = mp->u.rmem.size;
95 	shm = (struct tee_shm *)(unsigned long)mp->u.rmem.shm_ref;
96 
97 	if (shm) {
98 		p->u.memref.shm_offs = mp->u.rmem.offs;
99 		p->u.memref.shm = shm;
100 	} else {
101 		p->u.memref.shm_offs = 0;
102 		p->u.memref.shm = NULL;
103 	}
104 }
105 
106 /**
107  * optee_from_msg_param() - convert from OPTEE_MSG parameters to
108  *			    struct tee_param
109  * @optee:	main service struct
110  * @params:	subsystem internal parameter representation
111  * @num_params:	number of elements in the parameter arrays
112  * @msg_params:	OPTEE_MSG parameters
113  * Returns 0 on success or <0 on failure
114  */
115 static int optee_from_msg_param(struct optee *optee, struct tee_param *params,
116 				size_t num_params,
117 				const struct optee_msg_param *msg_params)
118 {
119 	int rc;
120 	size_t n;
121 
122 	for (n = 0; n < num_params; n++) {
123 		struct tee_param *p = params + n;
124 		const struct optee_msg_param *mp = msg_params + n;
125 		u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
126 
127 		switch (attr) {
128 		case OPTEE_MSG_ATTR_TYPE_NONE:
129 			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
130 			memset(&p->u, 0, sizeof(p->u));
131 			break;
132 		case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
133 		case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
134 		case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
135 			optee_from_msg_param_value(p, attr, mp);
136 			break;
137 		case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
138 		case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
139 		case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
140 			rc = from_msg_param_tmp_mem(p, attr, mp);
141 			if (rc)
142 				return rc;
143 			break;
144 		case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
145 		case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
146 		case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
147 			from_msg_param_reg_mem(p, attr, mp);
148 			break;
149 
150 		default:
151 			return -EINVAL;
152 		}
153 	}
154 	return 0;
155 }
156 
157 static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
158 				const struct tee_param *p)
159 {
160 	int rc;
161 	phys_addr_t pa;
162 
163 	mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
164 		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
165 
166 	mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
167 	mp->u.tmem.size = p->u.memref.size;
168 
169 	if (!p->u.memref.shm) {
170 		mp->u.tmem.buf_ptr = 0;
171 		return 0;
172 	}
173 
174 	rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
175 	if (rc)
176 		return rc;
177 
178 	mp->u.tmem.buf_ptr = pa;
179 	mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
180 		    OPTEE_MSG_ATTR_CACHE_SHIFT;
181 
182 	return 0;
183 }
184 
185 static int to_msg_param_reg_mem(struct optee_msg_param *mp,
186 				const struct tee_param *p)
187 {
188 	mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
189 		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
190 
191 	mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
192 	mp->u.rmem.size = p->u.memref.size;
193 	mp->u.rmem.offs = p->u.memref.shm_offs;
194 	return 0;
195 }
196 
197 /**
198  * optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters
199  * @optee:	main service struct
200  * @msg_params:	OPTEE_MSG parameters
201  * @num_params:	number of elements in the parameter arrays
202  * @params:	subsystem itnernal parameter representation
203  * Returns 0 on success or <0 on failure
204  */
205 static int optee_to_msg_param(struct optee *optee,
206 			      struct optee_msg_param *msg_params,
207 			      size_t num_params, const struct tee_param *params)
208 {
209 	int rc;
210 	size_t n;
211 
212 	for (n = 0; n < num_params; n++) {
213 		const struct tee_param *p = params + n;
214 		struct optee_msg_param *mp = msg_params + n;
215 
216 		switch (p->attr) {
217 		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
218 			mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
219 			memset(&mp->u, 0, sizeof(mp->u));
220 			break;
221 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
222 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
223 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
224 			optee_to_msg_param_value(mp, p);
225 			break;
226 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
227 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
228 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
229 			if (tee_shm_is_registered(p->u.memref.shm))
230 				rc = to_msg_param_reg_mem(mp, p);
231 			else
232 				rc = to_msg_param_tmp_mem(mp, p);
233 			if (rc)
234 				return rc;
235 			break;
236 		default:
237 			return -EINVAL;
238 		}
239 	}
240 	return 0;
241 }
242 
/*
 * 2. Low level support functions to register shared memory in secure world
 *
 * Functions to enable/disable shared memory caching in secure world, that
 * is, lazy freeing of previously allocated shared memory. Freeing is
 * performed when a request has been completed.
 *
 * Functions to register and unregister shared memory both for normal
 * clients and for tee-supplicant.
 */
253 
/**
 * optee_enable_shm_cache() - Enables caching of some shared memory allocation
 *			      in OP-TEE
 * @optee:	main service struct
 *
 * Issues OPTEE_SMC_ENABLE_SHM_CACHE, retrying until secure world accepts
 * the request (any status other than OPTEE_SMC_RETURN_OK is treated as
 * "busy" and waited out via the call queue).
 */
static void optee_enable_shm_cache(struct optee *optee)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		optee->smc.invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE,
				     0, 0, 0, 0, 0, 0, 0, &res);
		if (res.a0 == OPTEE_SMC_RETURN_OK)
			break;
		/* Wait for another call to finish, then try again */
		optee_cq_wait_for_completion(&optee->call_queue, &w);
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}
276 
/**
 * __optee_disable_shm_cache() - Disables caching of some shared memory
 *				 allocation in OP-TEE
 * @optee:	main service struct
 * @is_mapped:	true if the cached shared memory addresses were mapped by this
 *		kernel, are safe to dereference, and should be freed
 *
 * Repeatedly asks secure world to release one cached shm object at a time
 * until it reports OPTEE_SMC_RETURN_ENOTAVAIL (cache empty). Each released
 * cookie is freed only when @is_mapped says this kernel created it.
 */
static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		/* The SMC result registers overlay the typed result struct */
		union {
			struct arm_smccc_res smccc;
			struct optee_smc_disable_shm_cache_result result;
		} res;

		optee->smc.invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE,
				     0, 0, 0, 0, 0, 0, 0, &res.smccc);
		if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
			break; /* All shm's freed */
		if (res.result.status == OPTEE_SMC_RETURN_OK) {
			struct tee_shm *shm;

			/*
			 * Shared memory references that were not mapped by
			 * this kernel must be ignored to prevent a crash.
			 */
			if (!is_mapped)
				continue;

			/* The cookie is a tee_shm pointer split over two regs */
			shm = reg_pair_to_ptr(res.result.shm_upper32,
					      res.result.shm_lower32);
			tee_shm_free(shm);
		} else {
			/* Busy: wait for an in-flight call to complete */
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		}
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}
319 
320 /**
321  * optee_disable_shm_cache() - Disables caching of mapped shared memory
322  *			       allocations in OP-TEE
323  * @optee:	main service struct
324  */
325 static void optee_disable_shm_cache(struct optee *optee)
326 {
327 	return __optee_disable_shm_cache(optee, true);
328 }
329 
330 /**
331  * optee_disable_unmapped_shm_cache() - Disables caching of shared memory
332  *					allocations in OP-TEE which are not
333  *					currently mapped
334  * @optee:	main service struct
335  */
336 static void optee_disable_unmapped_shm_cache(struct optee *optee)
337 {
338 	return __optee_disable_shm_cache(optee, false);
339 }
340 
341 #define PAGELIST_ENTRIES_PER_PAGE				\
342 	((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
343 
344 /*
345  * The final entry in each pagelist page is a pointer to the next
346  * pagelist page.
347  */
348 static size_t get_pages_list_size(size_t num_entries)
349 {
350 	int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);
351 
352 	return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
353 }
354 
/* Allocate a physically contiguous, page-aligned OP-TEE pagelist */
static u64 *optee_allocate_pages_list(size_t num_entries)
{
	return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
}
359 
/* Free a pagelist allocated with optee_allocate_pages_list() */
static void optee_free_pages_list(void *list, size_t num_entries)
{
	free_pages_exact(list, get_pages_list_size(num_entries));
}
364 
/**
 * optee_fill_pages_list() - write list of user pages to given shared
 * buffer.
 *
 * @dst: page-aligned buffer where list of pages will be stored
 * @pages: array of pages that represents shared buffer
 * @num_pages: number of entries in @pages
 * @page_offset: offset of user buffer from page start
 *
 * @dst should be big enough to hold list of user page addresses and
 *	links to the next pages of buffer
 */
static void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
				  size_t page_offset)
{
	int n = 0;
	phys_addr_t optee_page;
	/*
	 * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
	 * for details.
	 */
	struct {
		u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
		u64 next_page_data;
	} *pages_data;

	/*
	 * Currently OP-TEE uses 4k page size and it does not look
	 * like this will change in the future.  On other hand, there are
	 * no known ARM architectures with page size < 4k.
	 * Thus the next build assert looks redundant. But the following
	 * code heavily relies on this assumption, so it is better be
	 * safe than sorry.
	 */
	BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	pages_data = (void *)dst;
	/*
	 * If linux page is bigger than 4k, and user buffer offset is
	 * larger than 4k/8k/12k/etc this will skip first 4k pages,
	 * because they bear no value data for OP-TEE.
	 */
	optee_page = page_to_phys(*pages) +
		round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	while (true) {
		pages_data->pages_list[n++] = optee_page;

		/* Last slot of a list page links to the next list page */
		if (n == PAGELIST_ENTRIES_PER_PAGE) {
			pages_data->next_page_data =
				virt_to_phys(pages_data + 1);
			pages_data++;
			n = 0;
		}

		optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
		/* Crossed a Linux page boundary: advance to the next page */
		if (!(optee_page & ~PAGE_MASK)) {
			if (!--num_pages)
				break;
			pages++;
			optee_page = page_to_phys(*pages);
		}
	}
}
429 
430 static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
431 			      struct page **pages, size_t num_pages,
432 			      unsigned long start)
433 {
434 	struct optee *optee = tee_get_drvdata(ctx->teedev);
435 	struct optee_msg_arg *msg_arg;
436 	struct tee_shm *shm_arg;
437 	u64 *pages_list;
438 	int rc;
439 
440 	if (!num_pages)
441 		return -EINVAL;
442 
443 	rc = optee_check_mem_type(start, num_pages);
444 	if (rc)
445 		return rc;
446 
447 	pages_list = optee_allocate_pages_list(num_pages);
448 	if (!pages_list)
449 		return -ENOMEM;
450 
451 	shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
452 	if (IS_ERR(shm_arg)) {
453 		rc = PTR_ERR(shm_arg);
454 		goto out;
455 	}
456 
457 	optee_fill_pages_list(pages_list, pages, num_pages,
458 			      tee_shm_get_page_offset(shm));
459 
460 	msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
461 	msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
462 				OPTEE_MSG_ATTR_NONCONTIG;
463 	msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
464 	msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
465 	/*
466 	 * In the least bits of msg_arg->params->u.tmem.buf_ptr we
467 	 * store buffer offset from 4k page, as described in OP-TEE ABI.
468 	 */
469 	msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
470 	  (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
471 
472 	if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
473 	    msg_arg->ret != TEEC_SUCCESS)
474 		rc = -EINVAL;
475 
476 	tee_shm_free(shm_arg);
477 out:
478 	optee_free_pages_list(pages_list, num_pages);
479 	return rc;
480 }
481 
/*
 * Tell secure world to drop its reference to @shm, previously registered
 * with optee_shm_register(). Returns 0 on success or <0 on failure.
 */
static int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_msg_arg *msg_arg;
	struct tee_shm *shm_arg;
	int rc = 0;

	shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
	if (IS_ERR(shm_arg))
		return PTR_ERR(shm_arg);

	msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;

	/* Secure world identifies the registration by the shm cookie */
	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;

	if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
	    msg_arg->ret != TEEC_SUCCESS)
		rc = -EINVAL;
	tee_shm_free(shm_arg);
	return rc;
}
504 
/*
 * Supplicant variant of shm_register: only validates the memory type.
 */
static int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
				   struct page **pages, size_t num_pages,
				   unsigned long start)
{
	/*
	 * We don't want to register supplicant memory in OP-TEE.
	 * Instead information about it will be passed in RPC code.
	 */
	return optee_check_mem_type(start, num_pages);
}
515 
/* Nothing to undo since optee_shm_register_supp() registers nothing */
static int optee_shm_unregister_supp(struct tee_context *ctx,
				     struct tee_shm *shm)
{
	return 0;
}
521 
522 /*
523  * 3. Dynamic shared memory pool based on alloc_pages()
524  *
525  * Implements an OP-TEE specific shared memory pool which is used
526  * when dynamic shared memory is supported by secure world.
527  *
528  * The main function is optee_shm_pool_alloc_pages().
529  */
530 
531 static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
532 			 struct tee_shm *shm, size_t size)
533 {
534 	/*
535 	 * Shared memory private to the OP-TEE driver doesn't need
536 	 * to be registered with OP-TEE.
537 	 */
538 	if (shm->flags & TEE_SHM_PRIV)
539 		return optee_pool_op_alloc_helper(poolm, shm, size, NULL);
540 
541 	return optee_pool_op_alloc_helper(poolm, shm, size, optee_shm_register);
542 }
543 
/*
 * Free backing pages of @shm, unregistering it from secure world first
 * unless it is driver-private (which was never registered).
 */
static void pool_op_free(struct tee_shm_pool_mgr *poolm,
			 struct tee_shm *shm)
{
	if (!(shm->flags & TEE_SHM_PRIV))
		optee_shm_unregister(shm->ctx, shm);

	free_pages((unsigned long)shm->kaddr, get_order(shm->size));
	shm->kaddr = NULL;
}
553 
/* Release the pool manager allocated by optee_shm_pool_alloc_pages() */
static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
{
	kfree(poolm);
}
558 
/* Operations of the alloc_pages()-based dynamic shared memory pool */
static const struct tee_shm_pool_mgr_ops pool_ops = {
	.alloc = pool_op_alloc,
	.free = pool_op_free,
	.destroy_poolmgr = pool_op_destroy_poolmgr,
};
564 
565 /**
566  * optee_shm_pool_alloc_pages() - create page-based allocator pool
567  *
568  * This pool is used when OP-TEE supports dymanic SHM. In this case
569  * command buffers and such are allocated from kernel's own memory.
570  */
571 static struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void)
572 {
573 	struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
574 
575 	if (!mgr)
576 		return ERR_PTR(-ENOMEM);
577 
578 	mgr->ops = &pool_ops;
579 
580 	return mgr;
581 }
582 
/*
 * 4. Do a normal scheduled call into secure world
 *
 * The function optee_smc_do_call_with_arg() performs a normal scheduled
 * call into secure world. During this call secure world may request help
 * from normal world using RPCs, Remote Procedure Calls. This includes
 * delivery of non-secure interrupts to for instance allow rescheduling of
 * the current task.
 */
592 
593 static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
594 					 struct optee_msg_arg *arg)
595 {
596 	struct tee_shm *shm;
597 
598 	arg->ret_origin = TEEC_ORIGIN_COMMS;
599 
600 	if (arg->num_params != 1 ||
601 	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
602 		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
603 		return;
604 	}
605 
606 	shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
607 	switch (arg->params[0].u.value.a) {
608 	case OPTEE_RPC_SHM_TYPE_APPL:
609 		optee_rpc_cmd_free_suppl(ctx, shm);
610 		break;
611 	case OPTEE_RPC_SHM_TYPE_KERNEL:
612 		tee_shm_free(shm);
613 		break;
614 	default:
615 		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
616 	}
617 	arg->ret = TEEC_SUCCESS;
618 }
619 
/*
 * Handle the OPTEE_RPC_CMD_SHM_ALLOC RPC: allocate shared memory on
 * behalf of secure world. params[0].u.value.a selects the allocation
 * type (supplicant or kernel) and params[0].u.value.b is the requested
 * size. On success params[0] is rewritten to describe the new buffer.
 */
static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
					  struct optee_msg_arg *arg,
					  struct optee_call_ctx *call_ctx)
{
	phys_addr_t pa;
	struct tee_shm *shm;
	size_t sz;
	size_t n;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	if (!arg->num_params ||
	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	/* All parameters beyond the first must be unused */
	for (n = 1; n < arg->num_params; n++) {
		if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
			arg->ret = TEEC_ERROR_BAD_PARAMETERS;
			return;
		}
	}

	sz = arg->params[0].u.value.b;
	switch (arg->params[0].u.value.a) {
	case OPTEE_RPC_SHM_TYPE_APPL:
		/* Memory backed by the user space supplicant */
		shm = optee_rpc_cmd_alloc_suppl(ctx, sz);
		break;
	case OPTEE_RPC_SHM_TYPE_KERNEL:
		shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
		break;
	default:
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	if (IS_ERR(shm)) {
		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
		return;
	}

	if (tee_shm_get_pa(shm, 0, &pa)) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		goto bad;
	}

	sz = tee_shm_get_size(shm);

	if (tee_shm_is_registered(shm)) {
		/*
		 * Registered (possibly non-contiguous) memory: describe it
		 * with an OP-TEE pagelist. The list must stay allocated
		 * until the call finishes, so it's kept in call_ctx and
		 * freed by optee_rpc_finalize_call().
		 */
		struct page **pages;
		u64 *pages_list;
		size_t page_num;

		pages = tee_shm_get_pages(shm, &page_num);
		if (!pages || !page_num) {
			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
			goto bad;
		}

		pages_list = optee_allocate_pages_list(page_num);
		if (!pages_list) {
			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
			goto bad;
		}

		call_ctx->pages_list = pages_list;
		call_ctx->num_entries = page_num;

		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				      OPTEE_MSG_ATTR_NONCONTIG;
		/*
		 * In the least bits of u.tmem.buf_ptr we store buffer offset
		 * from 4k page, as described in OP-TEE ABI.
		 */
		arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
			(tee_shm_get_page_offset(shm) &
			 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
		arg->params[0].u.tmem.size = tee_shm_get_size(shm);
		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;

		optee_fill_pages_list(pages_list, pages, page_num,
				      tee_shm_get_page_offset(shm));
	} else {
		/* Physically contiguous memory: pass the address directly */
		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
		arg->params[0].u.tmem.buf_ptr = pa;
		arg->params[0].u.tmem.size = sz;
		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
	}

	arg->ret = TEEC_SUCCESS;
	return;
bad:
	tee_shm_free(shm);
}
715 
716 static void free_pages_list(struct optee_call_ctx *call_ctx)
717 {
718 	if (call_ctx->pages_list) {
719 		optee_free_pages_list(call_ctx->pages_list,
720 				      call_ctx->num_entries);
721 		call_ctx->pages_list = NULL;
722 		call_ctx->num_entries = 0;
723 	}
724 }
725 
/* Release per-call RPC resources once the secure world call has returned */
static void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
{
	free_pages_list(call_ctx);
}
730 
/*
 * Dispatch an OPTEE_SMC_RPC_FUNC_CMD request. The command and its
 * parameters are found in the message argument struct inside @shm.
 * SHM alloc/free are ABI-specific and handled here; everything else is
 * forwarded to the common optee_rpc_cmd() handler.
 */
static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
				struct tee_shm *shm,
				struct optee_call_ctx *call_ctx)
{
	struct optee_msg_arg *arg;

	arg = tee_shm_get_va(shm, 0);
	if (IS_ERR(arg)) {
		pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm);
		return;
	}

	switch (arg->cmd) {
	case OPTEE_RPC_CMD_SHM_ALLOC:
		/* Drop any pagelist left over from a previous allocation */
		free_pages_list(call_ctx);
		handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
		break;
	case OPTEE_RPC_CMD_SHM_FREE:
		handle_rpc_func_cmd_shm_free(ctx, arg);
		break;
	default:
		optee_rpc_cmd(ctx, optee, arg);
	}
}
755 
/**
 * optee_handle_rpc() - handle RPC from secure world
 * @ctx:	context doing the RPC
 * @param:	value of registers for the RPC
 * @call_ctx:	call context. Preserved during one OP-TEE invocation
 *
 * Result of RPC is written back into @param.
 */
static void optee_handle_rpc(struct tee_context *ctx,
			     struct optee_rpc_param *param,
			     struct optee_call_ctx *call_ctx)
{
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct tee_shm *shm;
	phys_addr_t pa;

	switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
	case OPTEE_SMC_RPC_FUNC_ALLOC:
		/*
		 * Allocate driver-private shared memory of a1 bytes and
		 * return its physical address in a1/a2 and the shm cookie
		 * in a4/a5; zeros signal allocation failure.
		 */
		shm = tee_shm_alloc(ctx, param->a1,
				    TEE_SHM_MAPPED | TEE_SHM_PRIV);
		if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
			reg_pair_from_64(&param->a1, &param->a2, pa);
			reg_pair_from_64(&param->a4, &param->a5,
					 (unsigned long)shm);
		} else {
			param->a1 = 0;
			param->a2 = 0;
			param->a4 = 0;
			param->a5 = 0;
		}
		/* Ownership passes to secure world until RPC_FUNC_FREE */
		kmemleak_not_leak(shm);
		break;
	case OPTEE_SMC_RPC_FUNC_FREE:
		shm = reg_pair_to_ptr(param->a1, param->a2);
		tee_shm_free(shm);
		break;
	case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
		/*
		 * A foreign interrupt was raised while secure world was
		 * executing, since they are handled in Linux a dummy RPC is
		 * performed to let Linux take the interrupt through the normal
		 * vector.
		 */
		break;
	case OPTEE_SMC_RPC_FUNC_CMD:
		shm = reg_pair_to_ptr(param->a1, param->a2);
		handle_rpc_func_cmd(ctx, optee, shm, call_ctx);
		break;
	default:
		pr_warn("Unknown RPC func 0x%x\n",
			(u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
		break;
	}

	param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
}
813 
/**
 * optee_smc_do_call_with_arg() - Do an SMC to OP-TEE in secure world
 * @ctx:	calling context
 * @arg:	shared memory holding the message to pass to secure world
 *
 * Does an SMC to OP-TEE in secure world and handles eventual resulting
 * Remote Procedure Calls (RPC) from OP-TEE.
 *
 * Returns return code from secure world, 0 is OK
 */
static int optee_smc_do_call_with_arg(struct tee_context *ctx,
				      struct tee_shm *arg)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_call_waiter w;
	struct optee_rpc_param param = { };
	struct optee_call_ctx call_ctx = { };
	phys_addr_t parg;
	int rc;

	/* Secure world receives the message buffer by physical address */
	rc = tee_shm_get_pa(arg, 0, &parg);
	if (rc)
		return rc;

	param.a0 = OPTEE_SMC_CALL_WITH_ARG;
	reg_pair_from_64(&param.a1, &param.a2, parg);
	/* Initialize waiter */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		trace_optee_invoke_fn_begin(&param);
		optee->smc.invoke_fn(param.a0, param.a1, param.a2, param.a3,
				     param.a4, param.a5, param.a6, param.a7,
				     &res);
		trace_optee_invoke_fn_end(&param, &res);

		if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
			/*
			 * Out of threads in secure world, wait for a thread
			 * become available.
			 */
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
			/* Secure world needs help; service the RPC and resume */
			cond_resched();
			param.a0 = res.a0;
			param.a1 = res.a1;
			param.a2 = res.a2;
			param.a3 = res.a3;
			optee_handle_rpc(ctx, &param, &call_ctx);
		} else {
			/* Call completed; a0 holds the secure world status */
			rc = res.a0;
			break;
		}
	}

	optee_rpc_finalize_call(&call_ctx);
	/*
	 * We're done with our thread in secure world, if there's any
	 * thread waiters wake up one.
	 */
	optee_cq_wait_final(&optee->call_queue, &w);

	return rc;
}
879 
/*
 * 5. Driver initialization
 *
 * During driver initialization secure world is probed to find out which
 * features it supports so the driver can be initialized with a matching
 * configuration. This involves for instance support for dynamic shared
 * memory instead of a static memory carveout.
 */
888 
889 static void optee_get_version(struct tee_device *teedev,
890 			      struct tee_ioctl_version_data *vers)
891 {
892 	struct tee_ioctl_version_data v = {
893 		.impl_id = TEE_IMPL_ID_OPTEE,
894 		.impl_caps = TEE_OPTEE_CAP_TZ,
895 		.gen_caps = TEE_GEN_CAP_GP,
896 	};
897 	struct optee *optee = tee_get_drvdata(teedev);
898 
899 	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
900 		v.gen_caps |= TEE_GEN_CAP_REG_MEM;
901 	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL)
902 		v.gen_caps |= TEE_GEN_CAP_MEMREF_NULL;
903 	*vers = v;
904 }
905 
906 static int optee_smc_open(struct tee_context *ctx)
907 {
908 	struct optee *optee = tee_get_drvdata(ctx->teedev);
909 	u32 sec_caps = optee->smc.sec_caps;
910 
911 	return optee_open(ctx, sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL);
912 }
913 
/* Operations exposed to normal world client contexts */
static const struct tee_driver_ops optee_clnt_ops = {
	.get_version = optee_get_version,
	.open = optee_smc_open,
	.release = optee_release,
	.open_session = optee_open_session,
	.close_session = optee_close_session,
	.invoke_func = optee_invoke_func,
	.cancel_req = optee_cancel_req,
	.shm_register = optee_shm_register,
	.shm_unregister = optee_shm_unregister,
};

/* Client device description (e.g. /dev/tee<n>) */
static const struct tee_desc optee_clnt_desc = {
	.name = DRIVER_NAME "-clnt",
	.ops = &optee_clnt_ops,
	.owner = THIS_MODULE,
};

/* Operations exposed to the privileged tee-supplicant context */
static const struct tee_driver_ops optee_supp_ops = {
	.get_version = optee_get_version,
	.open = optee_smc_open,
	.release = optee_release_supp,
	.supp_recv = optee_supp_recv,
	.supp_send = optee_supp_send,
	.shm_register = optee_shm_register_supp,
	.shm_unregister = optee_shm_unregister_supp,
};

/* Supplicant device description (e.g. /dev/teepriv<n>) */
static const struct tee_desc optee_supp_desc = {
	.name = DRIVER_NAME "-supp",
	.ops = &optee_supp_ops,
	.owner = THIS_MODULE,
	.flags = TEE_DESC_PRIVILEGED,
};

/* SMC ABI specific hooks used by the ABI-agnostic parts of the driver */
static const struct optee_ops optee_ops = {
	.do_call_with_arg = optee_smc_do_call_with_arg,
	.to_msg_param = optee_to_msg_param,
	.from_msg_param = optee_from_msg_param,
};
954 
955 static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
956 {
957 	struct arm_smccc_res res;
958 
959 	invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);
960 
961 	if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
962 	    res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
963 		return true;
964 	return false;
965 }
966 
967 static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
968 {
969 	union {
970 		struct arm_smccc_res smccc;
971 		struct optee_smc_call_get_os_revision_result result;
972 	} res = {
973 		.result = {
974 			.build_id = 0
975 		}
976 	};
977 
978 	invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0,
979 		  &res.smccc);
980 
981 	if (res.result.build_id)
982 		pr_info("revision %lu.%lu (%08lx)", res.result.major,
983 			res.result.minor, res.result.build_id);
984 	else
985 		pr_info("revision %lu.%lu", res.result.major, res.result.minor);
986 }
987 
988 static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
989 {
990 	union {
991 		struct arm_smccc_res smccc;
992 		struct optee_smc_calls_revision_result result;
993 	} res;
994 
995 	invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
996 
997 	if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
998 	    (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
999 		return true;
1000 	return false;
1001 }
1002 
/*
 * Exchange capabilities with secure world: report normal world
 * properties and retrieve secure world's capability bits into @sec_caps.
 * Returns true on success.
 */
static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
					    u32 *sec_caps)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_exchange_capabilities_result result;
	} res;
	u32 a1 = 0;

	/*
	 * TODO This isn't enough to tell if it's UP system (from kernel
	 * point of view) or not, is_smp() returns the information
	 * needed, but can't be called directly from here.
	 */
	if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
		a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;

	invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
		  &res.smccc);

	if (res.result.status != OPTEE_SMC_RETURN_OK)
		return false;

	*sec_caps = res.result.capabilities;
	return true;
}
1029 
/*
 * Create the shared memory pool used when secure world supports dynamic
 * SHM: both the private and the dma-buf manager allocate from kernel
 * pages. Returns the pool or an ERR_PTR.
 */
static struct tee_shm_pool *optee_config_dyn_shm(void)
{
	struct tee_shm_pool_mgr *priv_mgr;
	struct tee_shm_pool_mgr *dmabuf_mgr;
	void *rc;

	rc = optee_shm_pool_alloc_pages();
	if (IS_ERR(rc))
		return rc;
	priv_mgr = rc;

	rc = optee_shm_pool_alloc_pages();
	if (IS_ERR(rc)) {
		tee_shm_pool_mgr_destroy(priv_mgr);
		return rc;
	}
	dmabuf_mgr = rc;

	/* The pool takes ownership of both managers on success */
	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
	if (IS_ERR(rc)) {
		tee_shm_pool_mgr_destroy(priv_mgr);
		tee_shm_pool_mgr_destroy(dmabuf_mgr);
	}

	return rc;
}
1056 
/*
 * optee_config_shm_memremap() - set up the static (reserved) shm pool
 * @invoke_fn:	conduit (SMC or HVC) used to reach secure world
 * @memremaped_shm: output; kernel mapping of the reserved region, needed
 *		    by the caller to memunmap() at remove time
 *
 * Asks secure world for its reserved shared memory region, maps it, and
 * carves it into two sub-pools: the first OPTEE_SHM_NUM_PRIV_PAGES pages
 * for driver-private allocations, the remainder for dma-buf allocations.
 *
 * Return: pool pointer on success, ERR_PTR() on failure.
 */
static struct tee_shm_pool *
optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_get_shm_config_result result;
	} res;
	unsigned long vaddr;
	phys_addr_t paddr;
	size_t size;
	phys_addr_t begin;
	phys_addr_t end;
	void *va;
	struct tee_shm_pool_mgr *priv_mgr;
	struct tee_shm_pool_mgr *dmabuf_mgr;
	void *rc;
	const int sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;

	invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
	if (res.result.status != OPTEE_SMC_RETURN_OK) {
		pr_err("static shm service not available\n");
		return ERR_PTR(-ENOENT);
	}

	/* Only normal cached memory is supported; reject other settings */
	if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
		pr_err("only normal cached shared memory supported\n");
		return ERR_PTR(-EINVAL);
	}

	/* Trim the reported region to whole pages */
	begin = roundup(res.result.start, PAGE_SIZE);
	end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
	paddr = begin;
	size = end - begin;

	/* Need room for the private pages plus at least as much again */
	if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) {
		pr_err("too small shared memory area\n");
		return ERR_PTR(-EINVAL);
	}

	va = memremap(paddr, size, MEMREMAP_WB);
	if (!va) {
		pr_err("shared memory ioremap failed\n");
		return ERR_PTR(-EINVAL);
	}
	vaddr = (unsigned long)va;

	/* First sz bytes: driver-private pool, 8-byte (2^3) aligned */
	rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
					    3 /* 8 bytes aligned */);
	if (IS_ERR(rc))
		goto err_memunmap;
	priv_mgr = rc;

	/* Remainder: dma-buf pool, page aligned */
	vaddr += sz;
	paddr += sz;
	size -= sz;

	rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
	if (IS_ERR(rc))
		goto err_free_priv_mgr;
	dmabuf_mgr = rc;

	/* Pool takes ownership of both managers on success */
	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
	if (IS_ERR(rc))
		goto err_free_dmabuf_mgr;

	*memremaped_shm = va;

	return rc;

err_free_dmabuf_mgr:
	tee_shm_pool_mgr_destroy(dmabuf_mgr);
err_free_priv_mgr:
	tee_shm_pool_mgr_destroy(priv_mgr);
err_memunmap:
	memunmap(va);
	return rc;
}
1134 
/*
 * Simple wrapper functions to be able to use a function pointer:
 * arm_smccc_smc()/arm_smccc_hvc() cannot be taken by address directly
 * (presumably macros/inlines — see <linux/arm-smccc.h>), so a plain
 * function of type optee_invoke_fn is provided for each conduit.
 */
static void optee_smccc_smc(unsigned long a0, unsigned long a1,
			    unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5,
			    unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
{
	arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}
1144 
/* HVC conduit counterpart of optee_smccc_smc(), same optee_invoke_fn type */
static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
			    unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5,
			    unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
{
	arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}
1153 
1154 static optee_invoke_fn *get_invoke_func(struct device *dev)
1155 {
1156 	const char *method;
1157 
1158 	pr_info("probing for conduit method.\n");
1159 
1160 	if (device_property_read_string(dev, "method", &method)) {
1161 		pr_warn("missing \"method\" property\n");
1162 		return ERR_PTR(-ENXIO);
1163 	}
1164 
1165 	if (!strcmp("hvc", method))
1166 		return optee_smccc_hvc;
1167 	else if (!strcmp("smc", method))
1168 		return optee_smccc_smc;
1169 
1170 	pr_warn("invalid \"method\" property: %s\n", method);
1171 	return ERR_PTR(-EINVAL);
1172 }
1173 
/* optee_remove - Device Removal Routine
 * @pdev: platform device information struct
 *
 * optee_remove is called by platform subsystem to alert the driver
 * that it should release the device
 *
 * Teardown order matters: the shm cache must be flushed in secure world
 * before the shared memory backing it is unmapped below.
 */
static int optee_smc_remove(struct platform_device *pdev)
{
	struct optee *optee = platform_get_drvdata(pdev);

	/*
	 * Ask OP-TEE to free all cached shared memory objects to decrease
	 * reference counters and also avoid wild pointers in secure world
	 * into the old shared memory range.
	 */
	optee_disable_shm_cache(optee);

	optee_remove_common(optee);

	/* Only set when the static (reserved) shm pool was configured */
	if (optee->smc.memremaped_shm)
		memunmap(optee->smc.memremaped_shm);

	kfree(optee);

	return 0;
}
1200 
/* optee_shutdown - Device Shutdown Routine
 * @pdev: platform device information struct
 *
 * platform_shutdown is called by the platform subsystem to alert
 * the driver that a shutdown, reboot, or kexec is happening and
 * device must be disabled. Flushing the shm cache here keeps secure
 * world from holding references into memory the next kernel may reuse.
 */
static void optee_shutdown(struct platform_device *pdev)
{
	struct optee *optee = platform_get_drvdata(pdev);

	optee_disable_shm_cache(optee);
}
1212 
/*
 * optee_probe() - bring up the OP-TEE SMC ABI driver instance
 * @pdev: platform device matched via optee_dt_match
 *
 * Probes the conduit, validates API UID/revision, exchanges capabilities,
 * configures a shared memory pool (dynamic preferred, static fallback),
 * allocates/registers the client and supplicant TEE devices and finally
 * enumerates TEE-backed devices.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int optee_probe(struct platform_device *pdev)
{
	optee_invoke_fn *invoke_fn;
	/*
	 * Start as an error so the static-shm fallback below triggers when
	 * dynamic shm is not advertised (pool never assigned before then).
	 */
	struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
	struct optee *optee = NULL;
	void *memremaped_shm = NULL;
	struct tee_device *teedev;
	u32 sec_caps;
	int rc;

	invoke_fn = get_invoke_func(&pdev->dev);
	if (IS_ERR(invoke_fn))
		return PTR_ERR(invoke_fn);

	if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
		pr_warn("api uid mismatch\n");
		return -EINVAL;
	}

	/* Purely informational; logs the OS revision */
	optee_msg_get_os_revision(invoke_fn);

	if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
		pr_warn("api revision mismatch\n");
		return -EINVAL;
	}

	if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
		pr_warn("capabilities mismatch\n");
		return -EINVAL;
	}

	/*
	 * Try to use dynamic shared memory if possible
	 */
	if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		pool = optee_config_dyn_shm();

	/*
	 * If dynamic shared memory is not available or failed - try static one
	 */
	if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
		pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);

	if (IS_ERR(pool))
		return PTR_ERR(pool);

	optee = kzalloc(sizeof(*optee), GFP_KERNEL);
	if (!optee) {
		rc = -ENOMEM;
		goto err;
	}

	optee->ops = &optee_ops;
	optee->smc.invoke_fn = invoke_fn;
	optee->smc.sec_caps = sec_caps;

	teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
		goto err;
	}
	optee->teedev = teedev;

	teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
		goto err;
	}
	optee->supp_teedev = teedev;

	rc = tee_device_register(optee->teedev);
	if (rc)
		goto err;

	rc = tee_device_register(optee->supp_teedev);
	if (rc)
		goto err;

	mutex_init(&optee->call_queue.mutex);
	INIT_LIST_HEAD(&optee->call_queue.waiters);
	optee_wait_queue_init(&optee->wait_queue);
	optee_supp_init(&optee->supp);
	optee->smc.memremaped_shm = memremaped_shm;
	optee->pool = pool;

	/*
	 * Ensure that there are no pre-existing shm objects before enabling
	 * the shm cache so that there's no chance of receiving an invalid
	 * address during shutdown. This could occur, for example, if we're
	 * kexec booting from an older kernel that did not properly cleanup the
	 * shm cache.
	 */
	optee_disable_unmapped_shm_cache(optee);

	optee_enable_shm_cache(optee);

	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		pr_info("dynamic shared memory is enabled\n");

	/* drvdata must be set before optee_smc_remove() can run below */
	platform_set_drvdata(pdev, optee);

	rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
	if (rc) {
		/* Full teardown; frees optee, pool and the static mapping */
		optee_smc_remove(pdev);
		return rc;
	}

	pr_info("initialized driver\n");
	return 0;
err:
	if (optee) {
		/*
		 * tee_device_unregister() is safe to call even if the
		 * devices hasn't been registered with
		 * tee_device_register() yet.
		 */
		tee_device_unregister(optee->supp_teedev);
		tee_device_unregister(optee->teedev);
		kfree(optee);
	}
	/* pool is known valid (not IS_ERR) on every path reaching here */
	if (pool)
		tee_shm_pool_free(pool);
	if (memremaped_shm)
		memunmap(memremaped_shm);
	return rc;
}
1339 
/* Device-tree match table: binds this driver to "linaro,optee-tz" nodes */
static const struct of_device_id optee_dt_match[] = {
	{ .compatible = "linaro,optee-tz" },
	{},
};
MODULE_DEVICE_TABLE(of, optee_dt_match);
1345 
/* Platform driver glue; registered/unregistered by the functions below */
static struct platform_driver optee_driver = {
	.probe  = optee_probe,
	.remove = optee_smc_remove,
	.shutdown = optee_shutdown,
	.driver = {
		.name = "optee",
		.of_match_table = optee_dt_match,
	},
};
1355 
/* Register the SMC ABI platform driver; called from the common OP-TEE core */
int optee_smc_abi_register(void)
{
	return platform_driver_register(&optee_driver);
}
1360 
/* Unregister the SMC ABI platform driver; counterpart of the above */
void optee_smc_abi_unregister(void)
{
	platform_driver_unregister(&optee_driver);
}
1365