// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2021, Linaro Limited
 * Copyright (c) 2016, EPAM Systems
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irqdomain.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include "optee_private.h"
#include "optee_smc.h"
#include "optee_rpc_cmd.h"
#include <linux/kmemleak.h>
#define CREATE_TRACE_POINTS
#include "optee_trace.h"

/*
 * This file implements the SMC ABI used when communicating with the
 * secure world OP-TEE OS via raw SMCs.
 * This file is divided into the following sections:
 * 1. Convert between struct tee_param and struct optee_msg_param
 * 2. Low level support functions to register shared memory in secure world
 * 3. Dynamic shared memory pool based on alloc_pages()
 * 4. Do a normal scheduled call into secure world
 * 5. Asynchronous notification
 * 6. Driver initialization
 */

#define OPTEE_SHM_NUM_PRIV_PAGES	CONFIG_OPTEE_SHM_NUM_PRIV_PAGES

/*
 * 1. Convert between struct tee_param and struct optee_msg_param
 *
 * optee_from_msg_param() and optee_to_msg_param() are the main
 * functions.
 */
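
/*
 * For reference, the value-parameter conversion that the switch statements
 * below delegate to optee_from_msg_param_value() and
 * optee_to_msg_param_value() (both defined in call.c) boils down to the
 * following sketch; it is illustrative, not a copy of those functions:
 *
 *	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT +
 *		  attr - OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
 *	p->u.value.a = mp->u.value.a;
 *	p->u.value.b = mp->u.value.b;
 *	p->u.value.c = mp->u.value.c;
 *
 * The memref conversions below use the same INPUT/OUTPUT/INOUT offset
 * arithmetic on the attribute.
 */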

static int from_msg_param_tmp_mem(struct tee_param *p, u32 attr,
				  const struct optee_msg_param *mp)
{
	struct tee_shm *shm;
	phys_addr_t pa;
	int rc;

	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
		  attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
	p->u.memref.size = mp->u.tmem.size;
	shm = (struct tee_shm *)(unsigned long)mp->u.tmem.shm_ref;
	if (!shm) {
		p->u.memref.shm_offs = 0;
		p->u.memref.shm = NULL;
		return 0;
	}

	rc = tee_shm_get_pa(shm, 0, &pa);
	if (rc)
		return rc;

	p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
	p->u.memref.shm = shm;

	/* Check that the memref is covered by the shm object */
	if (p->u.memref.size) {
		size_t o = p->u.memref.shm_offs +
			   p->u.memref.size - 1;

		rc = tee_shm_get_pa(shm, o, NULL);
		if (rc)
			return rc;
	}

	return 0;
}
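
/*
 * The attribute arithmetic above relies on both attribute spaces listing
 * INPUT, OUTPUT and INOUT consecutively and in the same order, so a single
 * offset maps one space onto the other. For example:
 *
 *	attr == OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT
 *	=> p->attr == TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
 *		      (OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT -
 *		       OPTEE_MSG_ATTR_TYPE_TMEM_INPUT)
 *		   == TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT
 */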

static void from_msg_param_reg_mem(struct tee_param *p, u32 attr,
				   const struct optee_msg_param *mp)
{
	struct tee_shm *shm;

	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
		  attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	p->u.memref.size = mp->u.rmem.size;
	shm = (struct tee_shm *)(unsigned long)mp->u.rmem.shm_ref;

	if (shm) {
		p->u.memref.shm_offs = mp->u.rmem.offs;
		p->u.memref.shm = shm;
	} else {
		p->u.memref.shm_offs = 0;
		p->u.memref.shm = NULL;
	}
}

/**
 * optee_from_msg_param() - convert from OPTEE_MSG parameters to
 *			    struct tee_param
 * @optee:	main service struct
 * @params:	subsystem internal parameter representation
 * @num_params:	number of elements in the parameter arrays
 * @msg_params:	OPTEE_MSG parameters
 * Returns 0 on success or <0 on failure
 */
static int optee_from_msg_param(struct optee *optee, struct tee_param *params,
				size_t num_params,
				const struct optee_msg_param *msg_params)
{
	int rc;
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_param *p = params + n;
		const struct optee_msg_param *mp = msg_params + n;
		u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;

		switch (attr) {
		case OPTEE_MSG_ATTR_TYPE_NONE:
			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
			memset(&p->u, 0, sizeof(p->u));
			break;
		case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
			optee_from_msg_param_value(p, attr, mp);
			break;
		case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
			rc = from_msg_param_tmp_mem(p, attr, mp);
			if (rc)
				return rc;
			break;
		case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
			from_msg_param_reg_mem(p, attr, mp);
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
				const struct tee_param *p)
{
	int rc;
	phys_addr_t pa;

	mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;

	mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
	mp->u.tmem.size = p->u.memref.size;

	if (!p->u.memref.shm) {
		mp->u.tmem.buf_ptr = 0;
		return 0;
	}

	rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
	if (rc)
		return rc;

	mp->u.tmem.buf_ptr = pa;
	mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
		    OPTEE_MSG_ATTR_CACHE_SHIFT;

	return 0;
}

static int to_msg_param_reg_mem(struct optee_msg_param *mp,
				const struct tee_param *p)
{
	mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;

	mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
	mp->u.rmem.size = p->u.memref.size;
	mp->u.rmem.offs = p->u.memref.shm_offs;
	return 0;
}

/**
 * optee_to_msg_param() - convert from struct tee_param to OPTEE_MSG parameters
 * @optee:	main service struct
 * @msg_params:	OPTEE_MSG parameters
 * @num_params:	number of elements in the parameter arrays
 * @params:	subsystem internal parameter representation
 * Returns 0 on success or <0 on failure
 */
static int optee_to_msg_param(struct optee *optee,
			      struct optee_msg_param *msg_params,
			      size_t num_params, const struct tee_param *params)
{
	int rc;
	size_t n;

	for (n = 0; n < num_params; n++) {
		const struct tee_param *p = params + n;
		struct optee_msg_param *mp = msg_params + n;

		switch (p->attr) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
			mp->attr = OPTEE_MSG_ATTR_TYPE_NONE;
			memset(&mp->u, 0, sizeof(mp->u));
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			optee_to_msg_param_value(mp, p);
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			if (tee_shm_is_registered(p->u.memref.shm))
				rc = to_msg_param_reg_mem(mp, p);
			else
				rc = to_msg_param_tmp_mem(mp, p);
			if (rc)
				return rc;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}
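
/*
 * Note the memref split in optee_to_msg_param() above: buffers already
 * registered with secure world (tee_shm_is_registered()) are passed by
 * reference as RMEM parameters, while everything else is passed as TMEM
 * with a physical address resolved at call time.
 */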

/*
 * 2. Low level support functions to register shared memory in secure world
 *
 * Functions to enable/disable shared memory caching in secure world, that
 * is, lazy freeing of previously allocated shared memory. Freeing is
 * performed when a request has been completed.
 *
 * Functions to register and unregister shared memory both for normal
 * clients and for tee-supplicant.
 */

/**
 * optee_enable_shm_cache() - Enables caching of some shared memory
 *			      allocations in OP-TEE
 * @optee:	main service struct
 */
static void optee_enable_shm_cache(struct optee *optee)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		optee->smc.invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE,
				     0, 0, 0, 0, 0, 0, 0, &res);
		if (res.a0 == OPTEE_SMC_RETURN_OK)
			break;
		optee_cq_wait_for_completion(&optee->call_queue, &w);
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}

/**
 * __optee_disable_shm_cache() - Disables caching of some shared memory
 *				 allocations in OP-TEE
 * @optee:	main service struct
 * @is_mapped:	true if the cached shared memory addresses were mapped by this
 *		kernel, are safe to dereference, and should be freed
 */
static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		union {
			struct arm_smccc_res smccc;
			struct optee_smc_disable_shm_cache_result result;
		} res;

		optee->smc.invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE,
				     0, 0, 0, 0, 0, 0, 0, &res.smccc);
		if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
			break; /* All shm's freed */
		if (res.result.status == OPTEE_SMC_RETURN_OK) {
			struct tee_shm *shm;

			/*
			 * Shared memory references that were not mapped by
			 * this kernel must be ignored to prevent a crash.
			 */
			if (!is_mapped)
				continue;

			shm = reg_pair_to_ptr(res.result.shm_upper32,
					      res.result.shm_lower32);
			tee_shm_free(shm);
		} else {
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		}
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}

/**
 * optee_disable_shm_cache() - Disables caching of mapped shared memory
 *			       allocations in OP-TEE
 * @optee:	main service struct
 */
static void optee_disable_shm_cache(struct optee *optee)
{
	return __optee_disable_shm_cache(optee, true);
}

/**
 * optee_disable_unmapped_shm_cache() - Disables caching of shared memory
 *					allocations in OP-TEE which are not
 *					currently mapped
 * @optee:	main service struct
 */
static void optee_disable_unmapped_shm_cache(struct optee *optee)
{
	return __optee_disable_shm_cache(optee, false);
}

#define PAGELIST_ENTRIES_PER_PAGE				\
	((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)

/*
 * The final entry in each pagelist page is a pointer to the next
 * pagelist page.
 */
static size_t get_pages_list_size(size_t num_entries)
{
	int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);

	return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
}
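
/*
 * Worked example, assuming OPTEE_MSG_NONCONTIG_PAGE_SIZE == 4096 and
 * sizeof(u64) == 8: each pagelist page holds 4096 / 8 - 1 = 511 entries
 * plus the link to the next pagelist page. Registering a 1000-page buffer
 * thus needs DIV_ROUND_UP(1000, 511) = 2 pagelist pages, so
 * get_pages_list_size(1000) == 8192.
 */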

static u64 *optee_allocate_pages_list(size_t num_entries)
{
	return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
}

static void optee_free_pages_list(void *list, size_t num_entries)
{
	free_pages_exact(list, get_pages_list_size(num_entries));
}

/**
 * optee_fill_pages_list() - write a list of user pages to the given shared
 * buffer
 *
 * @dst: page-aligned buffer where the list of pages will be stored
 * @pages: array of pages that represents the shared buffer
 * @num_pages: number of entries in @pages
 * @page_offset: offset of the user buffer from the page start
 *
 * @dst should be big enough to hold the list of user page addresses and
 *	links to the next pages of the buffer
 */
static void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
				  size_t page_offset)
{
	int n = 0;
	phys_addr_t optee_page;
	/*
	 * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
	 * for details.
	 */
	struct {
		u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
		u64 next_page_data;
	} *pages_data;

	/*
	 * Currently OP-TEE uses a 4k page size and it does not look like
	 * this will change in the future. On the other hand, there are
	 * no known ARM architectures with a page size smaller than 4k.
	 * Thus the build assert below looks redundant. But the following
	 * code heavily relies on this assumption, so it is better to be
	 * safe than sorry.
	 */
	BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	pages_data = (void *)dst;
	/*
	 * If the Linux page is bigger than 4k and the user buffer offset is
	 * larger than 4k/8k/12k/etc, this skips the first 4k chunks of the
	 * page, because they carry no data of value for OP-TEE.
	 */
	optee_page = page_to_phys(*pages) +
		round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	while (true) {
		pages_data->pages_list[n++] = optee_page;

		if (n == PAGELIST_ENTRIES_PER_PAGE) {
			pages_data->next_page_data =
				virt_to_phys(pages_data + 1);
			pages_data++;
			n = 0;
		}

		optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
		if (!(optee_page & ~PAGE_MASK)) {
			if (!--num_pages)
				break;
			pages++;
			optee_page = page_to_phys(*pages);
		}
	}
}
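
/*
 * Illustrative result of the loop above for a three-page buffer with
 * page_offset 0x100 and 4k pages, assuming (only for readability) that
 * the pages are physically contiguous from 0x80001000:
 *
 *	dst[0] = 0x80001000
 *	dst[1] = 0x80002000
 *	dst[2] = 0x80003000
 *	dst[511] = link to the next pagelist page, had the buffer needed
 *		   more than 511 entries
 *
 * The sub-page offset (0x100) is not stored here; it travels in the low
 * bits of u.tmem.buf_ptr, see optee_shm_register() below.
 */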

static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
			      struct page **pages, size_t num_pages,
			      unsigned long start)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_msg_arg *msg_arg;
	struct tee_shm *shm_arg;
	u64 *pages_list;
	int rc;

	if (!num_pages)
		return -EINVAL;

	rc = optee_check_mem_type(start, num_pages);
	if (rc)
		return rc;

	pages_list = optee_allocate_pages_list(num_pages);
	if (!pages_list)
		return -ENOMEM;

	shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
	if (IS_ERR(shm_arg)) {
		rc = PTR_ERR(shm_arg);
		goto out;
	}

	optee_fill_pages_list(pages_list, pages, num_pages,
			      tee_shm_get_page_offset(shm));

	msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
	msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				OPTEE_MSG_ATTR_NONCONTIG;
	msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
	msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
	/*
	 * In the least significant bits of msg_arg->params->u.tmem.buf_ptr
	 * we store the buffer offset from the 4k page, as described in the
	 * OP-TEE ABI.
	 */
	msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
	  (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));

	if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
	    msg_arg->ret != TEEC_SUCCESS)
		rc = -EINVAL;

	tee_shm_free(shm_arg);
out:
	optee_free_pages_list(pages_list, num_pages);
	return rc;
}
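
/*
 * Example of the buf_ptr encoding above (hypothetical addresses): with
 * the pagelist at physical address 0x81370000 and a buffer starting
 * 0xa30 bytes into its first 4k page,
 *
 *	msg_arg->params->u.tmem.buf_ptr == 0x81370000 | 0xa30
 *
 * Secure world masks off the low 12 bits to find the pagelist and
 * re-applies the offset when mapping the buffer.
 */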

static int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_msg_arg *msg_arg;
	struct tee_shm *shm_arg;
	int rc = 0;

	shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
	if (IS_ERR(shm_arg))
		return PTR_ERR(shm_arg);

	msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;

	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;

	if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
	    msg_arg->ret != TEEC_SUCCESS)
		rc = -EINVAL;
	tee_shm_free(shm_arg);
	return rc;
}

static int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
				   struct page **pages, size_t num_pages,
				   unsigned long start)
{
	/*
	 * We don't want to register supplicant memory in OP-TEE.
	 * Instead, information about it is passed via RPC.
	 */
	return optee_check_mem_type(start, num_pages);
}

static int optee_shm_unregister_supp(struct tee_context *ctx,
				     struct tee_shm *shm)
{
	return 0;
}

/*
 * 3. Dynamic shared memory pool based on alloc_pages()
 *
 * Implements an OP-TEE specific shared memory pool which is used
 * when dynamic shared memory is supported by secure world.
 *
 * The main function is optee_shm_pool_alloc_pages().
 */

static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
			 struct tee_shm *shm, size_t size)
{
	/*
	 * Shared memory private to the OP-TEE driver doesn't need
	 * to be registered with OP-TEE.
	 */
	if (shm->flags & TEE_SHM_PRIV)
		return optee_pool_op_alloc_helper(poolm, shm, size, NULL);

	return optee_pool_op_alloc_helper(poolm, shm, size, optee_shm_register);
}

static void pool_op_free(struct tee_shm_pool_mgr *poolm,
			 struct tee_shm *shm)
{
	if (!(shm->flags & TEE_SHM_PRIV))
		optee_shm_unregister(shm->ctx, shm);

	free_pages((unsigned long)shm->kaddr, get_order(shm->size));
	shm->kaddr = NULL;
}

static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
{
	kfree(poolm);
}

static const struct tee_shm_pool_mgr_ops pool_ops = {
	.alloc = pool_op_alloc,
	.free = pool_op_free,
	.destroy_poolmgr = pool_op_destroy_poolmgr,
};

/**
 * optee_shm_pool_alloc_pages() - create page-based allocator pool
 *
 * This pool is used when OP-TEE supports dynamic SHM. In this case
 * command buffers and such are allocated from the kernel's own memory.
 */
static struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void)
{
	struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);

	if (!mgr)
		return ERR_PTR(-ENOMEM);

	mgr->ops = &pool_ops;

	return mgr;
}

/*
 * 4. Do a normal scheduled call into secure world
 *
 * The function optee_smc_do_call_with_arg() performs a normal scheduled
 * call into secure world. During this call, secure world may request help
 * from normal world using RPCs, Remote Procedure Calls. This includes
 * delivery of non-secure interrupts to, for instance, allow rescheduling
 * of the current task.
 */
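
/*
 * The resulting SMC ping-pong has roughly this shape (a sketch of the
 * loop in optee_smc_do_call_with_arg() below, not a separate protocol):
 *
 *	param.a0 = OPTEE_SMC_CALL_WITH_ARG;   (message pa in a1/a2)
 *	for (;;) {
 *		invoke_fn(param.a0, ..., &res);
 *		if (!OPTEE_SMC_RETURN_IS_RPC(res.a0))
 *			break;                (res.a0 is the final result)
 *		service the RPC encoded in res.a0..a3;
 *		param.a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
 *	}
 */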

static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
					 struct optee_msg_arg *arg)
{
	struct tee_shm *shm;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	if (arg->num_params != 1 ||
	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
	switch (arg->params[0].u.value.a) {
	case OPTEE_RPC_SHM_TYPE_APPL:
		optee_rpc_cmd_free_suppl(ctx, shm);
		break;
	case OPTEE_RPC_SHM_TYPE_KERNEL:
		tee_shm_free(shm);
		break;
	default:
		/* Return here so the error code isn't overwritten below */
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}
	arg->ret = TEEC_SUCCESS;
}

static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
					  struct optee_msg_arg *arg,
					  struct optee_call_ctx *call_ctx)
{
	phys_addr_t pa;
	struct tee_shm *shm;
	size_t sz;
	size_t n;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	if (!arg->num_params ||
	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	for (n = 1; n < arg->num_params; n++) {
		if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
			arg->ret = TEEC_ERROR_BAD_PARAMETERS;
			return;
		}
	}

	sz = arg->params[0].u.value.b;
	switch (arg->params[0].u.value.a) {
	case OPTEE_RPC_SHM_TYPE_APPL:
		shm = optee_rpc_cmd_alloc_suppl(ctx, sz);
		break;
	case OPTEE_RPC_SHM_TYPE_KERNEL:
		shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
		break;
	default:
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	if (IS_ERR(shm)) {
		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
		return;
	}

	if (tee_shm_get_pa(shm, 0, &pa)) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		goto bad;
	}

	sz = tee_shm_get_size(shm);

	if (tee_shm_is_registered(shm)) {
		struct page **pages;
		u64 *pages_list;
		size_t page_num;

		pages = tee_shm_get_pages(shm, &page_num);
		if (!pages || !page_num) {
			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
			goto bad;
		}

		pages_list = optee_allocate_pages_list(page_num);
		if (!pages_list) {
			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
			goto bad;
		}

		call_ctx->pages_list = pages_list;
		call_ctx->num_entries = page_num;

		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				      OPTEE_MSG_ATTR_NONCONTIG;
		/*
		 * In the least significant bits of u.tmem.buf_ptr we store
		 * the buffer offset from the 4k page, as described in the
		 * OP-TEE ABI.
		 */
		arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
			(tee_shm_get_page_offset(shm) &
			 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
		arg->params[0].u.tmem.size = tee_shm_get_size(shm);
		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;

		optee_fill_pages_list(pages_list, pages, page_num,
				      tee_shm_get_page_offset(shm));
	} else {
		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
		arg->params[0].u.tmem.buf_ptr = pa;
		arg->params[0].u.tmem.size = sz;
		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
	}

	arg->ret = TEEC_SUCCESS;
	return;
bad:
	tee_shm_free(shm);
}

static void free_pages_list(struct optee_call_ctx *call_ctx)
{
	if (call_ctx->pages_list) {
		optee_free_pages_list(call_ctx->pages_list,
				      call_ctx->num_entries);
		call_ctx->pages_list = NULL;
		call_ctx->num_entries = 0;
	}
}

static void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
{
	free_pages_list(call_ctx);
}

static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
				struct tee_shm *shm,
				struct optee_call_ctx *call_ctx)
{
	struct optee_msg_arg *arg;

	arg = tee_shm_get_va(shm, 0);
	if (IS_ERR(arg)) {
		pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm);
		return;
	}

	switch (arg->cmd) {
	case OPTEE_RPC_CMD_SHM_ALLOC:
		free_pages_list(call_ctx);
		handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
		break;
	case OPTEE_RPC_CMD_SHM_FREE:
		handle_rpc_func_cmd_shm_free(ctx, arg);
		break;
	default:
		optee_rpc_cmd(ctx, optee, arg);
	}
}

/**
 * optee_handle_rpc() - handle RPC from secure world
 * @ctx:	context doing the RPC
 * @param:	value of registers for the RPC
 * @call_ctx:	call context. Preserved during one OP-TEE invocation
 *
 * Result of RPC is written back into @param.
 */
static void optee_handle_rpc(struct tee_context *ctx,
			     struct optee_rpc_param *param,
			     struct optee_call_ctx *call_ctx)
{
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct tee_shm *shm;
	phys_addr_t pa;

	switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
	case OPTEE_SMC_RPC_FUNC_ALLOC:
		shm = tee_shm_alloc(ctx, param->a1,
				    TEE_SHM_MAPPED | TEE_SHM_PRIV);
		if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
			reg_pair_from_64(&param->a1, &param->a2, pa);
			reg_pair_from_64(&param->a4, &param->a5,
					 (unsigned long)shm);
		} else {
			param->a1 = 0;
			param->a2 = 0;
			param->a4 = 0;
			param->a5 = 0;
		}
		kmemleak_not_leak(shm);
		break;
	case OPTEE_SMC_RPC_FUNC_FREE:
		shm = reg_pair_to_ptr(param->a1, param->a2);
		tee_shm_free(shm);
		break;
	case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
		/*
		 * A foreign interrupt was raised while secure world was
		 * executing. Since such interrupts are handled in Linux,
		 * a dummy RPC is performed to let Linux take the interrupt
		 * through the normal vector.
		 */
		break;
	case OPTEE_SMC_RPC_FUNC_CMD:
		shm = reg_pair_to_ptr(param->a1, param->a2);
		handle_rpc_func_cmd(ctx, optee, shm, call_ctx);
		break;
	default:
		pr_warn("Unknown RPC func 0x%x\n",
			(u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
		break;
	}

	param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
}

/**
 * optee_smc_do_call_with_arg() - Do an SMC to OP-TEE in secure world
 * @ctx:	calling context
 * @arg:	shared memory holding the message to pass to secure world
 *
 * Does an SMC to OP-TEE in secure world and handles any resulting
 * Remote Procedure Calls (RPCs) from OP-TEE.
 *
 * Returns the return code from secure world; 0 is OK
 */
static int optee_smc_do_call_with_arg(struct tee_context *ctx,
				      struct tee_shm *arg)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_call_waiter w;
	struct optee_rpc_param param = { };
	struct optee_call_ctx call_ctx = { };
	phys_addr_t parg;
	int rc;

	rc = tee_shm_get_pa(arg, 0, &parg);
	if (rc)
		return rc;

	param.a0 = OPTEE_SMC_CALL_WITH_ARG;
	reg_pair_from_64(&param.a1, &param.a2, parg);
	/* Initialize waiter */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		trace_optee_invoke_fn_begin(&param);
		optee->smc.invoke_fn(param.a0, param.a1, param.a2, param.a3,
				     param.a4, param.a5, param.a6, param.a7,
				     &res);
		trace_optee_invoke_fn_end(&param, &res);

		if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
			/*
			 * Out of threads in secure world, wait for a thread
			 * to become available.
			 */
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
			cond_resched();
			param.a0 = res.a0;
			param.a1 = res.a1;
			param.a2 = res.a2;
			param.a3 = res.a3;
			optee_handle_rpc(ctx, &param, &call_ctx);
		} else {
			rc = res.a0;
			break;
		}
	}

	optee_rpc_finalize_call(&call_ctx);
	/*
	 * We're done with our thread in secure world; if there are any
	 * thread waiters, wake up one.
	 */
	optee_cq_wait_final(&optee->call_queue, &w);

	return rc;
}

static int simple_call_with_arg(struct tee_context *ctx, u32 cmd)
{
	struct optee_msg_arg *msg_arg;
	struct tee_shm *shm;

	shm = optee_get_msg_arg(ctx, 0, &msg_arg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = cmd;
	optee_smc_do_call_with_arg(ctx, shm);

	tee_shm_free(shm);
	return 0;
}

static int optee_smc_do_bottom_half(struct tee_context *ctx)
{
	return simple_call_with_arg(ctx, OPTEE_MSG_CMD_DO_BOTTOM_HALF);
}

static int optee_smc_stop_async_notif(struct tee_context *ctx)
{
	return simple_call_with_arg(ctx, OPTEE_MSG_CMD_STOP_ASYNC_NOTIF);
}

/*
 * 5. Asynchronous notification
 */
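
/*
 * Rough order of events for the notification machinery below, assuming
 * secure world advertised OPTEE_SMC_SEC_CAP_ASYNC_NOTIF:
 *
 * 1. probe requests the interrupt and calls OPTEE_SMC_ENABLE_ASYNC_NOTIF
 * 2. secure world raises the non-secure notification interrupt
 * 3. the hard IRQ handler drains values with
 *    OPTEE_SMC_GET_ASYNC_NOTIF_VALUE until VALUE_PENDING is cleared
 * 4. OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF defers to the IRQ thread,
 *    which issues OPTEE_MSG_CMD_DO_BOTTOM_HALF; any other value is
 *    forwarded to optee_notif_send()
 */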

static u32 get_async_notif_value(optee_invoke_fn *invoke_fn, bool *value_valid,
				 bool *value_pending)
{
	struct arm_smccc_res res;

	invoke_fn(OPTEE_SMC_GET_ASYNC_NOTIF_VALUE, 0, 0, 0, 0, 0, 0, 0, &res);

	if (res.a0)
		return 0;
	*value_valid = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID);
	*value_pending = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING);
	return res.a1;
}

static irqreturn_t notif_irq_handler(int irq, void *dev_id)
{
	struct optee *optee = dev_id;
	bool do_bottom_half = false;
	bool value_valid;
	bool value_pending;
	u32 value;

	do {
		value = get_async_notif_value(optee->smc.invoke_fn,
					      &value_valid, &value_pending);
		if (!value_valid)
			break;

		if (value == OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF)
			do_bottom_half = true;
		else
			optee_notif_send(optee, value);
	} while (value_pending);

	if (do_bottom_half)
		return IRQ_WAKE_THREAD;
	return IRQ_HANDLED;
}

static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id)
{
	struct optee *optee = dev_id;

	optee_smc_do_bottom_half(optee->notif.ctx);

	return IRQ_HANDLED;
}

static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
{
	struct tee_context *ctx;
	int rc;

	ctx = teedev_open(optee->teedev);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optee->notif.ctx = ctx;
	rc = request_threaded_irq(irq, notif_irq_handler,
				  notif_irq_thread_fn,
				  0, "optee_notification", optee);
	if (rc)
		goto err_close_ctx;

	optee->smc.notif_irq = irq;

	return 0;

err_close_ctx:
	teedev_close_context(optee->notif.ctx);
	optee->notif.ctx = NULL;

	return rc;
}

static void optee_smc_notif_uninit_irq(struct optee *optee)
{
	if (optee->notif.ctx) {
		optee_smc_stop_async_notif(optee->notif.ctx);
		if (optee->smc.notif_irq) {
			free_irq(optee->smc.notif_irq, optee);
			irq_dispose_mapping(optee->smc.notif_irq);
		}

		/*
		 * The thread normally working with optee->notif.ctx was
		 * stopped with free_irq() above.
		 *
		 * Note we're not using tee_client_close_context() since
		 * this ctx was opened with the internal teedev_open()
		 * helper; teedev_close_context() is its counterpart.
		 */
		teedev_close_context(optee->notif.ctx);
	}
}

/*
 * 6. Driver initialization
 *
 * During driver initialization, secure world is probed to find out which
 * features it supports so the driver can be initialized with a matching
 * configuration. This involves, for instance, support for dynamic shared
 * memory instead of a static memory carveout.
 */

static void optee_get_version(struct tee_device *teedev,
			      struct tee_ioctl_version_data *vers)
{
	struct tee_ioctl_version_data v = {
		.impl_id = TEE_IMPL_ID_OPTEE,
		.impl_caps = TEE_OPTEE_CAP_TZ,
		.gen_caps = TEE_GEN_CAP_GP,
	};
	struct optee *optee = tee_get_drvdata(teedev);

	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		v.gen_caps |= TEE_GEN_CAP_REG_MEM;
	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL)
		v.gen_caps |= TEE_GEN_CAP_MEMREF_NULL;
	*vers = v;
}

static int optee_smc_open(struct tee_context *ctx)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	u32 sec_caps = optee->smc.sec_caps;

	return optee_open(ctx, sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL);
}

static const struct tee_driver_ops optee_clnt_ops = {
	.get_version = optee_get_version,
	.open = optee_smc_open,
	.release = optee_release,
	.open_session = optee_open_session,
	.close_session = optee_close_session,
	.invoke_func = optee_invoke_func,
	.cancel_req = optee_cancel_req,
	.shm_register = optee_shm_register,
	.shm_unregister = optee_shm_unregister,
};

static const struct tee_desc optee_clnt_desc = {
	.name = DRIVER_NAME "-clnt",
	.ops = &optee_clnt_ops,
	.owner = THIS_MODULE,
};

static const struct tee_driver_ops optee_supp_ops = {
	.get_version = optee_get_version,
	.open = optee_smc_open,
	.release = optee_release_supp,
	.supp_recv = optee_supp_recv,
	.supp_send = optee_supp_send,
	.shm_register = optee_shm_register_supp,
	.shm_unregister = optee_shm_unregister_supp,
};

static const struct tee_desc optee_supp_desc = {
	.name = DRIVER_NAME "-supp",
	.ops = &optee_supp_ops,
	.owner = THIS_MODULE,
	.flags = TEE_DESC_PRIVILEGED,
};

static const struct optee_ops optee_ops = {
	.do_call_with_arg = optee_smc_do_call_with_arg,
	.to_msg_param = optee_to_msg_param,
	.from_msg_param = optee_from_msg_param,
};

static int enable_async_notif(optee_invoke_fn *invoke_fn)
{
	struct arm_smccc_res res;

	invoke_fn(OPTEE_SMC_ENABLE_ASYNC_NOTIF, 0, 0, 0, 0, 0, 0, 0, &res);

	if (res.a0)
		return -EINVAL;
	return 0;
}

static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
{
	struct arm_smccc_res res;

	invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);

	if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
	    res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
		return true;
	return false;
}

static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_call_get_os_revision_result result;
	} res = {
		.result = {
			.build_id = 0
		}
	};

	invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0,
		  &res.smccc);

	if (res.result.build_id)
		pr_info("revision %lu.%lu (%08lx)\n", res.result.major,
			res.result.minor, res.result.build_id);
	else
		pr_info("revision %lu.%lu\n", res.result.major,
			res.result.minor);
}

static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_calls_revision_result result;
	} res;

	invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);

	if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
	    (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
		return true;
	return false;
}

static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
					    u32 *sec_caps, u32 *max_notif_value)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_exchange_capabilities_result result;
	} res;
	u32 a1 = 0;

	/*
	 * TODO This isn't enough to tell if it's a UP system (from the
	 * kernel's point of view) or not; is_smp() returns the information
	 * needed, but can't be called directly from here.
	 */
	if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
		a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;

	invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
		  &res.smccc);

	if (res.result.status != OPTEE_SMC_RETURN_OK)
		return false;

	*sec_caps = res.result.capabilities;
	if (*sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF)
		*max_notif_value = res.result.max_notif_value;
	else
		*max_notif_value = OPTEE_DEFAULT_MAX_NOTIF_VALUE;

	return true;
}

static struct tee_shm_pool *optee_config_dyn_shm(void)
{
	struct tee_shm_pool_mgr *priv_mgr;
	struct tee_shm_pool_mgr *dmabuf_mgr;
	void *rc;

	rc = optee_shm_pool_alloc_pages();
	if (IS_ERR(rc))
		return rc;
	priv_mgr = rc;

	rc = optee_shm_pool_alloc_pages();
	if (IS_ERR(rc)) {
		tee_shm_pool_mgr_destroy(priv_mgr);
		return rc;
	}
	dmabuf_mgr = rc;

	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
	if (IS_ERR(rc)) {
		tee_shm_pool_mgr_destroy(priv_mgr);
		tee_shm_pool_mgr_destroy(dmabuf_mgr);
	}

	return rc;
}

static struct tee_shm_pool *
optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_get_shm_config_result result;
	} res;
	unsigned long vaddr;
	phys_addr_t paddr;
	size_t size;
	phys_addr_t begin;
	phys_addr_t end;
	void *va;
	struct tee_shm_pool_mgr *priv_mgr;
	struct tee_shm_pool_mgr *dmabuf_mgr;
	void *rc;
	const int sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;

	invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
	if (res.result.status != OPTEE_SMC_RETURN_OK) {
		pr_err("static shm service not available\n");
		return ERR_PTR(-ENOENT);
	}

	if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
		pr_err("only normal cached shared memory supported\n");
		return ERR_PTR(-EINVAL);
	}

	begin = roundup(res.result.start, PAGE_SIZE);
	end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
	paddr = begin;
	size = end - begin;

	if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) {
		pr_err("too small shared memory area\n");
		return ERR_PTR(-EINVAL);
	}

	va = memremap(paddr, size, MEMREMAP_WB);
	if (!va) {
		pr_err("shared memory memremap failed\n");
		return ERR_PTR(-EINVAL);
	}
	vaddr = (unsigned long)va;

	rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
					    3 /* 8 bytes aligned */);
	if (IS_ERR(rc))
		goto err_memunmap;
	priv_mgr = rc;

	vaddr += sz;
	paddr += sz;
	size -= sz;

	rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
	if (IS_ERR(rc))
		goto err_free_priv_mgr;
	dmabuf_mgr = rc;

	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
	if (IS_ERR(rc))
		goto err_free_dmabuf_mgr;

	*memremaped_shm = va;

	return rc;

err_free_dmabuf_mgr:
	tee_shm_pool_mgr_destroy(dmabuf_mgr);
err_free_priv_mgr:
	tee_shm_pool_mgr_destroy(priv_mgr);
err_memunmap:
	memunmap(va);
	return rc;
}
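
/*
 * Worked example of the carveout split above, assuming 4k pages and
 * CONFIG_OPTEE_SHM_NUM_PRIV_PAGES == 1: the first sz == 4096 bytes of the
 * page-aligned range become the driver-private pool (8-byte aligned
 * allocations for message arguments) and the remainder becomes the
 * dma-buf pool handed out to clients with page-aligned allocations. This
 * is why the range must be at least
 * 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE bytes.
 */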

/* Simple wrapper functions to be able to use a function pointer */
static void optee_smccc_smc(unsigned long a0, unsigned long a1,
			    unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5,
			    unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
{
	arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}

static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
			    unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5,
			    unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
{
	arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}

static optee_invoke_fn *get_invoke_func(struct device *dev)
{
	const char *method;

	pr_info("probing for conduit method.\n");

	if (device_property_read_string(dev, "method", &method)) {
		pr_warn("missing \"method\" property\n");
		return ERR_PTR(-ENXIO);
	}

	if (!strcmp("hvc", method))
		return optee_smccc_hvc;
	else if (!strcmp("smc", method))
		return optee_smccc_smc;

	pr_warn("invalid \"method\" property: %s\n", method);
	return ERR_PTR(-EINVAL);
}
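
/*
 * For reference, a minimal device tree node matched by this driver (see
 * the linaro,optee-tz binding) looks something like:
 *
 *	firmware {
 *		optee {
 *			compatible = "linaro,optee-tz";
 *			method = "smc";		(or "hvc")
 *		};
 *	};
 */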

/* optee_smc_remove - Device Removal Routine
 * @pdev: platform device information struct
 *
 * optee_smc_remove is called by the platform subsystem to alert the driver
 * that it should release the device
 */
static int optee_smc_remove(struct platform_device *pdev)
{
	struct optee *optee = platform_get_drvdata(pdev);

	/*
	 * Ask OP-TEE to free all cached shared memory objects to decrease
	 * reference counters and also avoid wild pointers in secure world
	 * into the old shared memory range.
	 */
	optee_disable_shm_cache(optee);

	optee_smc_notif_uninit_irq(optee);

	optee_remove_common(optee);

	if (optee->smc.memremaped_shm)
		memunmap(optee->smc.memremaped_shm);

	kfree(optee);

	return 0;
}

/* optee_shutdown - Device Shutdown Routine
 * @pdev: platform device information struct
 *
 * optee_shutdown is called by the platform subsystem to alert the driver
 * that a shutdown, reboot, or kexec is happening and the device must be
 * disabled.
 */
static void optee_shutdown(struct platform_device *pdev)
{
	optee_disable_shm_cache(platform_get_drvdata(pdev));
}

static int optee_probe(struct platform_device *pdev)
{
	optee_invoke_fn *invoke_fn;
	struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
	struct optee *optee = NULL;
	void *memremaped_shm = NULL;
	struct tee_device *teedev;
	u32 max_notif_value;
	u32 sec_caps;
	int rc;

	invoke_fn = get_invoke_func(&pdev->dev);
	if (IS_ERR(invoke_fn))
		return PTR_ERR(invoke_fn);

	if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
		pr_warn("api uid mismatch\n");
		return -EINVAL;
	}

	optee_msg_get_os_revision(invoke_fn);

	if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
		pr_warn("api revision mismatch\n");
		return -EINVAL;
	}

	if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps,
					     &max_notif_value)) {
		pr_warn("capabilities mismatch\n");
		return -EINVAL;
	}

	/*
	 * Try to use dynamic shared memory if possible
	 */
	if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		pool = optee_config_dyn_shm();

	/*
	 * If dynamic shared memory is not available or fails, try the
	 * static one
	 */
	if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
		pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);

	if (IS_ERR(pool))
		return PTR_ERR(pool);

	optee = kzalloc(sizeof(*optee), GFP_KERNEL);
	if (!optee) {
		rc = -ENOMEM;
		goto err_free_pool;
	}

	optee->ops = &optee_ops;
	optee->smc.invoke_fn = invoke_fn;
	optee->smc.sec_caps = sec_caps;

	teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
		goto err_free_optee;
	}
	optee->teedev = teedev;

	teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
		goto err_unreg_teedev;
	}
	optee->supp_teedev = teedev;

	rc = tee_device_register(optee->teedev);
	if (rc)
		goto err_unreg_supp_teedev;

	rc = tee_device_register(optee->supp_teedev);
	if (rc)
		goto err_unreg_supp_teedev;

	mutex_init(&optee->call_queue.mutex);
	INIT_LIST_HEAD(&optee->call_queue.waiters);
	optee_supp_init(&optee->supp);
	optee->smc.memremaped_shm = memremaped_shm;
	optee->pool = pool;

	platform_set_drvdata(pdev, optee);
	rc = optee_notif_init(optee, max_notif_value);
	if (rc)
		goto err_supp_uninit;

	if (sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
		unsigned int irq;

		rc = platform_get_irq(pdev, 0);
		if (rc < 0) {
			pr_err("platform_get_irq: ret %d\n", rc);
			goto err_notif_uninit;
		}
		irq = rc;

		rc = optee_smc_notif_init_irq(optee, irq);
		if (rc) {
			irq_dispose_mapping(irq);
			goto err_notif_uninit;
		}
		enable_async_notif(optee->smc.invoke_fn);
		pr_info("Asynchronous notifications enabled\n");
	}

	/*
	 * Ensure that there are no pre-existing shm objects before enabling
	 * the shm cache so that there's no chance of receiving an invalid
	 * address during shutdown. This could occur, for example, if we're
	 * kexec booting from an older kernel that did not properly cleanup the
	 * shm cache.
	 */
	optee_disable_unmapped_shm_cache(optee);

	optee_enable_shm_cache(optee);

	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		pr_info("dynamic shared memory is enabled\n");

	rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
	if (rc)
		goto err_disable_shm_cache;

	pr_info("initialized driver\n");
	return 0;

err_disable_shm_cache:
	optee_disable_shm_cache(optee);
	optee_smc_notif_uninit_irq(optee);
	optee_unregister_devices();
err_notif_uninit:
	optee_notif_uninit(optee);
err_supp_uninit:
	optee_supp_uninit(&optee->supp);
	mutex_destroy(&optee->call_queue.mutex);
err_unreg_supp_teedev:
	tee_device_unregister(optee->supp_teedev);
err_unreg_teedev:
	tee_device_unregister(optee->teedev);
err_free_optee:
	kfree(optee);
err_free_pool:
	tee_shm_pool_free(pool);
	if (memremaped_shm)
		memunmap(memremaped_shm);
	return rc;
}

static const struct of_device_id optee_dt_match[] = {
	{ .compatible = "linaro,optee-tz" },
	{},
};
MODULE_DEVICE_TABLE(of, optee_dt_match);

static struct platform_driver optee_driver = {
	.probe  = optee_probe,
	.remove = optee_smc_remove,
	.shutdown = optee_shutdown,
	.driver = {
		.name = "optee",
		.of_match_table = optee_dt_match,
	},
};

int optee_smc_abi_register(void)
{
	return platform_driver_register(&optee_driver);
}

void optee_smc_abi_unregister(void)
{
	platform_driver_unregister(&optee_driver);
}