xref: /openbmc/linux/drivers/tee/tee_core.c (revision ed1666f6)
1 /*
2  * Copyright (c) 2015-2016, Linaro Limited
3  *
4  * This software is licensed under the terms of the GNU General Public
5  * License version 2, as published by the Free Software Foundation, and
6  * may be copied, distributed, and modified under those terms.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  *
13  */
14 
15 #define pr_fmt(fmt) "%s: " fmt, __func__
16 
17 #include <linux/cdev.h>
18 #include <linux/fs.h>
19 #include <linux/idr.h>
20 #include <linux/module.h>
21 #include <linux/slab.h>
22 #include <linux/tee_drv.h>
23 #include <linux/uaccess.h>
24 #include "tee_private.h"
25 
26 #define TEE_NUM_DEVICES	32
27 
28 #define TEE_IOCTL_PARAM_SIZE(x) (sizeof(struct tee_param) * (x))
29 
30 /*
31  * Unprivileged devices in the lower half range and privileged devices in
32  * the upper half range.
33  */
34 static DECLARE_BITMAP(dev_mask, TEE_NUM_DEVICES);
35 static DEFINE_SPINLOCK(driver_lock);
36 
37 static struct class *tee_class;
38 static dev_t tee_devt;
39 
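/*
 * teedev_open() - open a new tee_context towards @teedev
 *
 * Takes a reference on the device via tee_device_get(), allocates and
 * initialises a new struct tee_context and calls the driver's open()
 * hook. Returns the new context, or an ERR_PTR on failure after
 * dropping the device reference again.
 */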
40 static struct tee_context *teedev_open(struct tee_device *teedev)
41 {
42 	int rc;
43 	struct tee_context *ctx;
44 
45 	if (!tee_device_get(teedev))
46 		return ERR_PTR(-EINVAL);
47 
48 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
49 	if (!ctx) {
50 		rc = -ENOMEM;
51 		goto err;
52 	}
53 
54 	kref_init(&ctx->refcount);
55 	ctx->teedev = teedev;
56 	INIT_LIST_HEAD(&ctx->list_shm);
57 	rc = teedev->desc->ops->open(ctx);
58 	if (rc)
59 		goto err;
60 
61 	return ctx;
62 err:
63 	kfree(ctx);
64 	tee_device_put(teedev);
65 	return ERR_PTR(rc);
66 
67 }
68 
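/*
 * Context reference counting: teedev_ctx_get()/teedev_ctx_put() pair up
 * around ctx->refcount. When the last reference is dropped,
 * teedev_ctx_release() sets ctx->releasing, calls the driver's release()
 * hook and frees the context; the releasing flag makes the get/put
 * helpers return early while that teardown is in progress.
 */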
69 void teedev_ctx_get(struct tee_context *ctx)
70 {
71 	if (ctx->releasing)
72 		return;
73 
74 	kref_get(&ctx->refcount);
75 }
76 
77 static void teedev_ctx_release(struct kref *ref)
78 {
79 	struct tee_context *ctx = container_of(ref, struct tee_context,
80 					       refcount);
81 	ctx->releasing = true;
82 	ctx->teedev->desc->ops->release(ctx);
83 	kfree(ctx);
84 }
85 
86 void teedev_ctx_put(struct tee_context *ctx)
87 {
88 	if (ctx->releasing)
89 		return;
90 
91 	kref_put(&ctx->refcount, teedev_ctx_release);
92 }
93 
94 static void teedev_close_context(struct tee_context *ctx)
95 {
96 	tee_device_put(ctx->teedev);
97 	teedev_ctx_put(ctx);
98 }
99 
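/* file_operations open hook: create a tee_context and attach it to the file */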
100 static int tee_open(struct inode *inode, struct file *filp)
101 {
102 	struct tee_context *ctx;
103 
104 	ctx = teedev_open(container_of(inode->i_cdev, struct tee_device, cdev));
105 	if (IS_ERR(ctx))
106 		return PTR_ERR(ctx);
107 
108 	/*
109 	 * Default user-space behaviour is to wait for tee-supplicant
110 	 * if it is not present, for any requests in this context.
111 	 */
112 	ctx->supp_nowait = false;
113 	filp->private_data = ctx;
114 	return 0;
115 }
116 
117 static int tee_release(struct inode *inode, struct file *filp)
118 {
119 	teedev_close_context(filp->private_data);
120 	return 0;
121 }
122 
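/*
 * TEE_IOC_VERSION handler: query the driver for its version/capability
 * data and additionally report TEE_GEN_CAP_PRIVILEGED when this is the
 * privileged (supplicant) device node.
 */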
123 static int tee_ioctl_version(struct tee_context *ctx,
124 			     struct tee_ioctl_version_data __user *uvers)
125 {
126 	struct tee_ioctl_version_data vers;
127 
128 	ctx->teedev->desc->ops->get_version(ctx->teedev, &vers);
129 
130 	if (ctx->teedev->desc->flags & TEE_DESC_PRIVILEGED)
131 		vers.gen_caps |= TEE_GEN_CAP_PRIVILEGED;
132 
133 	if (copy_to_user(uvers, &vers, sizeof(vers)))
134 		return -EFAULT;
135 
136 	return 0;
137 }
138 
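/*
 * TEE_IOC_SHM_ALLOC handler: allocate a dma-buf backed shared memory
 * object, copy its id, flags and actual size back to user space and
 * return a file descriptor for it on success.
 */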
139 static int tee_ioctl_shm_alloc(struct tee_context *ctx,
140 			       struct tee_ioctl_shm_alloc_data __user *udata)
141 {
142 	long ret;
143 	struct tee_ioctl_shm_alloc_data data;
144 	struct tee_shm *shm;
145 
146 	if (copy_from_user(&data, udata, sizeof(data)))
147 		return -EFAULT;
148 
149 	/* Currently no input flags are supported */
150 	if (data.flags)
151 		return -EINVAL;
152 
153 	shm = tee_shm_alloc(ctx, data.size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
154 	if (IS_ERR(shm))
155 		return PTR_ERR(shm);
156 
157 	data.id = shm->id;
158 	data.flags = shm->flags;
159 	data.size = shm->size;
160 
161 	if (copy_to_user(udata, &data, sizeof(data)))
162 		ret = -EFAULT;
163 	else
164 		ret = tee_shm_get_fd(shm);
165 
166 	/*
167 	 * The shared memory will be freed when user space closes the
168 	 * file descriptor, or immediately if tee_shm_get_fd() failed
169 	 * above.
170 	 */
171 	tee_shm_put(shm);
172 	return ret;
173 }
174 
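/*
 * TEE_IOC_SHM_REGISTER handler: register an existing user space buffer
 * as shared memory with the TEE and return a file descriptor for it.
 */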
175 static int
176 tee_ioctl_shm_register(struct tee_context *ctx,
177 		       struct tee_ioctl_shm_register_data __user *udata)
178 {
179 	long ret;
180 	struct tee_ioctl_shm_register_data data;
181 	struct tee_shm *shm;
182 
183 	if (copy_from_user(&data, udata, sizeof(data)))
184 		return -EFAULT;
185 
186 	/* Currently no input flags are supported */
187 	if (data.flags)
188 		return -EINVAL;
189 
190 	shm = tee_shm_register(ctx, data.addr, data.length,
191 			       TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED);
192 	if (IS_ERR(shm))
193 		return PTR_ERR(shm);
194 
195 	data.id = shm->id;
196 	data.flags = shm->flags;
197 	data.length = shm->size;
198 
199 	if (copy_to_user(udata, &data, sizeof(data)))
200 		ret = -EFAULT;
201 	else
202 		ret = tee_shm_get_fd(shm);
203 	/*
204 	 * The shared memory will be freed when user space closes the
205 	 * file descriptor, or immediately if tee_shm_get_fd() failed
206 	 * above.
207 	 */
208 	tee_shm_put(shm);
209 	return ret;
210 }
211 
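/*
 * Convert an array of struct tee_ioctl_param from user space into kernel
 * struct tee_param. Memref parameters are resolved from shm id to a
 * struct tee_shm pointer with an increased reference count; see the
 * comment in the memref case below for the caller's obligations.
 */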
212 static int params_from_user(struct tee_context *ctx, struct tee_param *params,
213 			    size_t num_params,
214 			    struct tee_ioctl_param __user *uparams)
215 {
216 	size_t n;
217 
218 	for (n = 0; n < num_params; n++) {
219 		struct tee_shm *shm;
220 		struct tee_ioctl_param ip;
221 
222 		if (copy_from_user(&ip, uparams + n, sizeof(ip)))
223 			return -EFAULT;
224 
225 		/* All unused attribute bits have to be zero */
226 		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
227 			return -EINVAL;
228 
229 		params[n].attr = ip.attr;
230 		switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
231 		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
232 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
233 			break;
234 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
235 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
236 			params[n].u.value.a = ip.a;
237 			params[n].u.value.b = ip.b;
238 			params[n].u.value.c = ip.c;
239 			break;
240 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
241 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
242 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
243 			/*
244 			 * If we fail to get a pointer to a shared memory
245 			 * object (and increase the ref count) from an
246 			 * identifier we return an error. All pointers that
247 			 * have been added to params have an increased ref
248 			 * count. It's the caller's responsibility to do
249 			 * tee_shm_put() on all resolved pointers.
250 			 */
251 			shm = tee_shm_get_from_id(ctx, ip.c);
252 			if (IS_ERR(shm))
253 				return PTR_ERR(shm);
254 
255 			/*
256 			 * Ensure offset + size does not overflow, and does
257 			 * not exceed the size of the referred
258 			 * shared memory object.
259 			 */
260 			if ((ip.a + ip.b) < ip.a ||
261 			    (ip.a + ip.b) > shm->size) {
262 				tee_shm_put(shm);
263 				return -EINVAL;
264 			}
265 
266 			params[n].u.memref.shm_offs = ip.a;
267 			params[n].u.memref.size = ip.b;
268 			params[n].u.memref.shm = shm;
269 			break;
270 		default:
271 			/* Unknown attribute */
272 			return -EINVAL;
273 		}
274 	}
275 	return 0;
276 }
277 
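/*
 * Copy the output halves of the parameters (output values and updated
 * memref sizes) back to the user supplied parameter array.
 */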
278 static int params_to_user(struct tee_ioctl_param __user *uparams,
279 			  size_t num_params, struct tee_param *params)
280 {
281 	size_t n;
282 
283 	for (n = 0; n < num_params; n++) {
284 		struct tee_ioctl_param __user *up = uparams + n;
285 		struct tee_param *p = params + n;
286 
287 		switch (p->attr) {
288 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
289 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
290 			if (put_user(p->u.value.a, &up->a) ||
291 			    put_user(p->u.value.b, &up->b) ||
292 			    put_user(p->u.value.c, &up->c))
293 				return -EFAULT;
294 			break;
295 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
296 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
297 			if (put_user((u64)p->u.memref.size, &up->b))
298 				return -EFAULT;
299 		default:
300 			break;
301 		}
302 	}
303 	return 0;
304 }
305 
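/*
 * TEE_IOC_OPEN_SESSION handler. The ioctl argument is a struct
 * tee_ioctl_buf_data whose buf_ptr points to a struct
 * tee_ioctl_open_session_arg immediately followed by num_params
 * struct tee_ioctl_param entries; buf_len must match that layout
 * exactly. On success the session id, return code and origin are
 * copied back to user space, and if that copy fails the just-opened
 * session is closed again to avoid leaking it.
 */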
306 static int tee_ioctl_open_session(struct tee_context *ctx,
307 				  struct tee_ioctl_buf_data __user *ubuf)
308 {
309 	int rc;
310 	size_t n;
311 	struct tee_ioctl_buf_data buf;
312 	struct tee_ioctl_open_session_arg __user *uarg;
313 	struct tee_ioctl_open_session_arg arg;
314 	struct tee_ioctl_param __user *uparams = NULL;
315 	struct tee_param *params = NULL;
316 	bool have_session = false;
317 
318 	if (!ctx->teedev->desc->ops->open_session)
319 		return -EINVAL;
320 
321 	if (copy_from_user(&buf, ubuf, sizeof(buf)))
322 		return -EFAULT;
323 
324 	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
325 	    buf.buf_len < sizeof(struct tee_ioctl_open_session_arg))
326 		return -EINVAL;
327 
328 	uarg = u64_to_user_ptr(buf.buf_ptr);
329 	if (copy_from_user(&arg, uarg, sizeof(arg)))
330 		return -EFAULT;
331 
332 	if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
333 		return -EINVAL;
334 
335 	if (arg.num_params) {
336 		params = kcalloc(arg.num_params, sizeof(struct tee_param),
337 				 GFP_KERNEL);
338 		if (!params)
339 			return -ENOMEM;
340 		uparams = uarg->params;
341 		rc = params_from_user(ctx, params, arg.num_params, uparams);
342 		if (rc)
343 			goto out;
344 	}
345 
346 	rc = ctx->teedev->desc->ops->open_session(ctx, &arg, params);
347 	if (rc)
348 		goto out;
349 	have_session = true;
350 
351 	if (put_user(arg.session, &uarg->session) ||
352 	    put_user(arg.ret, &uarg->ret) ||
353 	    put_user(arg.ret_origin, &uarg->ret_origin)) {
354 		rc = -EFAULT;
355 		goto out;
356 	}
357 	rc = params_to_user(uparams, arg.num_params, params);
358 out:
359 	/*
360 	 * If we've succeeded in opening the session but failed to communicate
361 	 * it back to user space, close the session again to avoid leakage.
362 	 */
363 	if (rc && have_session && ctx->teedev->desc->ops->close_session)
364 		ctx->teedev->desc->ops->close_session(ctx, arg.session);
365 
366 	if (params) {
367 		/* Decrease ref count for all valid shared memory pointers */
368 		for (n = 0; n < arg.num_params; n++)
369 			if (tee_param_is_memref(params + n) &&
370 			    params[n].u.memref.shm)
371 				tee_shm_put(params[n].u.memref.shm);
372 		kfree(params);
373 	}
374 
375 	return rc;
376 }
377 
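/*
 * TEE_IOC_INVOKE handler: same buffer layout as TEE_IOC_OPEN_SESSION,
 * but with a struct tee_ioctl_invoke_arg header, invoking a function
 * in an already open session.
 */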
378 static int tee_ioctl_invoke(struct tee_context *ctx,
379 			    struct tee_ioctl_buf_data __user *ubuf)
380 {
381 	int rc;
382 	size_t n;
383 	struct tee_ioctl_buf_data buf;
384 	struct tee_ioctl_invoke_arg __user *uarg;
385 	struct tee_ioctl_invoke_arg arg;
386 	struct tee_ioctl_param __user *uparams = NULL;
387 	struct tee_param *params = NULL;
388 
389 	if (!ctx->teedev->desc->ops->invoke_func)
390 		return -EINVAL;
391 
392 	if (copy_from_user(&buf, ubuf, sizeof(buf)))
393 		return -EFAULT;
394 
395 	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
396 	    buf.buf_len < sizeof(struct tee_ioctl_invoke_arg))
397 		return -EINVAL;
398 
399 	uarg = u64_to_user_ptr(buf.buf_ptr);
400 	if (copy_from_user(&arg, uarg, sizeof(arg)))
401 		return -EFAULT;
402 
403 	if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
404 		return -EINVAL;
405 
406 	if (arg.num_params) {
407 		params = kcalloc(arg.num_params, sizeof(struct tee_param),
408 				 GFP_KERNEL);
409 		if (!params)
410 			return -ENOMEM;
411 		uparams = uarg->params;
412 		rc = params_from_user(ctx, params, arg.num_params, uparams);
413 		if (rc)
414 			goto out;
415 	}
416 
417 	rc = ctx->teedev->desc->ops->invoke_func(ctx, &arg, params);
418 	if (rc)
419 		goto out;
420 
421 	if (put_user(arg.ret, &uarg->ret) ||
422 	    put_user(arg.ret_origin, &uarg->ret_origin)) {
423 		rc = -EFAULT;
424 		goto out;
425 	}
426 	rc = params_to_user(uparams, arg.num_params, params);
427 out:
428 	if (params) {
429 		/* Decrease ref count for all valid shared memory pointers */
430 		for (n = 0; n < arg.num_params; n++)
431 			if (tee_param_is_memref(params + n) &&
432 			    params[n].u.memref.shm)
433 				tee_shm_put(params[n].u.memref.shm);
434 		kfree(params);
435 	}
436 	return rc;
437 }
438 
439 static int tee_ioctl_cancel(struct tee_context *ctx,
440 			    struct tee_ioctl_cancel_arg __user *uarg)
441 {
442 	struct tee_ioctl_cancel_arg arg;
443 
444 	if (!ctx->teedev->desc->ops->cancel_req)
445 		return -EINVAL;
446 
447 	if (copy_from_user(&arg, uarg, sizeof(arg)))
448 		return -EFAULT;
449 
450 	return ctx->teedev->desc->ops->cancel_req(ctx, arg.cancel_id,
451 						  arg.session);
452 }
453 
454 static int
455 tee_ioctl_close_session(struct tee_context *ctx,
456 			struct tee_ioctl_close_session_arg __user *uarg)
457 {
458 	struct tee_ioctl_close_session_arg arg;
459 
460 	if (!ctx->teedev->desc->ops->close_session)
461 		return -EINVAL;
462 
463 	if (copy_from_user(&arg, uarg, sizeof(arg)))
464 		return -EFAULT;
465 
466 	return ctx->teedev->desc->ops->close_session(ctx, arg.session);
467 }
468 
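/*
 * Copy parameters to the user space supplicant (tee-supplicant). Memrefs
 * are passed as offset/size/shm-id triplets; a missing shm is signalled
 * with the invalid id (u64)-1.
 */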
469 static int params_to_supp(struct tee_context *ctx,
470 			  struct tee_ioctl_param __user *uparams,
471 			  size_t num_params, struct tee_param *params)
472 {
473 	size_t n;
474 
475 	for (n = 0; n < num_params; n++) {
476 		struct tee_ioctl_param ip;
477 		struct tee_param *p = params + n;
478 
479 		ip.attr = p->attr;
480 		switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
481 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
482 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
483 			ip.a = p->u.value.a;
484 			ip.b = p->u.value.b;
485 			ip.c = p->u.value.c;
486 			break;
487 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
488 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
489 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
490 			ip.b = p->u.memref.size;
491 			if (!p->u.memref.shm) {
492 				ip.a = 0;
493 				ip.c = (u64)-1; /* invalid shm id */
494 				break;
495 			}
496 			ip.a = p->u.memref.shm_offs;
497 			ip.c = p->u.memref.shm->id;
498 			break;
499 		default:
500 			ip.a = 0;
501 			ip.b = 0;
502 			ip.c = 0;
503 			break;
504 		}
505 
506 		if (copy_to_user(uparams + n, &ip, sizeof(ip)))
507 			return -EFAULT;
508 	}
509 
510 	return 0;
511 }
512 
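/*
 * TEE_IOC_SUPPL_RECV handler, used by tee-supplicant to pick up the next
 * request from secure world. Typically blocks in the driver's supp_recv()
 * hook until a request is available, then passes the function id and the
 * request parameters back to user space.
 */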
513 static int tee_ioctl_supp_recv(struct tee_context *ctx,
514 			       struct tee_ioctl_buf_data __user *ubuf)
515 {
516 	int rc;
517 	struct tee_ioctl_buf_data buf;
518 	struct tee_iocl_supp_recv_arg __user *uarg;
519 	struct tee_param *params;
520 	u32 num_params;
521 	u32 func;
522 
523 	if (!ctx->teedev->desc->ops->supp_recv)
524 		return -EINVAL;
525 
526 	if (copy_from_user(&buf, ubuf, sizeof(buf)))
527 		return -EFAULT;
528 
529 	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
530 	    buf.buf_len < sizeof(struct tee_iocl_supp_recv_arg))
531 		return -EINVAL;
532 
533 	uarg = u64_to_user_ptr(buf.buf_ptr);
534 	if (get_user(num_params, &uarg->num_params))
535 		return -EFAULT;
536 
537 	if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) != buf.buf_len)
538 		return -EINVAL;
539 
540 	params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
541 	if (!params)
542 		return -ENOMEM;
543 
544 	rc = params_from_user(ctx, params, num_params, uarg->params);
545 	if (rc)
546 		goto out;
547 
548 	rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params);
549 	if (rc)
550 		goto out;
551 
552 	if (put_user(func, &uarg->func) ||
553 	    put_user(num_params, &uarg->num_params)) {
554 		rc = -EFAULT;
555 		goto out;
556 	}
557 
558 	rc = params_to_supp(ctx, uarg->params, num_params, params);
559 out:
560 	kfree(params);
561 	return rc;
562 }
563 
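/*
 * Read back the supplicant's reply parameters: only output values and
 * updated memref sizes are taken from user space here, the driver maps
 * them onto the original request parameters.
 */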
564 static int params_from_supp(struct tee_param *params, size_t num_params,
565 			    struct tee_ioctl_param __user *uparams)
566 {
567 	size_t n;
568 
569 	for (n = 0; n < num_params; n++) {
570 		struct tee_param *p = params + n;
571 		struct tee_ioctl_param ip;
572 
573 		if (copy_from_user(&ip, uparams + n, sizeof(ip)))
574 			return -EFAULT;
575 
576 		/* All unused attribute bits have to be zero */
577 		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
578 			return -EINVAL;
579 
580 		p->attr = ip.attr;
581 		switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
582 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
583 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
584 			/* Only out and in/out values can be updated */
585 			p->u.value.a = ip.a;
586 			p->u.value.b = ip.b;
587 			p->u.value.c = ip.c;
588 			break;
589 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
590 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
591 			/*
592 			 * Only the size of the memref can be updated.
593 			 * Since we don't have access to the original
594 			 * parameters here, only store the supplied size.
595 			 * The driver will copy the updated size into the
596 			 * original parameters.
597 			 */
598 			p->u.memref.shm = NULL;
599 			p->u.memref.shm_offs = 0;
600 			p->u.memref.size = ip.b;
601 			break;
602 		default:
603 			memset(&p->u, 0, sizeof(p->u));
604 			break;
605 		}
606 	}
607 	return 0;
608 }
609 
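/*
 * TEE_IOC_SUPPL_SEND handler, used by tee-supplicant to return the result
 * of a previously received request to the driver.
 */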
610 static int tee_ioctl_supp_send(struct tee_context *ctx,
611 			       struct tee_ioctl_buf_data __user *ubuf)
612 {
613 	long rc;
614 	struct tee_ioctl_buf_data buf;
615 	struct tee_iocl_supp_send_arg __user *uarg;
616 	struct tee_param *params;
617 	u32 num_params;
618 	u32 ret;
619 
620 	/* Not valid for this driver */
621 	if (!ctx->teedev->desc->ops->supp_send)
622 		return -EINVAL;
623 
624 	if (copy_from_user(&buf, ubuf, sizeof(buf)))
625 		return -EFAULT;
626 
627 	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
628 	    buf.buf_len < sizeof(struct tee_iocl_supp_send_arg))
629 		return -EINVAL;
630 
631 	uarg = u64_to_user_ptr(buf.buf_ptr);
632 	if (get_user(ret, &uarg->ret) ||
633 	    get_user(num_params, &uarg->num_params))
634 		return -EFAULT;
635 
636 	if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) > buf.buf_len)
637 		return -EINVAL;
638 
639 	params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
640 	if (!params)
641 		return -ENOMEM;
642 
643 	rc = params_from_supp(params, num_params, uarg->params);
644 	if (rc)
645 		goto out;
646 
647 	rc = ctx->teedev->desc->ops->supp_send(ctx, ret, num_params, params);
648 out:
649 	kfree(params);
650 	return rc;
651 }
652 
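/* Main ioctl dispatcher, shared by the /dev/tee* and /dev/teepriv* nodes */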
653 static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
654 {
655 	struct tee_context *ctx = filp->private_data;
656 	void __user *uarg = (void __user *)arg;
657 
658 	switch (cmd) {
659 	case TEE_IOC_VERSION:
660 		return tee_ioctl_version(ctx, uarg);
661 	case TEE_IOC_SHM_ALLOC:
662 		return tee_ioctl_shm_alloc(ctx, uarg);
663 	case TEE_IOC_SHM_REGISTER:
664 		return tee_ioctl_shm_register(ctx, uarg);
665 	case TEE_IOC_OPEN_SESSION:
666 		return tee_ioctl_open_session(ctx, uarg);
667 	case TEE_IOC_INVOKE:
668 		return tee_ioctl_invoke(ctx, uarg);
669 	case TEE_IOC_CANCEL:
670 		return tee_ioctl_cancel(ctx, uarg);
671 	case TEE_IOC_CLOSE_SESSION:
672 		return tee_ioctl_close_session(ctx, uarg);
673 	case TEE_IOC_SUPPL_RECV:
674 		return tee_ioctl_supp_recv(ctx, uarg);
675 	case TEE_IOC_SUPPL_SEND:
676 		return tee_ioctl_supp_send(ctx, uarg);
677 	default:
678 		return -EINVAL;
679 	}
680 }
681 
682 static const struct file_operations tee_fops = {
683 	.owner = THIS_MODULE,
684 	.open = tee_open,
685 	.release = tee_release,
686 	.unlocked_ioctl = tee_ioctl,
687 	.compat_ioctl = tee_ioctl,
688 };
689 
690 static void tee_release_device(struct device *dev)
691 {
692 	struct tee_device *teedev = container_of(dev, struct tee_device, dev);
693 
694 	spin_lock(&driver_lock);
695 	clear_bit(teedev->id, dev_mask);
696 	spin_unlock(&driver_lock);
697 	mutex_destroy(&teedev->mutex);
698 	idr_destroy(&teedev->idr);
699 	kfree(teedev);
700 }
701 
702 /**
703  * tee_device_alloc() - Allocate a new struct tee_device instance
704  * @teedesc:	Descriptor for this driver
705  * @dev:	Parent device for this device
706  * @pool:	Shared memory pool, NULL if not used
707  * @driver_data: Private driver data for this device
708  *
709  * Allocates a new struct tee_device instance. The device is
710  * removed by tee_device_unregister().
711  *
712  * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure
713  */
714 struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
715 				    struct device *dev,
716 				    struct tee_shm_pool *pool,
717 				    void *driver_data)
718 {
719 	struct tee_device *teedev;
720 	void *ret;
721 	int rc, max_id;
722 	int offs = 0;
723 
724 	if (!teedesc || !teedesc->name || !teedesc->ops ||
725 	    !teedesc->ops->get_version || !teedesc->ops->open ||
726 	    !teedesc->ops->release || !pool)
727 		return ERR_PTR(-EINVAL);
728 
729 	teedev = kzalloc(sizeof(*teedev), GFP_KERNEL);
730 	if (!teedev) {
731 		ret = ERR_PTR(-ENOMEM);
732 		goto err;
733 	}
734 
735 	max_id = TEE_NUM_DEVICES / 2;
736 
737 	if (teedesc->flags & TEE_DESC_PRIVILEGED) {
738 		offs = TEE_NUM_DEVICES / 2;
739 		max_id = TEE_NUM_DEVICES;
740 	}
741 
742 	spin_lock(&driver_lock);
743 	teedev->id = find_next_zero_bit(dev_mask, max_id, offs);
744 	if (teedev->id < max_id)
745 		set_bit(teedev->id, dev_mask);
746 	spin_unlock(&driver_lock);
747 
748 	if (teedev->id >= max_id) {
749 		ret = ERR_PTR(-ENOMEM);
750 		goto err;
751 	}
752 
753 	snprintf(teedev->name, sizeof(teedev->name), "tee%s%d",
754 		 teedesc->flags & TEE_DESC_PRIVILEGED ? "priv" : "",
755 		 teedev->id - offs);
756 
757 	teedev->dev.class = tee_class;
758 	teedev->dev.release = tee_release_device;
759 	teedev->dev.parent = dev;
760 
761 	teedev->dev.devt = MKDEV(MAJOR(tee_devt), teedev->id);
762 
763 	rc = dev_set_name(&teedev->dev, "%s", teedev->name);
764 	if (rc) {
765 		ret = ERR_PTR(rc);
766 		goto err_devt;
767 	}
768 
769 	cdev_init(&teedev->cdev, &tee_fops);
770 	teedev->cdev.owner = teedesc->owner;
771 	teedev->cdev.kobj.parent = &teedev->dev.kobj;
772 
773 	dev_set_drvdata(&teedev->dev, driver_data);
774 	device_initialize(&teedev->dev);
775 
776 	/* 1 as tee_device_unregister() does one final tee_device_put() */
777 	teedev->num_users = 1;
778 	init_completion(&teedev->c_no_users);
779 	mutex_init(&teedev->mutex);
780 	idr_init(&teedev->idr);
781 
782 	teedev->desc = teedesc;
783 	teedev->pool = pool;
784 
785 	return teedev;
786 err_devt:
787 	unregister_chrdev_region(teedev->dev.devt, 1);
788 err:
789 	pr_err("could not register %s driver\n",
790 	       teedesc->flags & TEE_DESC_PRIVILEGED ? "privileged" : "client");
791 	if (teedev && teedev->id < TEE_NUM_DEVICES) {
792 		spin_lock(&driver_lock);
793 		clear_bit(teedev->id, dev_mask);
794 		spin_unlock(&driver_lock);
795 	}
796 	kfree(teedev);
797 	return ret;
798 }
799 EXPORT_SYMBOL_GPL(tee_device_alloc);
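/*
 * Illustrative sketch (not part of the original file) of how a TEE driver
 * is expected to use the allocation/registration API above. The names
 * mydrv_desc, parent_dev, pool and drv_data are hypothetical:
 *
 *	teedev = tee_device_alloc(&mydrv_desc, parent_dev, pool, drv_data);
 *	if (IS_ERR(teedev))
 *		return PTR_ERR(teedev);
 *
 *	rc = tee_device_register(teedev);
 *	if (rc) {
 *		tee_device_unregister(teedev);
 *		return rc;
 *	}
 */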
800 
801 static ssize_t implementation_id_show(struct device *dev,
802 				      struct device_attribute *attr, char *buf)
803 {
804 	struct tee_device *teedev = container_of(dev, struct tee_device, dev);
805 	struct tee_ioctl_version_data vers;
806 
807 	teedev->desc->ops->get_version(teedev, &vers);
808 	return scnprintf(buf, PAGE_SIZE, "%d\n", vers.impl_id);
809 }
810 static DEVICE_ATTR_RO(implementation_id);
811 
812 static struct attribute *tee_dev_attrs[] = {
813 	&dev_attr_implementation_id.attr,
814 	NULL
815 };
816 
817 static const struct attribute_group tee_dev_group = {
818 	.attrs = tee_dev_attrs,
819 };
820 
821 /**
822  * tee_device_register() - Registers a TEE device
823  * @teedev:	Device to register
824  *
825  * tee_device_unregister() needs to be called to remove the @teedev if
826  * this function fails.
827  *
828  * @returns < 0 on failure
829  */
830 int tee_device_register(struct tee_device *teedev)
831 {
832 	int rc;
833 
834 	if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) {
835 		dev_err(&teedev->dev, "attempt to register twice\n");
836 		return -EINVAL;
837 	}
838 
839 	rc = cdev_add(&teedev->cdev, teedev->dev.devt, 1);
840 	if (rc) {
841 		dev_err(&teedev->dev,
842 			"unable to cdev_add() %s, major %d, minor %d, err=%d\n",
843 			teedev->name, MAJOR(teedev->dev.devt),
844 			MINOR(teedev->dev.devt), rc);
845 		return rc;
846 	}
847 
848 	rc = device_add(&teedev->dev);
849 	if (rc) {
850 		dev_err(&teedev->dev,
851 			"unable to device_add() %s, major %d, minor %d, err=%d\n",
852 			teedev->name, MAJOR(teedev->dev.devt),
853 			MINOR(teedev->dev.devt), rc);
854 		goto err_device_add;
855 	}
856 
857 	rc = sysfs_create_group(&teedev->dev.kobj, &tee_dev_group);
858 	if (rc) {
859 		dev_err(&teedev->dev,
860 			"failed to create sysfs attributes, err=%d\n", rc);
861 		goto err_sysfs_create_group;
862 	}
863 
864 	teedev->flags |= TEE_DEVICE_FLAG_REGISTERED;
865 	return 0;
866 
867 err_sysfs_create_group:
868 	device_del(&teedev->dev);
869 err_device_add:
870 	cdev_del(&teedev->cdev);
871 	return rc;
872 }
873 EXPORT_SYMBOL_GPL(tee_device_register);
874 
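/*
 * Device usage counting: tee_device_get()/tee_device_put() track users in
 * teedev->num_users (initialised to 1 for the final put done by
 * tee_device_unregister()). When the count reaches zero, teedev->desc is
 * cleared and c_no_users is completed so tee_device_unregister() can
 * finish.
 */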
875 void tee_device_put(struct tee_device *teedev)
876 {
877 	mutex_lock(&teedev->mutex);
878 	/* Shouldn't put in this state */
879 	if (!WARN_ON(!teedev->desc)) {
880 		teedev->num_users--;
881 		if (!teedev->num_users) {
882 			teedev->desc = NULL;
883 			complete(&teedev->c_no_users);
884 		}
885 	}
886 	mutex_unlock(&teedev->mutex);
887 }
888 
889 bool tee_device_get(struct tee_device *teedev)
890 {
891 	mutex_lock(&teedev->mutex);
892 	if (!teedev->desc) {
893 		mutex_unlock(&teedev->mutex);
894 		return false;
895 	}
896 	teedev->num_users++;
897 	mutex_unlock(&teedev->mutex);
898 	return true;
899 }
900 
901 /**
902  * tee_device_unregister() - Removes a TEE device
903  * @teedev:	Device to unregister
904  *
905  * This function should be called to remove the @teedev even if
906  * tee_device_register() hasn't been called yet. Does nothing if
907  * @teedev is NULL.
908  */
909 void tee_device_unregister(struct tee_device *teedev)
910 {
911 	if (!teedev)
912 		return;
913 
914 	if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) {
915 		sysfs_remove_group(&teedev->dev.kobj, &tee_dev_group);
916 		cdev_del(&teedev->cdev);
917 		device_del(&teedev->dev);
918 	}
919 
920 	tee_device_put(teedev);
921 	wait_for_completion(&teedev->c_no_users);
922 
923 	/*
924 	 * No need to take a mutex any longer now since teedev->desc was
925 	 * set to NULL before teedev->c_no_users was completed.
926 	 */
927 
928 	teedev->pool = NULL;
929 
930 	put_device(&teedev->dev);
931 }
932 EXPORT_SYMBOL_GPL(tee_device_unregister);
933 
934 /**
935  * tee_get_drvdata() - Return driver_data pointer
936  * @teedev:	Device containing the driver_data pointer
937  * @returns the driver_data pointer supplied to tee_device_alloc().
938  */
939 void *tee_get_drvdata(struct tee_device *teedev)
940 {
941 	return dev_get_drvdata(&teedev->dev);
942 }
943 EXPORT_SYMBOL_GPL(tee_get_drvdata);
944 
945 struct match_dev_data {
946 	struct tee_ioctl_version_data *vers;
947 	const void *data;
948 	int (*match)(struct tee_ioctl_version_data *, const void *);
949 };
950 
951 static int match_dev(struct device *dev, const void *data)
952 {
953 	const struct match_dev_data *match_data = data;
954 	struct tee_device *teedev = container_of(dev, struct tee_device, dev);
955 
956 	teedev->desc->ops->get_version(teedev, match_data->vers);
957 	return match_data->match(match_data->vers, match_data->data);
958 }
959 
960 struct tee_context *
961 tee_client_open_context(struct tee_context *start,
962 			int (*match)(struct tee_ioctl_version_data *,
963 				     const void *),
964 			const void *data, struct tee_ioctl_version_data *vers)
965 {
966 	struct device *dev = NULL;
967 	struct device *put_dev = NULL;
968 	struct tee_context *ctx = NULL;
969 	struct tee_ioctl_version_data v;
970 	struct match_dev_data match_data = { vers ? vers : &v, data, match };
971 
972 	if (start)
973 		dev = &start->teedev->dev;
974 
975 	do {
976 		dev = class_find_device(tee_class, dev, &match_data, match_dev);
977 		if (!dev) {
978 			ctx = ERR_PTR(-ENOENT);
979 			break;
980 		}
981 
982 		put_device(put_dev);
983 		put_dev = dev;
984 
985 		ctx = teedev_open(container_of(dev, struct tee_device, dev));
986 	} while (IS_ERR(ctx) && PTR_ERR(ctx) != -ENOMEM);
987 
988 	put_device(put_dev);
989 	/*
990 	 * Default behaviour for an in-kernel client is to not wait for
991 	 * tee-supplicant if it is not present, for any requests in this
992 	 * context. The flag can be changed again before the call to
993 	 * tee_client_open_session() if an in-kernel client requires
994 	 * different behaviour.
995 	 */
996 	if (!IS_ERR(ctx))
997 		ctx->supp_nowait = true;
998 
999 	return ctx;
1000 }
1001 EXPORT_SYMBOL_GPL(tee_client_open_context);
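/*
 * Illustrative sketch (not part of the original file) of an in-kernel
 * client using the API above. The optee_ctx_match() name is hypothetical;
 * a real match callback simply inspects the version data it is given:
 *
 *	static int optee_ctx_match(struct tee_ioctl_version_data *ver,
 *				   const void *data)
 *	{
 *		return ver->impl_id == TEE_IMPL_ID_OPTEE;
 *	}
 *
 *	ctx = tee_client_open_context(NULL, optee_ctx_match, NULL, NULL);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	...
 *	tee_client_close_context(ctx);
 */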
1002 
1003 void tee_client_close_context(struct tee_context *ctx)
1004 {
1005 	teedev_close_context(ctx);
1006 }
1007 EXPORT_SYMBOL_GPL(tee_client_close_context);
1008 
1009 void tee_client_get_version(struct tee_context *ctx,
1010 			    struct tee_ioctl_version_data *vers)
1011 {
1012 	ctx->teedev->desc->ops->get_version(ctx->teedev, vers);
1013 }
1014 EXPORT_SYMBOL_GPL(tee_client_get_version);
1015 
1016 int tee_client_open_session(struct tee_context *ctx,
1017 			    struct tee_ioctl_open_session_arg *arg,
1018 			    struct tee_param *param)
1019 {
1020 	if (!ctx->teedev->desc->ops->open_session)
1021 		return -EINVAL;
1022 	return ctx->teedev->desc->ops->open_session(ctx, arg, param);
1023 }
1024 EXPORT_SYMBOL_GPL(tee_client_open_session);
1025 
1026 int tee_client_close_session(struct tee_context *ctx, u32 session)
1027 {
1028 	if (!ctx->teedev->desc->ops->close_session)
1029 		return -EINVAL;
1030 	return ctx->teedev->desc->ops->close_session(ctx, session);
1031 }
1032 EXPORT_SYMBOL_GPL(tee_client_close_session);
1033 
1034 int tee_client_invoke_func(struct tee_context *ctx,
1035 			   struct tee_ioctl_invoke_arg *arg,
1036 			   struct tee_param *param)
1037 {
1038 	if (!ctx->teedev->desc->ops->invoke_func)
1039 		return -EINVAL;
1040 	return ctx->teedev->desc->ops->invoke_func(ctx, arg, param);
1041 }
1042 EXPORT_SYMBOL_GPL(tee_client_invoke_func);
1043 
1044 int tee_client_cancel_req(struct tee_context *ctx,
1045 			  struct tee_ioctl_cancel_arg *arg)
1046 {
1047 	if (!ctx->teedev->desc->ops->cancel_req)
1048 		return -EINVAL;
1049 	return ctx->teedev->desc->ops->cancel_req(ctx, arg->cancel_id,
1050 						  arg->session);
1051 }
1052 
1053 static int tee_client_device_match(struct device *dev,
1054 				   struct device_driver *drv)
1055 {
1056 	const struct tee_client_device_id *id_table;
1057 	struct tee_client_device *tee_device;
1058 
1059 	id_table = to_tee_client_driver(drv)->id_table;
1060 	tee_device = to_tee_client_device(dev);
1061 
1062 	while (!uuid_is_null(&id_table->uuid)) {
1063 		if (uuid_equal(&tee_device->id.uuid, &id_table->uuid))
1064 			return 1;
1065 		id_table++;
1066 	}
1067 
1068 	return 0;
1069 }
1070 
1071 static int tee_client_device_uevent(struct device *dev,
1072 				    struct kobj_uevent_env *env)
1073 {
1074 	uuid_t *dev_id = &to_tee_client_device(dev)->id.uuid;
1075 
1076 	return add_uevent_var(env, "MODALIAS=tee:%pUb", dev_id);
1077 }
1078 
1079 struct bus_type tee_bus_type = {
1080 	.name		= "tee",
1081 	.match		= tee_client_device_match,
1082 	.uevent		= tee_client_device_uevent,
1083 };
1084 EXPORT_SYMBOL_GPL(tee_bus_type);
1085 
1086 static int __init tee_init(void)
1087 {
1088 	int rc;
1089 
1090 	tee_class = class_create(THIS_MODULE, "tee");
1091 	if (IS_ERR(tee_class)) {
1092 		pr_err("couldn't create class\n");
1093 		return PTR_ERR(tee_class);
1094 	}
1095 
1096 	rc = alloc_chrdev_region(&tee_devt, 0, TEE_NUM_DEVICES, "tee");
1097 	if (rc) {
1098 		pr_err("failed to allocate char dev region\n");
1099 		goto out_unreg_class;
1100 	}
1101 
1102 	rc = bus_register(&tee_bus_type);
1103 	if (rc) {
1104 		pr_err("failed to register tee bus\n");
1105 		goto out_unreg_chrdev;
1106 	}
1107 
1108 	return 0;
1109 
1110 out_unreg_chrdev:
1111 	unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
1112 out_unreg_class:
1113 	class_destroy(tee_class);
1114 	tee_class = NULL;
1115 
1116 	return rc;
1117 }
1118 
1119 static void __exit tee_exit(void)
1120 {
1121 	bus_unregister(&tee_bus_type);
1122 	unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
1123 	class_destroy(tee_class);
1124 	tee_class = NULL;
1125 }
1126 
1127 subsys_initcall(tee_init);
1128 module_exit(tee_exit);
1129 
1130 MODULE_AUTHOR("Linaro");
1131 MODULE_DESCRIPTION("TEE Driver");
1132 MODULE_VERSION("1.0");
1133 MODULE_LICENSE("GPL v2");
1134