xref: /openbmc/linux/drivers/tee/tee_core.c (revision 02a9c6ee)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2015-2016, Linaro Limited
4  */
5 
6 #define pr_fmt(fmt) "%s: " fmt, __func__
7 
8 #include <linux/cdev.h>
9 #include <linux/cred.h>
10 #include <linux/fs.h>
11 #include <linux/idr.h>
12 #include <linux/module.h>
13 #include <linux/slab.h>
14 #include <linux/tee_drv.h>
15 #include <linux/uaccess.h>
16 #include <crypto/hash.h>
17 #include <crypto/sha.h>
18 #include "tee_private.h"
19 
20 #define TEE_NUM_DEVICES	32
21 
22 #define TEE_IOCTL_PARAM_SIZE(x) (sizeof(struct tee_param) * (x))
23 
24 #define TEE_UUID_NS_NAME_SIZE	128
25 
26 /*
27  * TEE Client UUID name space identifier (UUIDv4)
28  *
29  * The value here is a random UUID allocated as the name space identifier
30  * for forming client UUIDs for the TEE environment using the UUIDv5 scheme.
31  */
32 static const uuid_t tee_client_uuid_ns = UUID_INIT(0x58ac9ca0, 0x2086, 0x4683,
33 						   0xa1, 0xb8, 0xec, 0x4b,
34 						   0xc0, 0x8e, 0x01, 0xb6);
35 
36 /*
37  * Unprivileged devices get ids in the lower half of the range and
38  * privileged devices in the upper half.
39  */
40 static DECLARE_BITMAP(dev_mask, TEE_NUM_DEVICES);
41 static DEFINE_SPINLOCK(driver_lock);
42 
43 static struct class *tee_class;
44 static dev_t tee_devt;
45 
46 static struct tee_context *teedev_open(struct tee_device *teedev)
47 {
48 	int rc;
49 	struct tee_context *ctx;
50 
51 	if (!tee_device_get(teedev))
52 		return ERR_PTR(-EINVAL);
53 
54 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
55 	if (!ctx) {
56 		rc = -ENOMEM;
57 		goto err;
58 	}
59 
60 	kref_init(&ctx->refcount);
61 	ctx->teedev = teedev;
62 	rc = teedev->desc->ops->open(ctx);
63 	if (rc)
64 		goto err;
65 
66 	return ctx;
67 err:
68 	kfree(ctx);
69 	tee_device_put(teedev);
70 	return ERR_PTR(rc);
72 }
73 
74 void teedev_ctx_get(struct tee_context *ctx)
75 {
76 	if (ctx->releasing)
77 		return;
78 
79 	kref_get(&ctx->refcount);
80 }
81 
82 static void teedev_ctx_release(struct kref *ref)
83 {
84 	struct tee_context *ctx = container_of(ref, struct tee_context,
85 					       refcount);
86 	ctx->releasing = true;
87 	ctx->teedev->desc->ops->release(ctx);
88 	kfree(ctx);
89 }
90 
91 void teedev_ctx_put(struct tee_context *ctx)
92 {
93 	if (ctx->releasing)
94 		return;
95 
96 	kref_put(&ctx->refcount, teedev_ctx_release);
97 }
98 
99 static void teedev_close_context(struct tee_context *ctx)
100 {
101 	tee_device_put(ctx->teedev);
102 	teedev_ctx_put(ctx);
103 }
104 
105 static int tee_open(struct inode *inode, struct file *filp)
106 {
107 	struct tee_context *ctx;
108 
109 	ctx = teedev_open(container_of(inode->i_cdev, struct tee_device, cdev));
110 	if (IS_ERR(ctx))
111 		return PTR_ERR(ctx);
112 
113 	/*
114 	 * The default behaviour for user space is to wait for tee-supplicant
115 	 * if it is not present when a request in this context needs it.
116 	 */
117 	ctx->supp_nowait = false;
118 	filp->private_data = ctx;
119 	return 0;
120 }
121 
122 static int tee_release(struct inode *inode, struct file *filp)
123 {
124 	teedev_close_context(filp->private_data);
125 	return 0;
126 }
127 
128 /**
129  * uuid_v5() - Calculate UUIDv5
130  * @uuid: Resulting UUID
131  * @ns: Name space ID for UUIDv5 function
132  * @name: Name for UUIDv5 function
133  * @size: Size of name
134  *
135  * UUIDv5 is specified in RFC 4122.
136  *
137  * This implements section 4.3, "Algorithm for Creating a Name-Based UUID",
138  * using SHA-1 as the hash.
139  */
140 static int uuid_v5(uuid_t *uuid, const uuid_t *ns, const void *name,
141 		   size_t size)
142 {
143 	unsigned char hash[SHA1_DIGEST_SIZE];
144 	struct crypto_shash *shash = NULL;
145 	struct shash_desc *desc = NULL;
146 	int rc;
147 
148 	shash = crypto_alloc_shash("sha1", 0, 0);
149 	if (IS_ERR(shash)) {
150 		rc = PTR_ERR(shash);
151 		pr_err("shash(sha1) allocation failed\n");
152 		return rc;
153 	}
154 
155 	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(shash),
156 		       GFP_KERNEL);
157 	if (!desc) {
158 		rc = -ENOMEM;
159 		goto out_free_shash;
160 	}
161 
162 	desc->tfm = shash;
163 
164 	rc = crypto_shash_init(desc);
165 	if (rc < 0)
166 		goto out_free_desc;
167 
168 	rc = crypto_shash_update(desc, (const u8 *)ns, sizeof(*ns));
169 	if (rc < 0)
170 		goto out_free_desc;
171 
172 	rc = crypto_shash_update(desc, (const u8 *)name, size);
173 	if (rc < 0)
174 		goto out_free_desc;
175 
176 	rc = crypto_shash_final(desc, hash);
177 	if (rc < 0)
178 		goto out_free_desc;
179 
180 	memcpy(uuid->b, hash, UUID_SIZE);
181 
182 	/* Tag as UUID version 5 and set the RFC 4122 variant bits */
183 	uuid->b[6] = (hash[6] & 0x0F) | 0x50;
184 	uuid->b[8] = (hash[8] & 0x3F) | 0x80;
185 
186 out_free_desc:
187 	kfree(desc);
188 
189 out_free_shash:
190 	crypto_free_shash(shash);
191 	return rc;
192 }
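/*
 * Example (illustrative sketch): how uuid_v5() above is intended to be used.
 * The "uid=<uid>" name format follows the convention documented in
 * tee_session_calc_client_uuid() below; the helper name and error handling
 * are made up for the example.
 *
 *	static int example_client_uuid_for_uid(uuid_t *uuid, u32 uid)
 *	{
 *		char name[TEE_UUID_NS_NAME_SIZE];
 *		int len;
 *
 *		len = snprintf(name, sizeof(name), "uid=%x", uid);
 *		if (len >= TEE_UUID_NS_NAME_SIZE)
 *			return -E2BIG;
 *
 *		return uuid_v5(uuid, &tee_client_uuid_ns, name, len);
 *	}
 */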
193 
194 int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
195 				 const u8 connection_data[TEE_IOCTL_UUID_LEN])
196 {
197 	gid_t ns_grp = (gid_t)-1;
198 	kgid_t grp = INVALID_GID;
199 	char *name = NULL;
200 	int name_len;
201 	int rc;
202 
203 	if (connection_method == TEE_IOCTL_LOGIN_PUBLIC) {
204 		/* Nil UUID to be passed to TEE environment */
205 		uuid_copy(uuid, &uuid_null);
206 		return 0;
207 	}
208 
209 	/*
210 	 * In the Linux environment the client UUID is based on UUIDv5.
211 	 *
212 	 * Determine the client UUID with the following semantics for 'name':
213 	 *
214 	 * For TEEC_LOGIN_USER:
215 	 * uid=<uid>
216 	 *
217 	 * For TEEC_LOGIN_GROUP:
218 	 * gid=<gid>
219 	 *
220 	 */
221 
222 	name = kzalloc(TEE_UUID_NS_NAME_SIZE, GFP_KERNEL);
223 	if (!name)
224 		return -ENOMEM;
225 
226 	switch (connection_method) {
227 	case TEE_IOCTL_LOGIN_USER:
228 		name_len = snprintf(name, TEE_UUID_NS_NAME_SIZE, "uid=%x",
229 				    current_euid().val);
230 		if (name_len >= TEE_UUID_NS_NAME_SIZE) {
231 			rc = -E2BIG;
232 			goto out_free_name;
233 		}
234 		break;
235 
236 	case TEE_IOCTL_LOGIN_GROUP:
237 		memcpy(&ns_grp, connection_data, sizeof(gid_t));
238 		grp = make_kgid(current_user_ns(), ns_grp);
239 		if (!gid_valid(grp) || !in_egroup_p(grp)) {
240 			rc = -EPERM;
241 			goto out_free_name;
242 		}
243 
244 		name_len = snprintf(name, TEE_UUID_NS_NAME_SIZE, "gid=%x",
245 				    grp.val);
246 		if (name_len >= TEE_UUID_NS_NAME_SIZE) {
247 			rc = -E2BIG;
248 			goto out_free_name;
249 		}
250 		break;
251 
252 	default:
253 		rc = -EINVAL;
254 		goto out_free_name;
255 	}
256 
257 	rc = uuid_v5(uuid, &tee_client_uuid_ns, name, name_len);
258 out_free_name:
259 	kfree(name);
260 
261 	return rc;
262 }
263 EXPORT_SYMBOL_GPL(tee_session_calc_client_uuid);
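/*
 * Example (illustrative sketch): a TEE backend driver typically calls
 * tee_session_calc_client_uuid() while building its open-session request,
 * using the login method and connection data from the session arguments.
 * The variable names below are placeholders.
 *
 *	uuid_t client_uuid;
 *	int rc;
 *
 *	rc = tee_session_calc_client_uuid(&client_uuid, arg->clnt_login,
 *					  arg->clnt_uuid);
 *	if (rc)
 *		return rc;
 *
 * On success client_uuid identifies the REE client towards the TEE.
 */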
264 
265 static int tee_ioctl_version(struct tee_context *ctx,
266 			     struct tee_ioctl_version_data __user *uvers)
267 {
268 	struct tee_ioctl_version_data vers;
269 
270 	ctx->teedev->desc->ops->get_version(ctx->teedev, &vers);
271 
272 	if (ctx->teedev->desc->flags & TEE_DESC_PRIVILEGED)
273 		vers.gen_caps |= TEE_GEN_CAP_PRIVILEGED;
274 
275 	if (copy_to_user(uvers, &vers, sizeof(vers)))
276 		return -EFAULT;
277 
278 	return 0;
279 }
280 
281 static int tee_ioctl_shm_alloc(struct tee_context *ctx,
282 			       struct tee_ioctl_shm_alloc_data __user *udata)
283 {
284 	long ret;
285 	struct tee_ioctl_shm_alloc_data data;
286 	struct tee_shm *shm;
287 
288 	if (copy_from_user(&data, udata, sizeof(data)))
289 		return -EFAULT;
290 
291 	/* Currently no input flags are supported */
292 	if (data.flags)
293 		return -EINVAL;
294 
295 	shm = tee_shm_alloc(ctx, data.size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
296 	if (IS_ERR(shm))
297 		return PTR_ERR(shm);
298 
299 	data.id = shm->id;
300 	data.flags = shm->flags;
301 	data.size = shm->size;
302 
303 	if (copy_to_user(udata, &data, sizeof(data)))
304 		ret = -EFAULT;
305 	else
306 		ret = tee_shm_get_fd(shm);
307 
308 	/*
309 	 * Drop our reference: the shared memory is freed when user space
310 	 * closes the file descriptor, or immediately if tee_shm_get_fd()
311 	 * failed.
312 	 */
313 	tee_shm_put(shm);
314 	return ret;
315 }
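/*
 * Example (illustrative user-space sketch, assuming the <linux/tee.h> UAPI
 * and with error handling trimmed): TEE_IOC_SHM_ALLOC returns a dma-buf file
 * descriptor on success which the client mmap()s; closing that descriptor is
 * what eventually releases the shared memory, as described above.
 *
 *	struct tee_ioctl_shm_alloc_data data = { .size = 4096 };
 *	int shm_fd;
 *	void *p;
 *
 *	shm_fd = ioctl(tee_fd, TEE_IOC_SHM_ALLOC, &data);
 *	if (shm_fd < 0)
 *		return -1;
 *	p = mmap(NULL, data.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		 shm_fd, 0);
 *
 * data.id is the shared memory identifier to pass in memref parameters, and
 * munmap() plus close() on shm_fd release the buffer again.
 */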
316 
317 static int
318 tee_ioctl_shm_register(struct tee_context *ctx,
319 		       struct tee_ioctl_shm_register_data __user *udata)
320 {
321 	long ret;
322 	struct tee_ioctl_shm_register_data data;
323 	struct tee_shm *shm;
324 
325 	if (copy_from_user(&data, udata, sizeof(data)))
326 		return -EFAULT;
327 
328 	/* Currently no input flags are supported */
329 	if (data.flags)
330 		return -EINVAL;
331 
332 	shm = tee_shm_register(ctx, data.addr, data.length,
333 			       TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED);
334 	if (IS_ERR(shm))
335 		return PTR_ERR(shm);
336 
337 	data.id = shm->id;
338 	data.flags = shm->flags;
339 	data.length = shm->size;
340 
341 	if (copy_to_user(udata, &data, sizeof(data)))
342 		ret = -EFAULT;
343 	else
344 		ret = tee_shm_get_fd(shm);
345 	/*
346 	 * Drop our reference: the shared memory is freed when user space
347 	 * closes the file descriptor, or immediately if tee_shm_get_fd()
348 	 * failed.
349 	 */
350 	tee_shm_put(shm);
351 	return ret;
352 }
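/*
 * Example (illustrative user-space sketch, assuming the <linux/tee.h> UAPI):
 * TEE_IOC_SHM_REGISTER registers an existing client buffer instead of
 * allocating a new one. As with TEE_IOC_SHM_ALLOC, the returned file
 * descriptor keeps the registration alive until it is closed; 'buf' and
 * 'buf_len' are placeholders.
 *
 *	struct tee_ioctl_shm_register_data data = {
 *		.addr = (uintptr_t)buf,
 *		.length = buf_len,
 *	};
 *	int shm_fd = ioctl(tee_fd, TEE_IOC_SHM_REGISTER, &data);
 *
 * On success data.id holds the shared memory identifier to use in memref
 * parameters.
 */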
353 
354 static int params_from_user(struct tee_context *ctx, struct tee_param *params,
355 			    size_t num_params,
356 			    struct tee_ioctl_param __user *uparams)
357 {
358 	size_t n;
359 
360 	for (n = 0; n < num_params; n++) {
361 		struct tee_shm *shm;
362 		struct tee_ioctl_param ip;
363 
364 		if (copy_from_user(&ip, uparams + n, sizeof(ip)))
365 			return -EFAULT;
366 
367 		/* All unused attribute bits have to be zero */
368 		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
369 			return -EINVAL;
370 
371 		params[n].attr = ip.attr;
372 		switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
373 		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
374 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
375 			break;
376 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
377 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
378 			params[n].u.value.a = ip.a;
379 			params[n].u.value.b = ip.b;
380 			params[n].u.value.c = ip.c;
381 			break;
382 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
383 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
384 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
385 			/*
386 			 * If a NULL pointer is passed to a TA in the TEE,
387 			 * the ip.c IOCTL parameter is set to TEE_MEMREF_NULL
388 			 * indicating a NULL memory reference.
389 			 */
390 			if (ip.c != TEE_MEMREF_NULL) {
391 				/*
392 				 * If we fail to get a pointer to a shared
393 				 * memory object (and increase the ref count)
394 				 * from an identifier we return an error. All
395 				 * pointers that have been added to params have
396 				 * an increased ref count. It's the caller's
397 				 * responsibility to do tee_shm_put() on all
398 				 * resolved pointers.
399 				 */
400 				shm = tee_shm_get_from_id(ctx, ip.c);
401 				if (IS_ERR(shm))
402 					return PTR_ERR(shm);
403 
404 				/*
405 				 * Ensure offset + size does not wrap around
406 				 * and does not exceed the size of the
407 				 * referenced shared memory object.
408 				 */
409 				if ((ip.a + ip.b) < ip.a ||
410 				    (ip.a + ip.b) > shm->size) {
411 					tee_shm_put(shm);
412 					return -EINVAL;
413 				}
414 			} else if (ctx->cap_memref_null) {
415 				/* Pass NULL pointer to OP-TEE */
416 				shm = NULL;
417 			} else {
418 				return -EINVAL;
419 			}
420 
421 			params[n].u.memref.shm_offs = ip.a;
422 			params[n].u.memref.size = ip.b;
423 			params[n].u.memref.shm = shm;
424 			break;
425 		default:
426 			/* Unknown attribute */
427 			return -EINVAL;
428 		}
429 	}
430 	return 0;
431 }
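/*
 * Example (illustrative sketch, assuming the <linux/tee.h> UAPI) of the
 * encoding this function expects from user space for a memref parameter:
 * 'a' is the offset into the shared memory object, 'b' the size in bytes
 * and 'c' the shm id returned by TEE_IOC_SHM_ALLOC/TEE_IOC_SHM_REGISTER
 * (or TEE_MEMREF_NULL for a null reference). The values are made up.
 *
 *	struct tee_ioctl_param p = {
 *		.attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT,
 *		.a = 0,
 *		.b = 256,
 *		.c = shm_id,
 *	};
 */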
432 
433 static int params_to_user(struct tee_ioctl_param __user *uparams,
434 			  size_t num_params, struct tee_param *params)
435 {
436 	size_t n;
437 
438 	for (n = 0; n < num_params; n++) {
439 		struct tee_ioctl_param __user *up = uparams + n;
440 		struct tee_param *p = params + n;
441 
442 		switch (p->attr) {
443 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
444 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
445 			if (put_user(p->u.value.a, &up->a) ||
446 			    put_user(p->u.value.b, &up->b) ||
447 			    put_user(p->u.value.c, &up->c))
448 				return -EFAULT;
449 			break;
450 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
451 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
452 			if (put_user((u64)p->u.memref.size, &up->b))
453 				return -EFAULT;
454 		default:
455 			break;
456 		}
457 	}
458 	return 0;
459 }
460 
461 static int tee_ioctl_open_session(struct tee_context *ctx,
462 				  struct tee_ioctl_buf_data __user *ubuf)
463 {
464 	int rc;
465 	size_t n;
466 	struct tee_ioctl_buf_data buf;
467 	struct tee_ioctl_open_session_arg __user *uarg;
468 	struct tee_ioctl_open_session_arg arg;
469 	struct tee_ioctl_param __user *uparams = NULL;
470 	struct tee_param *params = NULL;
471 	bool have_session = false;
472 
473 	if (!ctx->teedev->desc->ops->open_session)
474 		return -EINVAL;
475 
476 	if (copy_from_user(&buf, ubuf, sizeof(buf)))
477 		return -EFAULT;
478 
479 	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
480 	    buf.buf_len < sizeof(struct tee_ioctl_open_session_arg))
481 		return -EINVAL;
482 
483 	uarg = u64_to_user_ptr(buf.buf_ptr);
484 	if (copy_from_user(&arg, uarg, sizeof(arg)))
485 		return -EFAULT;
486 
487 	if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
488 		return -EINVAL;
489 
490 	if (arg.num_params) {
491 		params = kcalloc(arg.num_params, sizeof(struct tee_param),
492 				 GFP_KERNEL);
493 		if (!params)
494 			return -ENOMEM;
495 		uparams = uarg->params;
496 		rc = params_from_user(ctx, params, arg.num_params, uparams);
497 		if (rc)
498 			goto out;
499 	}
500 
501 	if (arg.clnt_login >= TEE_IOCTL_LOGIN_REE_KERNEL_MIN &&
502 	    arg.clnt_login <= TEE_IOCTL_LOGIN_REE_KERNEL_MAX) {
503 		pr_debug("login method not allowed for user-space client\n");
504 		rc = -EPERM;
505 		goto out;
506 	}
507 
508 	rc = ctx->teedev->desc->ops->open_session(ctx, &arg, params);
509 	if (rc)
510 		goto out;
511 	have_session = true;
512 
513 	if (put_user(arg.session, &uarg->session) ||
514 	    put_user(arg.ret, &uarg->ret) ||
515 	    put_user(arg.ret_origin, &uarg->ret_origin)) {
516 		rc = -EFAULT;
517 		goto out;
518 	}
519 	rc = params_to_user(uparams, arg.num_params, params);
520 out:
521 	/*
522 	 * If we succeeded in opening the session but failed to communicate
523 	 * it back to user space, close the session again to avoid leakage.
524 	 */
525 	if (rc && have_session && ctx->teedev->desc->ops->close_session)
526 		ctx->teedev->desc->ops->close_session(ctx, arg.session);
527 
528 	if (params) {
529 		/* Decrease ref count for all valid shared memory pointers */
530 		for (n = 0; n < arg.num_params; n++)
531 			if (tee_param_is_memref(params + n) &&
532 			    params[n].u.memref.shm)
533 				tee_shm_put(params[n].u.memref.shm);
534 		kfree(params);
535 	}
536 
537 	return rc;
538 }
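/*
 * Example (illustrative user-space sketch, assuming the <linux/tee.h> UAPI
 * and with includes and error handling trimmed): the buffer passed via
 * struct tee_ioctl_buf_data is a struct tee_ioctl_open_session_arg followed
 * directly by num_params parameters, exactly the layout the buf_len check
 * above verifies. 'ta_uuid' is assumed to hold the 16-byte UUID of the
 * trusted application.
 *
 *	size_t len = sizeof(struct tee_ioctl_open_session_arg) +
 *		     sizeof(struct tee_ioctl_param);
 *	struct tee_ioctl_open_session_arg *arg = calloc(1, len);
 *	struct tee_ioctl_buf_data bd = {
 *		.buf_ptr = (uintptr_t)arg,
 *		.buf_len = len,
 *	};
 *
 *	memcpy(arg->uuid, ta_uuid, sizeof(arg->uuid));
 *	arg->clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
 *	arg->num_params = 1;
 *	arg->params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
 *
 *	if (ioctl(tee_fd, TEE_IOC_OPEN_SESSION, &bd) == 0 && arg->ret == 0)
 *		session = arg->session;
 */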
539 
540 static int tee_ioctl_invoke(struct tee_context *ctx,
541 			    struct tee_ioctl_buf_data __user *ubuf)
542 {
543 	int rc;
544 	size_t n;
545 	struct tee_ioctl_buf_data buf;
546 	struct tee_ioctl_invoke_arg __user *uarg;
547 	struct tee_ioctl_invoke_arg arg;
548 	struct tee_ioctl_param __user *uparams = NULL;
549 	struct tee_param *params = NULL;
550 
551 	if (!ctx->teedev->desc->ops->invoke_func)
552 		return -EINVAL;
553 
554 	if (copy_from_user(&buf, ubuf, sizeof(buf)))
555 		return -EFAULT;
556 
557 	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
558 	    buf.buf_len < sizeof(struct tee_ioctl_invoke_arg))
559 		return -EINVAL;
560 
561 	uarg = u64_to_user_ptr(buf.buf_ptr);
562 	if (copy_from_user(&arg, uarg, sizeof(arg)))
563 		return -EFAULT;
564 
565 	if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
566 		return -EINVAL;
567 
568 	if (arg.num_params) {
569 		params = kcalloc(arg.num_params, sizeof(struct tee_param),
570 				 GFP_KERNEL);
571 		if (!params)
572 			return -ENOMEM;
573 		uparams = uarg->params;
574 		rc = params_from_user(ctx, params, arg.num_params, uparams);
575 		if (rc)
576 			goto out;
577 	}
578 
579 	rc = ctx->teedev->desc->ops->invoke_func(ctx, &arg, params);
580 	if (rc)
581 		goto out;
582 
583 	if (put_user(arg.ret, &uarg->ret) ||
584 	    put_user(arg.ret_origin, &uarg->ret_origin)) {
585 		rc = -EFAULT;
586 		goto out;
587 	}
588 	rc = params_to_user(uparams, arg.num_params, params);
589 out:
590 	if (params) {
591 		/* Decrease ref count for all valid shared memory pointers */
592 		for (n = 0; n < arg.num_params; n++)
593 			if (tee_param_is_memref(params + n) &&
594 			    params[n].u.memref.shm)
595 				tee_shm_put(params[n].u.memref.shm);
596 		kfree(params);
597 	}
598 	return rc;
599 }
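/*
 * Example (illustrative user-space sketch, assuming the <linux/tee.h> UAPI):
 * TEE_IOC_INVOKE uses the same buffer layout as TEE_IOC_OPEN_SESSION, i.e. a
 * struct tee_ioctl_invoke_arg followed by its parameters and wrapped in
 * struct tee_ioctl_buf_data. The function id, session id and value are
 * placeholders.
 *
 *	size_t len = sizeof(struct tee_ioctl_invoke_arg) +
 *		     sizeof(struct tee_ioctl_param);
 *	struct tee_ioctl_invoke_arg *arg = calloc(1, len);
 *	struct tee_ioctl_buf_data bd = {
 *		.buf_ptr = (uintptr_t)arg,
 *		.buf_len = len,
 *	};
 *
 *	arg->func = 0;
 *	arg->session = session;
 *	arg->num_params = 1;
 *	arg->params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
 *	arg->params[0].a = 42;
 *
 *	if (ioctl(tee_fd, TEE_IOC_INVOKE, &bd) == 0 && arg->ret == 0)
 *		out_value = arg->params[0].a;
 */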
600 
601 static int tee_ioctl_cancel(struct tee_context *ctx,
602 			    struct tee_ioctl_cancel_arg __user *uarg)
603 {
604 	struct tee_ioctl_cancel_arg arg;
605 
606 	if (!ctx->teedev->desc->ops->cancel_req)
607 		return -EINVAL;
608 
609 	if (copy_from_user(&arg, uarg, sizeof(arg)))
610 		return -EFAULT;
611 
612 	return ctx->teedev->desc->ops->cancel_req(ctx, arg.cancel_id,
613 						  arg.session);
614 }
615 
616 static int
617 tee_ioctl_close_session(struct tee_context *ctx,
618 			struct tee_ioctl_close_session_arg __user *uarg)
619 {
620 	struct tee_ioctl_close_session_arg arg;
621 
622 	if (!ctx->teedev->desc->ops->close_session)
623 		return -EINVAL;
624 
625 	if (copy_from_user(&arg, uarg, sizeof(arg)))
626 		return -EFAULT;
627 
628 	return ctx->teedev->desc->ops->close_session(ctx, arg.session);
629 }
630 
631 static int params_to_supp(struct tee_context *ctx,
632 			  struct tee_ioctl_param __user *uparams,
633 			  size_t num_params, struct tee_param *params)
634 {
635 	size_t n;
636 
637 	for (n = 0; n < num_params; n++) {
638 		struct tee_ioctl_param ip;
639 		struct tee_param *p = params + n;
640 
641 		ip.attr = p->attr;
642 		switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
643 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
644 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
645 			ip.a = p->u.value.a;
646 			ip.b = p->u.value.b;
647 			ip.c = p->u.value.c;
648 			break;
649 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
650 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
651 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
652 			ip.b = p->u.memref.size;
653 			if (!p->u.memref.shm) {
654 				ip.a = 0;
655 				ip.c = (u64)-1; /* invalid shm id */
656 				break;
657 			}
658 			ip.a = p->u.memref.shm_offs;
659 			ip.c = p->u.memref.shm->id;
660 			break;
661 		default:
662 			ip.a = 0;
663 			ip.b = 0;
664 			ip.c = 0;
665 			break;
666 		}
667 
668 		if (copy_to_user(uparams + n, &ip, sizeof(ip)))
669 			return -EFAULT;
670 	}
671 
672 	return 0;
673 }
674 
675 static int tee_ioctl_supp_recv(struct tee_context *ctx,
676 			       struct tee_ioctl_buf_data __user *ubuf)
677 {
678 	int rc;
679 	struct tee_ioctl_buf_data buf;
680 	struct tee_iocl_supp_recv_arg __user *uarg;
681 	struct tee_param *params;
682 	u32 num_params;
683 	u32 func;
684 
685 	if (!ctx->teedev->desc->ops->supp_recv)
686 		return -EINVAL;
687 
688 	if (copy_from_user(&buf, ubuf, sizeof(buf)))
689 		return -EFAULT;
690 
691 	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
692 	    buf.buf_len < sizeof(struct tee_iocl_supp_recv_arg))
693 		return -EINVAL;
694 
695 	uarg = u64_to_user_ptr(buf.buf_ptr);
696 	if (get_user(num_params, &uarg->num_params))
697 		return -EFAULT;
698 
699 	if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) != buf.buf_len)
700 		return -EINVAL;
701 
702 	params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
703 	if (!params)
704 		return -ENOMEM;
705 
706 	rc = params_from_user(ctx, params, num_params, uarg->params);
707 	if (rc)
708 		goto out;
709 
710 	rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params);
711 	if (rc)
712 		goto out;
713 
714 	if (put_user(func, &uarg->func) ||
715 	    put_user(num_params, &uarg->num_params)) {
716 		rc = -EFAULT;
717 		goto out;
718 	}
719 
720 	rc = params_to_supp(ctx, uarg->params, num_params, params);
721 out:
722 	kfree(params);
723 	return rc;
724 }
725 
726 static int params_from_supp(struct tee_param *params, size_t num_params,
727 			    struct tee_ioctl_param __user *uparams)
728 {
729 	size_t n;
730 
731 	for (n = 0; n < num_params; n++) {
732 		struct tee_param *p = params + n;
733 		struct tee_ioctl_param ip;
734 
735 		if (copy_from_user(&ip, uparams + n, sizeof(ip)))
736 			return -EFAULT;
737 
738 		/* All unused attribute bits have to be zero */
739 		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
740 			return -EINVAL;
741 
742 		p->attr = ip.attr;
743 		switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
744 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
745 		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
746 			/* Only out and in/out values can be updated */
747 			p->u.value.a = ip.a;
748 			p->u.value.b = ip.b;
749 			p->u.value.c = ip.c;
750 			break;
751 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
752 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
753 			/*
754 			 * Only the size of the memref can be updated.
755 			 * Since we don't have access to the original
756 			 * parameters here, only store the supplied size.
757 			 * The driver will copy the updated size into the
758 			 * original parameters.
759 			 */
760 			p->u.memref.shm = NULL;
761 			p->u.memref.shm_offs = 0;
762 			p->u.memref.size = ip.b;
763 			break;
764 		default:
765 			memset(&p->u, 0, sizeof(p->u));
766 			break;
767 		}
768 	}
769 	return 0;
770 }
771 
772 static int tee_ioctl_supp_send(struct tee_context *ctx,
773 			       struct tee_ioctl_buf_data __user *ubuf)
774 {
775 	long rc;
776 	struct tee_ioctl_buf_data buf;
777 	struct tee_iocl_supp_send_arg __user *uarg;
778 	struct tee_param *params;
779 	u32 num_params;
780 	u32 ret;
781 
782 	/* Not supported by this driver */
783 	if (!ctx->teedev->desc->ops->supp_send)
784 		return -EINVAL;
785 
786 	if (copy_from_user(&buf, ubuf, sizeof(buf)))
787 		return -EFAULT;
788 
789 	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
790 	    buf.buf_len < sizeof(struct tee_iocl_supp_send_arg))
791 		return -EINVAL;
792 
793 	uarg = u64_to_user_ptr(buf.buf_ptr);
794 	if (get_user(ret, &uarg->ret) ||
795 	    get_user(num_params, &uarg->num_params))
796 		return -EFAULT;
797 
798 	if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) > buf.buf_len)
799 		return -EINVAL;
800 
801 	params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
802 	if (!params)
803 		return -ENOMEM;
804 
805 	rc = params_from_supp(params, num_params, uarg->params);
806 	if (rc)
807 		goto out;
808 
809 	rc = ctx->teedev->desc->ops->supp_send(ctx, ret, num_params, params);
810 out:
811 	kfree(params);
812 	return rc;
813 }
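/*
 * Example (illustrative user-space sketch, assuming the <linux/tee.h> UAPI):
 * a supplicant such as tee-supplicant drives the two ioctls above in a loop
 * on the privileged device, blocking in TEE_IOC_SUPPL_RECV until the TEE
 * needs a service and answering with TEE_IOC_SUPPL_SEND. handle_request() is
 * a hypothetical helper that services the request identified by func and
 * rewrites the buffer into the tee_iocl_supp_send_arg layout (return code
 * plus updated parameters) before it is handed back.
 *
 *	size_t len = sizeof(struct tee_iocl_supp_recv_arg) +
 *		     4 * sizeof(struct tee_ioctl_param);
 *	struct tee_iocl_supp_recv_arg *recv_arg = calloc(1, len);
 *	struct tee_ioctl_buf_data bd = {
 *		.buf_ptr = (uintptr_t)recv_arg,
 *		.buf_len = len,
 *	};
 *
 *	for (;;) {
 *		recv_arg->num_params = 4;
 *		if (ioctl(supp_fd, TEE_IOC_SUPPL_RECV, &bd))
 *			break;
 *		handle_request(recv_arg);
 *		if (ioctl(supp_fd, TEE_IOC_SUPPL_SEND, &bd))
 *			break;
 *	}
 */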
814 
815 static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
816 {
817 	struct tee_context *ctx = filp->private_data;
818 	void __user *uarg = (void __user *)arg;
819 
820 	switch (cmd) {
821 	case TEE_IOC_VERSION:
822 		return tee_ioctl_version(ctx, uarg);
823 	case TEE_IOC_SHM_ALLOC:
824 		return tee_ioctl_shm_alloc(ctx, uarg);
825 	case TEE_IOC_SHM_REGISTER:
826 		return tee_ioctl_shm_register(ctx, uarg);
827 	case TEE_IOC_OPEN_SESSION:
828 		return tee_ioctl_open_session(ctx, uarg);
829 	case TEE_IOC_INVOKE:
830 		return tee_ioctl_invoke(ctx, uarg);
831 	case TEE_IOC_CANCEL:
832 		return tee_ioctl_cancel(ctx, uarg);
833 	case TEE_IOC_CLOSE_SESSION:
834 		return tee_ioctl_close_session(ctx, uarg);
835 	case TEE_IOC_SUPPL_RECV:
836 		return tee_ioctl_supp_recv(ctx, uarg);
837 	case TEE_IOC_SUPPL_SEND:
838 		return tee_ioctl_supp_send(ctx, uarg);
839 	default:
840 		return -EINVAL;
841 	}
842 }
843 
844 static const struct file_operations tee_fops = {
845 	.owner = THIS_MODULE,
846 	.open = tee_open,
847 	.release = tee_release,
848 	.unlocked_ioctl = tee_ioctl,
849 	.compat_ioctl = compat_ptr_ioctl,
850 };
851 
852 static void tee_release_device(struct device *dev)
853 {
854 	struct tee_device *teedev = container_of(dev, struct tee_device, dev);
855 
856 	spin_lock(&driver_lock);
857 	clear_bit(teedev->id, dev_mask);
858 	spin_unlock(&driver_lock);
859 	mutex_destroy(&teedev->mutex);
860 	idr_destroy(&teedev->idr);
861 	kfree(teedev);
862 }
863 
864 /**
865  * tee_device_alloc() - Allocate a new struct tee_device instance
866  * @teedesc:	Descriptor for this driver
867  * @dev:	Parent device for this device
868  * @pool:	Shared memory pool, NULL if not used
869  * @driver_data: Private driver data for this device
870  *
871  * Allocates a new struct tee_device instance. The device is
872  * removed by tee_device_unregister().
873  *
874  * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure
875  */
876 struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
877 				    struct device *dev,
878 				    struct tee_shm_pool *pool,
879 				    void *driver_data)
880 {
881 	struct tee_device *teedev;
882 	void *ret;
883 	int rc, max_id;
884 	int offs = 0;
885 
886 	if (!teedesc || !teedesc->name || !teedesc->ops ||
887 	    !teedesc->ops->get_version || !teedesc->ops->open ||
888 	    !teedesc->ops->release || !pool)
889 		return ERR_PTR(-EINVAL);
890 
891 	teedev = kzalloc(sizeof(*teedev), GFP_KERNEL);
892 	if (!teedev) {
893 		ret = ERR_PTR(-ENOMEM);
894 		goto err;
895 	}
896 
897 	max_id = TEE_NUM_DEVICES / 2;
898 
899 	if (teedesc->flags & TEE_DESC_PRIVILEGED) {
900 		offs = TEE_NUM_DEVICES / 2;
901 		max_id = TEE_NUM_DEVICES;
902 	}
903 
904 	spin_lock(&driver_lock);
905 	teedev->id = find_next_zero_bit(dev_mask, max_id, offs);
906 	if (teedev->id < max_id)
907 		set_bit(teedev->id, dev_mask);
908 	spin_unlock(&driver_lock);
909 
910 	if (teedev->id >= max_id) {
911 		ret = ERR_PTR(-ENOMEM);
912 		goto err;
913 	}
914 
915 	snprintf(teedev->name, sizeof(teedev->name), "tee%s%d",
916 		 teedesc->flags & TEE_DESC_PRIVILEGED ? "priv" : "",
917 		 teedev->id - offs);
918 
919 	teedev->dev.class = tee_class;
920 	teedev->dev.release = tee_release_device;
921 	teedev->dev.parent = dev;
922 
923 	teedev->dev.devt = MKDEV(MAJOR(tee_devt), teedev->id);
924 
925 	rc = dev_set_name(&teedev->dev, "%s", teedev->name);
926 	if (rc) {
927 		ret = ERR_PTR(rc);
928 		goto err_devt;
929 	}
930 
931 	cdev_init(&teedev->cdev, &tee_fops);
932 	teedev->cdev.owner = teedesc->owner;
933 
934 	dev_set_drvdata(&teedev->dev, driver_data);
935 	device_initialize(&teedev->dev);
936 
937 	/* 1 as tee_device_unregister() does one final tee_device_put() */
938 	teedev->num_users = 1;
939 	init_completion(&teedev->c_no_users);
940 	mutex_init(&teedev->mutex);
941 	idr_init(&teedev->idr);
942 
943 	teedev->desc = teedesc;
944 	teedev->pool = pool;
945 
946 	return teedev;
947 err_devt:
948 	unregister_chrdev_region(teedev->dev.devt, 1);
949 err:
950 	pr_err("could not register %s driver\n",
951 	       teedesc->flags & TEE_DESC_PRIVILEGED ? "privileged" : "client");
952 	if (teedev && teedev->id < TEE_NUM_DEVICES) {
953 		spin_lock(&driver_lock);
954 		clear_bit(teedev->id, dev_mask);
955 		spin_unlock(&driver_lock);
956 	}
957 	kfree(teedev);
958 	return ret;
959 }
960 EXPORT_SYMBOL_GPL(tee_device_alloc);
961 
962 static ssize_t implementation_id_show(struct device *dev,
963 				      struct device_attribute *attr, char *buf)
964 {
965 	struct tee_device *teedev = container_of(dev, struct tee_device, dev);
966 	struct tee_ioctl_version_data vers;
967 
968 	teedev->desc->ops->get_version(teedev, &vers);
969 	return scnprintf(buf, PAGE_SIZE, "%d\n", vers.impl_id);
970 }
971 static DEVICE_ATTR_RO(implementation_id);
972 
973 static struct attribute *tee_dev_attrs[] = {
974 	&dev_attr_implementation_id.attr,
975 	NULL
976 };
977 
978 ATTRIBUTE_GROUPS(tee_dev);
979 
980 /**
981  * tee_device_register() - Registers a TEE device
982  * @teedev:	Device to register
983  *
984  * tee_device_unregister() needs to be called to remove the @teedev if
985  * this function fails.
986  *
987  * @returns < 0 on failure
988  */
989 int tee_device_register(struct tee_device *teedev)
990 {
991 	int rc;
992 
993 	if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) {
994 		dev_err(&teedev->dev, "attempt to register twice\n");
995 		return -EINVAL;
996 	}
997 
998 	teedev->dev.groups = tee_dev_groups;
999 
1000 	rc = cdev_device_add(&teedev->cdev, &teedev->dev);
1001 	if (rc) {
1002 		dev_err(&teedev->dev,
1003 			"unable to cdev_device_add() %s, major %d, minor %d, err=%d\n",
1004 			teedev->name, MAJOR(teedev->dev.devt),
1005 			MINOR(teedev->dev.devt), rc);
1006 		return rc;
1007 	}
1008 
1009 	teedev->flags |= TEE_DEVICE_FLAG_REGISTERED;
1010 	return 0;
1011 }
1012 EXPORT_SYMBOL_GPL(tee_device_register);
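/*
 * Example (illustrative sketch): the usual calling sequence for a TEE
 * backend driver. The tee_driver_ops, shared memory pool and driver data
 * are assumed to be set up elsewhere in that driver; only the calls into
 * this file are shown.
 *
 *	static const struct tee_desc example_desc = {
 *		.name = "example-clnt",
 *		.ops = &example_ops,
 *		.owner = THIS_MODULE,
 *	};
 *
 *	teedev = tee_device_alloc(&example_desc, NULL, pool, driver_data);
 *	if (IS_ERR(teedev))
 *		return PTR_ERR(teedev);
 *
 *	rc = tee_device_register(teedev);
 *	if (rc) {
 *		tee_device_unregister(teedev);
 *		return rc;
 *	}
 *
 * On teardown the driver calls tee_device_unregister(), which is also the
 * correct cleanup if registration itself failed.
 */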
1013 
1014 void tee_device_put(struct tee_device *teedev)
1015 {
1016 	mutex_lock(&teedev->mutex);
1017 	/* A put should not happen once desc has been cleared */
1018 	if (!WARN_ON(!teedev->desc)) {
1019 		teedev->num_users--;
1020 		if (!teedev->num_users) {
1021 			teedev->desc = NULL;
1022 			complete(&teedev->c_no_users);
1023 		}
1024 	}
1025 	mutex_unlock(&teedev->mutex);
1026 }
1027 
1028 bool tee_device_get(struct tee_device *teedev)
1029 {
1030 	mutex_lock(&teedev->mutex);
1031 	if (!teedev->desc) {
1032 		mutex_unlock(&teedev->mutex);
1033 		return false;
1034 	}
1035 	teedev->num_users++;
1036 	mutex_unlock(&teedev->mutex);
1037 	return true;
1038 }
1039 
1040 /**
1041  * tee_device_unregister() - Removes a TEE device
1042  * @teedev:	Device to unregister
1043  *
1044  * This function should be called to remove the @teedev even if
1045  * tee_device_register() hasn't been called yet. Does nothing if
1046  * @teedev is NULL.
1047  */
1048 void tee_device_unregister(struct tee_device *teedev)
1049 {
1050 	if (!teedev)
1051 		return;
1052 
1053 	if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED)
1054 		cdev_device_del(&teedev->cdev, &teedev->dev);
1055 
1056 	tee_device_put(teedev);
1057 	wait_for_completion(&teedev->c_no_users);
1058 
1059 	/*
1060 	 * No need to take a mutex any longer now since teedev->desc was
1061 	 * set to NULL before teedev->c_no_users was completed.
1062 	 */
1063 
1064 	teedev->pool = NULL;
1065 
1066 	put_device(&teedev->dev);
1067 }
1068 EXPORT_SYMBOL_GPL(tee_device_unregister);
1069 
1070 /**
1071  * tee_get_drvdata() - Return driver_data pointer
1072  * @teedev:	Device containing the driver_data pointer
1073  * @returns the driver_data pointer supplied to tee_device_alloc().
1074  */
1075 void *tee_get_drvdata(struct tee_device *teedev)
1076 {
1077 	return dev_get_drvdata(&teedev->dev);
1078 }
1079 EXPORT_SYMBOL_GPL(tee_get_drvdata);
1080 
1081 struct match_dev_data {
1082 	struct tee_ioctl_version_data *vers;
1083 	const void *data;
1084 	int (*match)(struct tee_ioctl_version_data *, const void *);
1085 };
1086 
1087 static int match_dev(struct device *dev, const void *data)
1088 {
1089 	const struct match_dev_data *match_data = data;
1090 	struct tee_device *teedev = container_of(dev, struct tee_device, dev);
1091 
1092 	teedev->desc->ops->get_version(teedev, match_data->vers);
1093 	return match_data->match(match_data->vers, match_data->data);
1094 }
1095 
1096 struct tee_context *
1097 tee_client_open_context(struct tee_context *start,
1098 			int (*match)(struct tee_ioctl_version_data *,
1099 				     const void *),
1100 			const void *data, struct tee_ioctl_version_data *vers)
1101 {
1102 	struct device *dev = NULL;
1103 	struct device *put_dev = NULL;
1104 	struct tee_context *ctx = NULL;
1105 	struct tee_ioctl_version_data v;
1106 	struct match_dev_data match_data = { vers ? vers : &v, data, match };
1107 
1108 	if (start)
1109 		dev = &start->teedev->dev;
1110 
1111 	do {
1112 		dev = class_find_device(tee_class, dev, &match_data, match_dev);
1113 		if (!dev) {
1114 			ctx = ERR_PTR(-ENOENT);
1115 			break;
1116 		}
1117 
1118 		put_device(put_dev);
1119 		put_dev = dev;
1120 
1121 		ctx = teedev_open(container_of(dev, struct tee_device, dev));
1122 	} while (IS_ERR(ctx) && PTR_ERR(ctx) != -ENOMEM);
1123 
1124 	put_device(put_dev);
1125 	/*
1126 	 * The default behaviour for an in-kernel client is not to wait for
1127 	 * tee-supplicant if it is not present for any requests in this
1128 	 * context. This flag can be changed before calling
1129 	 * tee_client_open_session() if an in-kernel client requires a
1130 	 * different behaviour.
1131 	 */
1132 	if (!IS_ERR(ctx))
1133 		ctx->supp_nowait = true;
1134 
1135 	return ctx;
1136 }
1137 EXPORT_SYMBOL_GPL(tee_client_open_context);
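/*
 * Example (illustrative sketch): a minimal in-kernel client using
 * tee_client_open_context(). The match callback here selects an OP-TEE
 * backend by implementation id; any other driver-specific selection logic
 * can be used instead, and the function names are placeholders.
 *
 *	static int example_match(struct tee_ioctl_version_data *vers,
 *				 const void *data)
 *	{
 *		return vers->impl_id == TEE_IMPL_ID_OPTEE;
 *	}
 *
 *	static struct tee_context *example_open_ctx(void)
 *	{
 *		return tee_client_open_context(NULL, example_match, NULL,
 *					       NULL);
 *	}
 *
 * The context is released again with tee_client_close_context().
 */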
1138 
1139 void tee_client_close_context(struct tee_context *ctx)
1140 {
1141 	teedev_close_context(ctx);
1142 }
1143 EXPORT_SYMBOL_GPL(tee_client_close_context);
1144 
1145 void tee_client_get_version(struct tee_context *ctx,
1146 			    struct tee_ioctl_version_data *vers)
1147 {
1148 	ctx->teedev->desc->ops->get_version(ctx->teedev, vers);
1149 }
1150 EXPORT_SYMBOL_GPL(tee_client_get_version);
1151 
1152 int tee_client_open_session(struct tee_context *ctx,
1153 			    struct tee_ioctl_open_session_arg *arg,
1154 			    struct tee_param *param)
1155 {
1156 	if (!ctx->teedev->desc->ops->open_session)
1157 		return -EINVAL;
1158 	return ctx->teedev->desc->ops->open_session(ctx, arg, param);
1159 }
1160 EXPORT_SYMBOL_GPL(tee_client_open_session);
1161 
1162 int tee_client_close_session(struct tee_context *ctx, u32 session)
1163 {
1164 	if (!ctx->teedev->desc->ops->close_session)
1165 		return -EINVAL;
1166 	return ctx->teedev->desc->ops->close_session(ctx, session);
1167 }
1168 EXPORT_SYMBOL_GPL(tee_client_close_session);
1169 
1170 int tee_client_invoke_func(struct tee_context *ctx,
1171 			   struct tee_ioctl_invoke_arg *arg,
1172 			   struct tee_param *param)
1173 {
1174 	if (!ctx->teedev->desc->ops->invoke_func)
1175 		return -EINVAL;
1176 	return ctx->teedev->desc->ops->invoke_func(ctx, arg, param);
1177 }
1178 EXPORT_SYMBOL_GPL(tee_client_invoke_func);
1179 
1180 int tee_client_cancel_req(struct tee_context *ctx,
1181 			  struct tee_ioctl_cancel_arg *arg)
1182 {
1183 	if (!ctx->teedev->desc->ops->cancel_req)
1184 		return -EINVAL;
1185 	return ctx->teedev->desc->ops->cancel_req(ctx, arg->cancel_id,
1186 						  arg->session);
1187 }
1188 
1189 static int tee_client_device_match(struct device *dev,
1190 				   struct device_driver *drv)
1191 {
1192 	const struct tee_client_device_id *id_table;
1193 	struct tee_client_device *tee_device;
1194 
1195 	id_table = to_tee_client_driver(drv)->id_table;
1196 	tee_device = to_tee_client_device(dev);
1197 
1198 	while (!uuid_is_null(&id_table->uuid)) {
1199 		if (uuid_equal(&tee_device->id.uuid, &id_table->uuid))
1200 			return 1;
1201 		id_table++;
1202 	}
1203 
1204 	return 0;
1205 }
1206 
1207 static int tee_client_device_uevent(struct device *dev,
1208 				    struct kobj_uevent_env *env)
1209 {
1210 	uuid_t *dev_id = &to_tee_client_device(dev)->id.uuid;
1211 
1212 	return add_uevent_var(env, "MODALIAS=tee:%pUb", dev_id);
1213 }
1214 
1215 struct bus_type tee_bus_type = {
1216 	.name		= "tee",
1217 	.match		= tee_client_device_match,
1218 	.uevent		= tee_client_device_uevent,
1219 };
1220 EXPORT_SYMBOL_GPL(tee_bus_type);
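/*
 * Example (illustrative sketch): how a TA-specific kernel driver binds to
 * this bus. The UUID is a placeholder; real drivers (for example optee-rng)
 * list the UUIDs of the trusted applications they drive, and example_probe()
 * and example_remove() are hypothetical callbacks.
 *
 *	static const struct tee_client_device_id example_id_table[] = {
 *		{ UUID_INIT(0x01020304, 0x0506, 0x0708, 0x09, 0x0a,
 *			    0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10) },
 *		{}
 *	};
 *	MODULE_DEVICE_TABLE(tee, example_id_table);
 *
 *	static struct tee_client_driver example_driver = {
 *		.id_table = example_id_table,
 *		.driver = {
 *			.name = "example-ta",
 *			.bus = &tee_bus_type,
 *			.probe = example_probe,
 *			.remove = example_remove,
 *		},
 *	};
 *
 *	module_driver(example_driver, driver_register, driver_unregister,
 *		      &example_driver.driver);
 */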
1221 
1222 static int __init tee_init(void)
1223 {
1224 	int rc;
1225 
1226 	tee_class = class_create(THIS_MODULE, "tee");
1227 	if (IS_ERR(tee_class)) {
1228 		pr_err("couldn't create class\n");
1229 		return PTR_ERR(tee_class);
1230 	}
1231 
1232 	rc = alloc_chrdev_region(&tee_devt, 0, TEE_NUM_DEVICES, "tee");
1233 	if (rc) {
1234 		pr_err("failed to allocate char dev region\n");
1235 		goto out_unreg_class;
1236 	}
1237 
1238 	rc = bus_register(&tee_bus_type);
1239 	if (rc) {
1240 		pr_err("failed to register tee bus\n");
1241 		goto out_unreg_chrdev;
1242 	}
1243 
1244 	return 0;
1245 
1246 out_unreg_chrdev:
1247 	unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
1248 out_unreg_class:
1249 	class_destroy(tee_class);
1250 	tee_class = NULL;
1251 
1252 	return rc;
1253 }
1254 
1255 static void __exit tee_exit(void)
1256 {
1257 	bus_unregister(&tee_bus_type);
1258 	unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
1259 	class_destroy(tee_class);
1260 	tee_class = NULL;
1261 }
1262 
1263 subsys_initcall(tee_init);
1264 module_exit(tee_exit);
1265 
1266 MODULE_AUTHOR("Linaro");
1267 MODULE_DESCRIPTION("TEE Driver");
1268 MODULE_VERSION("1.0");
1269 MODULE_LICENSE("GPL v2");
1270