xref: /openbmc/linux/drivers/tee/tee_core.c (revision 1ac731c529cd4d6adbce134754b51ff7d822b145)
1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   * Copyright (c) 2015-2016, Linaro Limited
4   */
5  
6  #define pr_fmt(fmt) "%s: " fmt, __func__
7  
8  #include <linux/cdev.h>
9  #include <linux/cred.h>
10  #include <linux/fs.h>
11  #include <linux/idr.h>
12  #include <linux/module.h>
13  #include <linux/slab.h>
14  #include <linux/tee_drv.h>
15  #include <linux/uaccess.h>
16  #include <crypto/hash.h>
17  #include <crypto/sha1.h>
18  #include "tee_private.h"
19  
20  #define TEE_NUM_DEVICES	32
21  
22  #define TEE_IOCTL_PARAM_SIZE(x) (sizeof(struct tee_param) * (x))
23  
24  #define TEE_UUID_NS_NAME_SIZE	128
25  
26  /*
27   * TEE Client UUID name space identifier (UUIDv4)
28   *
29   * The value here is a random UUID allocated as the name space identifier
30   * for forming client UUIDs for the TEE environment using the UUIDv5 scheme.
31   */
32  static const uuid_t tee_client_uuid_ns = UUID_INIT(0x58ac9ca0, 0x2086, 0x4683,
33  						   0xa1, 0xb8, 0xec, 0x4b,
34  						   0xc0, 0x8e, 0x01, 0xb6);
35  
36  /*
37   * Unprivileged devices in the lower half range and privileged devices in
38   * the upper half range.
39   */
40  static DECLARE_BITMAP(dev_mask, TEE_NUM_DEVICES);
41  static DEFINE_SPINLOCK(driver_lock);
42  
43  static struct class *tee_class;
44  static dev_t tee_devt;
45  
46  struct tee_context *teedev_open(struct tee_device *teedev)
47  {
48  	int rc;
49  	struct tee_context *ctx;
50  
51  	if (!tee_device_get(teedev))
52  		return ERR_PTR(-EINVAL);
53  
54  	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
55  	if (!ctx) {
56  		rc = -ENOMEM;
57  		goto err;
58  	}
59  
60  	kref_init(&ctx->refcount);
61  	ctx->teedev = teedev;
62  	rc = teedev->desc->ops->open(ctx);
63  	if (rc)
64  		goto err;
65  
66  	return ctx;
67  err:
68  	kfree(ctx);
69  	tee_device_put(teedev);
70  	return ERR_PTR(rc);
71  
72  }
73  EXPORT_SYMBOL_GPL(teedev_open);
74  
75  void teedev_ctx_get(struct tee_context *ctx)
76  {
77  	if (ctx->releasing)
78  		return;
79  
80  	kref_get(&ctx->refcount);
81  }
82  
83  static void teedev_ctx_release(struct kref *ref)
84  {
85  	struct tee_context *ctx = container_of(ref, struct tee_context,
86  					       refcount);
87  	ctx->releasing = true;
88  	ctx->teedev->desc->ops->release(ctx);
89  	kfree(ctx);
90  }
91  
92  void teedev_ctx_put(struct tee_context *ctx)
93  {
94  	if (ctx->releasing)
95  		return;
96  
97  	kref_put(&ctx->refcount, teedev_ctx_release);
98  }
99  
100  void teedev_close_context(struct tee_context *ctx)
101  {
102  	struct tee_device *teedev = ctx->teedev;
103  
104  	teedev_ctx_put(ctx);
105  	tee_device_put(teedev);
106  }
107  EXPORT_SYMBOL_GPL(teedev_close_context);
108  
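/*
 * Example (sketch): how a TEE driver could pair teedev_open() and
 * teedev_close_context() for a driver-private context. The tee_device
 * pointer is assumed to come from the driver's own tee_device_alloc()
 * call; error handling beyond the open itself is omitted.
 */
static struct tee_context *example_get_driver_ctx(struct tee_device *teedev)
{
	struct tee_context *ctx;

	ctx = teedev_open(teedev);	/* takes a reference on teedev */
	if (IS_ERR(ctx))
		return ctx;

	/* An in-kernel context normally must not wait for tee-supplicant */
	ctx->supp_nowait = true;
	return ctx;
}

/* When the driver is done with the context: teedev_close_context(ctx); */
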
109  static int tee_open(struct inode *inode, struct file *filp)
110  {
111  	struct tee_context *ctx;
112  
113  	ctx = teedev_open(container_of(inode->i_cdev, struct tee_device, cdev));
114  	if (IS_ERR(ctx))
115  		return PTR_ERR(ctx);
116  
117  	/*
118  	 * Default user-space behaviour is to wait for tee-supplicant
119  	 * if it is not present for any requests in this context.
120  	 */
121  	ctx->supp_nowait = false;
122  	filp->private_data = ctx;
123  	return 0;
124  }
125  
126  static int tee_release(struct inode *inode, struct file *filp)
127  {
128  	teedev_close_context(filp->private_data);
129  	return 0;
130  }
131  
132  /**
133   * uuid_v5() - Calculate UUIDv5
134   * @uuid: Resulting UUID
135   * @ns: Name space ID for UUIDv5 function
136   * @name: Name for UUIDv5 function
137   * @size: Size of name
138   *
139   * UUIDv5 is specified in RFC 4122.
140   *
141   * This implements the SHA-1 variant of section
142   * 4.3.  Algorithm for Creating a Name-Based UUID
143   */
144  static int uuid_v5(uuid_t *uuid, const uuid_t *ns, const void *name,
145  		   size_t size)
146  {
147  	unsigned char hash[SHA1_DIGEST_SIZE];
148  	struct crypto_shash *shash = NULL;
149  	struct shash_desc *desc = NULL;
150  	int rc;
151  
152  	shash = crypto_alloc_shash("sha1", 0, 0);
153  	if (IS_ERR(shash)) {
154  		rc = PTR_ERR(shash);
155  		pr_err("shash(sha1) allocation failed\n");
156  		return rc;
157  	}
158  
159  	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(shash),
160  		       GFP_KERNEL);
161  	if (!desc) {
162  		rc = -ENOMEM;
163  		goto out_free_shash;
164  	}
165  
166  	desc->tfm = shash;
167  
168  	rc = crypto_shash_init(desc);
169  	if (rc < 0)
170  		goto out_free_desc;
171  
172  	rc = crypto_shash_update(desc, (const u8 *)ns, sizeof(*ns));
173  	if (rc < 0)
174  		goto out_free_desc;
175  
176  	rc = crypto_shash_update(desc, (const u8 *)name, size);
177  	if (rc < 0)
178  		goto out_free_desc;
179  
180  	rc = crypto_shash_final(desc, hash);
181  	if (rc < 0)
182  		goto out_free_desc;
183  
184  	memcpy(uuid->b, hash, UUID_SIZE);
185  
186  	/* Tag for version 5 */
187  	uuid->b[6] = (hash[6] & 0x0F) | 0x50;
188  	uuid->b[8] = (hash[8] & 0x3F) | 0x80;
189  
190  out_free_desc:
191  	kfree(desc);
192  
193  out_free_shash:
194  	crypto_free_shash(shash);
195  	return rc;
196  }
197  
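/*
 * Example (worked values): if bytes 6 and 8 of the SHA-1 hash are 0xab and
 * 0xcd, the tagging above gives
 *
 *	b[6] = (0xab & 0x0f) | 0x50 = 0x5b	(version 5)
 *	b[8] = (0xcd & 0x3f) | 0x80 = 0x8d	(RFC 4122 variant 10x)
 *
 * so every UUID produced here reads back as a version 5, RFC 4122 variant UUID.
 */
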
198  int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
199  				 const u8 connection_data[TEE_IOCTL_UUID_LEN])
200  {
201  	gid_t ns_grp = (gid_t)-1;
202  	kgid_t grp = INVALID_GID;
203  	char *name = NULL;
204  	int name_len;
205  	int rc;
206  
207  	if (connection_method == TEE_IOCTL_LOGIN_PUBLIC ||
208  	    connection_method == TEE_IOCTL_LOGIN_REE_KERNEL) {
209  		/* Nil UUID to be passed to TEE environment */
210  		uuid_copy(uuid, &uuid_null);
211  		return 0;
212  	}
213  
214  	/*
215  	 * In the Linux environment the client UUID is based on UUIDv5.
216  	 *
217  	 * Determine the client UUID with the following semantics for 'name':
218  	 *
219  	 * For TEEC_LOGIN_USER:
220  	 * uid=<uid>
221  	 *
222  	 * For TEEC_LOGIN_GROUP:
223  	 * gid=<gid>
224  	 *
225  	 */
226  
227  	name = kzalloc(TEE_UUID_NS_NAME_SIZE, GFP_KERNEL);
228  	if (!name)
229  		return -ENOMEM;
230  
231  	switch (connection_method) {
232  	case TEE_IOCTL_LOGIN_USER:
233  		name_len = snprintf(name, TEE_UUID_NS_NAME_SIZE, "uid=%x",
234  				    current_euid().val);
235  		if (name_len >= TEE_UUID_NS_NAME_SIZE) {
236  			rc = -E2BIG;
237  			goto out_free_name;
238  		}
239  		break;
240  
241  	case TEE_IOCTL_LOGIN_GROUP:
242  		memcpy(&ns_grp, connection_data, sizeof(gid_t));
243  		grp = make_kgid(current_user_ns(), ns_grp);
244  		if (!gid_valid(grp) || !in_egroup_p(grp)) {
245  			rc = -EPERM;
246  			goto out_free_name;
247  		}
248  
249  		name_len = snprintf(name, TEE_UUID_NS_NAME_SIZE, "gid=%x",
250  				    grp.val);
251  		if (name_len >= TEE_UUID_NS_NAME_SIZE) {
252  			rc = -E2BIG;
253  			goto out_free_name;
254  		}
255  		break;
256  
257  	default:
258  		rc = -EINVAL;
259  		goto out_free_name;
260  	}
261  
262  	rc = uuid_v5(uuid, &tee_client_uuid_ns, name, name_len);
263  out_free_name:
264  	kfree(name);
265  
266  	return rc;
267  }
268  EXPORT_SYMBOL_GPL(tee_session_calc_client_uuid);
269  
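/*
 * Example (sketch): how a TEE driver's open_session() op might use the
 * helper above. "arg" is the struct tee_ioctl_open_session_arg the op is
 * given; what the driver then does with the resulting UUID is driver
 * specific and not shown here.
 */
static int example_fill_client_uuid(struct tee_ioctl_open_session_arg *arg,
				    uuid_t *client_uuid)
{
	/* Nil UUID for kernel/public logins, a UUIDv5 otherwise */
	return tee_session_calc_client_uuid(client_uuid, arg->clnt_login,
					    arg->clnt_uuid);
}
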
270  static int tee_ioctl_version(struct tee_context *ctx,
271  			     struct tee_ioctl_version_data __user *uvers)
272  {
273  	struct tee_ioctl_version_data vers;
274  
275  	ctx->teedev->desc->ops->get_version(ctx->teedev, &vers);
276  
277  	if (ctx->teedev->desc->flags & TEE_DESC_PRIVILEGED)
278  		vers.gen_caps |= TEE_GEN_CAP_PRIVILEGED;
279  
280  	if (copy_to_user(uvers, &vers, sizeof(vers)))
281  		return -EFAULT;
282  
283  	return 0;
284  }
285  
286  static int tee_ioctl_shm_alloc(struct tee_context *ctx,
287  			       struct tee_ioctl_shm_alloc_data __user *udata)
288  {
289  	long ret;
290  	struct tee_ioctl_shm_alloc_data data;
291  	struct tee_shm *shm;
292  
293  	if (copy_from_user(&data, udata, sizeof(data)))
294  		return -EFAULT;
295  
296  	/* Currently no input flags are supported */
297  	if (data.flags)
298  		return -EINVAL;
299  
300  	shm = tee_shm_alloc_user_buf(ctx, data.size);
301  	if (IS_ERR(shm))
302  		return PTR_ERR(shm);
303  
304  	data.id = shm->id;
305  	data.size = shm->size;
306  
307  	if (copy_to_user(udata, &data, sizeof(data)))
308  		ret = -EFAULT;
309  	else
310  		ret = tee_shm_get_fd(shm);
311  
312  	/*
313  	 * will be freed; if tee_shm_get_fd() failed it is freed
314  	 * immediately by this tee_shm_put().
315  	 * be freed immediately.
316  	 */
317  	tee_shm_put(shm);
318  	return ret;
319  }
320  
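/*
 * Example (sketch, user space): allocating shared memory through
 * TEE_IOC_SHM_ALLOC and mapping the returned file descriptor. Error
 * handling is omitted; the ioctl and structs come from <linux/tee.h>.
 *
 *	struct tee_ioctl_shm_alloc_data data = { .size = 4096 };
 *	int shm_fd = ioctl(tee_fd, TEE_IOC_SHM_ALLOC, &data);
 *	void *p = mmap(NULL, data.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       shm_fd, 0);
 *
 * data.id now identifies this buffer when used as a memref parameter, and
 * the memory is released once both shm_fd and the mapping are gone.
 */
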
321  static int
322  tee_ioctl_shm_register(struct tee_context *ctx,
323  		       struct tee_ioctl_shm_register_data __user *udata)
324  {
325  	long ret;
326  	struct tee_ioctl_shm_register_data data;
327  	struct tee_shm *shm;
328  
329  	if (copy_from_user(&data, udata, sizeof(data)))
330  		return -EFAULT;
331  
332  	/* Currently no input flags are supported */
333  	if (data.flags)
334  		return -EINVAL;
335  
336  	shm = tee_shm_register_user_buf(ctx, data.addr, data.length);
337  	if (IS_ERR(shm))
338  		return PTR_ERR(shm);
339  
340  	data.id = shm->id;
341  	data.length = shm->size;
342  
343  	if (copy_to_user(udata, &data, sizeof(data)))
344  		ret = -EFAULT;
345  	else
346  		ret = tee_shm_get_fd(shm);
347  	/*
348  	 * When user space closes the file descriptor the shared memory
349  	 * will be freed; if tee_shm_get_fd() failed it is freed
350  	 * immediately by this tee_shm_put().
351  	 */
352  	tee_shm_put(shm);
353  	return ret;
354  }
355  
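/*
 * Example (sketch, user space): registering an existing buffer instead of
 * allocating a new one. The buffer must stay accessible for as long as the
 * registration is in use.
 *
 *	struct tee_ioctl_shm_register_data data = {
 *		.addr = (uintptr_t)buf,
 *		.length = buf_len,
 *	};
 *	int shm_fd = ioctl(tee_fd, TEE_IOC_SHM_REGISTER, &data);
 *
 * As with TEE_IOC_SHM_ALLOC, data.id identifies the buffer afterwards and
 * closing shm_fd drops the registration.
 */
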
356  static int params_from_user(struct tee_context *ctx, struct tee_param *params,
357  			    size_t num_params,
358  			    struct tee_ioctl_param __user *uparams)
359  {
360  	size_t n;
361  
362  	for (n = 0; n < num_params; n++) {
363  		struct tee_shm *shm;
364  		struct tee_ioctl_param ip;
365  
366  		if (copy_from_user(&ip, uparams + n, sizeof(ip)))
367  			return -EFAULT;
368  
369  		/* All unused attribute bits have to be zero */
370  		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
371  			return -EINVAL;
372  
373  		params[n].attr = ip.attr;
374  		switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
375  		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
376  		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
377  			break;
378  		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
379  		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
380  			params[n].u.value.a = ip.a;
381  			params[n].u.value.b = ip.b;
382  			params[n].u.value.c = ip.c;
383  			break;
384  		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
385  		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
386  		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
387  			/*
388  			 * If a NULL pointer is passed to a TA in the TEE,
389  			 * the ip.c IOCTL parameter is set to TEE_MEMREF_NULL
390  			 * indicating a NULL memory reference.
391  			 */
392  			if (ip.c != TEE_MEMREF_NULL) {
393  				/*
394  				 * If we fail to get a pointer to a shared
395  				 * memory object (and increase the ref count)
396  				 * from an identifier we return an error. All
397  				 * pointers that have been added to params have
398  				 * an increased ref count. It's the caller's
399  				 * responsibility to do tee_shm_put() on all
400  				 * resolved pointers.
401  				 */
402  				shm = tee_shm_get_from_id(ctx, ip.c);
403  				if (IS_ERR(shm))
404  					return PTR_ERR(shm);
405  
406  				/*
407  				 * Ensure offset + size does not wrap around
408  				 * and does not exceed the size of the
409  				 * referred shared memory object.
410  				 */
411  				if ((ip.a + ip.b) < ip.a ||
412  				    (ip.a + ip.b) > shm->size) {
413  					tee_shm_put(shm);
414  					return -EINVAL;
415  				}
416  			} else if (ctx->cap_memref_null) {
417  				/* Pass NULL pointer to OP-TEE */
418  				shm = NULL;
419  			} else {
420  				return -EINVAL;
421  			}
422  
423  			params[n].u.memref.shm_offs = ip.a;
424  			params[n].u.memref.size = ip.b;
425  			params[n].u.memref.shm = shm;
426  			break;
427  		default:
428  			/* Unknown attribute */
429  			return -EINVAL;
430  		}
431  	}
432  	return 0;
433  }
434  
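/*
 * Example (sketch, user space): the layout of a memref parameter as
 * consumed by params_from_user(). "shm_id" is assumed to come from an
 * earlier TEE_IOC_SHM_ALLOC or TEE_IOC_SHM_REGISTER call.
 *
 *	struct tee_ioctl_param p = {
 *		.attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT,
 *		.a = 0,		// offset into the shared memory object
 *		.b = 256,	// number of bytes referenced
 *		.c = shm_id,	// or TEE_MEMREF_NULL for a NULL reference
 *	};
 */
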
435  static int params_to_user(struct tee_ioctl_param __user *uparams,
436  			  size_t num_params, struct tee_param *params)
437  {
438  	size_t n;
439  
440  	for (n = 0; n < num_params; n++) {
441  		struct tee_ioctl_param __user *up = uparams + n;
442  		struct tee_param *p = params + n;
443  
444  		switch (p->attr) {
445  		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
446  		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
447  			if (put_user(p->u.value.a, &up->a) ||
448  			    put_user(p->u.value.b, &up->b) ||
449  			    put_user(p->u.value.c, &up->c))
450  				return -EFAULT;
451  			break;
452  		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
453  		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
454  			if (put_user((u64)p->u.memref.size, &up->b))
455  				return -EFAULT;
456  			break;
457  		default:
458  			break;
459  		}
460  	}
461  	return 0;
462  }
463  
464  static int tee_ioctl_open_session(struct tee_context *ctx,
465  				  struct tee_ioctl_buf_data __user *ubuf)
466  {
467  	int rc;
468  	size_t n;
469  	struct tee_ioctl_buf_data buf;
470  	struct tee_ioctl_open_session_arg __user *uarg;
471  	struct tee_ioctl_open_session_arg arg;
472  	struct tee_ioctl_param __user *uparams = NULL;
473  	struct tee_param *params = NULL;
474  	bool have_session = false;
475  
476  	if (!ctx->teedev->desc->ops->open_session)
477  		return -EINVAL;
478  
479  	if (copy_from_user(&buf, ubuf, sizeof(buf)))
480  		return -EFAULT;
481  
482  	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
483  	    buf.buf_len < sizeof(struct tee_ioctl_open_session_arg))
484  		return -EINVAL;
485  
486  	uarg = u64_to_user_ptr(buf.buf_ptr);
487  	if (copy_from_user(&arg, uarg, sizeof(arg)))
488  		return -EFAULT;
489  
490  	if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
491  		return -EINVAL;
492  
493  	if (arg.num_params) {
494  		params = kcalloc(arg.num_params, sizeof(struct tee_param),
495  				 GFP_KERNEL);
496  		if (!params)
497  			return -ENOMEM;
498  		uparams = uarg->params;
499  		rc = params_from_user(ctx, params, arg.num_params, uparams);
500  		if (rc)
501  			goto out;
502  	}
503  
504  	if (arg.clnt_login >= TEE_IOCTL_LOGIN_REE_KERNEL_MIN &&
505  	    arg.clnt_login <= TEE_IOCTL_LOGIN_REE_KERNEL_MAX) {
506  		pr_debug("login method not allowed for user-space client\n");
507  		rc = -EPERM;
508  		goto out;
509  	}
510  
511  	rc = ctx->teedev->desc->ops->open_session(ctx, &arg, params);
512  	if (rc)
513  		goto out;
514  	have_session = true;
515  
516  	if (put_user(arg.session, &uarg->session) ||
517  	    put_user(arg.ret, &uarg->ret) ||
518  	    put_user(arg.ret_origin, &uarg->ret_origin)) {
519  		rc = -EFAULT;
520  		goto out;
521  	}
522  	rc = params_to_user(uparams, arg.num_params, params);
523  out:
524  	/*
525  	 * If we've succeeded to open the session but failed to communicate
526  	 * it back to user space, close the session again to avoid leakage.
527  	 */
528  	if (rc && have_session && ctx->teedev->desc->ops->close_session)
529  		ctx->teedev->desc->ops->close_session(ctx, arg.session);
530  
531  	if (params) {
532  		/* Decrease ref count for all valid shared memory pointers */
533  		for (n = 0; n < arg.num_params; n++)
534  			if (tee_param_is_memref(params + n) &&
535  			    params[n].u.memref.shm)
536  				tee_shm_put(params[n].u.memref.shm);
537  		kfree(params);
538  	}
539  
540  	return rc;
541  }
542  
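/*
 * Example (sketch, user space): the buffer handed to TEE_IOC_OPEN_SESSION
 * is a tee_ioctl_open_session_arg immediately followed by its parameters,
 * and buf_len must match that size exactly (checked above). "ta_uuid" is a
 * placeholder for the trusted application UUID.
 *
 *	__u8 buf[sizeof(struct tee_ioctl_open_session_arg) +
 *		 2 * sizeof(struct tee_ioctl_param)] = { 0 };
 *	struct tee_ioctl_open_session_arg *arg = (void *)buf;
 *	struct tee_ioctl_buf_data bd = {
 *		.buf_ptr = (uintptr_t)buf,
 *		.buf_len = sizeof(buf),
 *	};
 *
 *	memcpy(arg->uuid, ta_uuid, TEE_IOCTL_UUID_LEN);
 *	arg->clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
 *	arg->num_params = 2;
 *	// fill arg->params[0..1] as in the parameter examples above
 *	ioctl(tee_fd, TEE_IOC_OPEN_SESSION, &bd);
 *
 * On success arg->ret, arg->ret_origin and arg->session have been updated
 * by the kernel and the TEE.
 */
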
543  static int tee_ioctl_invoke(struct tee_context *ctx,
544  			    struct tee_ioctl_buf_data __user *ubuf)
545  {
546  	int rc;
547  	size_t n;
548  	struct tee_ioctl_buf_data buf;
549  	struct tee_ioctl_invoke_arg __user *uarg;
550  	struct tee_ioctl_invoke_arg arg;
551  	struct tee_ioctl_param __user *uparams = NULL;
552  	struct tee_param *params = NULL;
553  
554  	if (!ctx->teedev->desc->ops->invoke_func)
555  		return -EINVAL;
556  
557  	if (copy_from_user(&buf, ubuf, sizeof(buf)))
558  		return -EFAULT;
559  
560  	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
561  	    buf.buf_len < sizeof(struct tee_ioctl_invoke_arg))
562  		return -EINVAL;
563  
564  	uarg = u64_to_user_ptr(buf.buf_ptr);
565  	if (copy_from_user(&arg, uarg, sizeof(arg)))
566  		return -EFAULT;
567  
568  	if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
569  		return -EINVAL;
570  
571  	if (arg.num_params) {
572  		params = kcalloc(arg.num_params, sizeof(struct tee_param),
573  				 GFP_KERNEL);
574  		if (!params)
575  			return -ENOMEM;
576  		uparams = uarg->params;
577  		rc = params_from_user(ctx, params, arg.num_params, uparams);
578  		if (rc)
579  			goto out;
580  	}
581  
582  	rc = ctx->teedev->desc->ops->invoke_func(ctx, &arg, params);
583  	if (rc)
584  		goto out;
585  
586  	if (put_user(arg.ret, &uarg->ret) ||
587  	    put_user(arg.ret_origin, &uarg->ret_origin)) {
588  		rc = -EFAULT;
589  		goto out;
590  	}
591  	rc = params_to_user(uparams, arg.num_params, params);
592  out:
593  	if (params) {
594  		/* Decrease ref count for all valid shared memory pointers */
595  		for (n = 0; n < arg.num_params; n++)
596  			if (tee_param_is_memref(params + n) &&
597  			    params[n].u.memref.shm)
598  				tee_shm_put(params[n].u.memref.shm);
599  		kfree(params);
600  	}
601  	return rc;
602  }
603  
604  static int tee_ioctl_cancel(struct tee_context *ctx,
605  			    struct tee_ioctl_cancel_arg __user *uarg)
606  {
607  	struct tee_ioctl_cancel_arg arg;
608  
609  	if (!ctx->teedev->desc->ops->cancel_req)
610  		return -EINVAL;
611  
612  	if (copy_from_user(&arg, uarg, sizeof(arg)))
613  		return -EFAULT;
614  
615  	return ctx->teedev->desc->ops->cancel_req(ctx, arg.cancel_id,
616  						  arg.session);
617  }
618  
619  static int
620  tee_ioctl_close_session(struct tee_context *ctx,
621  			struct tee_ioctl_close_session_arg __user *uarg)
622  {
623  	struct tee_ioctl_close_session_arg arg;
624  
625  	if (!ctx->teedev->desc->ops->close_session)
626  		return -EINVAL;
627  
628  	if (copy_from_user(&arg, uarg, sizeof(arg)))
629  		return -EFAULT;
630  
631  	return ctx->teedev->desc->ops->close_session(ctx, arg.session);
632  }
633  
634  static int params_to_supp(struct tee_context *ctx,
635  			  struct tee_ioctl_param __user *uparams,
636  			  size_t num_params, struct tee_param *params)
637  {
638  	size_t n;
639  
640  	for (n = 0; n < num_params; n++) {
641  		struct tee_ioctl_param ip;
642  		struct tee_param *p = params + n;
643  
644  		ip.attr = p->attr;
645  		switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
646  		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
647  		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
648  			ip.a = p->u.value.a;
649  			ip.b = p->u.value.b;
650  			ip.c = p->u.value.c;
651  			break;
652  		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
653  		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
654  		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
655  			ip.b = p->u.memref.size;
656  			if (!p->u.memref.shm) {
657  				ip.a = 0;
658  				ip.c = (u64)-1; /* invalid shm id */
659  				break;
660  			}
661  			ip.a = p->u.memref.shm_offs;
662  			ip.c = p->u.memref.shm->id;
663  			break;
664  		default:
665  			ip.a = 0;
666  			ip.b = 0;
667  			ip.c = 0;
668  			break;
669  		}
670  
671  		if (copy_to_user(uparams + n, &ip, sizeof(ip)))
672  			return -EFAULT;
673  	}
674  
675  	return 0;
676  }
677  
678  static int tee_ioctl_supp_recv(struct tee_context *ctx,
679  			       struct tee_ioctl_buf_data __user *ubuf)
680  {
681  	int rc;
682  	struct tee_ioctl_buf_data buf;
683  	struct tee_iocl_supp_recv_arg __user *uarg;
684  	struct tee_param *params;
685  	u32 num_params;
686  	u32 func;
687  
688  	if (!ctx->teedev->desc->ops->supp_recv)
689  		return -EINVAL;
690  
691  	if (copy_from_user(&buf, ubuf, sizeof(buf)))
692  		return -EFAULT;
693  
694  	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
695  	    buf.buf_len < sizeof(struct tee_iocl_supp_recv_arg))
696  		return -EINVAL;
697  
698  	uarg = u64_to_user_ptr(buf.buf_ptr);
699  	if (get_user(num_params, &uarg->num_params))
700  		return -EFAULT;
701  
702  	if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) != buf.buf_len)
703  		return -EINVAL;
704  
705  	params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
706  	if (!params)
707  		return -ENOMEM;
708  
709  	rc = params_from_user(ctx, params, num_params, uarg->params);
710  	if (rc)
711  		goto out;
712  
713  	rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params);
714  	if (rc)
715  		goto out;
716  
717  	if (put_user(func, &uarg->func) ||
718  	    put_user(num_params, &uarg->num_params)) {
719  		rc = -EFAULT;
720  		goto out;
721  	}
722  
723  	rc = params_to_supp(ctx, uarg->params, num_params, params);
724  out:
725  	kfree(params);
726  	return rc;
727  }
728  
729  static int params_from_supp(struct tee_param *params, size_t num_params,
730  			    struct tee_ioctl_param __user *uparams)
731  {
732  	size_t n;
733  
734  	for (n = 0; n < num_params; n++) {
735  		struct tee_param *p = params + n;
736  		struct tee_ioctl_param ip;
737  
738  		if (copy_from_user(&ip, uparams + n, sizeof(ip)))
739  			return -EFAULT;
740  
741  		/* All unused attribute bits have to be zero */
742  		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
743  			return -EINVAL;
744  
745  		p->attr = ip.attr;
746  		switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
747  		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
748  		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
749  			/* Only out and in/out values can be updated */
750  			p->u.value.a = ip.a;
751  			p->u.value.b = ip.b;
752  			p->u.value.c = ip.c;
753  			break;
754  		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
755  		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
756  			/*
757  			 * Only the size of the memref can be updated.
758  			 * Since we don't have access to the original
759  			 * parameters here, only store the supplied size.
760  			 * The driver will copy the updated size into the
761  			 * original parameters.
762  			 */
763  			p->u.memref.shm = NULL;
764  			p->u.memref.shm_offs = 0;
765  			p->u.memref.size = ip.b;
766  			break;
767  		default:
768  			memset(&p->u, 0, sizeof(p->u));
769  			break;
770  		}
771  	}
772  	return 0;
773  }
774  
775  static int tee_ioctl_supp_send(struct tee_context *ctx,
776  			       struct tee_ioctl_buf_data __user *ubuf)
777  {
778  	long rc;
779  	struct tee_ioctl_buf_data buf;
780  	struct tee_iocl_supp_send_arg __user *uarg;
781  	struct tee_param *params;
782  	u32 num_params;
783  	u32 ret;
784  
785  	/* Not valid for this driver */
786  	if (!ctx->teedev->desc->ops->supp_send)
787  		return -EINVAL;
788  
789  	if (copy_from_user(&buf, ubuf, sizeof(buf)))
790  		return -EFAULT;
791  
792  	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
793  	    buf.buf_len < sizeof(struct tee_iocl_supp_send_arg))
794  		return -EINVAL;
795  
796  	uarg = u64_to_user_ptr(buf.buf_ptr);
797  	if (get_user(ret, &uarg->ret) ||
798  	    get_user(num_params, &uarg->num_params))
799  		return -EFAULT;
800  
801  	if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) > buf.buf_len)
802  		return -EINVAL;
803  
804  	params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
805  	if (!params)
806  		return -ENOMEM;
807  
808  	rc = params_from_supp(params, num_params, uarg->params);
809  	if (rc)
810  		goto out;
811  
812  	rc = ctx->teedev->desc->ops->supp_send(ctx, ret, num_params, params);
813  out:
814  	kfree(params);
815  	return rc;
816  }
817  
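/*
 * Example (sketch, user space): the supplicant (normally tee-supplicant)
 * drives the two ioctls above in a loop, receiving a request together with
 * its parameters and then answering it. The send argument uses the same
 * buffer layout as the receive argument, with func replaced by ret. Very
 * roughly, with all error handling and request dispatch left out:
 *
 *	for (;;) {
 *		// bd describes a buffer holding a tee_iocl_supp_recv_arg
 *		// followed by its parameters
 *		ioctl(tee_fd, TEE_IOC_SUPPL_RECV, &bd);
 *		// ...handle arg->func using arg->params...
 *		// rewrite the header as a tee_iocl_supp_send_arg (ret,
 *		// num_params) with updated output parameters
 *		ioctl(tee_fd, TEE_IOC_SUPPL_SEND, &bd);
 *	}
 */
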
818  static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
819  {
820  	struct tee_context *ctx = filp->private_data;
821  	void __user *uarg = (void __user *)arg;
822  
823  	switch (cmd) {
824  	case TEE_IOC_VERSION:
825  		return tee_ioctl_version(ctx, uarg);
826  	case TEE_IOC_SHM_ALLOC:
827  		return tee_ioctl_shm_alloc(ctx, uarg);
828  	case TEE_IOC_SHM_REGISTER:
829  		return tee_ioctl_shm_register(ctx, uarg);
830  	case TEE_IOC_OPEN_SESSION:
831  		return tee_ioctl_open_session(ctx, uarg);
832  	case TEE_IOC_INVOKE:
833  		return tee_ioctl_invoke(ctx, uarg);
834  	case TEE_IOC_CANCEL:
835  		return tee_ioctl_cancel(ctx, uarg);
836  	case TEE_IOC_CLOSE_SESSION:
837  		return tee_ioctl_close_session(ctx, uarg);
838  	case TEE_IOC_SUPPL_RECV:
839  		return tee_ioctl_supp_recv(ctx, uarg);
840  	case TEE_IOC_SUPPL_SEND:
841  		return tee_ioctl_supp_send(ctx, uarg);
842  	default:
843  		return -EINVAL;
844  	}
845  }
846  
847  static const struct file_operations tee_fops = {
848  	.owner = THIS_MODULE,
849  	.open = tee_open,
850  	.release = tee_release,
851  	.unlocked_ioctl = tee_ioctl,
852  	.compat_ioctl = compat_ptr_ioctl,
853  };
854  
855  static void tee_release_device(struct device *dev)
856  {
857  	struct tee_device *teedev = container_of(dev, struct tee_device, dev);
858  
859  	spin_lock(&driver_lock);
860  	clear_bit(teedev->id, dev_mask);
861  	spin_unlock(&driver_lock);
862  	mutex_destroy(&teedev->mutex);
863  	idr_destroy(&teedev->idr);
864  	kfree(teedev);
865  }
866  
867  /**
868   * tee_device_alloc() - Allocate a new struct tee_device instance
869   * @teedesc:	Descriptor for this driver
870   * @dev:	Parent device for this device
871   * @pool:	Shared memory pool, NULL if not used
872   * @driver_data: Private driver data for this device
873   *
874   * Allocates a new struct tee_device instance. The device is
875   * removed by tee_device_unregister().
876   *
877   * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure
878   */
879  struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
880  				    struct device *dev,
881  				    struct tee_shm_pool *pool,
882  				    void *driver_data)
883  {
884  	struct tee_device *teedev;
885  	void *ret;
886  	int rc, max_id;
887  	int offs = 0;
888  
889  	if (!teedesc || !teedesc->name || !teedesc->ops ||
890  	    !teedesc->ops->get_version || !teedesc->ops->open ||
891  	    !teedesc->ops->release || !pool)
892  		return ERR_PTR(-EINVAL);
893  
894  	teedev = kzalloc(sizeof(*teedev), GFP_KERNEL);
895  	if (!teedev) {
896  		ret = ERR_PTR(-ENOMEM);
897  		goto err;
898  	}
899  
900  	max_id = TEE_NUM_DEVICES / 2;
901  
902  	if (teedesc->flags & TEE_DESC_PRIVILEGED) {
903  		offs = TEE_NUM_DEVICES / 2;
904  		max_id = TEE_NUM_DEVICES;
905  	}
906  
907  	spin_lock(&driver_lock);
908  	teedev->id = find_next_zero_bit(dev_mask, max_id, offs);
909  	if (teedev->id < max_id)
910  		set_bit(teedev->id, dev_mask);
911  	spin_unlock(&driver_lock);
912  
913  	if (teedev->id >= max_id) {
914  		ret = ERR_PTR(-ENOMEM);
915  		goto err;
916  	}
917  
918  	snprintf(teedev->name, sizeof(teedev->name), "tee%s%d",
919  		 teedesc->flags & TEE_DESC_PRIVILEGED ? "priv" : "",
920  		 teedev->id - offs);
921  
922  	teedev->dev.class = tee_class;
923  	teedev->dev.release = tee_release_device;
924  	teedev->dev.parent = dev;
925  
926  	teedev->dev.devt = MKDEV(MAJOR(tee_devt), teedev->id);
927  
928  	rc = dev_set_name(&teedev->dev, "%s", teedev->name);
929  	if (rc) {
930  		ret = ERR_PTR(rc);
931  		goto err_devt;
932  	}
933  
934  	cdev_init(&teedev->cdev, &tee_fops);
935  	teedev->cdev.owner = teedesc->owner;
936  
937  	dev_set_drvdata(&teedev->dev, driver_data);
938  	device_initialize(&teedev->dev);
939  
940  	/* 1 as tee_device_unregister() does one final tee_device_put() */
941  	teedev->num_users = 1;
942  	init_completion(&teedev->c_no_users);
943  	mutex_init(&teedev->mutex);
944  	idr_init(&teedev->idr);
945  
946  	teedev->desc = teedesc;
947  	teedev->pool = pool;
948  
949  	return teedev;
950  err_devt:
951  	unregister_chrdev_region(teedev->dev.devt, 1);
952  err:
953  	pr_err("could not register %s driver\n",
954  	       teedesc->flags & TEE_DESC_PRIVILEGED ? "privileged" : "client");
955  	if (teedev && teedev->id < TEE_NUM_DEVICES) {
956  		spin_lock(&driver_lock);
957  		clear_bit(teedev->id, dev_mask);
958  		spin_unlock(&driver_lock);
959  	}
960  	kfree(teedev);
961  	return ret;
962  }
963  EXPORT_SYMBOL_GPL(tee_device_alloc);
964  
965  static ssize_t implementation_id_show(struct device *dev,
966  				      struct device_attribute *attr, char *buf)
967  {
968  	struct tee_device *teedev = container_of(dev, struct tee_device, dev);
969  	struct tee_ioctl_version_data vers;
970  
971  	teedev->desc->ops->get_version(teedev, &vers);
972  	return scnprintf(buf, PAGE_SIZE, "%d\n", vers.impl_id);
973  }
974  static DEVICE_ATTR_RO(implementation_id);
975  
976  static struct attribute *tee_dev_attrs[] = {
977  	&dev_attr_implementation_id.attr,
978  	NULL
979  };
980  
981  ATTRIBUTE_GROUPS(tee_dev);
982  
983  /**
984   * tee_device_register() - Registers a TEE device
985   * @teedev:	Device to register
986   *
987   * tee_device_unregister() needs to be called to remove the @teedev if
988   * this function fails.
989   *
990   * @returns < 0 on failure
991   */
992  int tee_device_register(struct tee_device *teedev)
993  {
994  	int rc;
995  
996  	if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) {
997  		dev_err(&teedev->dev, "attempt to register twice\n");
998  		return -EINVAL;
999  	}
1000  
1001  	teedev->dev.groups = tee_dev_groups;
1002  
1003  	rc = cdev_device_add(&teedev->cdev, &teedev->dev);
1004  	if (rc) {
1005  		dev_err(&teedev->dev,
1006  			"unable to cdev_device_add() %s, major %d, minor %d, err=%d\n",
1007  			teedev->name, MAJOR(teedev->dev.devt),
1008  			MINOR(teedev->dev.devt), rc);
1009  		return rc;
1010  	}
1011  
1012  	teedev->flags |= TEE_DEVICE_FLAG_REGISTERED;
1013  	return 0;
1014  }
1015  EXPORT_SYMBOL_GPL(tee_device_register);
1016  
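/*
 * Example (sketch): the allocate-then-register sequence a TEE driver is
 * expected to follow. "example_ops" (the driver's struct tee_driver_ops)
 * and the shared memory pool are placeholders the driver has to provide;
 * tee_device_alloc() rejects a NULL pool.
 */
static const struct tee_desc example_desc = {
	.name = "example-clnt",
	.ops = &example_ops,
	.owner = THIS_MODULE,
};

static int example_register_teedev(struct device *parent,
				   struct tee_shm_pool *pool, void *drv_data)
{
	struct tee_device *teedev;
	int rc;

	teedev = tee_device_alloc(&example_desc, parent, pool, drv_data);
	if (IS_ERR(teedev))
		return PTR_ERR(teedev);

	rc = tee_device_register(teedev);
	if (rc)
		/* required even when registration fails, see below */
		tee_device_unregister(teedev);
	return rc;
}
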
1017  void tee_device_put(struct tee_device *teedev)
1018  {
1019  	mutex_lock(&teedev->mutex);
1020  	/* Shouldn't put in this state */
1021  	if (!WARN_ON(!teedev->desc)) {
1022  		teedev->num_users--;
1023  		if (!teedev->num_users) {
1024  			teedev->desc = NULL;
1025  			complete(&teedev->c_no_users);
1026  		}
1027  	}
1028  	mutex_unlock(&teedev->mutex);
1029  }
1030  
1031  bool tee_device_get(struct tee_device *teedev)
1032  {
1033  	mutex_lock(&teedev->mutex);
1034  	if (!teedev->desc) {
1035  		mutex_unlock(&teedev->mutex);
1036  		return false;
1037  	}
1038  	teedev->num_users++;
1039  	mutex_unlock(&teedev->mutex);
1040  	return true;
1041  }
1042  
1043  /**
1044   * tee_device_unregister() - Removes a TEE device
1045   * @teedev:	Device to unregister
1046   *
1047   * This function should be called to remove the @teedev even if
1048   * tee_device_register() hasn't been called yet. Does nothing if
1049   * @teedev is NULL.
1050   */
1051  void tee_device_unregister(struct tee_device *teedev)
1052  {
1053  	if (!teedev)
1054  		return;
1055  
1056  	if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED)
1057  		cdev_device_del(&teedev->cdev, &teedev->dev);
1058  
1059  	tee_device_put(teedev);
1060  	wait_for_completion(&teedev->c_no_users);
1061  
1062  	/*
1063  	 * No need to take a mutex any longer now since teedev->desc was
1064  	 * set to NULL before teedev->c_no_users was completed.
1065  	 */
1066  
1067  	teedev->pool = NULL;
1068  
1069  	put_device(&teedev->dev);
1070  }
1071  EXPORT_SYMBOL_GPL(tee_device_unregister);
1072  
1073  /**
1074   * tee_get_drvdata() - Return driver_data pointer
1075   * @teedev:	Device containing the driver_data pointer
1076   * @returns the driver_data pointer supplied to tee_device_alloc().
1077   */
1078  void *tee_get_drvdata(struct tee_device *teedev)
1079  {
1080  	return dev_get_drvdata(&teedev->dev);
1081  }
1082  EXPORT_SYMBOL_GPL(tee_get_drvdata);
1083  
1084  struct match_dev_data {
1085  	struct tee_ioctl_version_data *vers;
1086  	const void *data;
1087  	int (*match)(struct tee_ioctl_version_data *, const void *);
1088  };
1089  
1090  static int match_dev(struct device *dev, const void *data)
1091  {
1092  	const struct match_dev_data *match_data = data;
1093  	struct tee_device *teedev = container_of(dev, struct tee_device, dev);
1094  
1095  	teedev->desc->ops->get_version(teedev, match_data->vers);
1096  	return match_data->match(match_data->vers, match_data->data);
1097  }
1098  
1099  struct tee_context *
1100  tee_client_open_context(struct tee_context *start,
1101  			int (*match)(struct tee_ioctl_version_data *,
1102  				     const void *),
1103  			const void *data, struct tee_ioctl_version_data *vers)
1104  {
1105  	struct device *dev = NULL;
1106  	struct device *put_dev = NULL;
1107  	struct tee_context *ctx = NULL;
1108  	struct tee_ioctl_version_data v;
1109  	struct match_dev_data match_data = { vers ? vers : &v, data, match };
1110  
1111  	if (start)
1112  		dev = &start->teedev->dev;
1113  
1114  	do {
1115  		dev = class_find_device(tee_class, dev, &match_data, match_dev);
1116  		if (!dev) {
1117  			ctx = ERR_PTR(-ENOENT);
1118  			break;
1119  		}
1120  
1121  		put_device(put_dev);
1122  		put_dev = dev;
1123  
1124  		ctx = teedev_open(container_of(dev, struct tee_device, dev));
1125  	} while (IS_ERR(ctx) && PTR_ERR(ctx) != -ENOMEM);
1126  
1127  	put_device(put_dev);
1128  	/*
1129  	 * The default behaviour for an in-kernel client is to not wait for
1130  	 * tee-supplicant if it is not present for any requests in this
1131  	 * context. This flag can be changed again before the call to
1132  	 * tee_client_open_session() if an in-kernel client requires
1133  	 * different behaviour.
1134  	 */
1135  	if (!IS_ERR(ctx))
1136  		ctx->supp_nowait = true;
1137  
1138  	return ctx;
1139  }
1140  EXPORT_SYMBOL_GPL(tee_client_open_context);
1141  
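/*
 * Example (sketch): a minimal in-kernel client locating an OP-TEE backed
 * TEE device. TEE_IMPL_ID_OPTEE comes from the TEE uapi header; any other
 * implementation id works the same way.
 */
static int example_ctx_match(struct tee_ioctl_version_data *ver,
			     const void *data)
{
	return ver->impl_id == TEE_IMPL_ID_OPTEE;
}

static struct tee_context *example_open_optee_ctx(void)
{
	/* Returns ERR_PTR(-ENOENT) when no matching TEE device exists */
	return tee_client_open_context(NULL, example_ctx_match, NULL, NULL);
}
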
1142  void tee_client_close_context(struct tee_context *ctx)
1143  {
1144  	teedev_close_context(ctx);
1145  }
1146  EXPORT_SYMBOL_GPL(tee_client_close_context);
1147  
1148  void tee_client_get_version(struct tee_context *ctx,
1149  			    struct tee_ioctl_version_data *vers)
1150  {
1151  	ctx->teedev->desc->ops->get_version(ctx->teedev, vers);
1152  }
1153  EXPORT_SYMBOL_GPL(tee_client_get_version);
1154  
1155  int tee_client_open_session(struct tee_context *ctx,
1156  			    struct tee_ioctl_open_session_arg *arg,
1157  			    struct tee_param *param)
1158  {
1159  	if (!ctx->teedev->desc->ops->open_session)
1160  		return -EINVAL;
1161  	return ctx->teedev->desc->ops->open_session(ctx, arg, param);
1162  }
1163  EXPORT_SYMBOL_GPL(tee_client_open_session);
1164  
1165  int tee_client_close_session(struct tee_context *ctx, u32 session)
1166  {
1167  	if (!ctx->teedev->desc->ops->close_session)
1168  		return -EINVAL;
1169  	return ctx->teedev->desc->ops->close_session(ctx, session);
1170  }
1171  EXPORT_SYMBOL_GPL(tee_client_close_session);
1172  
1173  int tee_client_invoke_func(struct tee_context *ctx,
1174  			   struct tee_ioctl_invoke_arg *arg,
1175  			   struct tee_param *param)
1176  {
1177  	if (!ctx->teedev->desc->ops->invoke_func)
1178  		return -EINVAL;
1179  	return ctx->teedev->desc->ops->invoke_func(ctx, arg, param);
1180  }
1181  EXPORT_SYMBOL_GPL(tee_client_invoke_func);
1182  
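/*
 * Example (sketch): invoking a command in an already opened session with a
 * single value parameter. "session" is assumed to come from a previous
 * tee_client_open_session() call and EXAMPLE_TA_CMD is a placeholder for a
 * command id the trusted application understands.
 */
static int example_invoke(struct tee_context *ctx, u32 session, u64 input,
			  u64 *output)
{
	struct tee_ioctl_invoke_arg arg = {
		.func = EXAMPLE_TA_CMD,
		.session = session,
		.num_params = 1,
	};
	struct tee_param param = {
		.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT,
		.u.value.a = input,
	};
	int rc;

	rc = tee_client_invoke_func(ctx, &arg, &param);
	if (rc)
		return rc;
	if (arg.ret)
		return -EIO;	/* TEE-level error, arg.ret_origin says where */

	*output = param.u.value.a;	/* may have been updated by the TA */
	return 0;
}
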
1183  int tee_client_cancel_req(struct tee_context *ctx,
1184  			  struct tee_ioctl_cancel_arg *arg)
1185  {
1186  	if (!ctx->teedev->desc->ops->cancel_req)
1187  		return -EINVAL;
1188  	return ctx->teedev->desc->ops->cancel_req(ctx, arg->cancel_id,
1189  						  arg->session);
1190  }
1191  
1192  static int tee_client_device_match(struct device *dev,
1193  				   struct device_driver *drv)
1194  {
1195  	const struct tee_client_device_id *id_table;
1196  	struct tee_client_device *tee_device;
1197  
1198  	id_table = to_tee_client_driver(drv)->id_table;
1199  	tee_device = to_tee_client_device(dev);
1200  
1201  	while (!uuid_is_null(&id_table->uuid)) {
1202  		if (uuid_equal(&tee_device->id.uuid, &id_table->uuid))
1203  			return 1;
1204  		id_table++;
1205  	}
1206  
1207  	return 0;
1208  }
1209  
1210  static int tee_client_device_uevent(const struct device *dev,
1211  				    struct kobj_uevent_env *env)
1212  {
1213  	uuid_t *dev_id = &to_tee_client_device(dev)->id.uuid;
1214  
1215  	return add_uevent_var(env, "MODALIAS=tee:%pUb", dev_id);
1216  }
1217  
1218  struct bus_type tee_bus_type = {
1219  	.name		= "tee",
1220  	.match		= tee_client_device_match,
1221  	.uevent		= tee_client_device_uevent,
1222  };
1223  EXPORT_SYMBOL_GPL(tee_bus_type);
1224  
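/*
 * Example (sketch): a client driver binding to a trusted application
 * exposed on this bus. The UUID below is a made-up placeholder; the driver
 * is registered from its module init with
 * driver_register(&example_driver.driver) and removed again with
 * driver_unregister(&example_driver.driver). Probe/remove callbacks go in
 * .driver as for any other driver and are not shown.
 */
static const struct tee_client_device_id example_id_table[] = {
	{ UUID_INIT(0x00000000, 0x0000, 0x0000,
		    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01) },
	{ }
};
MODULE_DEVICE_TABLE(tee, example_id_table);

static struct tee_client_driver example_driver = {
	.id_table = example_id_table,
	.driver = {
		.name = "example-ta",
		.bus = &tee_bus_type,
	},
};
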
1225  static int __init tee_init(void)
1226  {
1227  	int rc;
1228  
1229  	tee_class = class_create("tee");
1230  	if (IS_ERR(tee_class)) {
1231  		pr_err("couldn't create class\n");
1232  		return PTR_ERR(tee_class);
1233  	}
1234  
1235  	rc = alloc_chrdev_region(&tee_devt, 0, TEE_NUM_DEVICES, "tee");
1236  	if (rc) {
1237  		pr_err("failed to allocate char dev region\n");
1238  		goto out_unreg_class;
1239  	}
1240  
1241  	rc = bus_register(&tee_bus_type);
1242  	if (rc) {
1243  		pr_err("failed to register tee bus\n");
1244  		goto out_unreg_chrdev;
1245  	}
1246  
1247  	return 0;
1248  
1249  out_unreg_chrdev:
1250  	unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
1251  out_unreg_class:
1252  	class_destroy(tee_class);
1253  	tee_class = NULL;
1254  
1255  	return rc;
1256  }
1257  
1258  static void __exit tee_exit(void)
1259  {
1260  	bus_unregister(&tee_bus_type);
1261  	unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
1262  	class_destroy(tee_class);
1263  	tee_class = NULL;
1264  }
1265  
1266  subsys_initcall(tee_init);
1267  module_exit(tee_exit);
1268  
1269  MODULE_AUTHOR("Linaro");
1270  MODULE_DESCRIPTION("TEE Driver");
1271  MODULE_VERSION("1.0");
1272  MODULE_LICENSE("GPL v2");
1273