xref: /openbmc/linux/drivers/tee/optee/supp.c (revision a34a9f1a)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Linaro Limited
 */
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "optee_private.h"

struct optee_supp_req {
	struct list_head link;

	bool in_queue;
	u32 func;
	u32 ret;
	size_t num_params;
	struct tee_param *param;

	struct completion c;
};

void optee_supp_init(struct optee_supp *supp)
{
	memset(supp, 0, sizeof(*supp));
	mutex_init(&supp->mutex);
	init_completion(&supp->reqs_c);
	idr_init(&supp->idr);
	INIT_LIST_HEAD(&supp->reqs);
	supp->req_id = -1;
}

void optee_supp_uninit(struct optee_supp *supp)
{
	mutex_destroy(&supp->mutex);
	idr_destroy(&supp->idr);
}

void optee_supp_release(struct optee_supp *supp)
{
	int id;
	struct optee_supp_req *req;
	struct optee_supp_req *req_tmp;

	mutex_lock(&supp->mutex);

	/* Abort all requests retrieved by the supplicant */
	idr_for_each_entry(&supp->idr, req, id) {
		idr_remove(&supp->idr, id);
		req->ret = TEEC_ERROR_COMMUNICATION;
		complete(&req->c);
	}

	/* Abort all queued requests */
	list_for_each_entry_safe(req, req_tmp, &supp->reqs, link) {
		list_del(&req->link);
		req->in_queue = false;
		req->ret = TEEC_ERROR_COMMUNICATION;
		complete(&req->c);
	}

	supp->ctx = NULL;
	supp->req_id = -1;

	mutex_unlock(&supp->mutex);
}

/**
 * optee_supp_thrd_req() - request service from supplicant
 * @ctx:	context doing the request
 * @func:	function requested
 * @num_params:	number of elements in @param array
 * @param:	parameters for function
 *
 * Returns result of operation to be passed to secure world
 */
u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
			struct tee_param *param)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_supp *supp = &optee->supp;
	struct optee_supp_req *req;
	bool interruptable;
	u32 ret;

	/*
	 * Return in case there is no supplicant available and
	 * this is a non-blocking request.
	 */
	if (!supp->ctx && ctx->supp_nowait)
		return TEEC_ERROR_COMMUNICATION;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return TEEC_ERROR_OUT_OF_MEMORY;

	init_completion(&req->c);
	req->func = func;
	req->num_params = num_params;
	req->param = param;

	/* Insert the request in the request list */
	mutex_lock(&supp->mutex);
	list_add_tail(&req->link, &supp->reqs);
	req->in_queue = true;
	mutex_unlock(&supp->mutex);

	/* Tell an eventual waiter there's a new request */
	complete(&supp->reqs_c);

	/*
	 * Wait for the supplicant to process and return the result; once
	 * we've returned successfully from wait_for_completion(&req->c)
	 * we have exclusive access again.
	 */
	while (wait_for_completion_interruptible(&req->c)) {
		mutex_lock(&supp->mutex);
		interruptable = !supp->ctx;
		if (interruptable) {
			/*
			 * There's no supplicant available and since the
			 * supp->mutex currently is held none can
			 * become available until the mutex is released
			 * again.
			 *
			 * Interrupting an RPC to the supplicant is only
			 * allowed as a way of slightly improving the user
			 * experience in case the supplicant hasn't been
			 * started yet. During normal operation the supplicant
			 * will serve all requests in a timely manner and
			 * interrupting them wouldn't make sense.
			 */
			if (req->in_queue) {
				list_del(&req->link);
				req->in_queue = false;
			}
		}
		mutex_unlock(&supp->mutex);

		if (interruptable) {
			req->ret = TEEC_ERROR_COMMUNICATION;
			break;
		}
	}

	ret = req->ret;
	kfree(req);

	return ret;
}
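
/*
 * Illustrative sketch (an assumption, not a caller that exists in this
 * file): the RPC handling code hands a request to the supplicant by
 * filling a struct tee_param array and passing it here together with a
 * command number. The helper name example_load_ta() and the command
 * constant OPTEE_RPC_CMD_LOAD_TA are used for illustration only.
 *
 *	static u32 example_load_ta(struct tee_context *ctx,
 *				   struct tee_param params[2])
 *	{
 *		// params[0]: value parameter holding the TA UUID,
 *		// params[1]: memref the supplicant fills with the TA binary.
 *		// The returned TEEC_* code is passed back to secure world.
 *		return optee_supp_thrd_req(ctx, OPTEE_RPC_CMD_LOAD_TA, 2,
 *					   params);
 *	}
 */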

static struct optee_supp_req *supp_pop_entry(struct optee_supp *supp,
					     int num_params, int *id)
{
	struct optee_supp_req *req;

	if (supp->req_id != -1) {
		/*
		 * Supplicant should not mix synchronous and asynchronous
		 * requests.
		 */
		return ERR_PTR(-EINVAL);
	}

	if (list_empty(&supp->reqs))
		return NULL;

	req = list_first_entry(&supp->reqs, struct optee_supp_req, link);

	if (num_params < req->num_params) {
		/* Not enough room for parameters */
		return ERR_PTR(-EINVAL);
	}

	*id = idr_alloc(&supp->idr, req, 1, 0, GFP_KERNEL);
	if (*id < 0)
		return ERR_PTR(-ENOMEM);

	list_del(&req->link);
	req->in_queue = false;

	return req;
}

static int supp_check_recv_params(size_t num_params, struct tee_param *params,
				  size_t *num_meta)
{
	size_t n;

	if (!num_params)
		return -EINVAL;

	/*
	 * If there are memrefs we need to decrease those as they were
	 * increased earlier and we'll even refuse to accept any below.
	 */
	for (n = 0; n < num_params; n++)
		if (tee_param_is_memref(params + n) && params[n].u.memref.shm)
			tee_shm_put(params[n].u.memref.shm);

	/*
	 * We only expect parameters as TEE_IOCTL_PARAM_ATTR_TYPE_NONE with
	 * or without the TEE_IOCTL_PARAM_ATTR_META bit set.
	 */
	for (n = 0; n < num_params; n++)
		if (params[n].attr &&
		    params[n].attr != TEE_IOCTL_PARAM_ATTR_META)
			return -EINVAL;

	/* At most we'll need one meta parameter so no need to check for more */
	if (params->attr == TEE_IOCTL_PARAM_ATTR_META)
		*num_meta = 1;
	else
		*num_meta = 0;

	return 0;
}
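
/*
 * Illustrative note (an assumption about the expected layout, derived
 * from the checks above): a receive buffer with room for three request
 * parameters plus an optional meta parameter would be set up like this
 * by the supplicant before calling in:
 *
 *	param[0].attr = TEE_IOCTL_PARAM_ATTR_META;	// request id slot,
 *							// or ..._TYPE_NONE
 *	param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
 *	param[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
 *	param[3].attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
 *
 * Any other attribute value makes supp_check_recv_params() fail with
 * -EINVAL, and only the first parameter is treated as a meta parameter.
 */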

/**
 * optee_supp_recv() - receive request for supplicant
 * @ctx:	context receiving the request
 * @func:	requested function in supplicant
 * @num_params:	number of elements allocated in @param, updated with number
 *		of used elements
 * @param:	space for parameters for @func
 *
 * Returns 0 on success or <0 on failure
 */
int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
		    struct tee_param *param)
{
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct optee_supp *supp = &optee->supp;
	struct optee_supp_req *req = NULL;
	int id;
	size_t num_meta;
	int rc;

	rc = supp_check_recv_params(*num_params, param, &num_meta);
	if (rc)
		return rc;

	while (true) {
		mutex_lock(&supp->mutex);
		req = supp_pop_entry(supp, *num_params - num_meta, &id);
		mutex_unlock(&supp->mutex);

		if (req) {
			if (IS_ERR(req))
				return PTR_ERR(req);
			break;
		}

		/*
		 * If we didn't get a request we'll block in
		 * wait_for_completion() to avoid needless spinning.
		 *
		 * This is where the supplicant will be hanging most of
		 * the time; let's make this interruptible so we can
		 * easily restart the supplicant if needed.
		 */
		if (wait_for_completion_interruptible(&supp->reqs_c))
			return -ERESTARTSYS;
	}

	if (num_meta) {
		/*
		 * tee-supplicant supports meta parameters -> requests can
		 * be processed asynchronously.
		 */
		param->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
			      TEE_IOCTL_PARAM_ATTR_META;
		param->u.value.a = id;
		param->u.value.b = 0;
		param->u.value.c = 0;
	} else {
		mutex_lock(&supp->mutex);
		supp->req_id = id;
		mutex_unlock(&supp->mutex);
	}

	*func = req->func;
	*num_params = req->num_params + num_meta;
	memcpy(param + num_meta, req->param,
	       sizeof(struct tee_param) * req->num_params);

	return 0;
}
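
/*
 * Illustrative sketch, assuming the generic TEE uapi in <linux/tee.h>
 * (struct names and ioctl numbers below come from that header, the rest
 * is hypothetical user-space code): the supplicant reaches
 * optee_supp_recv() through the TEE_IOC_SUPPL_RECV ioctl.
 *
 *	struct tee_iocl_supp_recv_arg *arg = buf;	// params follow arg
 *	struct tee_ioctl_buf_data data = {
 *		.buf_ptr = (uintptr_t)buf,
 *		.buf_len = buf_len,
 *	};
 *
 *	arg->num_params = num_params;	// room for meta + request params
 *	if (ioctl(fd, TEE_IOC_SUPPL_RECV, &data))
 *		err(1, "TEE_IOC_SUPPL_RECV");
 *	// On return arg->func holds the requested function and the
 *	// trailing struct tee_ioctl_param array holds the request id
 *	// (meta parameter) followed by the request parameters.
 */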

static struct optee_supp_req *supp_pop_req(struct optee_supp *supp,
					   size_t num_params,
					   struct tee_param *param,
					   size_t *num_meta)
{
	struct optee_supp_req *req;
	int id;
	size_t nm;
	const u32 attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT |
			 TEE_IOCTL_PARAM_ATTR_META;

	if (!num_params)
		return ERR_PTR(-EINVAL);

	if (supp->req_id == -1) {
		if (param->attr != attr)
			return ERR_PTR(-EINVAL);
		id = param->u.value.a;
		nm = 1;
	} else {
		id = supp->req_id;
		nm = 0;
	}

	req = idr_find(&supp->idr, id);
	if (!req)
		return ERR_PTR(-ENOENT);

	if ((num_params - nm) != req->num_params)
		return ERR_PTR(-EINVAL);

	idr_remove(&supp->idr, id);
	supp->req_id = -1;
	*num_meta = nm;

	return req;
}

/**
 * optee_supp_send() - send result of request from supplicant
 * @ctx:	context sending result
 * @ret:	return value of request
 * @num_params:	number of parameters returned
 * @param:	returned parameters
 *
 * Returns 0 on success or <0 on failure.
 */
int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
		    struct tee_param *param)
{
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct optee_supp *supp = &optee->supp;
	struct optee_supp_req *req;
	size_t n;
	size_t num_meta;

	mutex_lock(&supp->mutex);
	req = supp_pop_req(supp, num_params, param, &num_meta);
	mutex_unlock(&supp->mutex);

	if (IS_ERR(req)) {
		/* Something is wrong, let supplicant restart. */
		return PTR_ERR(req);
	}

	/* Update out and in/out parameters */
	for (n = 0; n < req->num_params; n++) {
		struct tee_param *p = req->param + n;

		switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			p->u.value.a = param[n + num_meta].u.value.a;
			p->u.value.b = param[n + num_meta].u.value.b;
			p->u.value.c = param[n + num_meta].u.value.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			p->u.memref.size = param[n + num_meta].u.memref.size;
			break;
		default:
			break;
		}
	}
	req->ret = ret;

	/* Let the requesting thread continue */
	complete(&req->c);

	return 0;
}
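
/*
 * Illustrative sketch, assuming the generic TEE uapi in <linux/tee.h>
 * (the surrounding user-space code is hypothetical): the supplicant
 * returns its result through the TEE_IOC_SUPPL_SEND ioctl, which lands
 * in optee_supp_send(). With meta parameters in use, the first returned
 * parameter must echo back the request id from the matching receive so
 * that the right waiting thread is completed.
 *
 *	struct tee_iocl_supp_send_arg *arg = buf;	// params follow arg
 *	struct tee_ioctl_buf_data data = {
 *		.buf_ptr = (uintptr_t)buf,
 *		.buf_len = buf_len,
 *	};
 *
 *	arg->ret = 0;			// TEEC_SUCCESS, passed to secure world
 *	arg->num_params = num_params;	// meta (request id) + out params
 *	if (ioctl(fd, TEE_IOC_SUPPL_SEND, &data))
 *		err(1, "TEE_IOC_SUPPL_SEND");
 */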