xref: /openbmc/linux/drivers/tee/optee/call.c (revision ef2b56df)
/*
 * Copyright (c) 2015, Linaro Limited
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/arm-smccc.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "optee_private.h"
#include "optee_smc.h"

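/*
 * The call queue below serializes access to the limited number of
 * threads in secure world. A caller registers an optee_call_waiter with
 * optee_cq_wait_init() before issuing the SMC, parks in
 * optee_cq_wait_for_completion() whenever secure world reports
 * OPTEE_SMC_RETURN_ETHREAD_LIMIT, and releases its slot (waking one
 * waiter, if any) with optee_cq_wait_final() when done.
 */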
struct optee_call_waiter {
	struct list_head list_node;
	struct completion c;
};

static void optee_cq_wait_init(struct optee_call_queue *cq,
			       struct optee_call_waiter *w)
{
	/*
	 * We're preparing to make a call to secure world. In case we can't
	 * allocate a thread in secure world we'll end up waiting in
	 * optee_cq_wait_for_completion().
	 *
	 * Normally if there's no contention in secure world the call will
	 * complete and we can cleanup directly with optee_cq_wait_final().
	 */
	mutex_lock(&cq->mutex);

	/*
	 * We add ourselves to the queue, but we don't wait. This
	 * guarantees that we don't lose a completion if secure world
	 * returns busy and another thread just exited and tries to
	 * complete someone.
	 */
	init_completion(&w->c);
	list_add_tail(&w->list_node, &cq->waiters);

	mutex_unlock(&cq->mutex);
}

static void optee_cq_wait_for_completion(struct optee_call_queue *cq,
					 struct optee_call_waiter *w)
{
	wait_for_completion(&w->c);

	mutex_lock(&cq->mutex);

	/* Move to end of list to get out of the way for other waiters */
	list_del(&w->list_node);
	reinit_completion(&w->c);
	list_add_tail(&w->list_node, &cq->waiters);

	mutex_unlock(&cq->mutex);
}

static void optee_cq_complete_one(struct optee_call_queue *cq)
{
	struct optee_call_waiter *w;

	list_for_each_entry(w, &cq->waiters, list_node) {
		if (!completion_done(&w->c)) {
			complete(&w->c);
			break;
		}
	}
}

static void optee_cq_wait_final(struct optee_call_queue *cq,
				struct optee_call_waiter *w)
{
	/*
	 * We're done with the call to secure world. The thread in secure
	 * world that was used for this call is now available for some
	 * other task to use.
	 */
	mutex_lock(&cq->mutex);

	/* Get out of the list */
	list_del(&w->list_node);

	/* Wake up one waiting task, if any */
	optee_cq_complete_one(cq);

	/*
	 * If we're completed we've got a completion from another task that
	 * was just done with its call to secure world. Since yet another
	 * thread now is available in secure world, wake up another waiting
	 * task, if there is one.
	 */
	if (completion_done(&w->c))
		optee_cq_complete_one(cq);

	mutex_unlock(&cq->mutex);
}

/* Requires ctxdata->mutex to be held */
static struct optee_session *find_session(struct optee_context_data *ctxdata,
					  u32 session_id)
{
	struct optee_session *sess;

	list_for_each_entry(sess, &ctxdata->sess_list, list_node)
		if (sess->session_id == session_id)
			return sess;

	return NULL;
}

/**
 * optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world
 * @ctx:	calling context
 * @parg:	physical address of message to pass to secure world
 *
 * Does an SMC to OP-TEE in secure world and handles any resulting
 * Remote Procedure Calls (RPC) from OP-TEE.
 *
 * Returns the return code from secure world; 0 means OK
 */
u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_call_waiter w;
	struct optee_rpc_param param = { };
	u32 ret;

	param.a0 = OPTEE_SMC_CALL_WITH_ARG;
	reg_pair_from_64(&param.a1, &param.a2, parg);
	/* Initialize waiter */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		optee->invoke_fn(param.a0, param.a1, param.a2, param.a3,
				 param.a4, param.a5, param.a6, param.a7,
				 &res);

		if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
			/*
			 * Out of threads in secure world, wait for a thread
			 * to become available.
			 */
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
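			/*
			 * Secure world has suspended the call and is
			 * requesting a service from normal world. Forward
			 * the RPC arguments, let optee_handle_rpc() serve
			 * the request and then resume the call with the
			 * updated parameters.
			 */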
			param.a0 = res.a0;
			param.a1 = res.a1;
			param.a2 = res.a2;
			param.a3 = res.a3;
			optee_handle_rpc(ctx, &param);
		} else {
			ret = res.a0;
			break;
		}
	}

	/*
	 * We're done with our thread in secure world. If there are any
	 * thread waiters, wake up one.
	 */
	optee_cq_wait_final(&optee->call_queue, &w);

	return ret;
}

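/*
 * get_msg_arg() - Allocate shared memory holding an OP-TEE message
 * @ctx:	calling context
 * @num_params:	number of parameters the message should hold
 * @msg_arg:	returned virtual address of the message
 * @msg_parg:	returned physical address of the message
 *
 * Allocates and clears a struct optee_msg_arg in shared memory large
 * enough for @num_params parameters. Returns the backing tee_shm on
 * success or an ERR_PTR on failure.
 */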
static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params,
				   struct optee_msg_arg **msg_arg,
				   phys_addr_t *msg_parg)
{
	int rc;
	struct tee_shm *shm;
	struct optee_msg_arg *ma;

	shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params),
			    TEE_SHM_MAPPED);
	if (IS_ERR(shm))
		return shm;

	ma = tee_shm_get_va(shm, 0);
	if (IS_ERR(ma)) {
		rc = PTR_ERR(ma);
		goto out;
	}

	rc = tee_shm_get_pa(shm, 0, msg_parg);
	if (rc)
		goto out;

	memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
	ma->num_params = num_params;
	*msg_arg = ma;
out:
	if (rc) {
		tee_shm_free(shm);
		return ERR_PTR(rc);
	}

	return shm;
}

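/*
 * optee_open_session() - Open a session towards a Trusted Application
 * @ctx:	calling context
 * @arg:	open session arguments
 * @param:	parameters passed to the Trusted Application
 *
 * Builds an OPTEE_MSG_CMD_OPEN_SESSION message, with the TA UUID and
 * client identity as meta parameters, and passes it to secure world. On
 * success the new session is added to the context's session list.
 */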
int optee_open_session(struct tee_context *ctx,
		       struct tee_ioctl_open_session_arg *arg,
		       struct tee_param *param)
{
	struct optee_context_data *ctxdata = ctx->data;
	int rc;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess = NULL;

	/* +2 for the meta parameters added below */
	shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
	msg_arg->cancel_id = arg->cancel_id;

	/*
	 * Initialize and add the meta parameters needed when opening a
	 * session.
	 */
	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
				  OPTEE_MSG_ATTR_META;
	msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
				  OPTEE_MSG_ATTR_META;
	memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
	memcpy(&msg_arg->params[1].u.value, arg->clnt_uuid,
	       sizeof(arg->clnt_uuid));
	msg_arg->params[1].u.value.c = arg->clnt_login;

	rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
	if (rc)
		goto out;

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		rc = -ENOMEM;
		goto out;
	}

	if (optee_do_call_with_arg(ctx, msg_parg)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	if (msg_arg->ret == TEEC_SUCCESS) {
		/* A new session has been created, add it to the list. */
		sess->session_id = msg_arg->session;
		mutex_lock(&ctxdata->mutex);
		list_add(&sess->list_node, &ctxdata->sess_list);
		mutex_unlock(&ctxdata->mutex);
	} else {
		kfree(sess);
	}

	if (optee_from_msg_param(param, arg->num_params, msg_arg->params + 2)) {
		arg->ret = TEEC_ERROR_COMMUNICATION;
		arg->ret_origin = TEEC_ORIGIN_COMMS;
		/* Close session again to avoid leakage */
		optee_close_session(ctx, msg_arg->session);
	} else {
		arg->session = msg_arg->session;
		arg->ret = msg_arg->ret;
		arg->ret_origin = msg_arg->ret_origin;
	}
out:
	tee_shm_free(shm);

	return rc;
}

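/*
 * optee_close_session() - Close a session towards a Trusted Application
 * @ctx:	calling context
 * @session:	id of the session to close
 *
 * Removes the session from the context's session list and asks secure
 * world to close it. Returns -EINVAL if the session id is unknown.
 */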
int optee_close_session(struct tee_context *ctx, u32 session)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess;

	/* Check that the session is valid and remove it from the list */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, session);
	if (sess)
		list_del(&sess->list_node);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;
	kfree(sess);

	shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
	msg_arg->session = session;
	optee_do_call_with_arg(ctx, msg_parg);

	tee_shm_free(shm);
	return 0;
}

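/*
 * optee_invoke_func() - Invoke a function in a Trusted Application
 * @ctx:	calling context
 * @arg:	invoke arguments, including session id and function to invoke
 * @param:	parameters passed to and updated by the Trusted Application
 *
 * Builds an OPTEE_MSG_CMD_INVOKE_COMMAND message from @arg and @param,
 * passes it to secure world and copies the results back into @param and
 * @arg.
 */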
int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
		      struct tee_param *param)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess;
	int rc;

	/* Check that the session is valid */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, arg->session);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;

	shm = get_msg_arg(ctx, arg->num_params, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);
	msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
	msg_arg->func = arg->func;
	msg_arg->session = arg->session;
	msg_arg->cancel_id = arg->cancel_id;

	rc = optee_to_msg_param(msg_arg->params, arg->num_params, param);
	if (rc)
		goto out;

	if (optee_do_call_with_arg(ctx, msg_parg)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	if (optee_from_msg_param(param, arg->num_params, msg_arg->params)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	arg->ret = msg_arg->ret;
	arg->ret_origin = msg_arg->ret_origin;
out:
	tee_shm_free(shm);
	return rc;
}

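/*
 * optee_cancel_req() - Request cancellation of an ongoing operation
 * @ctx:	calling context
 * @cancel_id:	cancellation id of the operation to cancel
 * @session:	id of the session the operation belongs to
 *
 * Sends an OPTEE_MSG_CMD_CANCEL message to secure world. Returns -EINVAL
 * if the session id is unknown.
 */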
int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess;

	/* Check that the session is valid */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, session);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;

	shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
	msg_arg->session = session;
	msg_arg->cancel_id = cancel_id;
	optee_do_call_with_arg(ctx, msg_parg);

	tee_shm_free(shm);
	return 0;
}

/**
 * optee_enable_shm_cache() - Enables caching of some shared memory
 *			      allocations in OP-TEE
 * @optee:	main service struct
 */
void optee_enable_shm_cache(struct optee *optee)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		optee->invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
				 0, &res);
		if (res.a0 == OPTEE_SMC_RETURN_OK)
			break;
		optee_cq_wait_for_completion(&optee->call_queue, &w);
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}

/**
 * optee_disable_shm_cache() - Disables caching of some shared memory
 *			       allocations in OP-TEE
 * @optee:	main service struct
 */
void optee_disable_shm_cache(struct optee *optee)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
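		/*
		 * Each successful call returns the address of one cached
		 * shared memory object, which is freed here. The loop
		 * continues until secure world reports that no cached
		 * objects remain.
		 */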
		union {
			struct arm_smccc_res smccc;
			struct optee_smc_disable_shm_cache_result result;
		} res;

		optee->invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
				 0, &res.smccc);
		if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
			break; /* All shms freed */
		if (res.result.status == OPTEE_SMC_RETURN_OK) {
			struct tee_shm *shm;

			shm = reg_pair_to_ptr(res.result.shm_upper32,
					      res.result.shm_lower32);
			tee_shm_free(shm);
		} else {
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		}
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}