xref: /openbmc/linux/drivers/tee/optee/rpc.c (revision 97fb5e8d)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2016, Linaro Limited
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include "optee_private.h"
#include "optee_smc.h"

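/*
 * One waiter in the OP-TEE wait queue. The key is the value secure world
 * passes with the wait queue SLEEP/WAKEUP RPCs so both sides refer to the
 * same entry.
 */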
struct wq_entry {
	struct list_head link;
	struct completion c;
	u32 key;
};

void optee_wait_queue_init(struct optee_wait_queue *priv)
{
	mutex_init(&priv->mu);
	INIT_LIST_HEAD(&priv->db);
}

void optee_wait_queue_exit(struct optee_wait_queue *priv)
{
	mutex_destroy(&priv->mu);
}

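/*
 * OPTEE_MSG_RPC_CMD_GET_TIME: return the current REE wall-clock time as
 * seconds (value.a) and nanoseconds (value.b) in a single output parameter.
 */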
static void handle_rpc_func_cmd_get_time(struct optee_msg_arg *arg)
{
	struct timespec64 ts;

	if (arg->num_params != 1)
		goto bad;
	if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
			OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT)
		goto bad;

	ktime_get_real_ts64(&ts);
	arg->params[0].u.value.a = ts.tv_sec;
	arg->params[0].u.value.b = ts.tv_nsec;

	arg->ret = TEEC_SUCCESS;
	return;
bad:
	arg->ret = TEEC_ERROR_BAD_PARAMETERS;
}

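/*
 * Find the wait queue entry matching @key, or allocate and enqueue a new
 * one if none exists. Returns NULL on allocation failure.
 */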
static struct wq_entry *wq_entry_get(struct optee_wait_queue *wq, u32 key)
{
	struct wq_entry *w;

	mutex_lock(&wq->mu);

	list_for_each_entry(w, &wq->db, link)
		if (w->key == key)
			goto out;

	w = kmalloc(sizeof(*w), GFP_KERNEL);
	if (w) {
		init_completion(&w->c);
		w->key = key;
		list_add_tail(&w->link, &wq->db);
	}
out:
	mutex_unlock(&wq->mu);
	return w;
}

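/*
 * Block until the entry for @key is completed by wq_wakeup(), then unlink
 * and free the entry.
 */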
static void wq_sleep(struct optee_wait_queue *wq, u32 key)
{
	struct wq_entry *w = wq_entry_get(wq, key);

	if (w) {
		wait_for_completion(&w->c);
		mutex_lock(&wq->mu);
		list_del(&w->link);
		mutex_unlock(&wq->mu);
		kfree(w);
	}
}

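/*
 * Complete the waiter registered for @key. The entry is created if the
 * wakeup arrives before the corresponding sleep, so the wakeup isn't lost.
 */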
static void wq_wakeup(struct optee_wait_queue *wq, u32 key)
{
	struct wq_entry *w = wq_entry_get(wq, key);

	if (w)
		complete(&w->c);
}

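/*
 * OPTEE_MSG_RPC_CMD_WAIT_QUEUE: sleep on or wake up the wait queue entry
 * identified by value.b, depending on the sub-command in value.a.
 */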
static void handle_rpc_func_cmd_wq(struct optee *optee,
				   struct optee_msg_arg *arg)
{
	if (arg->num_params != 1)
		goto bad;

	if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
			OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
		goto bad;

	switch (arg->params[0].u.value.a) {
	case OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP:
		wq_sleep(&optee->wait_queue, arg->params[0].u.value.b);
		break;
	case OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP:
		wq_wakeup(&optee->wait_queue, arg->params[0].u.value.b);
		break;
	default:
		goto bad;
	}

	arg->ret = TEEC_SUCCESS;
	return;
bad:
	arg->ret = TEEC_ERROR_BAD_PARAMETERS;
}

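/*
 * OPTEE_MSG_RPC_CMD_SUSPEND: suspend the calling thread for the number of
 * milliseconds given in value.a.
 */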
static void handle_rpc_func_cmd_wait(struct optee_msg_arg *arg)
{
	u32 msec_to_wait;

	if (arg->num_params != 1)
		goto bad;

	if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
			OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
		goto bad;

	msec_to_wait = arg->params[0].u.value.a;

	/* Go to interruptible sleep */
	msleep_interruptible(msec_to_wait);

	arg->ret = TEEC_SUCCESS;
	return;
bad:
	arg->ret = TEEC_ERROR_BAD_PARAMETERS;
}

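/*
 * Forward an RPC command that isn't handled in the kernel to tee-supplicant
 * in user space, translating parameters to and from the message format.
 */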
static void handle_rpc_supp_cmd(struct tee_context *ctx,
				struct optee_msg_arg *arg)
{
	struct tee_param *params;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	params = kmalloc_array(arg->num_params, sizeof(struct tee_param),
			       GFP_KERNEL);
	if (!params) {
		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
		return;
	}

	if (optee_from_msg_param(params, arg->num_params, arg->params)) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		goto out;
	}

	arg->ret = optee_supp_thrd_req(ctx, arg->cmd, arg->num_params, params);

	if (optee_to_msg_param(arg->params, arg->num_params, params))
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
out:
	kfree(params);
}

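/*
 * Ask tee-supplicant to allocate @sz bytes of shared memory. Returns the
 * matching tee_shm, or an ERR_PTR on failure.
 */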
static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
{
	u32 ret;
	struct tee_param param;
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct tee_shm *shm;

	param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
	param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL;
	param.u.value.b = sz;
	param.u.value.c = 0;

	ret = optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_ALLOC, 1, &param);
	if (ret)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&optee->supp.mutex);
	/* Increases count as secure world doesn't have a reference */
	shm = tee_shm_get_from_id(optee->supp.ctx, param.u.value.c);
	mutex_unlock(&optee->supp.mutex);
	return shm;
}

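/*
 * OPTEE_MSG_RPC_CMD_SHM_ALLOC: allocate shared memory on behalf of secure
 * world, either in the kernel or via tee-supplicant, and describe it back
 * as a physically contiguous buffer or as a non-contiguous page list.
 */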
static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
					  struct optee_msg_arg *arg,
					  struct optee_call_ctx *call_ctx)
{
	phys_addr_t pa;
	struct tee_shm *shm;
	size_t sz;
	size_t n;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	if (!arg->num_params ||
	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	for (n = 1; n < arg->num_params; n++) {
		if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
			arg->ret = TEEC_ERROR_BAD_PARAMETERS;
			return;
		}
	}

	sz = arg->params[0].u.value.b;
	switch (arg->params[0].u.value.a) {
	case OPTEE_MSG_RPC_SHM_TYPE_APPL:
		shm = cmd_alloc_suppl(ctx, sz);
		break;
	case OPTEE_MSG_RPC_SHM_TYPE_KERNEL:
		shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED);
		break;
	default:
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	if (IS_ERR(shm)) {
		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
		return;
	}

	if (tee_shm_get_pa(shm, 0, &pa)) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		goto bad;
	}

	sz = tee_shm_get_size(shm);

	if (tee_shm_is_registered(shm)) {
		struct page **pages;
		u64 *pages_list;
		size_t page_num;

		pages = tee_shm_get_pages(shm, &page_num);
		if (!pages || !page_num) {
			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
			goto bad;
		}

		pages_list = optee_allocate_pages_list(page_num);
		if (!pages_list) {
			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
			goto bad;
		}

		call_ctx->pages_list = pages_list;
		call_ctx->num_entries = page_num;

		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				      OPTEE_MSG_ATTR_NONCONTIG;
		/*
		 * The least significant bits of u.tmem.buf_ptr hold the
		 * buffer offset within the 4k page, as described in the
		 * OP-TEE ABI.
		 */
		arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
			(tee_shm_get_page_offset(shm) &
			 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
		arg->params[0].u.tmem.size = tee_shm_get_size(shm);
		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;

		optee_fill_pages_list(pages_list, pages, page_num,
				      tee_shm_get_page_offset(shm));
	} else {
		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
		arg->params[0].u.tmem.buf_ptr = pa;
		arg->params[0].u.tmem.size = sz;
		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
	}

	arg->ret = TEEC_SUCCESS;
	return;
bad:
	tee_shm_free(shm);
}

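/*
 * Tell tee-supplicant to release the shared memory it allocated in
 * cmd_alloc_suppl() and drop the kernel's reference to it.
 */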
static void cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm)
{
	struct tee_param param;

	param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
	param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL;
	param.u.value.b = tee_shm_get_id(shm);
	param.u.value.c = 0;

	/*
	 * Match the tee_shm_get_from_id() in cmd_alloc_suppl() as secure
	 * world has released its reference.
	 *
	 * It's better to do this before sending the request to the
	 * supplicant, as we'd like the process that did the initial
	 * allocation to also release the last reference, to avoid stacking
	 * many pending fput() calls on the client process. This could
	 * otherwise happen if secure world does many allocations and frees
	 * in a single invocation.
	 */
	tee_shm_put(shm);

	optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_FREE, 1, &param);
}

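/*
 * OPTEE_MSG_RPC_CMD_SHM_FREE: free shared memory previously allocated with
 * OPTEE_MSG_RPC_CMD_SHM_ALLOC, identified by the shm_ref in value.b.
 */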
static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
					 struct optee_msg_arg *arg)
{
	struct tee_shm *shm;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	if (arg->num_params != 1 ||
	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
	switch (arg->params[0].u.value.a) {
	case OPTEE_MSG_RPC_SHM_TYPE_APPL:
		cmd_free_suppl(ctx, shm);
		break;
	case OPTEE_MSG_RPC_SHM_TYPE_KERNEL:
		tee_shm_free(shm);
		break;
	default:
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}
	arg->ret = TEEC_SUCCESS;
}

static void free_pages_list(struct optee_call_ctx *call_ctx)
{
	if (call_ctx->pages_list) {
		optee_free_pages_list(call_ctx->pages_list,
				      call_ctx->num_entries);
		call_ctx->pages_list = NULL;
		call_ctx->num_entries = 0;
	}
}

void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
{
	free_pages_list(call_ctx);
}

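/*
 * Dispatch an OPTEE_SMC_RPC_FUNC_CMD request based on arg->cmd. Commands
 * not handled in the kernel are forwarded to tee-supplicant.
 */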
static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
				struct tee_shm *shm,
				struct optee_call_ctx *call_ctx)
{
	struct optee_msg_arg *arg;

	arg = tee_shm_get_va(shm, 0);
	if (IS_ERR(arg)) {
		pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm);
		return;
	}

	switch (arg->cmd) {
	case OPTEE_MSG_RPC_CMD_GET_TIME:
		handle_rpc_func_cmd_get_time(arg);
		break;
	case OPTEE_MSG_RPC_CMD_WAIT_QUEUE:
		handle_rpc_func_cmd_wq(optee, arg);
		break;
	case OPTEE_MSG_RPC_CMD_SUSPEND:
		handle_rpc_func_cmd_wait(arg);
		break;
	case OPTEE_MSG_RPC_CMD_SHM_ALLOC:
		free_pages_list(call_ctx);
		handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
		break;
	case OPTEE_MSG_RPC_CMD_SHM_FREE:
		handle_rpc_func_cmd_shm_free(ctx, arg);
		break;
	default:
		handle_rpc_supp_cmd(ctx, arg);
	}
}

/**
 * optee_handle_rpc() - handle RPC from secure world
 * @ctx:	context doing the RPC
 * @param:	value of registers for the RPC
 * @call_ctx:	call context. Preserved during one OP-TEE invocation
 *
 * Result of RPC is written back into @param.
 */
void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
		      struct optee_call_ctx *call_ctx)
{
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct tee_shm *shm;
	phys_addr_t pa;

	switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
	case OPTEE_SMC_RPC_FUNC_ALLOC:
		shm = tee_shm_alloc(ctx, param->a1, TEE_SHM_MAPPED);
		if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
			reg_pair_from_64(&param->a1, &param->a2, pa);
			reg_pair_from_64(&param->a4, &param->a5,
					 (unsigned long)shm);
		} else {
			param->a1 = 0;
			param->a2 = 0;
			param->a4 = 0;
			param->a5 = 0;
		}
		break;
	case OPTEE_SMC_RPC_FUNC_FREE:
		shm = reg_pair_to_ptr(param->a1, param->a2);
		tee_shm_free(shm);
		break;
	case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
		/*
		 * A foreign interrupt was raised while secure world was
		 * executing. Since such interrupts are handled in Linux, a
		 * dummy RPC is performed to let Linux take the interrupt
		 * through the normal vector.
		 */
		break;
	case OPTEE_SMC_RPC_FUNC_CMD:
		shm = reg_pair_to_ptr(param->a1, param->a2);
		handle_rpc_func_cmd(ctx, optee, shm, call_ctx);
		break;
	default:
		pr_warn("Unknown RPC func 0x%x\n",
			(u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
		break;
	}

	param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
}