/*
 * Copyright (c) 2015-2016, Linaro Limited
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include "optee_private.h"
#include "optee_smc.h"

struct wq_entry {
	struct list_head link;
	struct completion c;
	u32 key;
};

void optee_wait_queue_init(struct optee_wait_queue *priv)
{
	mutex_init(&priv->mu);
	INIT_LIST_HEAD(&priv->db);
}

void optee_wait_queue_exit(struct optee_wait_queue *priv)
{
	mutex_destroy(&priv->mu);
}

static void handle_rpc_func_cmd_get_time(struct optee_msg_arg *arg)
{
	struct timespec64 ts;

	if (arg->num_params != 1)
		goto bad;
	if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
			OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT)
		goto bad;

	ktime_get_real_ts64(&ts);
	arg->params[0].u.value.a = ts.tv_sec;
	arg->params[0].u.value.b = ts.tv_nsec;

	arg->ret = TEEC_SUCCESS;
	return;
bad:
	arg->ret = TEEC_ERROR_BAD_PARAMETERS;
}

/* Find the wait queue entry matching @key, allocating it if not present */
static struct wq_entry *wq_entry_get(struct optee_wait_queue *wq, u32 key)
{
	struct wq_entry *w;

	mutex_lock(&wq->mu);

	list_for_each_entry(w, &wq->db, link)
		if (w->key == key)
			goto out;

	w = kmalloc(sizeof(*w), GFP_KERNEL);
	if (w) {
		init_completion(&w->c);
		w->key = key;
		list_add_tail(&w->link, &wq->db);
	}
out:
	mutex_unlock(&wq->mu);
	return w;
}

static void wq_sleep(struct optee_wait_queue *wq, u32 key)
{
	struct wq_entry *w = wq_entry_get(wq, key);

	if (w) {
		wait_for_completion(&w->c);
		mutex_lock(&wq->mu);
		list_del(&w->link);
		mutex_unlock(&wq->mu);
		kfree(w);
	}
}

static void wq_wakeup(struct optee_wait_queue *wq, u32 key)
{
	struct wq_entry *w = wq_entry_get(wq, key);

	if (w)
		complete(&w->c);
}

static void handle_rpc_func_cmd_wq(struct optee *optee,
				   struct optee_msg_arg *arg)
{
	if (arg->num_params != 1)
		goto bad;

	if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
			OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
		goto bad;

	switch (arg->params[0].u.value.a) {
	case OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP:
		wq_sleep(&optee->wait_queue, arg->params[0].u.value.b);
		break;
	case OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP:
		wq_wakeup(&optee->wait_queue, arg->params[0].u.value.b);
		break;
	default:
		goto bad;
	}

	arg->ret = TEEC_SUCCESS;
	return;
bad:
	arg->ret = TEEC_ERROR_BAD_PARAMETERS;
}

static void handle_rpc_func_cmd_wait(struct optee_msg_arg *arg)
{
	u32 msec_to_wait;

	if (arg->num_params != 1)
		goto bad;

	if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
			OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
		goto bad;

	msec_to_wait = arg->params[0].u.value.a;

	/* Go to interruptible sleep */
	msleep_interruptible(msec_to_wait);

	arg->ret = TEEC_SUCCESS;
	return;
bad:
	arg->ret = TEEC_ERROR_BAD_PARAMETERS;
}
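/*
 * RPC commands without a dedicated handler in this file are forwarded to
 * the user space tee-supplicant: the message parameters are converted to
 * struct tee_param for the request and converted back for the reply.
 */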
static void handle_rpc_supp_cmd(struct tee_context *ctx,
				struct optee_msg_arg *arg)
{
	struct tee_param *params;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	params = kmalloc_array(arg->num_params, sizeof(struct tee_param),
			       GFP_KERNEL);
	if (!params) {
		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
		return;
	}

	if (optee_from_msg_param(params, arg->num_params, arg->params)) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		goto out;
	}

	arg->ret = optee_supp_thrd_req(ctx, arg->cmd, arg->num_params, params);

	if (optee_to_msg_param(arg->params, arg->num_params, params))
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
out:
	kfree(params);
}

static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
{
	u32 ret;
	struct tee_param param;
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct tee_shm *shm;

	param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
	param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL;
	param.u.value.b = sz;
	param.u.value.c = 0;

	ret = optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_ALLOC, 1, &param);
	if (ret)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&optee->supp.mutex);
	/* Increases count as secure world doesn't have a reference */
	shm = tee_shm_get_from_id(optee->supp.ctx, param.u.value.c);
	mutex_unlock(&optee->supp.mutex);
	return shm;
}
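/*
 * Allocate shared memory on request from secure world, either from the
 * kernel or through the supplicant depending on the requested type. A
 * registered (non-contiguous) buffer is described to secure world with a
 * list of physical pages kept in the call context until the call is
 * finalized.
 */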
static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
					  struct optee_msg_arg *arg,
					  struct optee_call_ctx *call_ctx)
{
	phys_addr_t pa;
	struct tee_shm *shm;
	size_t sz;
	size_t n;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	if (!arg->num_params ||
	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	for (n = 1; n < arg->num_params; n++) {
		if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
			arg->ret = TEEC_ERROR_BAD_PARAMETERS;
			return;
		}
	}

	sz = arg->params[0].u.value.b;
	switch (arg->params[0].u.value.a) {
	case OPTEE_MSG_RPC_SHM_TYPE_APPL:
		shm = cmd_alloc_suppl(ctx, sz);
		break;
	case OPTEE_MSG_RPC_SHM_TYPE_KERNEL:
		shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED);
		break;
	default:
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	if (IS_ERR(shm)) {
		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
		return;
	}

	if (tee_shm_get_pa(shm, 0, &pa)) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		goto bad;
	}

	sz = tee_shm_get_size(shm);

	if (tee_shm_is_registered(shm)) {
		struct page **pages;
		u64 *pages_list;
		size_t page_num;

		pages = tee_shm_get_pages(shm, &page_num);
		if (!pages || !page_num) {
			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
			goto bad;
		}

		pages_list = optee_allocate_pages_list(page_num);
		if (!pages_list) {
			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
			goto bad;
		}

		call_ctx->pages_list = pages_list;
		call_ctx->num_entries = page_num;

		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				      OPTEE_MSG_ATTR_NONCONTIG;
		/*
		 * In the least bits of u.tmem.buf_ptr we store buffer offset
		 * from 4k page, as described in OP-TEE ABI.
		 */
		arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
			(tee_shm_get_page_offset(shm) &
			 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
		arg->params[0].u.tmem.size = tee_shm_get_size(shm);
		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;

		optee_fill_pages_list(pages_list, pages, page_num,
				      tee_shm_get_page_offset(shm));
	} else {
		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
		arg->params[0].u.tmem.buf_ptr = pa;
		arg->params[0].u.tmem.size = sz;
		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
	}

	arg->ret = TEEC_SUCCESS;
	return;
bad:
	tee_shm_free(shm);
}

static void cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm)
{
	struct tee_param param;

	param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
	param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL;
	param.u.value.b = tee_shm_get_id(shm);
	param.u.value.c = 0;

	/*
	 * Matches the tee_shm_get_from_id() in cmd_alloc_suppl() as secure
	 * world has released its reference.
	 *
	 * It's better to do this before sending the request to the
	 * supplicant as we'd like to let the process doing the initial
	 * allocation release the last reference too, in order to avoid
	 * stacking many pending fput() on the client process. This could
	 * otherwise happen if secure world does many allocate and free in
	 * a single invoke.
	 */
	tee_shm_put(shm);

	optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_FREE, 1, &param);
}

static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
					 struct optee_msg_arg *arg)
{
	struct tee_shm *shm;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	if (arg->num_params != 1 ||
	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
	switch (arg->params[0].u.value.a) {
	case OPTEE_MSG_RPC_SHM_TYPE_APPL:
		cmd_free_suppl(ctx, shm);
		break;
	case OPTEE_MSG_RPC_SHM_TYPE_KERNEL:
		tee_shm_free(shm);
		break;
	default:
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}
	arg->ret = TEEC_SUCCESS;
}

static void free_pages_list(struct optee_call_ctx *call_ctx)
{
	if (call_ctx->pages_list) {
		optee_free_pages_list(call_ctx->pages_list,
				      call_ctx->num_entries);
		call_ctx->pages_list = NULL;
		call_ctx->num_entries = 0;
	}
}

void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
{
	free_pages_list(call_ctx);
}
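/*
 * Dispatch an OPTEE_MSG_RPC_CMD_* request carried in the shared memory
 * argument struct.
 */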
static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
				struct tee_shm *shm,
				struct optee_call_ctx *call_ctx)
{
	struct optee_msg_arg *arg;

	arg = tee_shm_get_va(shm, 0);
	if (IS_ERR(arg)) {
		pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm);
		return;
	}

	switch (arg->cmd) {
	case OPTEE_MSG_RPC_CMD_GET_TIME:
		handle_rpc_func_cmd_get_time(arg);
		break;
	case OPTEE_MSG_RPC_CMD_WAIT_QUEUE:
		handle_rpc_func_cmd_wq(optee, arg);
		break;
	case OPTEE_MSG_RPC_CMD_SUSPEND:
		handle_rpc_func_cmd_wait(arg);
		break;
	case OPTEE_MSG_RPC_CMD_SHM_ALLOC:
		free_pages_list(call_ctx);
		handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
		break;
	case OPTEE_MSG_RPC_CMD_SHM_FREE:
		handle_rpc_func_cmd_shm_free(ctx, arg);
		break;
	default:
		handle_rpc_supp_cmd(ctx, arg);
	}
}

/**
 * optee_handle_rpc() - handle RPC from secure world
 * @ctx:	context doing the RPC
 * @param:	value of registers for the RPC
 * @call_ctx:	call context. Preserved during one OP-TEE invocation
 *
 * Result of RPC is written back into @param.
 */
void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
		      struct optee_call_ctx *call_ctx)
{
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct tee_shm *shm;
	phys_addr_t pa;

	switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
	case OPTEE_SMC_RPC_FUNC_ALLOC:
		shm = tee_shm_alloc(ctx, param->a1, TEE_SHM_MAPPED);
		if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
			reg_pair_from_64(&param->a1, &param->a2, pa);
			reg_pair_from_64(&param->a4, &param->a5,
					 (unsigned long)shm);
		} else {
			param->a1 = 0;
			param->a2 = 0;
			param->a4 = 0;
			param->a5 = 0;
		}
		break;
	case OPTEE_SMC_RPC_FUNC_FREE:
		shm = reg_pair_to_ptr(param->a1, param->a2);
		tee_shm_free(shm);
		break;
	case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
		/*
		 * A foreign interrupt was raised while secure world was
		 * executing. Since foreign interrupts are handled in Linux,
		 * a dummy RPC is performed to let Linux take the interrupt
		 * through the normal vector.
		 */
		break;
	case OPTEE_SMC_RPC_FUNC_CMD:
		shm = reg_pair_to_ptr(param->a1, param->a2);
		handle_rpc_func_cmd(ctx, optee, shm, call_ctx);
		break;
	default:
		pr_warn("Unknown RPC func 0x%x\n",
			(u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
		break;
	}

	param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
}