// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Linaro Limited
 */
#include <linux/arm-smccc.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "optee_private.h"
#include "optee_smc.h"

struct optee_call_waiter {
	struct list_head list_node;
	struct completion c;
};

static void optee_cq_wait_init(struct optee_call_queue *cq,
			       struct optee_call_waiter *w)
{
	/*
	 * We're preparing to make a call to secure world. In case we can't
	 * allocate a thread in secure world we'll end up waiting in
	 * optee_cq_wait_for_completion().
	 *
	 * Normally if there's no contention in secure world the call will
	 * complete and we can clean up directly with optee_cq_wait_final().
	 */
	mutex_lock(&cq->mutex);

	/*
	 * We add ourselves to the queue, but we don't wait. This
	 * guarantees that we don't lose a completion if secure world
	 * returns busy and another thread just exited and tried to
	 * complete someone.
	 */
	init_completion(&w->c);
	list_add_tail(&w->list_node, &cq->waiters);

	mutex_unlock(&cq->mutex);
}

static void optee_cq_wait_for_completion(struct optee_call_queue *cq,
					 struct optee_call_waiter *w)
{
	wait_for_completion(&w->c);

	mutex_lock(&cq->mutex);

	/* Move to end of list to get out of the way for other waiters */
	list_del(&w->list_node);
	reinit_completion(&w->c);
	list_add_tail(&w->list_node, &cq->waiters);

	mutex_unlock(&cq->mutex);
}

static void optee_cq_complete_one(struct optee_call_queue *cq)
{
	struct optee_call_waiter *w;

	list_for_each_entry(w, &cq->waiters, list_node) {
		if (!completion_done(&w->c)) {
			complete(&w->c);
			break;
		}
	}
}

static void optee_cq_wait_final(struct optee_call_queue *cq,
				struct optee_call_waiter *w)
{
	/*
	 * We're done with the call to secure world. The thread in secure
	 * world that was used for this call is now available for some
	 * other task to use.
	 */
	mutex_lock(&cq->mutex);

	/* Get out of the list */
	list_del(&w->list_node);

	/* Wake up one waiting task, if there is one */
	optee_cq_complete_one(cq);

	/*
	 * If our completion is done we've received a completion from
	 * another task that was just done with its call to secure world.
	 * Since yet another thread now is available in secure world, wake
	 * up another waiting task, if there is one.
	 */
	if (completion_done(&w->c))
		optee_cq_complete_one(cq);

	mutex_unlock(&cq->mutex);
}

/* Requires the filpstate mutex to be held */
static struct optee_session *find_session(struct optee_context_data *ctxdata,
					  u32 session_id)
{
	struct optee_session *sess;

	list_for_each_entry(sess, &ctxdata->sess_list, list_node)
		if (sess->session_id == session_id)
			return sess;

	return NULL;
}
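/*
 * A caller that needs to retry a raw SMC until secure world has a free
 * thread can reuse the waiter API above directly. A minimal sketch of
 * the pattern (illustration only; func_id is a stand-in for a real
 * OPTEE_SMC_* function id, and RPC handling is elided):
 *
 *	struct optee_call_waiter w;
 *	struct arm_smccc_res res;
 *
 *	optee_cq_wait_init(&optee->call_queue, &w);
 *	do {
 *		optee->invoke_fn(func_id, 0, 0, 0, 0, 0, 0, 0, &res);
 *		if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT)
 *			optee_cq_wait_for_completion(&optee->call_queue, &w);
 *	} while (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT);
 *	optee_cq_wait_final(&optee->call_queue, &w);
 *
 * optee_do_call_with_arg() below and the shm-cache helpers further down
 * all follow this shape.
 */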
/**
 * optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world
 * @ctx:	calling context
 * @parg:	physical address of message to pass to secure world
 *
 * Does an SMC to OP-TEE in secure world and handles any resulting
 * Remote Procedure Calls (RPC) from OP-TEE.
 *
 * Returns return code from secure world, 0 is OK
 */
u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_call_waiter w;
	struct optee_rpc_param param = { };
	struct optee_call_ctx call_ctx = { };
	u32 ret;

	param.a0 = OPTEE_SMC_CALL_WITH_ARG;
	reg_pair_from_64(&param.a1, &param.a2, parg);
	/* Initialize waiter */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		optee->invoke_fn(param.a0, param.a1, param.a2, param.a3,
				 param.a4, param.a5, param.a6, param.a7,
				 &res);

		if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
			/*
			 * Out of threads in secure world, wait for a thread
			 * to become available.
			 */
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
			if (need_resched())
				cond_resched();
			param.a0 = res.a0;
			param.a1 = res.a1;
			param.a2 = res.a2;
			param.a3 = res.a3;
			optee_handle_rpc(ctx, &param, &call_ctx);
		} else {
			ret = res.a0;
			break;
		}
	}

	optee_rpc_finalize_call(&call_ctx);
	/*
	 * We're done with our thread in secure world, if there are any
	 * thread waiters wake up one.
	 */
	optee_cq_wait_final(&optee->call_queue, &w);

	return ret;
}

static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params,
				   struct optee_msg_arg **msg_arg,
				   phys_addr_t *msg_parg)
{
	int rc;
	struct tee_shm *shm;
	struct optee_msg_arg *ma;

	shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params),
			    TEE_SHM_MAPPED);
	if (IS_ERR(shm))
		return shm;

	ma = tee_shm_get_va(shm, 0);
	if (IS_ERR(ma)) {
		rc = PTR_ERR(ma);
		goto out;
	}

	rc = tee_shm_get_pa(shm, 0, msg_parg);
	if (rc)
		goto out;

	memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
	ma->num_params = num_params;
	*msg_arg = ma;
out:
	if (rc) {
		tee_shm_free(shm);
		return ERR_PTR(rc);
	}

	return shm;
}
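/*
 * The call helpers below all share the same lifecycle around
 * get_msg_arg(): allocate the message buffer, fill in the request, pass
 * the physical address to optee_do_call_with_arg(), read the result
 * back, free the buffer. A condensed sketch (illustration only;
 * OPTEE_MSG_CMD_FOO stands in for a real command and error paths are
 * elided):
 *
 *	struct optee_msg_arg *msg_arg;
 *	phys_addr_t msg_parg;
 *	struct tee_shm *shm;
 *
 *	shm = get_msg_arg(ctx, num_params, &msg_arg, &msg_parg);
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	msg_arg->cmd = OPTEE_MSG_CMD_FOO;
 *	optee_do_call_with_arg(ctx, msg_parg);
 *	ret = msg_arg->ret;
 *	tee_shm_free(shm);
 */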
int optee_open_session(struct tee_context *ctx,
		       struct tee_ioctl_open_session_arg *arg,
		       struct tee_param *param)
{
	struct optee_context_data *ctxdata = ctx->data;
	int rc;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess = NULL;

	/* +2 for the meta parameters added below */
	shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
	msg_arg->cancel_id = arg->cancel_id;

	/*
	 * Initialize and add the meta parameters needed when opening a
	 * session.
	 */
	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
				  OPTEE_MSG_ATTR_META;
	msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
				  OPTEE_MSG_ATTR_META;
	memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
	msg_arg->params[1].u.value.c = arg->clnt_login;

	rc = tee_session_calc_client_uuid((uuid_t *)&msg_arg->params[1].u.value,
					  arg->clnt_login, arg->clnt_uuid);
	if (rc)
		goto out;

	rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
	if (rc)
		goto out;

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		rc = -ENOMEM;
		goto out;
	}

	if (optee_do_call_with_arg(ctx, msg_parg)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	if (msg_arg->ret == TEEC_SUCCESS) {
		/* A new session has been created, add it to the list. */
		sess->session_id = msg_arg->session;
		mutex_lock(&ctxdata->mutex);
		list_add(&sess->list_node, &ctxdata->sess_list);
		mutex_unlock(&ctxdata->mutex);
	} else {
		kfree(sess);
	}

	if (optee_from_msg_param(param, arg->num_params, msg_arg->params + 2)) {
		arg->ret = TEEC_ERROR_COMMUNICATION;
		arg->ret_origin = TEEC_ORIGIN_COMMS;
		/* Close session again to avoid leakage */
		optee_close_session(ctx, msg_arg->session);
	} else {
		arg->session = msg_arg->session;
		arg->ret = msg_arg->ret;
		arg->ret_origin = msg_arg->ret_origin;
	}
out:
	tee_shm_free(shm);

	return rc;
}

int optee_close_session(struct tee_context *ctx, u32 session)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess;

	/* Check that the session is valid and remove it from the list */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, session);
	if (sess)
		list_del(&sess->list_node);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;
	kfree(sess);

	shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
	msg_arg->session = session;
	optee_do_call_with_arg(ctx, msg_parg);

	tee_shm_free(shm);
	return 0;
}
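/*
 * Session bookkeeping: optee_open_session() above adds the session to
 * ctxdata->sess_list and optee_close_session() removes it again, both
 * under ctxdata->mutex. optee_invoke_func() and optee_cancel_req()
 * below only take the mutex long enough to look the session id up; the
 * session state itself lives in secure world.
 */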
int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
		      struct tee_param *param)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess;
	int rc;

	/* Check that the session is valid */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, arg->session);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;

	shm = get_msg_arg(ctx, arg->num_params, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);
	msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
	msg_arg->func = arg->func;
	msg_arg->session = arg->session;
	msg_arg->cancel_id = arg->cancel_id;

	rc = optee_to_msg_param(msg_arg->params, arg->num_params, param);
	if (rc)
		goto out;

	if (optee_do_call_with_arg(ctx, msg_parg)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	if (optee_from_msg_param(param, arg->num_params, msg_arg->params)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	arg->ret = msg_arg->ret;
	arg->ret_origin = msg_arg->ret_origin;
out:
	tee_shm_free(shm);
	return rc;
}

int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess;

	/* Check that the session is valid */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, session);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;

	shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
	msg_arg->session = session;
	msg_arg->cancel_id = cancel_id;
	optee_do_call_with_arg(ctx, msg_parg);

	tee_shm_free(shm);
	return 0;
}

/**
 * optee_enable_shm_cache() - Enables caching of some shared memory
 * allocations in OP-TEE
 * @optee:	main service struct
 */
void optee_enable_shm_cache(struct optee *optee)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		optee->invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
				 0, &res);
		if (res.a0 == OPTEE_SMC_RETURN_OK)
			break;
		optee_cq_wait_for_completion(&optee->call_queue, &w);
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}

/**
 * optee_disable_shm_cache() - Disables caching of some shared memory
 * allocations in OP-TEE
 * @optee:	main service struct
 */
void optee_disable_shm_cache(struct optee *optee)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		union {
			struct arm_smccc_res smccc;
			struct optee_smc_disable_shm_cache_result result;
		} res;

		optee->invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
				 0, &res.smccc);
		if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
			break; /* All shm's freed */
		if (res.result.status == OPTEE_SMC_RETURN_OK) {
			struct tee_shm *shm;

			shm = reg_pair_to_ptr(res.result.shm_upper32,
					      res.result.shm_lower32);
			tee_shm_free(shm);
		} else {
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		}
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}
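/*
 * The cached objects above travel as a 64-bit cookie split over two
 * 32-bit register values. reg_pair_to_ptr() and reg_pair_from_64()
 * (declared in optee_private.h) do the (re)combining; conceptually
 * (sketch only, see the header for the authoritative definitions):
 *
 *	ptr = (void *)(unsigned long)(((u64)upper32 << 32) | lower32);
 *
 *	upper32 = val >> 32;
 *	lower32 = val;
 */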
#define PAGELIST_ENTRIES_PER_PAGE				\
	((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)

/**
 * optee_fill_pages_list() - write a list of user pages to the given shared
 * buffer
 *
 * @dst:	page-aligned buffer where the list of pages will be stored
 * @pages:	array of pages that represents the shared buffer
 * @num_pages:	number of entries in @pages
 * @page_offset: offset of user buffer from the page start
 *
 * @dst should be big enough to hold the list of user page addresses and
 * links to the next pages of the buffer
 */
void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
			   size_t page_offset)
{
	int n = 0;
	phys_addr_t optee_page;
	/*
	 * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
	 * for details.
	 */
	struct {
		u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
		u64 next_page_data;
	} *pages_data;

	/*
	 * Currently OP-TEE uses a 4k page size and it does not look like
	 * this will change in the future. On the other hand, there are no
	 * known ARM architectures with page size < 4k.
	 * Thus the build assert below looks redundant. But the following
	 * code heavily relies on this assumption, so it is better to be
	 * safe than sorry.
	 */
	BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	pages_data = (void *)dst;
	/*
	 * If a Linux page is bigger than 4k, and the user buffer offset is
	 * larger than 4k/8k/12k/etc, this will skip the first 4k pages,
	 * because they bear no valuable data for OP-TEE.
	 */
	optee_page = page_to_phys(*pages) +
		round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	while (true) {
		pages_data->pages_list[n++] = optee_page;

		if (n == PAGELIST_ENTRIES_PER_PAGE) {
			pages_data->next_page_data =
				virt_to_phys(pages_data + 1);
			pages_data++;
			n = 0;
		}

		optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
		if (!(optee_page & ~PAGE_MASK)) {
			if (!--num_pages)
				break;
			pages++;
			optee_page = page_to_phys(*pages);
		}
	}
}

/*
 * The final entry in each pagelist page is a pointer to the next
 * pagelist page.
 */
static size_t get_pages_list_size(size_t num_entries)
{
	int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);

	return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
}

u64 *optee_allocate_pages_list(size_t num_entries)
{
	return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
}

void optee_free_pages_list(void *list, size_t num_entries)
{
	free_pages_exact(list, get_pages_list_size(num_entries));
}
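/*
 * Worked example of the layout produced above, assuming a 4k
 * OPTEE_MSG_NONCONTIG_PAGE_SIZE and therefore 4096 / 8 - 1 = 511
 * entries per pagelist page: a buffer spanning 600 OP-TEE pages needs
 * DIV_ROUND_UP(600, 511) = 2 pagelist pages. The first holds entries
 * 0..510, with its final u64 linking to the second, which holds the
 * remaining 89 page addresses.
 */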
static bool is_normal_memory(pgprot_t p)
{
#if defined(CONFIG_ARM)
	return (((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC) ||
		((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK));
#elif defined(CONFIG_ARM64)
	return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
#else
#error "Unsupported architecture"
#endif
}

static int __check_mem_type(struct vm_area_struct *vma, unsigned long end)
{
	while (vma && is_normal_memory(vma->vm_page_prot)) {
		if (vma->vm_end >= end)
			return 0;
		vma = vma->vm_next;
	}

	return -EINVAL;
}

static int check_mem_type(unsigned long start, size_t num_pages)
{
	struct mm_struct *mm = current->mm;
	int rc;

	/*
	 * Allow kernel addresses to register with OP-TEE, as kernel
	 * pages are configured as normal memory only.
	 */
	if (virt_addr_valid(start))
		return 0;

	mmap_read_lock(mm);
	rc = __check_mem_type(find_vma(mm, start),
			      start + num_pages * PAGE_SIZE);
	mmap_read_unlock(mm);

	return rc;
}

int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
		       struct page **pages, size_t num_pages,
		       unsigned long start)
{
	struct tee_shm *shm_arg = NULL;
	struct optee_msg_arg *msg_arg;
	u64 *pages_list;
	phys_addr_t msg_parg;
	int rc;

	if (!num_pages)
		return -EINVAL;

	rc = check_mem_type(start, num_pages);
	if (rc)
		return rc;

	pages_list = optee_allocate_pages_list(num_pages);
	if (!pages_list)
		return -ENOMEM;

	shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
	if (IS_ERR(shm_arg)) {
		rc = PTR_ERR(shm_arg);
		goto out;
	}

	optee_fill_pages_list(pages_list, pages, num_pages,
			      tee_shm_get_page_offset(shm));

	msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
	msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				OPTEE_MSG_ATTR_NONCONTIG;
	msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
	msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
	/*
	 * In the least significant bits of msg_arg->params->u.tmem.buf_ptr
	 * we store the buffer offset from the 4k page, as described in the
	 * OP-TEE ABI.
	 */
	msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
		(tee_shm_get_page_offset(shm) &
		 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));

	if (optee_do_call_with_arg(ctx, msg_parg) ||
	    msg_arg->ret != TEEC_SUCCESS)
		rc = -EINVAL;

	tee_shm_free(shm_arg);
out:
	optee_free_pages_list(pages_list, num_pages);
	return rc;
}

int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
{
	struct tee_shm *shm_arg;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	int rc = 0;

	shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
	if (IS_ERR(shm_arg))
		return PTR_ERR(shm_arg);

	msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;

	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;

	if (optee_do_call_with_arg(ctx, msg_parg) ||
	    msg_arg->ret != TEEC_SUCCESS)
		rc = -EINVAL;
	tee_shm_free(shm_arg);
	return rc;
}

int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
			    struct page **pages, size_t num_pages,
			    unsigned long start)
{
	/*
	 * We don't want to register supplicant memory in OP-TEE.
	 * Instead, information about it will be passed in RPC code.
	 */
	return check_mem_type(start, num_pages);
}

int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm)
{
	return 0;
}
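/*
 * Note on the OPTEE_MSG_ATTR_NONCONTIG encoding used by
 * optee_shm_register() above (described with OPTEE_MSG_ATTR_NONCONTIG
 * in optee_msg.h): buf_ptr carries both the physical address of the
 * page-aligned pagelist and the buffer's offset within its first 4k
 * page. A worked example, with the pagelist at physical address
 * 0x80001000 and a buffer starting 0x2c0 bytes into a page:
 *
 *	buf_ptr = 0x80001000 | 0x2c0 = 0x800012c0
 *
 * Conceptually, secure world masks off the low 12 bits to find the
 * list and keeps them as the initial offset into the first registered
 * page.
 */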