// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2021, Linaro Limited
 * Copyright (c) 2016, EPAM Systems
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irqdomain.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include "optee_private.h"
#include "optee_smc.h"
#include "optee_rpc_cmd.h"
#define CREATE_TRACE_POINTS
#include "optee_trace.h"

/*
 * This file implements the SMC ABI used when communicating with secure world
 * OP-TEE OS via raw SMCs.
 * This file is divided into the following sections:
 * 1. Convert between struct tee_param and struct optee_msg_param
 * 2. Low level support functions to register shared memory in secure world
 * 3. Dynamic shared memory pool based on alloc_pages()
 * 4. Do a normal scheduled call into secure world
 * 5. Asynchronous notification
 * 6. Driver initialization.
 */

/*
 * A typical OP-TEE private shm allocation is 224 bytes (argument struct
 * with 6 parameters, needed for open session). So with an alignment of 512
 * we'll waste a bit more than 50%. However, it's only expected that we'll
 * have a handful of these structs allocated at a time. Most memory will
 * be allocated aligned to the page size, so all in all this should scale
 * up and down quite well.
 */
#define OPTEE_MIN_STATIC_POOL_ALIGN	9 /* 512 bytes aligned */
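
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): with
 * the alignment above, the typical 224 byte argument struct is padded to
 * 512 bytes, i.e. a bit more than 50% waste, while page-sized allocations
 * are already aligned and lose nothing.
 */
static inline size_t __maybe_unused optee_example_static_pool_size(size_t size)
{
	/* ALIGN(224, 512) == 512, ALIGN(4096, 512) == 4096 */
	return ALIGN(size, 1 << OPTEE_MIN_STATIC_POOL_ALIGN);
}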

/*
 * 1. Convert between struct tee_param and struct optee_msg_param
 *
 * optee_from_msg_param() and optee_to_msg_param() are the main
 * functions.
 */

static int from_msg_param_tmp_mem(struct tee_param *p, u32 attr,
				  const struct optee_msg_param *mp)
{
	struct tee_shm *shm;
	phys_addr_t pa;
	int rc;

	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
		  attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
	p->u.memref.size = mp->u.tmem.size;
	shm = (struct tee_shm *)(unsigned long)mp->u.tmem.shm_ref;
	if (!shm) {
		p->u.memref.shm_offs = 0;
		p->u.memref.shm = NULL;
		return 0;
	}

	rc = tee_shm_get_pa(shm, 0, &pa);
	if (rc)
		return rc;

	p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
	p->u.memref.shm = shm;

	return 0;
}

static void from_msg_param_reg_mem(struct tee_param *p, u32 attr,
				   const struct optee_msg_param *mp)
{
	struct tee_shm *shm;

	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
		  attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	p->u.memref.size = mp->u.rmem.size;
	shm = (struct tee_shm *)(unsigned long)mp->u.rmem.shm_ref;

	if (shm) {
		p->u.memref.shm_offs = mp->u.rmem.offs;
		p->u.memref.shm = shm;
	} else {
		p->u.memref.shm_offs = 0;
		p->u.memref.shm = NULL;
	}
}

/**
 * optee_from_msg_param() - convert from OPTEE_MSG parameters to
 *			    struct tee_param
 * @optee:	main service struct
 * @params:	subsystem internal parameter representation
 * @num_params:	number of elements in the parameter arrays
 * @msg_params:	OPTEE_MSG parameters
 * Returns 0 on success or <0 on failure
 */
static int optee_from_msg_param(struct optee *optee, struct tee_param *params,
				size_t num_params,
				const struct optee_msg_param *msg_params)
{
	int rc;
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_param *p = params + n;
		const struct optee_msg_param *mp = msg_params + n;
		u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;

		switch (attr) {
		case OPTEE_MSG_ATTR_TYPE_NONE:
			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
			memset(&p->u, 0, sizeof(p->u));
			break;
		case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
			optee_from_msg_param_value(p, attr, mp);
			break;
		case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
			rc = from_msg_param_tmp_mem(p, attr, mp);
			if (rc)
				return rc;
			break;
		case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
			from_msg_param_reg_mem(p, attr, mp);
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}
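
/*
 * A minimal sketch (hypothetical helper, not used by the driver): the
 * conversions above rely on the OPTEE_MSG_ATTR_TYPE_*_{INPUT,OUTPUT,INOUT}
 * constants and their TEE_IOCTL_PARAM_ATTR_TYPE_* counterparts being
 * consecutive, so a message attribute maps to an IOCTL attribute by a
 * constant offset:
 */
static inline u32 __maybe_unused optee_example_tmem_attr_to_ioctl(u32 attr)
{
	/* INPUT maps to INPUT, OUTPUT to OUTPUT and INOUT to INOUT */
	return TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
	       attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
}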

static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
				const struct tee_param *p)
{
	int rc;
	phys_addr_t pa;

	mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;

	mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
	mp->u.tmem.size = p->u.memref.size;

	if (!p->u.memref.shm) {
		mp->u.tmem.buf_ptr = 0;
		return 0;
	}

	rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
	if (rc)
		return rc;

	mp->u.tmem.buf_ptr = pa;
	mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
		    OPTEE_MSG_ATTR_CACHE_SHIFT;

	return 0;
}

static int to_msg_param_reg_mem(struct optee_msg_param *mp,
				const struct tee_param *p)
{
	mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;

	mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
	mp->u.rmem.size = p->u.memref.size;
	mp->u.rmem.offs = p->u.memref.shm_offs;
	return 0;
}

/**
 * optee_to_msg_param() - convert from struct tee_param to OPTEE_MSG parameters
 * @optee:	main service struct
 * @msg_params:	OPTEE_MSG parameters
 * @num_params:	number of elements in the parameter arrays
 * @params:	subsystem internal parameter representation
 * Returns 0 on success or <0 on failure
 */
static int optee_to_msg_param(struct optee *optee,
			      struct optee_msg_param *msg_params,
			      size_t num_params, const struct tee_param *params)
{
	int rc;
	size_t n;

	for (n = 0; n < num_params; n++) {
		const struct tee_param *p = params + n;
		struct optee_msg_param *mp = msg_params + n;

		switch (p->attr) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
			mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
			memset(&mp->u, 0, sizeof(mp->u));
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			optee_to_msg_param_value(mp, p);
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			if (tee_shm_is_dynamic(p->u.memref.shm))
				rc = to_msg_param_reg_mem(mp, p);
			else
				rc = to_msg_param_tmp_mem(mp, p);
			if (rc)
				return rc;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

/*
 * 2. Low level support functions to register shared memory in secure world
 *
 * Functions to enable/disable shared memory caching in secure world, that
 * is, lazy freeing of previously allocated shared memory. Freeing is
 * performed when a request has been completed.
 *
 * Functions to register and unregister shared memory both for normal
 * clients and for tee-supplicant.
 */

/**
 * optee_enable_shm_cache() - Enables caching of some shared memory
 *			      allocations in OP-TEE
 * @optee:	main service struct
 */
static void optee_enable_shm_cache(struct optee *optee)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		optee->smc.invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE,
				     0, 0, 0, 0, 0, 0, 0, &res);
		if (res.a0 == OPTEE_SMC_RETURN_OK)
			break;
		optee_cq_wait_for_completion(&optee->call_queue, &w);
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}

/**
 * __optee_disable_shm_cache() - Disables caching of some shared memory
 *				 allocations in OP-TEE
 * @optee:	main service struct
 * @is_mapped:	true if the cached shared memory addresses were mapped by this
 *		kernel, are safe to dereference, and should be freed
 */
static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		union {
			struct arm_smccc_res smccc;
			struct optee_smc_disable_shm_cache_result result;
		} res;

		optee->smc.invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE,
				     0, 0, 0, 0, 0, 0, 0, &res.smccc);
		if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
			break; /* All shm's freed */
		if (res.result.status == OPTEE_SMC_RETURN_OK) {
			struct tee_shm *shm;

			/*
			 * Shared memory references that were not mapped by
			 * this kernel must be ignored to prevent a crash.
			 */
			if (!is_mapped)
				continue;

			shm = reg_pair_to_ptr(res.result.shm_upper32,
					      res.result.shm_lower32);
			tee_shm_free(shm);
		} else {
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		}
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}

/**
 * optee_disable_shm_cache() - Disables caching of mapped shared memory
 *			       allocations in OP-TEE
 * @optee:	main service struct
 */
static void optee_disable_shm_cache(struct optee *optee)
{
	return __optee_disable_shm_cache(optee, true);
}

/**
 * optee_disable_unmapped_shm_cache() - Disables caching of shared memory
 *					allocations in OP-TEE which are not
 *					currently mapped
 * @optee:	main service struct
 */
static void optee_disable_unmapped_shm_cache(struct optee *optee)
{
	return __optee_disable_shm_cache(optee, false);
}

#define PAGELIST_ENTRIES_PER_PAGE				\
	((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)

/*
 * The final entry in each pagelist page is a pointer to the next
 * pagelist page.
 */
static size_t get_pages_list_size(size_t num_entries)
{
	int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);

	return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
}

static u64 *optee_allocate_pages_list(size_t num_entries)
{
	return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
}

static void optee_free_pages_list(void *list, size_t num_entries)
{
	free_pages_exact(list, get_pages_list_size(num_entries));
}
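
/*
 * Worked example (illustrative only): with the 4k non-contiguous page
 * size, PAGELIST_ENTRIES_PER_PAGE is 4096 / 8 - 1 = 511. A 600 page
 * buffer thus needs DIV_ROUND_UP(600, 511) = 2 pagelist pages, so
 * get_pages_list_size(600) == 8192, where the final entry of the first
 * pagelist page links to the second one.
 */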

/**
 * optee_fill_pages_list() - write list of user pages to given shared
 * buffer.
 *
 * @dst: page-aligned buffer where list of pages will be stored
 * @pages: array of pages that represents shared buffer
 * @num_pages: number of entries in @pages
 * @page_offset: offset of user buffer from page start
 *
 * @dst should be big enough to hold list of user page addresses and
 * links to the next pages of buffer
 */
static void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
				  size_t page_offset)
{
	int n = 0;
	phys_addr_t optee_page;
	/*
	 * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
	 * for details.
	 */
	struct {
		u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
		u64 next_page_data;
	} *pages_data;

	/*
	 * Currently OP-TEE uses 4k page size and it does not look like
	 * this will change in the future. On the other hand, there are no
	 * known ARM architectures with a page size smaller than 4k.
	 * That makes the build assert below look redundant. But the
	 * following code heavily relies on this assumption, so it is
	 * better to be safe than sorry.
	 */
	BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	pages_data = (void *)dst;
	/*
	 * If the Linux page is bigger than 4k and the user buffer offset
	 * is larger than 4k/8k/12k/etc, this skips the leading 4k chunks,
	 * because they carry no data of value to OP-TEE.
	 */
	optee_page = page_to_phys(*pages) +
		round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	while (true) {
		pages_data->pages_list[n++] = optee_page;

		if (n == PAGELIST_ENTRIES_PER_PAGE) {
			pages_data->next_page_data =
				virt_to_phys(pages_data + 1);
			pages_data++;
			n = 0;
		}

		optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
		if (!(optee_page & ~PAGE_MASK)) {
			if (!--num_pages)
				break;
			pages++;
			optee_page = page_to_phys(*pages);
		}
	}
}
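
/*
 * Worked example (illustrative only, addresses are made up): for a
 * buffer starting 0x100 bytes into a page at physical address
 * 0x80000000 and spilling into a second page, the loop above emits the
 * 4k-aligned physical address of each covered page, i.e. 0x80000000
 * followed by page_to_phys() of the second page. The 0x100 offset
 * itself travels separately in the low bits of u.tmem.buf_ptr, see
 * optee_shm_register() below.
 */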

static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
			      struct page **pages, size_t num_pages,
			      unsigned long start)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_msg_arg *msg_arg;
	struct tee_shm *shm_arg;
	u64 *pages_list;
	int rc;

	if (!num_pages)
		return -EINVAL;

	rc = optee_check_mem_type(start, num_pages);
	if (rc)
		return rc;

	pages_list = optee_allocate_pages_list(num_pages);
	if (!pages_list)
		return -ENOMEM;

	shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
	if (IS_ERR(shm_arg)) {
		rc = PTR_ERR(shm_arg);
		goto out;
	}

	optee_fill_pages_list(pages_list, pages, num_pages,
			      tee_shm_get_page_offset(shm));

	msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
	msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				OPTEE_MSG_ATTR_NONCONTIG;
	msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
	msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
	/*
	 * In the least significant bits of msg_arg->params->u.tmem.buf_ptr
	 * we store the buffer offset from the 4k page, as described in the
	 * OP-TEE ABI.
	 */
	msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
		(tee_shm_get_page_offset(shm) &
		 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));

	if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
	    msg_arg->ret != TEEC_SUCCESS)
		rc = -EINVAL;

	tee_shm_free(shm_arg);
out:
	optee_free_pages_list(pages_list, num_pages);
	return rc;
}

static int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_msg_arg *msg_arg;
	struct tee_shm *shm_arg;
	int rc = 0;

	shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
	if (IS_ERR(shm_arg))
		return PTR_ERR(shm_arg);

	msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;

	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;

	if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
	    msg_arg->ret != TEEC_SUCCESS)
		rc = -EINVAL;
	tee_shm_free(shm_arg);
	return rc;
}

static int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
				   struct page **pages, size_t num_pages,
				   unsigned long start)
{
	/*
	 * We don't want to register supplicant memory in OP-TEE.
	 * Instead information about it will be passed in RPC code.
	 */
	return optee_check_mem_type(start, num_pages);
}

static int optee_shm_unregister_supp(struct tee_context *ctx,
				     struct tee_shm *shm)
{
	return 0;
}
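
/*
 * A minimal sketch (hypothetical helper, not used by the driver) of the
 * buf_ptr encoding used together with OPTEE_MSG_ATTR_NONCONTIG above:
 * the physical address of the pagelist is combined with the buffer's
 * offset into the first 4k page.
 */
static inline u64 __maybe_unused
optee_example_noncontig_buf_ptr(u64 *pages_list, size_t page_offset)
{
	return virt_to_phys(pages_list) |
	       (page_offset & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
}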

/*
 * 3. Dynamic shared memory pool based on alloc_pages()
 *
 * Implements an OP-TEE specific shared memory pool which is used
 * when dynamic shared memory is supported by secure world.
 *
 * The main function is optee_shm_pool_alloc_pages().
 */

static int pool_op_alloc(struct tee_shm_pool *pool,
			 struct tee_shm *shm, size_t size, size_t align)
{
	/*
	 * Shared memory private to the OP-TEE driver doesn't need
	 * to be registered with OP-TEE.
	 */
	if (shm->flags & TEE_SHM_PRIV)
		return optee_pool_op_alloc_helper(pool, shm, size, align, NULL);

	return optee_pool_op_alloc_helper(pool, shm, size, align,
					  optee_shm_register);
}

static void pool_op_free(struct tee_shm_pool *pool,
			 struct tee_shm *shm)
{
	if (!(shm->flags & TEE_SHM_PRIV))
		optee_pool_op_free_helper(pool, shm, optee_shm_unregister);
	else
		optee_pool_op_free_helper(pool, shm, NULL);
}

static void pool_op_destroy_pool(struct tee_shm_pool *pool)
{
	kfree(pool);
}

static const struct tee_shm_pool_ops pool_ops = {
	.alloc = pool_op_alloc,
	.free = pool_op_free,
	.destroy_pool = pool_op_destroy_pool,
};

/**
 * optee_shm_pool_alloc_pages() - create page-based allocator pool
 *
 * This pool is used when OP-TEE supports dynamic SHM. In this case
 * command buffers and such are allocated from kernel's own memory.
 */
static struct tee_shm_pool *optee_shm_pool_alloc_pages(void)
{
	struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);

	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->ops = &pool_ops;

	return pool;
}

/*
 * 4. Do a normal scheduled call into secure world
 *
 * The function optee_smc_do_call_with_arg() performs a normal scheduled
 * call into secure world. During this call, secure world may request help
 * from normal world using RPCs (Remote Procedure Calls). This includes
 * delivery of non-secure interrupts to, for instance, allow rescheduling
 * of the current task.
 */

static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
					 struct optee_msg_arg *arg)
{
	struct tee_shm *shm;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	if (arg->num_params != 1 ||
	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
	switch (arg->params[0].u.value.a) {
	case OPTEE_RPC_SHM_TYPE_APPL:
		optee_rpc_cmd_free_suppl(ctx, shm);
		break;
	case OPTEE_RPC_SHM_TYPE_KERNEL:
		tee_shm_free(shm);
		break;
	default:
		/* Return so TEEC_SUCCESS below doesn't mask the error */
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}
	arg->ret = TEEC_SUCCESS;
}

static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
					  struct optee *optee,
					  struct optee_msg_arg *arg,
					  struct optee_call_ctx *call_ctx)
{
	phys_addr_t pa;
	struct tee_shm *shm;
	size_t sz;
	size_t n;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	if (!arg->num_params ||
	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	for (n = 1; n < arg->num_params; n++) {
		if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
			arg->ret = TEEC_ERROR_BAD_PARAMETERS;
			return;
		}
	}

	sz = arg->params[0].u.value.b;
	switch (arg->params[0].u.value.a) {
	case OPTEE_RPC_SHM_TYPE_APPL:
		shm = optee_rpc_cmd_alloc_suppl(ctx, sz);
		break;
	case OPTEE_RPC_SHM_TYPE_KERNEL:
		shm = tee_shm_alloc_priv_buf(optee->ctx, sz);
		break;
	default:
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	if (IS_ERR(shm)) {
		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
		return;
	}

	if (tee_shm_get_pa(shm, 0, &pa)) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		goto bad;
	}

	sz = tee_shm_get_size(shm);

	if (tee_shm_is_dynamic(shm)) {
		struct page **pages;
		u64 *pages_list;
		size_t page_num;

		pages = tee_shm_get_pages(shm, &page_num);
		if (!pages || !page_num) {
			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
			goto bad;
		}

		pages_list = optee_allocate_pages_list(page_num);
		if (!pages_list) {
			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
			goto bad;
		}

		call_ctx->pages_list = pages_list;
		call_ctx->num_entries = page_num;

		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				      OPTEE_MSG_ATTR_NONCONTIG;
		/*
		 * In the least significant bits of u.tmem.buf_ptr we store
		 * the buffer offset from the 4k page, as described in the
		 * OP-TEE ABI.
		 */
		arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
			(tee_shm_get_page_offset(shm) &
			 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
		arg->params[0].u.tmem.size = tee_shm_get_size(shm);
		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;

		optee_fill_pages_list(pages_list, pages, page_num,
				      tee_shm_get_page_offset(shm));
	} else {
		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
		arg->params[0].u.tmem.buf_ptr = pa;
		arg->params[0].u.tmem.size = sz;
		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
	}

	arg->ret = TEEC_SUCCESS;
	return;
bad:
	tee_shm_free(shm);
}
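
/*
 * For reference (restating the checks above, not an additional ABI
 * definition): an OPTEE_RPC_CMD_SHM_ALLOC request carries one value
 * parameter where u.value.a selects the pool (OPTEE_RPC_SHM_TYPE_APPL
 * or OPTEE_RPC_SHM_TYPE_KERNEL) and u.value.b is the requested size in
 * bytes; the allocation is returned in params[0] as a tmem parameter.
 */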

static void free_pages_list(struct optee_call_ctx *call_ctx)
{
	if (call_ctx->pages_list) {
		optee_free_pages_list(call_ctx->pages_list,
				      call_ctx->num_entries);
		call_ctx->pages_list = NULL;
		call_ctx->num_entries = 0;
	}
}

static void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
{
	free_pages_list(call_ctx);
}

static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
				struct optee_msg_arg *arg,
				struct optee_call_ctx *call_ctx)
{
	switch (arg->cmd) {
	case OPTEE_RPC_CMD_SHM_ALLOC:
		free_pages_list(call_ctx);
		handle_rpc_func_cmd_shm_alloc(ctx, optee, arg, call_ctx);
		break;
	case OPTEE_RPC_CMD_SHM_FREE:
		handle_rpc_func_cmd_shm_free(ctx, arg);
		break;
	default:
		optee_rpc_cmd(ctx, optee, arg);
	}
}

/**
 * optee_handle_rpc() - handle RPC from secure world
 * @ctx:	context doing the RPC
 * @rpc_arg:	pre-registered RPC argument structure, or NULL if the
 *		argument is passed in shared memory referenced by @param
 * @param:	value of registers for the RPC
 * @call_ctx:	call context. Preserved during one OP-TEE invocation
 *
 * Result of RPC is written back into @param.
 */
static void optee_handle_rpc(struct tee_context *ctx,
			     struct optee_msg_arg *rpc_arg,
			     struct optee_rpc_param *param,
			     struct optee_call_ctx *call_ctx)
{
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct optee_msg_arg *arg;
	struct tee_shm *shm;
	phys_addr_t pa;

	switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
	case OPTEE_SMC_RPC_FUNC_ALLOC:
		shm = tee_shm_alloc_priv_buf(optee->ctx, param->a1);
		if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
			reg_pair_from_64(&param->a1, &param->a2, pa);
			reg_pair_from_64(&param->a4, &param->a5,
					 (unsigned long)shm);
		} else {
			param->a1 = 0;
			param->a2 = 0;
			param->a4 = 0;
			param->a5 = 0;
		}
		kmemleak_not_leak(shm);
		break;
	case OPTEE_SMC_RPC_FUNC_FREE:
		shm = reg_pair_to_ptr(param->a1, param->a2);
		tee_shm_free(shm);
		break;
	case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
		/*
		 * A foreign interrupt was raised while secure world was
		 * executing. Since foreign interrupts are handled in Linux,
		 * a dummy RPC is performed to let Linux take the interrupt
		 * through the normal vector.
		 */
		break;
	case OPTEE_SMC_RPC_FUNC_CMD:
		if (rpc_arg) {
			arg = rpc_arg;
		} else {
			shm = reg_pair_to_ptr(param->a1, param->a2);
			arg = tee_shm_get_va(shm, 0);
			if (IS_ERR(arg)) {
				pr_err("%s: tee_shm_get_va %p failed\n",
				       __func__, shm);
				break;
			}
		}

		handle_rpc_func_cmd(ctx, optee, arg, call_ctx);
		break;
	default:
		pr_warn("Unknown RPC func 0x%x\n",
			(u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
		break;
	}

	param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
}
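
/*
 * Illustrative sketch (hypothetical helper, not used by the driver):
 * pointers exchanged with secure world above are split across two
 * 32-bit register values with reg_pair_from_64() and recombined with
 * reg_pair_to_ptr(), so the following round trip is an identity:
 */
static inline bool __maybe_unused optee_example_reg_pair_roundtrip(void *ptr)
{
	u32 upper;
	u32 lower;

	reg_pair_from_64(&upper, &lower, (unsigned long)ptr);
	return reg_pair_to_ptr(upper, lower) == ptr;
}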

/**
 * optee_smc_do_call_with_arg() - Do an SMC to OP-TEE in secure world
 * @ctx:	calling context
 * @shm:	shared memory holding the message to pass to secure world
 *
 * Does an SMC to OP-TEE in secure world and handles eventual resulting
 * Remote Procedure Calls (RPC) from OP-TEE.
 *
 * Returns return code from secure world, 0 is OK
 */
static int optee_smc_do_call_with_arg(struct tee_context *ctx,
				      struct tee_shm *shm)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_call_waiter w;
	struct optee_rpc_param param = { };
	struct optee_call_ctx call_ctx = { };
	struct optee_msg_arg *rpc_arg = NULL;
	int rc;

	if (optee->rpc_param_count) {
		struct optee_msg_arg *arg;
		unsigned int rpc_arg_offs;

		arg = tee_shm_get_va(shm, 0);
		if (IS_ERR(arg))
			return PTR_ERR(arg);

		rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
		rpc_arg = tee_shm_get_va(shm, rpc_arg_offs);
		if (IS_ERR(rpc_arg))
			return PTR_ERR(rpc_arg);
	}

	if (rpc_arg && tee_shm_is_dynamic(shm)) {
		param.a0 = OPTEE_SMC_CALL_WITH_REGD_ARG;
		reg_pair_from_64(&param.a1, &param.a2, (u_long)shm);
		param.a3 = 0;
	} else {
		phys_addr_t parg;

		rc = tee_shm_get_pa(shm, 0, &parg);
		if (rc)
			return rc;

		if (rpc_arg)
			param.a0 = OPTEE_SMC_CALL_WITH_RPC_ARG;
		else
			param.a0 = OPTEE_SMC_CALL_WITH_ARG;
		reg_pair_from_64(&param.a1, &param.a2, parg);
	}
	/* Initialize waiter */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		trace_optee_invoke_fn_begin(&param);
		optee->smc.invoke_fn(param.a0, param.a1, param.a2, param.a3,
				     param.a4, param.a5, param.a6, param.a7,
				     &res);
		trace_optee_invoke_fn_end(&param, &res);

		if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
			/*
			 * Out of threads in secure world, wait for a
			 * thread to become available.
			 */
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
			cond_resched();
			param.a0 = res.a0;
			param.a1 = res.a1;
			param.a2 = res.a2;
			param.a3 = res.a3;
			optee_handle_rpc(ctx, rpc_arg, &param, &call_ctx);
		} else {
			rc = res.a0;
			break;
		}
	}

	optee_rpc_finalize_call(&call_ctx);
	/*
	 * We're done with our thread in secure world, if there are any
	 * thread waiters wake up one.
	 */
	optee_cq_wait_final(&optee->call_queue, &w);

	return rc;
}

static int simple_call_with_arg(struct tee_context *ctx, u32 cmd)
{
	struct optee_msg_arg *msg_arg;
	struct tee_shm *shm;

	shm = optee_get_msg_arg(ctx, 0, &msg_arg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = cmd;
	optee_smc_do_call_with_arg(ctx, shm);

	tee_shm_free(shm);
	return 0;
}

static int optee_smc_do_bottom_half(struct tee_context *ctx)
{
	return simple_call_with_arg(ctx, OPTEE_MSG_CMD_DO_BOTTOM_HALF);
}

static int optee_smc_stop_async_notif(struct tee_context *ctx)
{
	return simple_call_with_arg(ctx, OPTEE_MSG_CMD_STOP_ASYNC_NOTIF);
}

/*
 * 5. Asynchronous notification
 */

static u32 get_async_notif_value(optee_invoke_fn *invoke_fn, bool *value_valid,
				 bool *value_pending)
{
	struct arm_smccc_res res;

	invoke_fn(OPTEE_SMC_GET_ASYNC_NOTIF_VALUE, 0, 0, 0, 0, 0, 0, 0, &res);

	if (res.a0) {
		/*
		 * Make sure the caller doesn't read an uninitialized
		 * value on error.
		 */
		*value_valid = false;
		return 0;
	}
	*value_valid = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID);
	*value_pending = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING);
	return res.a1;
}

static irqreturn_t notif_irq_handler(int irq, void *dev_id)
{
	struct optee *optee = dev_id;
	bool do_bottom_half = false;
	bool value_valid;
	bool value_pending;
	u32 value;

	do {
		value = get_async_notif_value(optee->smc.invoke_fn,
					      &value_valid, &value_pending);
		if (!value_valid)
			break;

		if (value == OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF)
			do_bottom_half = true;
		else
			optee_notif_send(optee, value);
	} while (value_pending);

	if (do_bottom_half)
		return IRQ_WAKE_THREAD;
	return IRQ_HANDLED;
}

static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id)
{
	struct optee *optee = dev_id;

	optee_smc_do_bottom_half(optee->ctx);

	return IRQ_HANDLED;
}

static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
{
	int rc;

	rc = request_threaded_irq(irq, notif_irq_handler,
				  notif_irq_thread_fn,
				  0, "optee_notification", optee);
	if (rc)
		return rc;

	optee->smc.notif_irq = irq;

	return 0;
}

static void optee_smc_notif_uninit_irq(struct optee *optee)
{
	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
		optee_smc_stop_async_notif(optee->ctx);
		if (optee->smc.notif_irq) {
			free_irq(optee->smc.notif_irq, optee);
			irq_dispose_mapping(optee->smc.notif_irq);
		}
	}
}

/*
 * 6. Driver initialization
 *
 * During driver initialization, secure world is probed to find out which
 * features it supports so the driver can be initialized with a matching
 * configuration. This involves for instance support for dynamic shared
 * memory instead of a static memory carveout.
 */

static void optee_get_version(struct tee_device *teedev,
			      struct tee_ioctl_version_data *vers)
{
	struct tee_ioctl_version_data v = {
		.impl_id = TEE_IMPL_ID_OPTEE,
		.impl_caps = TEE_OPTEE_CAP_TZ,
		.gen_caps = TEE_GEN_CAP_GP,
	};
	struct optee *optee = tee_get_drvdata(teedev);

	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		v.gen_caps |= TEE_GEN_CAP_REG_MEM;
	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL)
		v.gen_caps |= TEE_GEN_CAP_MEMREF_NULL;
	*vers = v;
}

static int optee_smc_open(struct tee_context *ctx)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	u32 sec_caps = optee->smc.sec_caps;

	return optee_open(ctx, sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL);
}

static const struct tee_driver_ops optee_clnt_ops = {
	.get_version = optee_get_version,
	.open = optee_smc_open,
	.release = optee_release,
	.open_session = optee_open_session,
	.close_session = optee_close_session,
	.invoke_func = optee_invoke_func,
	.cancel_req = optee_cancel_req,
	.shm_register = optee_shm_register,
	.shm_unregister = optee_shm_unregister,
};

static const struct tee_desc optee_clnt_desc = {
	.name = DRIVER_NAME "-clnt",
	.ops = &optee_clnt_ops,
	.owner = THIS_MODULE,
};

static const struct tee_driver_ops optee_supp_ops = {
	.get_version = optee_get_version,
	.open = optee_smc_open,
	.release = optee_release_supp,
	.supp_recv = optee_supp_recv,
	.supp_send = optee_supp_send,
	.shm_register = optee_shm_register_supp,
	.shm_unregister = optee_shm_unregister_supp,
};

static const struct tee_desc optee_supp_desc = {
	.name = DRIVER_NAME "-supp",
	.ops = &optee_supp_ops,
	.owner = THIS_MODULE,
	.flags = TEE_DESC_PRIVILEGED,
};

static const struct optee_ops optee_ops = {
	.do_call_with_arg = optee_smc_do_call_with_arg,
	.to_msg_param = optee_to_msg_param,
	.from_msg_param = optee_from_msg_param,
};

static int enable_async_notif(optee_invoke_fn *invoke_fn)
{
	struct arm_smccc_res res;

	invoke_fn(OPTEE_SMC_ENABLE_ASYNC_NOTIF, 0, 0, 0, 0, 0, 0, 0, &res);

	if (res.a0)
		return -EINVAL;
	return 0;
}

static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
{
	struct arm_smccc_res res;

	invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);

	if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
	    res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
		return true;
	return false;
}

static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_call_get_os_revision_result result;
	} res = {
		.result = {
			.build_id = 0
		}
	};

	invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0,
		  &res.smccc);

	if (res.result.build_id)
		pr_info("revision %lu.%lu (%08lx)", res.result.major,
			res.result.minor, res.result.build_id);
	else
		pr_info("revision %lu.%lu", res.result.major, res.result.minor);
}

static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_calls_revision_result result;
	} res;

	invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);

	if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
	    (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
		return true;
	return false;
}

static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
					    u32 *sec_caps, u32 *max_notif_value,
					    unsigned int *rpc_param_count)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_exchange_capabilities_result result;
	} res;
	u32 a1 = 0;

	/*
	 * TODO This isn't enough to tell if it's a UP system (from the
	 * kernel's point of view) or not, is_smp() returns the information
	 * needed, but can't be called directly from here.
	 */
	if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
		a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;

	invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
		  &res.smccc);

	if (res.result.status != OPTEE_SMC_RETURN_OK)
		return false;

	*sec_caps = res.result.capabilities;
	if (*sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF)
		*max_notif_value = res.result.max_notif_value;
	else
		*max_notif_value = OPTEE_DEFAULT_MAX_NOTIF_VALUE;
	if (*sec_caps & OPTEE_SMC_SEC_CAP_RPC_ARG)
		*rpc_param_count = (u8)res.result.data;
	else
		*rpc_param_count = 0;

	return true;
}

static struct tee_shm_pool *
optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_get_shm_config_result result;
	} res;
	unsigned long vaddr;
	phys_addr_t paddr;
	size_t size;
	phys_addr_t begin;
	phys_addr_t end;
	void *va;
	void *rc;

	invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
	if (res.result.status != OPTEE_SMC_RETURN_OK) {
		pr_err("static shm service not available\n");
		return ERR_PTR(-ENOENT);
	}

	if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
		pr_err("only normal cached shared memory supported\n");
		return ERR_PTR(-EINVAL);
	}

	begin = roundup(res.result.start, PAGE_SIZE);
	end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
	paddr = begin;
	size = end - begin;

	va = memremap(paddr, size, MEMREMAP_WB);
	if (!va) {
		pr_err("shared memory memremap failed\n");
		return ERR_PTR(-EINVAL);
	}
	vaddr = (unsigned long)va;

	rc = tee_shm_pool_alloc_res_mem(vaddr, paddr, size,
					OPTEE_MIN_STATIC_POOL_ALIGN);
	if (IS_ERR(rc))
		memunmap(va);
	else
		*memremaped_shm = va;

	return rc;
}

/* Simple wrapper functions to be able to use a function pointer */
static void optee_smccc_smc(unsigned long a0, unsigned long a1,
			    unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5,
			    unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
{
	arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}

static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
			    unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5,
			    unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
{
	arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}

static optee_invoke_fn *get_invoke_func(struct device *dev)
{
	const char *method;

	pr_info("probing for conduit method\n");

	if (device_property_read_string(dev, "method", &method)) {
		pr_warn("missing \"method\" property\n");
		return ERR_PTR(-ENXIO);
	}

	if (!strcmp("hvc", method))
		return optee_smccc_hvc;
	else if (!strcmp("smc", method))
		return optee_smccc_smc;

	pr_warn("invalid \"method\" property: %s\n", method);
	return ERR_PTR(-EINVAL);
}

/* optee_smc_remove - Device Removal Routine
 * @pdev: platform device information struct
 *
 * optee_smc_remove is called by the platform subsystem to alert the driver
 * that it should release the device
 */
static int optee_smc_remove(struct platform_device *pdev)
{
	struct optee *optee = platform_get_drvdata(pdev);

	/*
	 * Ask OP-TEE to free all cached shared memory objects to decrease
	 * reference counters and also avoid wild pointers in secure world
	 * into the old shared memory range.
	 */
	if (!optee->rpc_param_count)
		optee_disable_shm_cache(optee);

	optee_smc_notif_uninit_irq(optee);

	optee_remove_common(optee);

	if (optee->smc.memremaped_shm)
		memunmap(optee->smc.memremaped_shm);

	kfree(optee);

	return 0;
}

/* optee_shutdown - Device Shutdown Routine
 * @pdev: platform device information struct
 *
 * optee_shutdown is called by the platform subsystem to alert
 * the driver that a shutdown, reboot, or kexec is happening and
 * the device must be disabled.
 */
static void optee_shutdown(struct platform_device *pdev)
{
	struct optee *optee = platform_get_drvdata(pdev);

	if (!optee->rpc_param_count)
		optee_disable_shm_cache(optee);
}
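
/*
 * For reference (illustrative example, not a binding definition): the
 * driver binds against a device tree node like the following, where the
 * "method" property selects the conduit probed by get_invoke_func()
 * above:
 *
 *	firmware {
 *		optee {
 *			compatible = "linaro,optee-tz";
 *			method = "smc";
 *		};
 *	};
 */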

static int optee_probe(struct platform_device *pdev)
{
	optee_invoke_fn *invoke_fn;
	struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
	struct optee *optee = NULL;
	void *memremaped_shm = NULL;
	unsigned int rpc_param_count;
	struct tee_device *teedev;
	struct tee_context *ctx;
	u32 max_notif_value;
	u32 sec_caps;
	int rc;

	invoke_fn = get_invoke_func(&pdev->dev);
	if (IS_ERR(invoke_fn))
		return PTR_ERR(invoke_fn);

	if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
		pr_warn("api uid mismatch\n");
		return -EINVAL;
	}

	optee_msg_get_os_revision(invoke_fn);

	if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
		pr_warn("api revision mismatch\n");
		return -EINVAL;
	}

	if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps,
					     &max_notif_value,
					     &rpc_param_count)) {
		pr_warn("capabilities mismatch\n");
		return -EINVAL;
	}

	/*
	 * Try to use dynamic shared memory if possible
	 */
	if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		pool = optee_shm_pool_alloc_pages();

	/*
	 * If dynamic shared memory is not available or failed - try static one
	 */
	if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
		pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);

	if (IS_ERR(pool))
		return PTR_ERR(pool);

	optee = kzalloc(sizeof(*optee), GFP_KERNEL);
	if (!optee) {
		rc = -ENOMEM;
		goto err_free_pool;
	}

	optee->ops = &optee_ops;
	optee->smc.invoke_fn = invoke_fn;
	optee->smc.sec_caps = sec_caps;
	optee->rpc_param_count = rpc_param_count;

	teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
		goto err_free_optee;
	}
	optee->teedev = teedev;

	teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
		goto err_unreg_teedev;
	}
	optee->supp_teedev = teedev;

	rc = tee_device_register(optee->teedev);
	if (rc)
		goto err_unreg_supp_teedev;

	rc = tee_device_register(optee->supp_teedev);
	if (rc)
		goto err_unreg_supp_teedev;

	mutex_init(&optee->call_queue.mutex);
	INIT_LIST_HEAD(&optee->call_queue.waiters);
	optee_supp_init(&optee->supp);
	optee->smc.memremaped_shm = memremaped_shm;
	optee->pool = pool;

	platform_set_drvdata(pdev, optee);
	ctx = teedev_open(optee->teedev);
	if (IS_ERR(ctx)) {
		rc = PTR_ERR(ctx);
		goto err_supp_uninit;
	}
	optee->ctx = ctx;
	rc = optee_notif_init(optee, max_notif_value);
	if (rc)
		goto err_close_ctx;

	if (sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
		unsigned int irq;

		rc = platform_get_irq(pdev, 0);
		if (rc < 0) {
			pr_err("platform_get_irq: ret %d\n", rc);
			goto err_notif_uninit;
		}
		irq = rc;

		rc = optee_smc_notif_init_irq(optee, irq);
		if (rc) {
			irq_dispose_mapping(irq);
			goto err_notif_uninit;
		}
		enable_async_notif(optee->smc.invoke_fn);
		pr_info("Asynchronous notifications enabled\n");
	}

	/*
	 * Ensure that there are no pre-existing shm objects before enabling
	 * the shm cache so that there's no chance of receiving an invalid
	 * address during shutdown. This could occur, for example, if we're
	 * kexec booting from an older kernel that did not properly cleanup the
	 * shm cache.
	 */
	optee_disable_unmapped_shm_cache(optee);

	/*
	 * Only enable the shm cache in case we're not able to pass the RPC
	 * arg struct right after the normal arg struct.
	 */
	if (!optee->rpc_param_count)
		optee_enable_shm_cache(optee);

	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		pr_info("dynamic shared memory is enabled\n");

	rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
	if (rc)
		goto err_disable_shm_cache;

	pr_info("initialized driver\n");
	return 0;

err_disable_shm_cache:
	if (!optee->rpc_param_count)
		optee_disable_shm_cache(optee);
	optee_smc_notif_uninit_irq(optee);
	optee_unregister_devices();
err_notif_uninit:
	optee_notif_uninit(optee);
err_close_ctx:
	teedev_close_context(ctx);
err_supp_uninit:
	optee_supp_uninit(&optee->supp);
	mutex_destroy(&optee->call_queue.mutex);
err_unreg_supp_teedev:
	tee_device_unregister(optee->supp_teedev);
err_unreg_teedev:
	tee_device_unregister(optee->teedev);
err_free_optee:
	kfree(optee);
err_free_pool:
	tee_shm_pool_free(pool);
	if (memremaped_shm)
		memunmap(memremaped_shm);
	return rc;
}

static const struct of_device_id optee_dt_match[] = {
	{ .compatible = "linaro,optee-tz" },
	{},
};
MODULE_DEVICE_TABLE(of, optee_dt_match);

static struct platform_driver optee_driver = {
	.probe  = optee_probe,
	.remove = optee_smc_remove,
	.shutdown = optee_shutdown,
	.driver = {
		.name = "optee",
		.of_match_table = optee_dt_match,
	},
};

int optee_smc_abi_register(void)
{
	return platform_driver_register(&optee_driver);
}

void optee_smc_abi_unregister(void)
{
	platform_driver_unregister(&optee_driver);
}