// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2021, Linaro Limited
 * Copyright (c) 2016, EPAM Systems
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irqdomain.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include "optee_private.h"
#include "optee_smc.h"
#include "optee_rpc_cmd.h"
#define CREATE_TRACE_POINTS
#include "optee_trace.h"

/*
 * This file implements the SMC ABI used when communicating with secure world
 * OP-TEE OS via raw SMCs.
 * This file is divided into the following sections:
 * 1. Convert between struct tee_param and struct optee_msg_param
 * 2. Low level support functions to register shared memory in secure world
 * 3. Dynamic shared memory pool based on alloc_pages()
 * 4. Do a normal scheduled call into secure world
 * 5. Asynchronous notification
 * 6. Driver initialization
 */

/*
 * A typical OP-TEE private shm allocation is 224 bytes (argument struct
 * with 6 parameters, needed for open session). So with an alignment of 512
 * we'll waste a bit more than 50%. However, it's only expected that we'll
 * have a handful of these structs allocated at a time. Most memory will
 * be allocated aligned to the page size, so all in all this should scale
 * up and down quite well.
 */
#define OPTEE_MIN_STATIC_POOL_ALIGN	9 /* 512 bytes aligned */

/*
 * 1. Convert between struct tee_param and struct optee_msg_param
 *
 * optee_from_msg_param() and optee_to_msg_param() are the main
 * functions.
 */
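/*
 * The conversion helpers below rely on the INPUT/OUTPUT/INOUT variants
 * being consecutive, identically ordered values in both the
 * OPTEE_MSG_ATTR_TYPE_* and TEE_IOCTL_PARAM_ATTR_TYPE_* namespaces, so
 * a type is translated with plain offset arithmetic. Illustrative
 * example: with attr == OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT,
 *
 *	TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
 *		attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT
 *
 * evaluates to TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT.
 */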
static int from_msg_param_tmp_mem(struct tee_param *p, u32 attr,
				  const struct optee_msg_param *mp)
{
	struct tee_shm *shm;
	phys_addr_t pa;
	int rc;

	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
		  attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
	p->u.memref.size = mp->u.tmem.size;
	shm = (struct tee_shm *)(unsigned long)mp->u.tmem.shm_ref;
	if (!shm) {
		p->u.memref.shm_offs = 0;
		p->u.memref.shm = NULL;
		return 0;
	}

	rc = tee_shm_get_pa(shm, 0, &pa);
	if (rc)
		return rc;

	p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
	p->u.memref.shm = shm;

	return 0;
}

static void from_msg_param_reg_mem(struct tee_param *p, u32 attr,
				   const struct optee_msg_param *mp)
{
	struct tee_shm *shm;

	p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
		  attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	p->u.memref.size = mp->u.rmem.size;
	shm = (struct tee_shm *)(unsigned long)mp->u.rmem.shm_ref;

	if (shm) {
		p->u.memref.shm_offs = mp->u.rmem.offs;
		p->u.memref.shm = shm;
	} else {
		p->u.memref.shm_offs = 0;
		p->u.memref.shm = NULL;
	}
}

/**
 * optee_from_msg_param() - convert from OPTEE_MSG parameters to
 *			    struct tee_param
 * @optee:	main service struct
 * @params:	subsystem internal parameter representation
 * @num_params:	number of elements in the parameter arrays
 * @msg_params:	OPTEE_MSG parameters
 * Returns 0 on success or <0 on failure
 */
static int optee_from_msg_param(struct optee *optee, struct tee_param *params,
				size_t num_params,
				const struct optee_msg_param *msg_params)
{
	int rc;
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_param *p = params + n;
		const struct optee_msg_param *mp = msg_params + n;
		u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;

		switch (attr) {
		case OPTEE_MSG_ATTR_TYPE_NONE:
			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
			memset(&p->u, 0, sizeof(p->u));
			break;
		case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
			optee_from_msg_param_value(p, attr, mp);
			break;
		case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
			rc = from_msg_param_tmp_mem(p, attr, mp);
			if (rc)
				return rc;
			break;
		case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
			from_msg_param_reg_mem(p, attr, mp);
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
				const struct tee_param *p)
{
	int rc;
	phys_addr_t pa;

	mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;

	mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
	mp->u.tmem.size = p->u.memref.size;

	if (!p->u.memref.shm) {
		mp->u.tmem.buf_ptr = 0;
		return 0;
	}

	rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
	if (rc)
		return rc;

	mp->u.tmem.buf_ptr = pa;
	mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
		    OPTEE_MSG_ATTR_CACHE_SHIFT;

	return 0;
}

static int to_msg_param_reg_mem(struct optee_msg_param *mp,
				const struct tee_param *p)
{
	mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;

	mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
	mp->u.rmem.size = p->u.memref.size;
	mp->u.rmem.offs = p->u.memref.shm_offs;
	return 0;
}
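/*
 * Note on the two memref encodings used above: tmem ("temporary" memory)
 * passes a buffer by physical address and is used for buffers from the
 * static (reserved) shared memory carveout, while rmem ("registered"
 * memory) passes an offset into a buffer that has previously been
 * registered with secure world. optee_to_msg_param() below picks the
 * encoding based on tee_shm_is_dynamic().
 */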
/**
 * optee_to_msg_param() - convert from struct tee_param to OPTEE_MSG parameters
 * @optee:	main service struct
 * @msg_params:	OPTEE_MSG parameters
 * @num_params:	number of elements in the parameter arrays
 * @params:	subsystem internal parameter representation
 * Returns 0 on success or <0 on failure
 */
static int optee_to_msg_param(struct optee *optee,
			      struct optee_msg_param *msg_params,
			      size_t num_params, const struct tee_param *params)
{
	int rc;
	size_t n;

	for (n = 0; n < num_params; n++) {
		const struct tee_param *p = params + n;
		struct optee_msg_param *mp = msg_params + n;

		switch (p->attr) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
			mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
			memset(&mp->u, 0, sizeof(mp->u));
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			optee_to_msg_param_value(mp, p);
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			if (tee_shm_is_dynamic(p->u.memref.shm))
				rc = to_msg_param_reg_mem(mp, p);
			else
				rc = to_msg_param_tmp_mem(mp, p);
			if (rc)
				return rc;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

/*
 * 2. Low level support functions to register shared memory in secure world
 *
 * Functions to enable/disable shared memory caching in secure world, that
 * is, lazy freeing of previously allocated shared memory. Freeing is
 * performed when a request has been completed.
 *
 * Functions to register and unregister shared memory both for normal
 * clients and for tee-supplicant.
 */

/**
 * optee_enable_shm_cache() - Enables caching of some shared memory allocation
 *			      in OP-TEE
 * @optee:	main service struct
 */
static void optee_enable_shm_cache(struct optee *optee)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		optee->smc.invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE,
				     0, 0, 0, 0, 0, 0, 0, &res);
		if (res.a0 == OPTEE_SMC_RETURN_OK)
			break;
		optee_cq_wait_for_completion(&optee->call_queue, &w);
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}
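/*
 * Draining the shm cache works one object at a time: each
 * OPTEE_SMC_DISABLE_SHM_CACHE call below returns at most one cached
 * shared memory reference, and OPTEE_SMC_RETURN_ENOTAVAIL signals that
 * the cache is empty. The loop therefore keeps calling until secure
 * world reports that all objects have been freed.
 */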
/**
 * __optee_disable_shm_cache() - Disables caching of some shared memory
 *				 allocation in OP-TEE
 * @optee:	main service struct
 * @is_mapped:	true if the cached shared memory addresses were mapped by this
 *		kernel, are safe to dereference, and should be freed
 */
static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		union {
			struct arm_smccc_res smccc;
			struct optee_smc_disable_shm_cache_result result;
		} res;

		optee->smc.invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE,
				     0, 0, 0, 0, 0, 0, 0, &res.smccc);
		if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
			break; /* All shm's freed */
		if (res.result.status == OPTEE_SMC_RETURN_OK) {
			struct tee_shm *shm;

			/*
			 * Shared memory references that were not mapped by
			 * this kernel must be ignored to prevent a crash.
			 */
			if (!is_mapped)
				continue;

			shm = reg_pair_to_ptr(res.result.shm_upper32,
					      res.result.shm_lower32);
			tee_shm_free(shm);
		} else {
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		}
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}

/**
 * optee_disable_shm_cache() - Disables caching of mapped shared memory
 *			       allocations in OP-TEE
 * @optee:	main service struct
 */
static void optee_disable_shm_cache(struct optee *optee)
{
	return __optee_disable_shm_cache(optee, true);
}

/**
 * optee_disable_unmapped_shm_cache() - Disables caching of shared memory
 *					allocations in OP-TEE which are not
 *					currently mapped
 * @optee:	main service struct
 */
static void optee_disable_unmapped_shm_cache(struct optee *optee)
{
	return __optee_disable_shm_cache(optee, false);
}

#define PAGELIST_ENTRIES_PER_PAGE				\
	((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)

/*
 * The final entry in each pagelist page is a pointer to the next
 * pagelist page.
 */
static size_t get_pages_list_size(size_t num_entries)
{
	int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);

	return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
}

static u64 *optee_allocate_pages_list(size_t num_entries)
{
	return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
}

static void optee_free_pages_list(void *list, size_t num_entries)
{
	free_pages_exact(list, get_pages_list_size(num_entries));
}
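/*
 * Illustrative layout, assuming the 4 KiB OPTEE_MSG_NONCONTIG_PAGE_SIZE
 * used by the ABI: each pagelist page holds 511 u64 page addresses plus
 * one u64 link to the next pagelist page. Registering e.g. 1000 pages
 * therefore needs DIV_ROUND_UP(1000, 511) = 2 pagelist pages:
 *
 *	page 0: [pa 0][pa 1] ... [pa 510][link to page 1]
 *	page 1: [pa 511] ... [pa 999][unused] ... [link unused]
 */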
/**
 * optee_fill_pages_list() - write list of user pages to given shared
 * buffer.
 *
 * @dst: page-aligned buffer where list of pages will be stored
 * @pages: array of pages that represents shared buffer
 * @num_pages: number of entries in @pages
 * @page_offset: offset of user buffer from page start
 *
 * @dst should be big enough to hold list of user page addresses and
 * links to the next pages of buffer
 */
static void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
				  size_t page_offset)
{
	int n = 0;
	phys_addr_t optee_page;
	/*
	 * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
	 * for details.
	 */
	struct {
		u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
		u64 next_page_data;
	} *pages_data;

	/*
	 * Currently OP-TEE uses 4k page size and it does not look like
	 * this will change in the future. On the other hand, there are
	 * no known ARM architectures with page size < 4k.
	 * Thus the next build assert looks redundant. But the following
	 * code heavily relies on this assumption, so it is better to be
	 * safe than sorry.
	 */
	BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	pages_data = (void *)dst;
	/*
	 * If a Linux page is bigger than 4k, and the user buffer offset is
	 * larger than 4k/8k/12k/etc this will skip the first 4k pages,
	 * because they carry no data of relevance for OP-TEE.
	 */
	optee_page = page_to_phys(*pages) +
		round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	while (true) {
		pages_data->pages_list[n++] = optee_page;

		if (n == PAGELIST_ENTRIES_PER_PAGE) {
			pages_data->next_page_data =
				virt_to_phys(pages_data + 1);
			pages_data++;
			n = 0;
		}

		optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
		if (!(optee_page & ~PAGE_MASK)) {
			if (!--num_pages)
				break;
			pages++;
			optee_page = page_to_phys(*pages);
		}
	}
}

static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
			      struct page **pages, size_t num_pages,
			      unsigned long start)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_msg_arg *msg_arg;
	struct tee_shm *shm_arg;
	u64 *pages_list;
	int rc;

	if (!num_pages)
		return -EINVAL;

	rc = optee_check_mem_type(start, num_pages);
	if (rc)
		return rc;

	pages_list = optee_allocate_pages_list(num_pages);
	if (!pages_list)
		return -ENOMEM;

	shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
	if (IS_ERR(shm_arg)) {
		rc = PTR_ERR(shm_arg);
		goto out;
	}

	optee_fill_pages_list(pages_list, pages, num_pages,
			      tee_shm_get_page_offset(shm));

	msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
	msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				OPTEE_MSG_ATTR_NONCONTIG;
	msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
	msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
	/*
	 * In the least bits of msg_arg->params->u.tmem.buf_ptr we
	 * store buffer offset from 4k page, as described in OP-TEE ABI.
	 */
	msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
		(tee_shm_get_page_offset(shm) &
		 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));

	if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
	    msg_arg->ret != TEEC_SUCCESS)
		rc = -EINVAL;

	tee_shm_free(shm_arg);
out:
	optee_free_pages_list(pages_list, num_pages);
	return rc;
}

static int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_msg_arg *msg_arg;
	struct tee_shm *shm_arg;
	int rc = 0;

	shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
	if (IS_ERR(shm_arg))
		return PTR_ERR(shm_arg);

	msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;

	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;

	if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
	    msg_arg->ret != TEEC_SUCCESS)
		rc = -EINVAL;
	tee_shm_free(shm_arg);
	return rc;
}

static int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
				   struct page **pages, size_t num_pages,
				   unsigned long start)
{
	/*
	 * We don't want to register supplicant memory in OP-TEE.
	 * Instead information about it will be passed in RPC code.
	 */
	return optee_check_mem_type(start, num_pages);
}

static int optee_shm_unregister_supp(struct tee_context *ctx,
				     struct tee_shm *shm)
{
	return 0;
}
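/*
 * Illustrative encoding of the OPTEE_MSG_ATTR_NONCONTIG buf_ptr used in
 * optee_shm_register() above (the addresses are made up for the
 * example): with the pagelist at physical address 0x80000000 and a
 * buffer that starts 0x234 bytes into its first 4k page, secure world
 * is passed
 *
 *	buf_ptr = 0x80000000 | (0x234 & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1))
 *		= 0x80000234
 *
 * i.e. the pagelist address with the in-page offset of the buffer in
 * the low bits.
 */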
/*
 * 3. Dynamic shared memory pool based on alloc_pages()
 *
 * Implements an OP-TEE specific shared memory pool which is used
 * when dynamic shared memory is supported by secure world.
 *
 * The main function is optee_shm_pool_alloc_pages().
 */

static int pool_op_alloc(struct tee_shm_pool *pool,
			 struct tee_shm *shm, size_t size, size_t align)
{
	/*
	 * Shared memory private to the OP-TEE driver doesn't need
	 * to be registered with OP-TEE.
	 */
	if (shm->flags & TEE_SHM_PRIV)
		return optee_pool_op_alloc_helper(pool, shm, size, align, NULL);

	return optee_pool_op_alloc_helper(pool, shm, size, align,
					  optee_shm_register);
}

static void pool_op_free(struct tee_shm_pool *pool,
			 struct tee_shm *shm)
{
	if (!(shm->flags & TEE_SHM_PRIV))
		optee_pool_op_free_helper(pool, shm, optee_shm_unregister);
	else
		optee_pool_op_free_helper(pool, shm, NULL);
}

static void pool_op_destroy_pool(struct tee_shm_pool *pool)
{
	kfree(pool);
}

static const struct tee_shm_pool_ops pool_ops = {
	.alloc = pool_op_alloc,
	.free = pool_op_free,
	.destroy_pool = pool_op_destroy_pool,
};

/**
 * optee_shm_pool_alloc_pages() - create page-based allocator pool
 *
 * This pool is used when OP-TEE supports dynamic SHM. In this case
 * command buffers and such are allocated from kernel's own memory.
 */
static struct tee_shm_pool *optee_shm_pool_alloc_pages(void)
{
	struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);

	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->ops = &pool_ops;

	return pool;
}

/*
 * 4. Do a normal scheduled call into secure world
 *
 * The function optee_smc_do_call_with_arg() performs a normal scheduled
 * call into secure world. During this call secure world may request help
 * from normal world using RPCs, Remote Procedure Calls. This includes
 * delivery of non-secure interrupts to, for instance, allow rescheduling
 * of the current task.
 */
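/*
 * Rough shape of the RPC protocol implemented below: a call into secure
 * world returns either a final result or an RPC request encoded in
 * register a0. For an RPC, the handlers below service the request
 * (allocate or free shared memory, deliver a command to the TEE
 * subsystem, or simply let a pending non-secure interrupt be taken) and
 * then resume the secure thread with OPTEE_SMC_CALL_RETURN_FROM_RPC.
 */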
static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
					 struct optee_msg_arg *arg)
{
	struct tee_shm *shm;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	if (arg->num_params != 1 ||
	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
	switch (arg->params[0].u.value.a) {
	case OPTEE_RPC_SHM_TYPE_APPL:
		optee_rpc_cmd_free_suppl(ctx, shm);
		break;
	case OPTEE_RPC_SHM_TYPE_KERNEL:
		tee_shm_free(shm);
		break;
	default:
		/* Return early so the error isn't overwritten below */
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}
	arg->ret = TEEC_SUCCESS;
}

static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
					  struct optee *optee,
					  struct optee_msg_arg *arg,
					  struct optee_call_ctx *call_ctx)
{
	phys_addr_t pa;
	struct tee_shm *shm;
	size_t sz;
	size_t n;

	arg->ret_origin = TEEC_ORIGIN_COMMS;

	if (!arg->num_params ||
	    arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	for (n = 1; n < arg->num_params; n++) {
		if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
			arg->ret = TEEC_ERROR_BAD_PARAMETERS;
			return;
		}
	}

	sz = arg->params[0].u.value.b;
	switch (arg->params[0].u.value.a) {
	case OPTEE_RPC_SHM_TYPE_APPL:
		shm = optee_rpc_cmd_alloc_suppl(ctx, sz);
		break;
	case OPTEE_RPC_SHM_TYPE_KERNEL:
		shm = tee_shm_alloc_priv_buf(optee->ctx, sz);
		break;
	default:
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		return;
	}

	if (IS_ERR(shm)) {
		arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
		return;
	}

	if (tee_shm_get_pa(shm, 0, &pa)) {
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
		goto bad;
	}

	sz = tee_shm_get_size(shm);

	if (tee_shm_is_dynamic(shm)) {
		struct page **pages;
		u64 *pages_list;
		size_t page_num;

		pages = tee_shm_get_pages(shm, &page_num);
		if (!pages || !page_num) {
			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
			goto bad;
		}

		pages_list = optee_allocate_pages_list(page_num);
		if (!pages_list) {
			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
			goto bad;
		}

		call_ctx->pages_list = pages_list;
		call_ctx->num_entries = page_num;

		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				      OPTEE_MSG_ATTR_NONCONTIG;
		/*
		 * In the least bits of u.tmem.buf_ptr we store buffer offset
		 * from 4k page, as described in OP-TEE ABI.
		 */
		arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
			(tee_shm_get_page_offset(shm) &
			 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
		arg->params[0].u.tmem.size = tee_shm_get_size(shm);
		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;

		optee_fill_pages_list(pages_list, pages, page_num,
				      tee_shm_get_page_offset(shm));
	} else {
		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
		arg->params[0].u.tmem.buf_ptr = pa;
		arg->params[0].u.tmem.size = sz;
		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
	}

	arg->ret = TEEC_SUCCESS;
	return;
bad:
	tee_shm_free(shm);
}
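/*
 * The pages_list allocated for an RPC SHM_ALLOC above must stay alive
 * for as long as secure world may dereference it, so it is tracked in
 * the call context rather than freed immediately: free_pages_list()
 * below releases it when a new list replaces it or when the call
 * finally returns (see optee_rpc_finalize_call()).
 */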
697 */ 698 arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) | 699 (tee_shm_get_page_offset(shm) & 700 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1)); 701 arg->params[0].u.tmem.size = tee_shm_get_size(shm); 702 arg->params[0].u.tmem.shm_ref = (unsigned long)shm; 703 704 optee_fill_pages_list(pages_list, pages, page_num, 705 tee_shm_get_page_offset(shm)); 706 } else { 707 arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT; 708 arg->params[0].u.tmem.buf_ptr = pa; 709 arg->params[0].u.tmem.size = sz; 710 arg->params[0].u.tmem.shm_ref = (unsigned long)shm; 711 } 712 713 arg->ret = TEEC_SUCCESS; 714 return; 715 bad: 716 tee_shm_free(shm); 717 } 718 719 static void free_pages_list(struct optee_call_ctx *call_ctx) 720 { 721 if (call_ctx->pages_list) { 722 optee_free_pages_list(call_ctx->pages_list, 723 call_ctx->num_entries); 724 call_ctx->pages_list = NULL; 725 call_ctx->num_entries = 0; 726 } 727 } 728 729 static void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx) 730 { 731 free_pages_list(call_ctx); 732 } 733 734 static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee, 735 struct tee_shm *shm, 736 struct optee_call_ctx *call_ctx) 737 { 738 struct optee_msg_arg *arg; 739 740 arg = tee_shm_get_va(shm, 0); 741 if (IS_ERR(arg)) { 742 pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm); 743 return; 744 } 745 746 switch (arg->cmd) { 747 case OPTEE_RPC_CMD_SHM_ALLOC: 748 free_pages_list(call_ctx); 749 handle_rpc_func_cmd_shm_alloc(ctx, optee, arg, call_ctx); 750 break; 751 case OPTEE_RPC_CMD_SHM_FREE: 752 handle_rpc_func_cmd_shm_free(ctx, arg); 753 break; 754 default: 755 optee_rpc_cmd(ctx, optee, arg); 756 } 757 } 758 759 /** 760 * optee_handle_rpc() - handle RPC from secure world 761 * @ctx: context doing the RPC 762 * @param: value of registers for the RPC 763 * @call_ctx: call context. Preserved during one OP-TEE invocation 764 * 765 * Result of RPC is written back into @param. 766 */ 767 static void optee_handle_rpc(struct tee_context *ctx, 768 struct optee_rpc_param *param, 769 struct optee_call_ctx *call_ctx) 770 { 771 struct tee_device *teedev = ctx->teedev; 772 struct optee *optee = tee_get_drvdata(teedev); 773 struct tee_shm *shm; 774 phys_addr_t pa; 775 776 switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) { 777 case OPTEE_SMC_RPC_FUNC_ALLOC: 778 shm = tee_shm_alloc_priv_buf(optee->ctx, param->a1); 779 if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) { 780 reg_pair_from_64(¶m->a1, ¶m->a2, pa); 781 reg_pair_from_64(¶m->a4, ¶m->a5, 782 (unsigned long)shm); 783 } else { 784 param->a1 = 0; 785 param->a2 = 0; 786 param->a4 = 0; 787 param->a5 = 0; 788 } 789 kmemleak_not_leak(shm); 790 break; 791 case OPTEE_SMC_RPC_FUNC_FREE: 792 shm = reg_pair_to_ptr(param->a1, param->a2); 793 tee_shm_free(shm); 794 break; 795 case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR: 796 /* 797 * A foreign interrupt was raised while secure world was 798 * executing, since they are handled in Linux a dummy RPC is 799 * performed to let Linux take the interrupt through the normal 800 * vector. 
/**
 * optee_smc_do_call_with_arg() - Do an SMC to OP-TEE in secure world
 * @ctx:	calling context
 * @arg:	shared memory holding the message to pass to secure world
 *
 * Does an SMC to OP-TEE in secure world and handles any resulting
 * Remote Procedure Calls (RPC) from OP-TEE.
 *
 * Returns return code from secure world, 0 is OK
 */
static int optee_smc_do_call_with_arg(struct tee_context *ctx,
				      struct tee_shm *arg)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_call_waiter w;
	struct optee_rpc_param param = { };
	struct optee_call_ctx call_ctx = { };
	phys_addr_t parg;
	int rc;

	rc = tee_shm_get_pa(arg, 0, &parg);
	if (rc)
		return rc;

	param.a0 = OPTEE_SMC_CALL_WITH_ARG;
	reg_pair_from_64(&param.a1, &param.a2, parg);
	/* Initialize waiter */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		trace_optee_invoke_fn_begin(&param);
		optee->smc.invoke_fn(param.a0, param.a1, param.a2, param.a3,
				     param.a4, param.a5, param.a6, param.a7,
				     &res);
		trace_optee_invoke_fn_end(&param, &res);

		if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
			/*
			 * Out of threads in secure world, wait for a thread
			 * to become available.
			 */
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
			cond_resched();
			param.a0 = res.a0;
			param.a1 = res.a1;
			param.a2 = res.a2;
			param.a3 = res.a3;
			optee_handle_rpc(ctx, &param, &call_ctx);
		} else {
			rc = res.a0;
			break;
		}
	}

	optee_rpc_finalize_call(&call_ctx);
	/*
	 * We're done with our thread in secure world, if there are any
	 * thread waiters wake up one.
	 */
	optee_cq_wait_final(&optee->call_queue, &w);

	return rc;
}

static int simple_call_with_arg(struct tee_context *ctx, u32 cmd)
{
	struct optee_msg_arg *msg_arg;
	struct tee_shm *shm;

	shm = optee_get_msg_arg(ctx, 0, &msg_arg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = cmd;
	optee_smc_do_call_with_arg(ctx, shm);

	tee_shm_free(shm);
	return 0;
}

static int optee_smc_do_bottom_half(struct tee_context *ctx)
{
	return simple_call_with_arg(ctx, OPTEE_MSG_CMD_DO_BOTTOM_HALF);
}

static int optee_smc_stop_async_notif(struct tee_context *ctx)
{
	return simple_call_with_arg(ctx, OPTEE_MSG_CMD_STOP_ASYNC_NOTIF);
}

/*
 * 5. Asynchronous notification
 */
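/*
 * How asynchronous notification works: when secure world has a pending
 * notification it raises the interrupt registered below. The hard IRQ
 * handler polls OPTEE_SMC_GET_ASYNC_NOTIF_VALUE until no more values
 * are pending. The special value
 * OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF is not forwarded to
 * optee_notif_send(); instead it wakes the IRQ thread, which enters
 * secure world with OPTEE_MSG_CMD_DO_BOTTOM_HALF so OP-TEE can do work
 * that needs a normal scheduled call context.
 */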
static u32 get_async_notif_value(optee_invoke_fn *invoke_fn, bool *value_valid,
				 bool *value_pending)
{
	struct arm_smccc_res res;

	invoke_fn(OPTEE_SMC_GET_ASYNC_NOTIF_VALUE, 0, 0, 0, 0, 0, 0, 0, &res);

	if (res.a0)
		return 0;
	*value_valid = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID);
	*value_pending = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING);
	return res.a1;
}

static irqreturn_t notif_irq_handler(int irq, void *dev_id)
{
	struct optee *optee = dev_id;
	bool do_bottom_half = false;
	bool value_valid;
	bool value_pending;
	u32 value;

	do {
		value = get_async_notif_value(optee->smc.invoke_fn,
					      &value_valid, &value_pending);
		if (!value_valid)
			break;

		if (value == OPTEE_SMC_ASYNC_NOTIF_VALUE_DO_BOTTOM_HALF)
			do_bottom_half = true;
		else
			optee_notif_send(optee, value);
	} while (value_pending);

	if (do_bottom_half)
		return IRQ_WAKE_THREAD;
	return IRQ_HANDLED;
}

static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id)
{
	struct optee *optee = dev_id;

	optee_smc_do_bottom_half(optee->ctx);

	return IRQ_HANDLED;
}

static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
{
	int rc;

	rc = request_threaded_irq(irq, notif_irq_handler,
				  notif_irq_thread_fn,
				  0, "optee_notification", optee);
	if (rc)
		return rc;

	optee->smc.notif_irq = irq;

	return 0;
}

static void optee_smc_notif_uninit_irq(struct optee *optee)
{
	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
		optee_smc_stop_async_notif(optee->ctx);
		if (optee->smc.notif_irq) {
			free_irq(optee->smc.notif_irq, optee);
			irq_dispose_mapping(optee->smc.notif_irq);
		}
	}
}

/*
 * 6. Driver initialization
 *
 * During driver initialization, secure world is probed to find out which
 * features it supports so the driver can be initialized with a matching
 * configuration. This involves for instance support for dynamic shared
 * memory instead of a static memory carveout.
 */
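/*
 * The capabilities reported to user space via TEE_IOC_VERSION below are
 * derived from what secure world advertised at probe time:
 * TEE_GEN_CAP_REG_MEM is only set when OP-TEE supports dynamic shared
 * memory, and TEE_GEN_CAP_MEMREF_NULL only when NULL memory references
 * are supported.
 */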
static void optee_get_version(struct tee_device *teedev,
			      struct tee_ioctl_version_data *vers)
{
	struct tee_ioctl_version_data v = {
		.impl_id = TEE_IMPL_ID_OPTEE,
		.impl_caps = TEE_OPTEE_CAP_TZ,
		.gen_caps = TEE_GEN_CAP_GP,
	};
	struct optee *optee = tee_get_drvdata(teedev);

	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		v.gen_caps |= TEE_GEN_CAP_REG_MEM;
	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL)
		v.gen_caps |= TEE_GEN_CAP_MEMREF_NULL;
	*vers = v;
}

static int optee_smc_open(struct tee_context *ctx)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	u32 sec_caps = optee->smc.sec_caps;

	return optee_open(ctx, sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL);
}

static const struct tee_driver_ops optee_clnt_ops = {
	.get_version = optee_get_version,
	.open = optee_smc_open,
	.release = optee_release,
	.open_session = optee_open_session,
	.close_session = optee_close_session,
	.invoke_func = optee_invoke_func,
	.cancel_req = optee_cancel_req,
	.shm_register = optee_shm_register,
	.shm_unregister = optee_shm_unregister,
};

static const struct tee_desc optee_clnt_desc = {
	.name = DRIVER_NAME "-clnt",
	.ops = &optee_clnt_ops,
	.owner = THIS_MODULE,
};

static const struct tee_driver_ops optee_supp_ops = {
	.get_version = optee_get_version,
	.open = optee_smc_open,
	.release = optee_release_supp,
	.supp_recv = optee_supp_recv,
	.supp_send = optee_supp_send,
	.shm_register = optee_shm_register_supp,
	.shm_unregister = optee_shm_unregister_supp,
};

static const struct tee_desc optee_supp_desc = {
	.name = DRIVER_NAME "-supp",
	.ops = &optee_supp_ops,
	.owner = THIS_MODULE,
	.flags = TEE_DESC_PRIVILEGED,
};

static const struct optee_ops optee_ops = {
	.do_call_with_arg = optee_smc_do_call_with_arg,
	.to_msg_param = optee_to_msg_param,
	.from_msg_param = optee_from_msg_param,
};

static int enable_async_notif(optee_invoke_fn *invoke_fn)
{
	struct arm_smccc_res res;

	invoke_fn(OPTEE_SMC_ENABLE_ASYNC_NOTIF, 0, 0, 0, 0, 0, 0, 0, &res);

	if (res.a0)
		return -EINVAL;
	return 0;
}

static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
{
	struct arm_smccc_res res;

	invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);

	if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
	    res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
		return true;
	return false;
}

static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_call_get_os_revision_result result;
	} res = {
		.result = {
			.build_id = 0
		}
	};

	invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0,
		  &res.smccc);

	if (res.result.build_id)
		pr_info("revision %lu.%lu (%08lx)", res.result.major,
			res.result.minor, res.result.build_id);
	else
		pr_info("revision %lu.%lu", res.result.major, res.result.minor);
}

static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_calls_revision_result result;
	} res;

	invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);

	if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
	    (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
		return true;
	return false;
}
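/*
 * Capability exchange is a two-way negotiation: normal world reports
 * whether it runs on a single CPU (OPTEE_SMC_NSEC_CAP_UNIPROCESSOR) and
 * secure world answers with its OPTEE_SMC_SEC_CAP_* bits, which drive
 * the choice of shared memory model and the asynchronous notification
 * setup during probe.
 */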
static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
					    u32 *sec_caps, u32 *max_notif_value)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_exchange_capabilities_result result;
	} res;
	u32 a1 = 0;

	/*
	 * TODO This isn't enough to tell if it's a UP system (from the
	 * kernel point of view) or not, is_smp() returns the information
	 * needed, but can't be called directly from here.
	 */
	if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
		a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;

	invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
		  &res.smccc);

	if (res.result.status != OPTEE_SMC_RETURN_OK)
		return false;

	*sec_caps = res.result.capabilities;
	if (*sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF)
		*max_notif_value = res.result.max_notif_value;
	else
		*max_notif_value = OPTEE_DEFAULT_MAX_NOTIF_VALUE;

	return true;
}

static struct tee_shm_pool *
optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_get_shm_config_result result;
	} res;
	unsigned long vaddr;
	phys_addr_t paddr;
	size_t size;
	phys_addr_t begin;
	phys_addr_t end;
	void *va;
	void *rc;

	invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
	if (res.result.status != OPTEE_SMC_RETURN_OK) {
		pr_err("static shm service not available\n");
		return ERR_PTR(-ENOENT);
	}

	if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
		pr_err("only normal cached shared memory supported\n");
		return ERR_PTR(-EINVAL);
	}

	begin = roundup(res.result.start, PAGE_SIZE);
	end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
	paddr = begin;
	size = end - begin;

	va = memremap(paddr, size, MEMREMAP_WB);
	if (!va) {
		pr_err("shared memory memremap failed\n");
		return ERR_PTR(-EINVAL);
	}
	vaddr = (unsigned long)va;

	rc = tee_shm_pool_alloc_res_mem(vaddr, paddr, size,
					OPTEE_MIN_STATIC_POOL_ALIGN);
	if (IS_ERR(rc))
		memunmap(va);
	else
		*memremaped_shm = va;

	return rc;
}

/* Simple wrapper functions to be able to use a function pointer */
static void optee_smccc_smc(unsigned long a0, unsigned long a1,
			    unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5,
			    unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
{
	arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}

static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
			    unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5,
			    unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
{
	arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}

static optee_invoke_fn *get_invoke_func(struct device *dev)
{
	const char *method;

	pr_info("probing for conduit method.\n");

	if (device_property_read_string(dev, "method", &method)) {
		pr_warn("missing \"method\" property\n");
		return ERR_PTR(-ENXIO);
	}

	if (!strcmp("hvc", method))
		return optee_smccc_hvc;
	else if (!strcmp("smc", method))
		return optee_smccc_smc;

	pr_warn("invalid \"method\" property: %s\n", method);
	return ERR_PTR(-EINVAL);
}
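/*
 * The conduit is selected by the "method" device property. A minimal
 * illustrative device tree node (the values shown are an example, not a
 * requirement) could look like:
 *
 *	firmware {
 *		optee {
 *			compatible = "linaro,optee-tz";
 *			method = "smc";
 *		};
 *	};
 *
 * with "hvc" instead of "smc" when OP-TEE is reached through a
 * hypervisor call.
 */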
(!strcmp("smc", method)) 1233 return optee_smccc_smc; 1234 1235 pr_warn("invalid \"method\" property: %s\n", method); 1236 return ERR_PTR(-EINVAL); 1237 } 1238 1239 /* optee_remove - Device Removal Routine 1240 * @pdev: platform device information struct 1241 * 1242 * optee_remove is called by platform subsystem to alert the driver 1243 * that it should release the device 1244 */ 1245 static int optee_smc_remove(struct platform_device *pdev) 1246 { 1247 struct optee *optee = platform_get_drvdata(pdev); 1248 1249 /* 1250 * Ask OP-TEE to free all cached shared memory objects to decrease 1251 * reference counters and also avoid wild pointers in secure world 1252 * into the old shared memory range. 1253 */ 1254 optee_disable_shm_cache(optee); 1255 1256 optee_smc_notif_uninit_irq(optee); 1257 1258 optee_remove_common(optee); 1259 1260 if (optee->smc.memremaped_shm) 1261 memunmap(optee->smc.memremaped_shm); 1262 1263 kfree(optee); 1264 1265 return 0; 1266 } 1267 1268 /* optee_shutdown - Device Removal Routine 1269 * @pdev: platform device information struct 1270 * 1271 * platform_shutdown is called by the platform subsystem to alert 1272 * the driver that a shutdown, reboot, or kexec is happening and 1273 * device must be disabled. 1274 */ 1275 static void optee_shutdown(struct platform_device *pdev) 1276 { 1277 optee_disable_shm_cache(platform_get_drvdata(pdev)); 1278 } 1279 1280 static int optee_probe(struct platform_device *pdev) 1281 { 1282 optee_invoke_fn *invoke_fn; 1283 struct tee_shm_pool *pool = ERR_PTR(-EINVAL); 1284 struct optee *optee = NULL; 1285 void *memremaped_shm = NULL; 1286 struct tee_device *teedev; 1287 struct tee_context *ctx; 1288 u32 max_notif_value; 1289 u32 sec_caps; 1290 int rc; 1291 1292 invoke_fn = get_invoke_func(&pdev->dev); 1293 if (IS_ERR(invoke_fn)) 1294 return PTR_ERR(invoke_fn); 1295 1296 if (!optee_msg_api_uid_is_optee_api(invoke_fn)) { 1297 pr_warn("api uid mismatch\n"); 1298 return -EINVAL; 1299 } 1300 1301 optee_msg_get_os_revision(invoke_fn); 1302 1303 if (!optee_msg_api_revision_is_compatible(invoke_fn)) { 1304 pr_warn("api revision mismatch\n"); 1305 return -EINVAL; 1306 } 1307 1308 if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps, 1309 &max_notif_value)) { 1310 pr_warn("capabilities mismatch\n"); 1311 return -EINVAL; 1312 } 1313 1314 /* 1315 * Try to use dynamic shared memory if possible 1316 */ 1317 if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) 1318 pool = optee_shm_pool_alloc_pages(); 1319 1320 /* 1321 * If dynamic shared memory is not available or failed - try static one 1322 */ 1323 if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM)) 1324 pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm); 1325 1326 if (IS_ERR(pool)) 1327 return PTR_ERR(pool); 1328 1329 optee = kzalloc(sizeof(*optee), GFP_KERNEL); 1330 if (!optee) { 1331 rc = -ENOMEM; 1332 goto err_free_pool; 1333 } 1334 1335 optee->ops = &optee_ops; 1336 optee->smc.invoke_fn = invoke_fn; 1337 optee->smc.sec_caps = sec_caps; 1338 1339 teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee); 1340 if (IS_ERR(teedev)) { 1341 rc = PTR_ERR(teedev); 1342 goto err_free_optee; 1343 } 1344 optee->teedev = teedev; 1345 1346 teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee); 1347 if (IS_ERR(teedev)) { 1348 rc = PTR_ERR(teedev); 1349 goto err_unreg_teedev; 1350 } 1351 optee->supp_teedev = teedev; 1352 1353 rc = tee_device_register(optee->teedev); 1354 if (rc) 1355 goto err_unreg_supp_teedev; 1356 1357 rc = tee_device_register(optee->supp_teedev); 1358 if 
static int optee_probe(struct platform_device *pdev)
{
	optee_invoke_fn *invoke_fn;
	struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
	struct optee *optee = NULL;
	void *memremaped_shm = NULL;
	struct tee_device *teedev;
	struct tee_context *ctx;
	u32 max_notif_value;
	u32 sec_caps;
	int rc;

	invoke_fn = get_invoke_func(&pdev->dev);
	if (IS_ERR(invoke_fn))
		return PTR_ERR(invoke_fn);

	if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
		pr_warn("api uid mismatch\n");
		return -EINVAL;
	}

	optee_msg_get_os_revision(invoke_fn);

	if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
		pr_warn("api revision mismatch\n");
		return -EINVAL;
	}

	if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps,
					     &max_notif_value)) {
		pr_warn("capabilities mismatch\n");
		return -EINVAL;
	}

	/*
	 * Try to use dynamic shared memory if possible
	 */
	if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		pool = optee_shm_pool_alloc_pages();

	/*
	 * If dynamic shared memory is not available or failed - try static one
	 */
	if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
		pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);

	if (IS_ERR(pool))
		return PTR_ERR(pool);

	optee = kzalloc(sizeof(*optee), GFP_KERNEL);
	if (!optee) {
		rc = -ENOMEM;
		goto err_free_pool;
	}

	optee->ops = &optee_ops;
	optee->smc.invoke_fn = invoke_fn;
	optee->smc.sec_caps = sec_caps;

	teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
		goto err_free_optee;
	}
	optee->teedev = teedev;

	teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
		goto err_unreg_teedev;
	}
	optee->supp_teedev = teedev;

	rc = tee_device_register(optee->teedev);
	if (rc)
		goto err_unreg_supp_teedev;

	rc = tee_device_register(optee->supp_teedev);
	if (rc)
		goto err_unreg_supp_teedev;

	mutex_init(&optee->call_queue.mutex);
	INIT_LIST_HEAD(&optee->call_queue.waiters);
	optee_supp_init(&optee->supp);
	optee->smc.memremaped_shm = memremaped_shm;
	optee->pool = pool;

	platform_set_drvdata(pdev, optee);
	ctx = teedev_open(optee->teedev);
	if (IS_ERR(ctx)) {
		rc = PTR_ERR(ctx);
		goto err_supp_uninit;
	}
	optee->ctx = ctx;
	rc = optee_notif_init(optee, max_notif_value);
	if (rc)
		goto err_close_ctx;

	if (sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
		unsigned int irq;

		rc = platform_get_irq(pdev, 0);
		if (rc < 0) {
			pr_err("platform_get_irq: ret %d\n", rc);
			goto err_notif_uninit;
		}
		irq = rc;

		rc = optee_smc_notif_init_irq(optee, irq);
		if (rc) {
			irq_dispose_mapping(irq);
			goto err_notif_uninit;
		}
		enable_async_notif(optee->smc.invoke_fn);
		pr_info("Asynchronous notifications enabled\n");
	}

	/*
	 * Ensure that there are no pre-existing shm objects before enabling
	 * the shm cache so that there's no chance of receiving an invalid
	 * address during shutdown. This could occur, for example, if we're
	 * kexec booting from an older kernel that did not properly clean up
	 * the shm cache.
	 */
	optee_disable_unmapped_shm_cache(optee);

	optee_enable_shm_cache(optee);

	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
		pr_info("dynamic shared memory is enabled\n");

	rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
	if (rc)
		goto err_disable_shm_cache;

	pr_info("initialized driver\n");
	return 0;

err_disable_shm_cache:
	optee_disable_shm_cache(optee);
	optee_smc_notif_uninit_irq(optee);
	optee_unregister_devices();
err_notif_uninit:
	optee_notif_uninit(optee);
err_close_ctx:
	teedev_close_context(ctx);
err_supp_uninit:
	optee_supp_uninit(&optee->supp);
	mutex_destroy(&optee->call_queue.mutex);
err_unreg_supp_teedev:
	tee_device_unregister(optee->supp_teedev);
err_unreg_teedev:
	tee_device_unregister(optee->teedev);
err_free_optee:
	kfree(optee);
err_free_pool:
	tee_shm_pool_free(pool);
	if (memremaped_shm)
		memunmap(memremaped_shm);
	return rc;
}

static const struct of_device_id optee_dt_match[] = {
	{ .compatible = "linaro,optee-tz" },
	{},
};
MODULE_DEVICE_TABLE(of, optee_dt_match);

static struct platform_driver optee_driver = {
	.probe = optee_probe,
	.remove = optee_smc_remove,
	.shutdown = optee_shutdown,
	.driver = {
		.name = "optee",
		.of_match_table = optee_dt_match,
	},
};

int optee_smc_abi_register(void)
{
	return platform_driver_register(&optee_driver);
}

void optee_smc_abi_unregister(void)
{
	platform_driver_unregister(&optee_driver);
}