// SPDX-License-Identifier: GPL-2.0-only
/*
 * FF-A v1.0 proxy to filter out invalid memory-sharing SMC calls issued by
 * the host. FF-A is a slightly more palatable abbreviation of "Arm Firmware
 * Framework for Arm A-profile", which is specified by Arm in document
 * number DEN0077.
 *
 * Copyright (C) 2022 - Google LLC
 * Author: Andrew Walbran <qwandor@google.com>
 *
 * This driver hooks into the SMC trapping logic for the host and intercepts
 * all calls falling within the FF-A range. Each call is either:
 *
 *	- Forwarded on unmodified to the SPMD at EL3
 *	- Rejected as "unsupported"
 *	- Accompanied by a host stage-2 page-table check/update and reissued
 *
 * Consequently, any attempts by the host to make guest memory pages
 * accessible to the secure world using FF-A will be detected either here
 * (in the case that the memory is already owned by the guest) or during
 * donation to the guest (in the case that the memory was previously shared
 * with the secure world).
 *
 * To allow the rolling-back of page-table updates and FF-A calls in the
 * event of failure, operations involving the RXTX buffers are locked for
 * the duration and are therefore serialised.
 */

#include <linux/arm-smccc.h>
#include <linux/arm_ffa.h>
#include <asm/kvm_pkvm.h>

#include <nvhe/ffa.h>
#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
#include <nvhe/trap_handler.h>
#include <nvhe/spinlock.h>

/*
 * "ID value 0 must be returned at the Non-secure physical FF-A instance"
 * We share this ID with the host.
 */
#define HOST_FFA_ID	0

/*
 * A buffer to hold the maximum descriptor size we can see from the host,
 * which is required when the SPMD returns a fragmented FFA_MEM_RETRIEVE_RESP
 * when resolving the handle on the reclaim path.
 */
struct kvm_ffa_descriptor_buffer {
	void *buf;
	size_t len;
};

static struct kvm_ffa_descriptor_buffer ffa_desc_buf;

struct kvm_ffa_buffers {
	hyp_spinlock_t lock;
	void *tx;
	void *rx;
};

/*
 * Note that we don't currently lock these buffers explicitly, instead
 * relying on the locking of the host FFA buffers as we only have one
 * client.
 */
static struct kvm_ffa_buffers hyp_buffers;
static struct kvm_ffa_buffers host_buffers;

static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno)
{
	*res = (struct arm_smccc_res) {
		.a0 = FFA_ERROR,
		.a2 = ffa_errno,
	};
}

static void ffa_to_smccc_res_prop(struct arm_smccc_res *res, int ret, u64 prop)
{
	if (ret == FFA_RET_SUCCESS) {
		*res = (struct arm_smccc_res) { .a0 = FFA_SUCCESS,
						.a2 = prop };
	} else {
		ffa_to_smccc_error(res, ret);
	}
}

static void ffa_to_smccc_res(struct arm_smccc_res *res, int ret)
{
	ffa_to_smccc_res_prop(res, ret, 0);
}

static void ffa_set_retval(struct kvm_cpu_context *ctxt,
			   struct arm_smccc_res *res)
{
	cpu_reg(ctxt, 0) = res->a0;
	cpu_reg(ctxt, 1) = res->a1;
	cpu_reg(ctxt, 2) = res->a2;
	cpu_reg(ctxt, 3) = res->a3;
}

static bool is_ffa_call(u64 func_id)
{
	return ARM_SMCCC_IS_FAST_CALL(func_id) &&
	       ARM_SMCCC_OWNER_NUM(func_id) == ARM_SMCCC_OWNER_STANDARD &&
	       ARM_SMCCC_FUNC_NUM(func_id) >= FFA_MIN_FUNC_NUM &&
	       ARM_SMCCC_FUNC_NUM(func_id) <= FFA_MAX_FUNC_NUM;
}

static int ffa_map_hyp_buffers(u64 ffa_page_count)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_FN64_RXTX_MAP,
			  hyp_virt_to_phys(hyp_buffers.tx),
			  hyp_virt_to_phys(hyp_buffers.rx),
			  ffa_page_count,
			  0, 0, 0, 0,
			  &res);

	return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
}

static int ffa_unmap_hyp_buffers(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_RXTX_UNMAP,
			  HOST_FFA_ID,
			  0, 0, 0, 0, 0, 0,
			  &res);

	return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
}

static void ffa_mem_frag_tx(struct arm_smccc_res *res, u32 handle_lo,
			    u32 handle_hi, u32 fraglen, u32 endpoint_id)
{
	arm_smccc_1_1_smc(FFA_MEM_FRAG_TX,
			  handle_lo, handle_hi, fraglen, endpoint_id,
			  0, 0, 0,
			  res);
}

static void ffa_mem_frag_rx(struct arm_smccc_res *res, u32 handle_lo,
			    u32 handle_hi, u32 fragoff)
{
	arm_smccc_1_1_smc(FFA_MEM_FRAG_RX,
			  handle_lo, handle_hi, fragoff, HOST_FFA_ID,
			  0, 0, 0,
			  res);
}

static void ffa_mem_xfer(struct arm_smccc_res *res, u64 func_id, u32 len,
			 u32 fraglen)
{
	arm_smccc_1_1_smc(func_id, len, fraglen,
			  0, 0, 0, 0, 0,
			  res);
}

static void ffa_mem_reclaim(struct arm_smccc_res *res, u32 handle_lo,
			    u32 handle_hi, u32 flags)
{
	arm_smccc_1_1_smc(FFA_MEM_RECLAIM,
			  handle_lo, handle_hi, flags,
			  0, 0, 0, 0,
			  res);
}

static void ffa_retrieve_req(struct arm_smccc_res *res, u32 len)
{
	arm_smccc_1_1_smc(FFA_FN64_MEM_RETRIEVE_REQ,
			  len, len,
			  0, 0, 0, 0, 0,
			  res);
}

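/*
 * Handle the host registering its RXTX buffer pair. The hypervisor's own
 * mailboxes are mapped with the SPMD first, and the host's pages are then
 * shared with and pinned by the hypervisor so that descriptors can be
 * copied to and from them safely. Note that the host's buffers are never
 * handed to EL3 directly: the SPMD only ever sees the hypervisor's
 * mailboxes.
 */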
static void do_ffa_rxtx_map(struct arm_smccc_res *res,
			    struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(phys_addr_t, tx, ctxt, 1);
	DECLARE_REG(phys_addr_t, rx, ctxt, 2);
	DECLARE_REG(u32, npages, ctxt, 3);
	int ret = 0;
	void *rx_virt, *tx_virt;

	if (npages != (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) / FFA_PAGE_SIZE) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	if (!PAGE_ALIGNED(tx) || !PAGE_ALIGNED(rx)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	if (host_buffers.tx) {
		ret = FFA_RET_DENIED;
		goto out_unlock;
	}

	/*
	 * Map our hypervisor buffers into the SPMD before mapping and
	 * pinning the host buffers in our own address space.
	 */
	ret = ffa_map_hyp_buffers(npages);
	if (ret)
		goto out_unlock;

	ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(tx));
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unmap;
	}

	ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(rx));
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unshare_tx;
	}

	tx_virt = hyp_phys_to_virt(tx);
	ret = hyp_pin_shared_mem(tx_virt, tx_virt + 1);
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unshare_rx;
	}

	rx_virt = hyp_phys_to_virt(rx);
	ret = hyp_pin_shared_mem(rx_virt, rx_virt + 1);
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unpin_tx;
	}

	host_buffers.tx = tx_virt;
	host_buffers.rx = rx_virt;

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	ffa_to_smccc_res(res, ret);
	return;

err_unpin_tx:
	hyp_unpin_shared_mem(tx_virt, tx_virt + 1);
err_unshare_rx:
	__pkvm_host_unshare_hyp(hyp_phys_to_pfn(rx));
err_unshare_tx:
	__pkvm_host_unshare_hyp(hyp_phys_to_pfn(tx));
err_unmap:
	ffa_unmap_hyp_buffers();
	goto out_unlock;
}

static void do_ffa_rxtx_unmap(struct arm_smccc_res *res,
			      struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, id, ctxt, 1);
	int ret = 0;

	if (id != HOST_FFA_ID) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.tx) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	hyp_unpin_shared_mem(host_buffers.tx, host_buffers.tx + 1);
	WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.tx)));
	host_buffers.tx = NULL;

	hyp_unpin_shared_mem(host_buffers.rx, host_buffers.rx + 1);
	WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.rx)));
	host_buffers.rx = NULL;

	ffa_unmap_hyp_buffers();

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	ffa_to_smccc_res(res, ret);
}

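/*
 * Constituent ranges in an FF-A memory descriptor are expressed in units
 * of FFA_PAGE_SIZE (4KiB), whereas the host stage-2 is managed in units of
 * PAGE_SIZE. The helpers below therefore insist that each range covers a
 * whole number of host pages before touching the page-table: e.g. with a
 * 64KiB kernel page size, pg_cnt must be a multiple of 16, and a range
 * with pg_cnt == 32 maps to two host pages. The number of ranges
 * successfully processed is returned so that callers can roll back.
 */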
static u32 __ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
				   u32 nranges)
{
	u32 i;

	for (i = 0; i < nranges; ++i) {
		struct ffa_mem_region_addr_range *range = &ranges[i];
		u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
		u64 pfn = hyp_phys_to_pfn(range->address);

		if (!PAGE_ALIGNED(sz))
			break;

		if (__pkvm_host_share_ffa(pfn, sz / PAGE_SIZE))
			break;
	}

	return i;
}

static u32 __ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
				     u32 nranges)
{
	u32 i;

	for (i = 0; i < nranges; ++i) {
		struct ffa_mem_region_addr_range *range = &ranges[i];
		u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
		u64 pfn = hyp_phys_to_pfn(range->address);

		if (!PAGE_ALIGNED(sz))
			break;

		if (__pkvm_host_unshare_ffa(pfn, sz / PAGE_SIZE))
			break;
	}

	return i;
}

static int ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
				 u32 nranges)
{
	u32 nshared = __ffa_host_share_ranges(ranges, nranges);
	int ret = 0;

	if (nshared != nranges) {
		WARN_ON(__ffa_host_unshare_ranges(ranges, nshared) != nshared);
		ret = FFA_RET_DENIED;
	}

	return ret;
}

static int ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
				   u32 nranges)
{
	u32 nunshared = __ffa_host_unshare_ranges(ranges, nranges);
	int ret = 0;

	if (nunshared != nranges) {
		WARN_ON(__ffa_host_share_ranges(ranges, nunshared) != nunshared);
		ret = FFA_RET_DENIED;
	}

	return ret;
}

static void do_ffa_mem_frag_tx(struct arm_smccc_res *res,
			       struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, handle_lo, ctxt, 1);
	DECLARE_REG(u32, handle_hi, ctxt, 2);
	DECLARE_REG(u32, fraglen, ctxt, 3);
	DECLARE_REG(u32, endpoint_id, ctxt, 4);
	struct ffa_mem_region_addr_range *buf;
	int ret = FFA_RET_INVALID_PARAMETERS;
	u32 nr_ranges;

	if (fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)
		goto out;

	if (fraglen % sizeof(*buf))
		goto out;

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.tx)
		goto out_unlock;

	buf = hyp_buffers.tx;
	memcpy(buf, host_buffers.tx, fraglen);
	nr_ranges = fraglen / sizeof(*buf);

	ret = ffa_host_share_ranges(buf, nr_ranges);
	if (ret) {
		/*
		 * We're effectively aborting the transaction, so we need
		 * to restore the global state back to what it was prior to
		 * transmission of the first fragment.
		 */
		ffa_mem_reclaim(res, handle_lo, handle_hi, 0);
		WARN_ON(res->a0 != FFA_SUCCESS);
		goto out_unlock;
	}

	ffa_mem_frag_tx(res, handle_lo, handle_hi, fraglen, endpoint_id);
	if (res->a0 != FFA_SUCCESS && res->a0 != FFA_MEM_FRAG_RX)
		WARN_ON(ffa_host_unshare_ranges(buf, nr_ranges));

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	if (ret)
		ffa_to_smccc_res(res, ret);

	/*
	 * If for any reason this did not succeed, we're in trouble as we have
	 * now lost the content of the previous fragments and we can't roll
	 * back the host stage-2 changes. The pages previously marked as
	 * shared will remain stuck in that state forever, preventing the host
	 * from sharing/donating them again and possibly leading to subsequent
	 * failures, but this will not compromise confidentiality.
	 */
	return;
}

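/*
 * Handle the first (and possibly only) fragment of an FFA_MEM_SHARE or
 * FFA_MEM_LEND transaction from the host. The transmit descriptor is a
 * struct ffa_mem_region with a single endpoint entry whose composite_off
 * field points at a struct ffa_composite_mem_region, which is followed by
 * the constituent address ranges. Those ranges are marked as shared in the
 * host stage-2 before the call is reissued from the hypervisor's own TX
 * buffer, and are unshared again if the SPMD rejects the transfer.
 */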
static void __do_ffa_mem_xfer(const u64 func_id,
			      struct arm_smccc_res *res,
			      struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, len, ctxt, 1);
	DECLARE_REG(u32, fraglen, ctxt, 2);
	DECLARE_REG(u64, addr_mbz, ctxt, 3);
	DECLARE_REG(u32, npages_mbz, ctxt, 4);
	struct ffa_composite_mem_region *reg;
	struct ffa_mem_region *buf;
	u32 offset, nr_ranges;
	int ret = 0;

	if (addr_mbz || npages_mbz || fraglen > len ||
	    fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	if (fraglen < sizeof(struct ffa_mem_region) +
		      sizeof(struct ffa_mem_region_attributes)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.tx) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	if (len > ffa_desc_buf.len) {
		ret = FFA_RET_NO_MEMORY;
		goto out_unlock;
	}

	buf = hyp_buffers.tx;
	memcpy(buf, host_buffers.tx, fraglen);

	offset = buf->ep_mem_access[0].composite_off;
	if (!offset || buf->ep_count != 1 || buf->sender_id != HOST_FFA_ID) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	if (fraglen < offset + sizeof(struct ffa_composite_mem_region)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	reg = (void *)buf + offset;
	nr_ranges = ((void *)buf + fraglen) - (void *)reg->constituents;
	if (nr_ranges % sizeof(reg->constituents[0])) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	nr_ranges /= sizeof(reg->constituents[0]);
	ret = ffa_host_share_ranges(reg->constituents, nr_ranges);
	if (ret)
		goto out_unlock;

	ffa_mem_xfer(res, func_id, len, fraglen);
	if (fraglen != len) {
		if (res->a0 != FFA_MEM_FRAG_RX)
			goto err_unshare;

		if (res->a3 != fraglen)
			goto err_unshare;
	} else if (res->a0 != FFA_SUCCESS) {
		goto err_unshare;
	}

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	if (ret)
		ffa_to_smccc_res(res, ret);
	return;

err_unshare:
	WARN_ON(ffa_host_unshare_ranges(reg->constituents, nr_ranges));
	goto out_unlock;
}

#define do_ffa_mem_xfer(fid, res, ctxt)				\
	do {							\
		BUILD_BUG_ON((fid) != FFA_FN64_MEM_SHARE &&	\
			     (fid) != FFA_FN64_MEM_LEND);	\
		__do_ffa_mem_xfer((fid), (res), (ctxt));	\
	} while (0)

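/*
 * The host is reclaiming memory that it previously shared or lent, but
 * only the SPMD knows which constituent ranges make up the transaction
 * identified by the handle. Retrieve the descriptor (reassembling any
 * fragments into ffa_desc_buf) before forwarding the reclaim, so that the
 * ranges can be unshared from the host stage-2 once the SPMD has released
 * them.
 */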
static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
			       struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, handle_lo, ctxt, 1);
	DECLARE_REG(u32, handle_hi, ctxt, 2);
	DECLARE_REG(u32, flags, ctxt, 3);
	struct ffa_composite_mem_region *reg;
	u32 offset, len, fraglen, fragoff;
	struct ffa_mem_region *buf;
	int ret = 0;
	u64 handle;

	handle = PACK_HANDLE(handle_lo, handle_hi);

	hyp_spin_lock(&host_buffers.lock);

	buf = hyp_buffers.tx;
	*buf = (struct ffa_mem_region) {
		.sender_id = HOST_FFA_ID,
		.handle = handle,
	};

	ffa_retrieve_req(res, sizeof(*buf));
	buf = hyp_buffers.rx;
	if (res->a0 != FFA_MEM_RETRIEVE_RESP)
		goto out_unlock;

	len = res->a1;
	fraglen = res->a2;

	offset = buf->ep_mem_access[0].composite_off;
	/*
	 * We can trust the SPMD to get this right, but let's at least
	 * check that we end up with something that doesn't look _completely_
	 * bogus.
	 */
	if (WARN_ON(offset > len ||
		    fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) {
		ret = FFA_RET_ABORTED;
		goto out_unlock;
	}

	if (len > ffa_desc_buf.len) {
		ret = FFA_RET_NO_MEMORY;
		goto out_unlock;
	}

	buf = ffa_desc_buf.buf;
	memcpy(buf, hyp_buffers.rx, fraglen);

	for (fragoff = fraglen; fragoff < len; fragoff += fraglen) {
		ffa_mem_frag_rx(res, handle_lo, handle_hi, fragoff);
		if (res->a0 != FFA_MEM_FRAG_TX) {
			ret = FFA_RET_INVALID_PARAMETERS;
			goto out_unlock;
		}

		fraglen = res->a3;
		memcpy((void *)buf + fragoff, hyp_buffers.rx, fraglen);
	}

	ffa_mem_reclaim(res, handle_lo, handle_hi, flags);
	if (res->a0 != FFA_SUCCESS)
		goto out_unlock;

	reg = (void *)buf + offset;
	/* If the SPMD was happy, then we should be too. */
	WARN_ON(ffa_host_unshare_ranges(reg->constituents,
					reg->addr_range_cnt));
out_unlock:
	hyp_spin_unlock(&host_buffers.lock);

	if (ret)
		ffa_to_smccc_res(res, ret);
}

/*
 * Is a given FFA function supported, either by forwarding on directly
 * or by handling at EL2?
 */
static bool ffa_call_supported(u64 func_id)
{
	switch (func_id) {
	/* Unsupported memory management calls */
	case FFA_FN64_MEM_RETRIEVE_REQ:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_OP_PAUSE:
	case FFA_MEM_OP_RESUME:
	case FFA_MEM_FRAG_RX:
	case FFA_FN64_MEM_DONATE:
	/* Indirect message passing via RX/TX buffers */
	case FFA_MSG_SEND:
	case FFA_MSG_POLL:
	case FFA_MSG_WAIT:
	/* 32-bit variants of 64-bit calls */
	case FFA_MSG_SEND_DIRECT_REQ:
	case FFA_MSG_SEND_DIRECT_RESP:
	case FFA_RXTX_MAP:
	case FFA_MEM_DONATE:
	case FFA_MEM_RETRIEVE_REQ:
		return false;
	}

	return true;
}

static bool do_ffa_features(struct arm_smccc_res *res,
			    struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, id, ctxt, 1);
	u64 prop = 0;
	int ret = 0;

	if (!ffa_call_supported(id)) {
		ret = FFA_RET_NOT_SUPPORTED;
		goto out_handled;
	}

	switch (id) {
	case FFA_MEM_SHARE:
	case FFA_FN64_MEM_SHARE:
	case FFA_MEM_LEND:
	case FFA_FN64_MEM_LEND:
		ret = FFA_RET_SUCCESS;
		prop = 0; /* No support for dynamic buffers */
		goto out_handled;
	default:
		return false;
	}

out_handled:
	ffa_to_smccc_res_prop(res, ret, prop);
	return true;
}

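/*
 * Entry point for SMCs trapped from the host. Returns true if the call was
 * handled here, in which case the SMCCC return values have already been
 * written back to the host context, and false if the call should be passed
 * through to EL3 unmodified.
 */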
bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
{
	struct arm_smccc_res res;

	/*
	 * There's no way we can tell what a non-standard SMC call might
	 * be up to. Ideally, we would terminate these here and return
	 * an error to the host, but sadly devices make use of custom
	 * firmware calls for things like power management, debugging,
	 * RNG access and crash reporting.
	 *
	 * Given that the architecture requires us to trust EL3 anyway,
	 * we forward unrecognised calls on under the assumption that
	 * the firmware doesn't expose a mechanism to access arbitrary
	 * non-secure memory. Short of a per-device table of SMCs, this
	 * is the best we can do.
	 */
	if (!is_ffa_call(func_id))
		return false;

	switch (func_id) {
	case FFA_FEATURES:
		if (!do_ffa_features(&res, host_ctxt))
			return false;
		goto out_handled;
	/* Memory management */
	case FFA_FN64_RXTX_MAP:
		do_ffa_rxtx_map(&res, host_ctxt);
		goto out_handled;
	case FFA_RXTX_UNMAP:
		do_ffa_rxtx_unmap(&res, host_ctxt);
		goto out_handled;
	case FFA_MEM_SHARE:
	case FFA_FN64_MEM_SHARE:
		do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, host_ctxt);
		goto out_handled;
	case FFA_MEM_RECLAIM:
		do_ffa_mem_reclaim(&res, host_ctxt);
		goto out_handled;
	case FFA_MEM_LEND:
	case FFA_FN64_MEM_LEND:
		do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, host_ctxt);
		goto out_handled;
	case FFA_MEM_FRAG_TX:
		do_ffa_mem_frag_tx(&res, host_ctxt);
		goto out_handled;
	}

	if (ffa_call_supported(func_id))
		return false; /* Pass through */

	ffa_to_smccc_error(&res, FFA_RET_NOT_SUPPORTED);
out_handled:
	ffa_set_retval(host_ctxt, &res);
	return true;
}

int hyp_ffa_init(void *pages)
{
	struct arm_smccc_res res;
	size_t min_rxtx_sz;
	void *tx, *rx;

	if (kvm_host_psci_config.smccc_version < ARM_SMCCC_VERSION_1_2)
		return 0;

	arm_smccc_1_1_smc(FFA_VERSION, FFA_VERSION_1_0, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 == FFA_RET_NOT_SUPPORTED)
		return 0;

	/*
	 * Firmware returns the maximum supported version of the FF-A
	 * implementation. Check that the returned version is
	 * backwards-compatible with the hyp according to the rules in DEN0077A
	 * v1.1 REL0 13.2.1.
	 *
	 * Of course, things are never simple when dealing with firmware. v1.1
	 * broke ABI with v1.0 on several structures, which is itself
	 * incompatible with the aforementioned versioning scheme. The
	 * expectation is that v1.x implementations that do not support the v1.0
	 * ABI return NOT_SUPPORTED rather than a version number, according to
	 * DEN0077A v1.1 REL0 18.6.4.
	 */
	if (FFA_MAJOR_VERSION(res.a0) != 1)
		return -EOPNOTSUPP;

	arm_smccc_1_1_smc(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 != FFA_SUCCESS)
		return -EOPNOTSUPP;

	if (res.a2 != HOST_FFA_ID)
		return -EINVAL;

	arm_smccc_1_1_smc(FFA_FEATURES, FFA_FN64_RXTX_MAP,
			  0, 0, 0, 0, 0, 0, &res);
	if (res.a0 != FFA_SUCCESS)
		return -EOPNOTSUPP;

	switch (res.a2) {
	case FFA_FEAT_RXTX_MIN_SZ_4K:
		min_rxtx_sz = SZ_4K;
		break;
	case FFA_FEAT_RXTX_MIN_SZ_16K:
		min_rxtx_sz = SZ_16K;
		break;
	case FFA_FEAT_RXTX_MIN_SZ_64K:
		min_rxtx_sz = SZ_64K;
		break;
	default:
		return -EINVAL;
	}

	if (min_rxtx_sz > PAGE_SIZE)
		return -EOPNOTSUPP;

	tx = pages;
	pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
	rx = pages;
	pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;

	ffa_desc_buf = (struct kvm_ffa_descriptor_buffer) {
		.buf = pages,
		.len = PAGE_SIZE *
		       (hyp_ffa_proxy_pages() - (2 * KVM_FFA_MBOX_NR_PAGES)),
	};

	hyp_buffers = (struct kvm_ffa_buffers) {
		.lock = __HYP_SPIN_LOCK_UNLOCKED,
		.tx = tx,
		.rx = rx,
	};

	host_buffers = (struct kvm_ffa_buffers) {
		.lock = __HYP_SPIN_LOCK_UNLOCKED,
	};

	return 0;
}
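
/*
 * For reference, the host SMC exit path (hyp-main.c) is expected to feed
 * trapped calls into kvm_host_ffa_handler() roughly as sketched below.
 * This is illustrative only: the real dispatcher also handles PSCI and
 * other services and may differ in detail.
 *
 *	static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
 *	{
 *		DECLARE_REG(u64, func_id, host_ctxt, 0);
 *		bool handled;
 *
 *		handled = kvm_host_ffa_handler(host_ctxt, func_id);
 *		if (!handled)
 *			default_host_smc_handler(host_ctxt);
 *
 *		kvm_skip_host_instr();
 *	}
 */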