1048be5feSWill Deacon // SPDX-License-Identifier: GPL-2.0-only
2048be5feSWill Deacon /*
3048be5feSWill Deacon * FF-A v1.0 proxy to filter out invalid memory-sharing SMC calls issued by
4048be5feSWill Deacon * the host. FF-A is a slightly more palatable abbreviation of "Arm Firmware
5048be5feSWill Deacon * Framework for Arm A-profile", which is specified by Arm in document
6048be5feSWill Deacon * number DEN0077.
7048be5feSWill Deacon *
8048be5feSWill Deacon * Copyright (C) 2022 - Google LLC
9048be5feSWill Deacon * Author: Andrew Walbran <qwandor@google.com>
10048be5feSWill Deacon *
11048be5feSWill Deacon * This driver hooks into the SMC trapping logic for the host and intercepts
12048be5feSWill Deacon * all calls falling within the FF-A range. Each call is either:
13048be5feSWill Deacon *
14048be5feSWill Deacon * - Forwarded on unmodified to the SPMD at EL3
15048be5feSWill Deacon * - Rejected as "unsupported"
16048be5feSWill Deacon * - Accompanied by a host stage-2 page-table check/update and reissued
17048be5feSWill Deacon *
18048be5feSWill Deacon * Consequently, any attempts by the host to make guest memory pages
19048be5feSWill Deacon * accessible to the secure world using FF-A will be detected either here
20048be5feSWill Deacon * (in the case that the memory is already owned by the guest) or during
21048be5feSWill Deacon * donation to the guest (in the case that the memory was previously shared
22048be5feSWill Deacon * with the secure world).
23048be5feSWill Deacon *
24048be5feSWill Deacon * To allow the rolling-back of page-table updates and FF-A calls in the
25048be5feSWill Deacon * event of failure, operations involving the RXTX buffers are locked for
26048be5feSWill Deacon * the duration and are therefore serialised.
27048be5feSWill Deacon */
28048be5feSWill Deacon
29048be5feSWill Deacon #include <linux/arm-smccc.h>
30048be5feSWill Deacon #include <linux/arm_ffa.h>
31bc3888a0SWill Deacon #include <asm/kvm_pkvm.h>
32bc3888a0SWill Deacon
33048be5feSWill Deacon #include <nvhe/ffa.h>
349d0c6a9aSWill Deacon #include <nvhe/mem_protect.h>
359d0c6a9aSWill Deacon #include <nvhe/memory.h>
36048be5feSWill Deacon #include <nvhe/trap_handler.h>
37bc3888a0SWill Deacon #include <nvhe/spinlock.h>
38048be5feSWill Deacon
3912bdce4fSWill Deacon /*
4012bdce4fSWill Deacon * "ID value 0 must be returned at the Non-secure physical FF-A instance"
4112bdce4fSWill Deacon * We share this ID with the host.
4212bdce4fSWill Deacon */
4312bdce4fSWill Deacon #define HOST_FFA_ID 0
4412bdce4fSWill Deacon
/*
 * A buffer to hold the maximum descriptor size we can see from the host,
 * which is required when the SPMD returns a fragmented FFA_MEM_RETRIEVE_RESP
 * when resolving the handle on the reclaim path.
 */
struct kvm_ffa_descriptor_buffer {
	void *buf;	/* Hyp VA of the descriptor scratch area */
	size_t len;	/* Capacity of @buf in bytes */
};

/* Single, statically-allocated scratch descriptor for the reclaim path. */
static struct kvm_ffa_descriptor_buffer ffa_desc_buf;
560a9f15fdSQuentin Perret
/* A pair of FF-A RX/TX mailbox pages and the lock serialising their use. */
struct kvm_ffa_buffers {
	hyp_spinlock_t lock;	/* Protects @tx and @rx (and rollback paths) */
	void *tx;
	void *rx;
};

/*
 * Note that we don't currently lock these buffers explicitly, instead
 * relying on the locking of the host FFA buffers as we only have one
 * client.
 */
static struct kvm_ffa_buffers hyp_buffers;	/* EL2's own mailboxes */
static struct kvm_ffa_buffers host_buffers;	/* Host's pinned mailboxes */
70bc3888a0SWill Deacon
/* Populate @res as an FFA_ERROR response carrying @ffa_errno in w2. */
static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno)
{
	res->a0 = FFA_ERROR;
	res->a1 = 0;
	res->a2 = ffa_errno;
	res->a3 = 0;
}
78048be5feSWill Deacon
/*
 * Translate @ret into SMCCC registers: FFA_SUCCESS with @prop in w2 when
 * @ret is FFA_RET_SUCCESS, otherwise an FFA_ERROR carrying @ret.
 */
static void ffa_to_smccc_res_prop(struct arm_smccc_res *res, int ret, u64 prop)
{
	if (ret != FFA_RET_SUCCESS) {
		ffa_to_smccc_error(res, ret);
		return;
	}

	*res = (struct arm_smccc_res) {
		.a0	= FFA_SUCCESS,
		.a2	= prop,
	};
}
889d0c6a9aSWill Deacon
/* As ffa_to_smccc_res_prop(), but with no property value to return. */
static void ffa_to_smccc_res(struct arm_smccc_res *res, int ret)
{
	ffa_to_smccc_res_prop(res, ret, 0);
}
9320936cd1SFuad Tabba
/*
 * Copy an SMCCC result back into the host's return registers x0-x3,
 * which is all that the FF-A v1.0 calls proxied here use for results.
 */
static void ffa_set_retval(struct kvm_cpu_context *ctxt,
			   struct arm_smccc_res *res)
{
	cpu_reg(ctxt, 0) = res->a0;
	cpu_reg(ctxt, 1) = res->a1;
	cpu_reg(ctxt, 2) = res->a2;
	cpu_reg(ctxt, 3) = res->a3;
}
102048be5feSWill Deacon
is_ffa_call(u64 func_id)103048be5feSWill Deacon static bool is_ffa_call(u64 func_id)
104048be5feSWill Deacon {
105048be5feSWill Deacon return ARM_SMCCC_IS_FAST_CALL(func_id) &&
106048be5feSWill Deacon ARM_SMCCC_OWNER_NUM(func_id) == ARM_SMCCC_OWNER_STANDARD &&
107048be5feSWill Deacon ARM_SMCCC_FUNC_NUM(func_id) >= FFA_MIN_FUNC_NUM &&
108048be5feSWill Deacon ARM_SMCCC_FUNC_NUM(func_id) <= FFA_MAX_FUNC_NUM;
109048be5feSWill Deacon }
110048be5feSWill Deacon
/*
 * Register the hypervisor's own TX/RX mailboxes with the SPMD at EL3.
 * Returns FFA_RET_SUCCESS or the FF-A error code from w2.
 */
static int ffa_map_hyp_buffers(u64 ffa_page_count)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_FN64_RXTX_MAP,
			  hyp_virt_to_phys(hyp_buffers.tx),
			  hyp_virt_to_phys(hyp_buffers.rx),
			  ffa_page_count,
			  0, 0, 0, 0,
			  &res);

	if (res.a0 != FFA_SUCCESS)
		return res.a2;	/* FFA_ERROR: error code lives in w2 */

	return FFA_RET_SUCCESS;
}
1249d0c6a9aSWill Deacon
ffa_unmap_hyp_buffers(void)1259d0c6a9aSWill Deacon static int ffa_unmap_hyp_buffers(void)
1269d0c6a9aSWill Deacon {
1279d0c6a9aSWill Deacon struct arm_smccc_res res;
1289d0c6a9aSWill Deacon
1299d0c6a9aSWill Deacon arm_smccc_1_1_smc(FFA_RXTX_UNMAP,
1309d0c6a9aSWill Deacon HOST_FFA_ID,
1319d0c6a9aSWill Deacon 0, 0, 0, 0, 0, 0,
1329d0c6a9aSWill Deacon &res);
1339d0c6a9aSWill Deacon
1349d0c6a9aSWill Deacon return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
1359d0c6a9aSWill Deacon }
1369d0c6a9aSWill Deacon
/* Transmit one follow-up descriptor fragment for a handle to the SPMD. */
static void ffa_mem_frag_tx(struct arm_smccc_res *res, u32 handle_lo,
			    u32 handle_hi, u32 fraglen, u32 endpoint_id)
{
	arm_smccc_1_1_smc(FFA_MEM_FRAG_TX, handle_lo, handle_hi, fraglen,
			  endpoint_id, 0, 0, 0, res);
}
1450a9f15fdSQuentin Perret
/* Request the next descriptor fragment, starting at offset @fragoff. */
static void ffa_mem_frag_rx(struct arm_smccc_res *res, u32 handle_lo,
			    u32 handle_hi, u32 fragoff)
{
	arm_smccc_1_1_smc(FFA_MEM_FRAG_RX, handle_lo, handle_hi, fragoff,
			  HOST_FFA_ID, 0, 0, 0, res);
}
1540a9f15fdSQuentin Perret
/* Issue a memory transfer call (@func_id is MEM_SHARE or MEM_LEND). */
static void ffa_mem_xfer(struct arm_smccc_res *res, u64 func_id, u32 len,
			 u32 fraglen)
{
	arm_smccc_1_1_smc(func_id, len, fraglen, 0, 0, 0, 0, 0, res);
}
16243609000SWill Deacon
/* Ask the SPMD to reclaim the region identified by the split handle. */
static void ffa_mem_reclaim(struct arm_smccc_res *res, u32 handle_lo,
			    u32 handle_hi, u32 flags)
{
	arm_smccc_1_1_smc(FFA_MEM_RECLAIM, handle_lo, handle_hi, flags,
			  0, 0, 0, 0, res);
}
1710e3bcb49SWill Deacon
/* Retrieve a region description; both total and TX lengths are @len. */
static void ffa_retrieve_req(struct arm_smccc_res *res, u32 len)
{
	arm_smccc_1_1_smc(FFA_FN64_MEM_RETRIEVE_REQ, len, len,
			  0, 0, 0, 0, 0, res);
}
1790e3bcb49SWill Deacon
/*
 * Handle FFA_FN64_RXTX_MAP from the host: register the hypervisor's own
 * mailboxes with the SPMD, then share and pin the host's TX/RX pages in
 * our stage-1 so that descriptors can be proxied on the host's behalf.
 * Any failure unwinds in reverse order of acquisition.
 */
static void do_ffa_rxtx_map(struct arm_smccc_res *res,
			    struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(phys_addr_t, tx, ctxt, 1);
	DECLARE_REG(phys_addr_t, rx, ctxt, 2);
	DECLARE_REG(u32, npages, ctxt, 3);
	int ret = 0;
	void *rx_virt, *tx_virt;

	/* Mailbox size must match ours, expressed in FFA_PAGE_SIZE units. */
	if (npages != (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) / FFA_PAGE_SIZE) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	if (!PAGE_ALIGNED(tx) || !PAGE_ALIGNED(rx)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	/* Mapping is one-shot: reject if the host already has buffers. */
	if (host_buffers.tx) {
		ret = FFA_RET_DENIED;
		goto out_unlock;
	}

	/*
	 * Map our hypervisor buffers into the SPMD before mapping and
	 * pinning the host buffers in our own address space.
	 */
	ret = ffa_map_hyp_buffers(npages);
	if (ret)
		goto out_unlock;

	ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(tx));
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unmap;
	}

	ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(rx));
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unshare_tx;
	}

	/* Pin the shared pages so the host can't unshare them under us. */
	tx_virt = hyp_phys_to_virt(tx);
	ret = hyp_pin_shared_mem(tx_virt, tx_virt + 1);
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unshare_rx;
	}

	rx_virt = hyp_phys_to_virt(rx);
	ret = hyp_pin_shared_mem(rx_virt, rx_virt + 1);
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unpin_tx;
	}

	/* Publish the buffers only once everything else has succeeded. */
	host_buffers.tx = tx_virt;
	host_buffers.rx = rx_virt;

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	ffa_to_smccc_res(res, ret);
	return;

	/* Error unwind: undo each step in the reverse order it was taken. */
err_unpin_tx:
	hyp_unpin_shared_mem(tx_virt, tx_virt + 1);
err_unshare_rx:
	__pkvm_host_unshare_hyp(hyp_phys_to_pfn(rx));
err_unshare_tx:
	__pkvm_host_unshare_hyp(hyp_phys_to_pfn(tx));
err_unmap:
	ffa_unmap_hyp_buffers();
	goto out_unlock;
}
2589d0c6a9aSWill Deacon
/*
 * Handle FFA_RXTX_UNMAP from the host: unpin and unshare the host's
 * mailbox pages and tear down the hypervisor's own SPMD registration.
 */
static void do_ffa_rxtx_unmap(struct arm_smccc_res *res,
			      struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, id, ctxt, 1);
	int ret = 0;

	/* We only proxy for the host, which shares our FF-A ID. */
	if (id != HOST_FFA_ID) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	/* Nothing to unmap if the buffers were never mapped. */
	if (!host_buffers.tx) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	/* Unpin before unshare; failures here indicate a hyp bug. */
	hyp_unpin_shared_mem(host_buffers.tx, host_buffers.tx + 1);
	WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.tx)));
	host_buffers.tx = NULL;

	hyp_unpin_shared_mem(host_buffers.rx, host_buffers.rx + 1);
	WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.rx)));
	host_buffers.rx = NULL;

	ffa_unmap_hyp_buffers();

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	ffa_to_smccc_res(res, ret);
}
2919d0c6a9aSWill Deacon
/*
 * Mark each of @nranges address ranges as shared with FF-A in the host
 * stage-2, stopping at the first failure. Returns the number of ranges
 * successfully shared, so callers can roll back a partial walk.
 */
static u32 __ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
				   u32 nranges)
{
	u32 nshared = 0;

	while (nshared < nranges) {
		struct ffa_mem_region_addr_range *range = &ranges[nshared];
		u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;

		/* Only whole hyp pages can change state. */
		if (!PAGE_ALIGNED(sz))
			break;

		if (__pkvm_host_share_ffa(hyp_phys_to_pfn(range->address),
					  sz / PAGE_SIZE))
			break;

		nshared++;
	}

	return nshared;
}
31143609000SWill Deacon
/*
 * Inverse of __ffa_host_share_ranges(): return ranges to the host's
 * exclusive ownership, stopping at the first failure. Returns the number
 * of ranges successfully unshared.
 */
static u32 __ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
				     u32 nranges)
{
	u32 nunshared = 0;

	while (nunshared < nranges) {
		struct ffa_mem_region_addr_range *range = &ranges[nunshared];
		u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;

		/* Only whole hyp pages can change state. */
		if (!PAGE_ALIGNED(sz))
			break;

		if (__pkvm_host_unshare_ffa(hyp_phys_to_pfn(range->address),
					    sz / PAGE_SIZE))
			break;

		nunshared++;
	}

	return nunshared;
}
33143609000SWill Deacon
/*
 * Share all @nranges ranges with the secure world, or none: a partial
 * share is rolled back and FFA_RET_DENIED returned.
 */
static int ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
				 u32 nranges)
{
	u32 nshared = __ffa_host_share_ranges(ranges, nranges);

	if (nshared == nranges)
		return 0;

	/* Undo the partial walk; it just succeeded, so this must too. */
	WARN_ON(__ffa_host_unshare_ranges(ranges, nshared) != nshared);
	return FFA_RET_DENIED;
}
34543609000SWill Deacon
/*
 * Unshare all @nranges ranges from the secure world, or none: a partial
 * unshare is re-shared and FFA_RET_DENIED returned.
 */
static int ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
				   u32 nranges)
{
	u32 nunshared = __ffa_host_unshare_ranges(ranges, nranges);

	if (nunshared == nranges)
		return 0;

	/* Undo the partial walk; it just succeeded, so this must too. */
	WARN_ON(__ffa_host_share_ranges(ranges, nunshared) != nunshared);
	return FFA_RET_DENIED;
}
35943609000SWill Deacon
/*
 * Handle FFA_MEM_FRAG_TX from the host: proxy a follow-up fragment of a
 * memory-transfer descriptor. Fragments after the first consist purely
 * of ffa_mem_region_addr_range entries, so validate the fragment, share
 * the described ranges with the secure world and reissue the call.
 */
static void do_ffa_mem_frag_tx(struct arm_smccc_res *res,
			       struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, handle_lo, ctxt, 1);
	DECLARE_REG(u32, handle_hi, ctxt, 2);
	DECLARE_REG(u32, fraglen, ctxt, 3);
	DECLARE_REG(u32, endpoint_id, ctxt, 4);
	struct ffa_mem_region_addr_range *buf;
	int ret = FFA_RET_INVALID_PARAMETERS;
	u32 nr_ranges;

	/* The fragment must fit in our TX mailbox... */
	if (fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)
		goto out;

	/* ...and must be a whole number of address-range entries. */
	if (fraglen % sizeof(*buf))
		goto out;

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.tx)
		goto out_unlock;

	/*
	 * Snapshot the fragment into our private TX buffer so the host
	 * cannot change it between validation and transmission.
	 */
	buf = hyp_buffers.tx;
	memcpy(buf, host_buffers.tx, fraglen);
	nr_ranges = fraglen / sizeof(*buf);

	ret = ffa_host_share_ranges(buf, nr_ranges);
	if (ret) {
		/*
		 * We're effectively aborting the transaction, so we need
		 * to restore the global state back to what it was prior to
		 * transmission of the first fragment.
		 */
		ffa_mem_reclaim(res, handle_lo, handle_hi, 0);
		WARN_ON(res->a0 != FFA_SUCCESS);
		goto out_unlock;
	}

	ffa_mem_frag_tx(res, handle_lo, handle_hi, fraglen, endpoint_id);
	/* FFA_MEM_FRAG_RX means the SPMD expects another fragment. */
	if (res->a0 != FFA_SUCCESS && res->a0 != FFA_MEM_FRAG_RX)
		WARN_ON(ffa_host_unshare_ranges(buf, nr_ranges));

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	if (ret)
		ffa_to_smccc_res(res, ret);

	/*
	 * If for any reason this did not succeed, we're in trouble as we have
	 * now lost the content of the previous fragments and we can't rollback
	 * the host stage-2 changes. The pages previously marked as shared will
	 * remain stuck in that state forever, hence preventing the host from
	 * sharing/donating them again and may possibly lead to subsequent
	 * failures, but this will not compromise confidentiality.
	 */
	return;
}
4170a9f15fdSQuentin Perret
/*
 * Common handler for FFA_FN64_MEM_SHARE and FFA_FN64_MEM_LEND (@func_id):
 * validate the first descriptor fragment in the host's TX buffer, mark
 * the constituent ranges as shared with the secure world in the host's
 * stage-2, then forward the call to the SPMD. On SPMD failure the
 * stage-2 changes are unwound.
 */
static void __do_ffa_mem_xfer(const u64 func_id,
			      struct arm_smccc_res *res,
			      struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, len, ctxt, 1);
	DECLARE_REG(u32, fraglen, ctxt, 2);
	DECLARE_REG(u64, addr_mbz, ctxt, 3);
	DECLARE_REG(u32, npages_mbz, ctxt, 4);
	struct ffa_composite_mem_region *reg;
	struct ffa_mem_region *buf;
	u32 offset, nr_ranges;
	int ret = 0;

	/* "Address" and "page count" args must be zero for buffer-based TX. */
	if (addr_mbz || npages_mbz || fraglen > len ||
	    fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	/* The fragment must at least hold the region header + one endpoint. */
	if (fraglen < sizeof(struct ffa_mem_region) +
		      sizeof(struct ffa_mem_region_attributes)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.tx) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	/* The full descriptor must fit the reclaim-path scratch buffer. */
	if (len > ffa_desc_buf.len) {
		ret = FFA_RET_NO_MEMORY;
		goto out_unlock;
	}

	/* Snapshot the fragment so the host can't change it under us. */
	buf = hyp_buffers.tx;
	memcpy(buf, host_buffers.tx, fraglen);

	/* Exactly one endpoint, and the sender must be the host itself. */
	offset = buf->ep_mem_access[0].composite_off;
	if (!offset || buf->ep_count != 1 || buf->sender_id != HOST_FFA_ID) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	if (fraglen < offset + sizeof(struct ffa_composite_mem_region)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	/* Constituent ranges run from the composite region to fragment end. */
	reg = (void *)buf + offset;
	nr_ranges = ((void *)buf + fraglen) - (void *)reg->constituents;
	if (nr_ranges % sizeof(reg->constituents[0])) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	nr_ranges /= sizeof(reg->constituents[0]);
	ret = ffa_host_share_ranges(reg->constituents, nr_ranges);
	if (ret)
		goto out_unlock;

	ffa_mem_xfer(res, func_id, len, fraglen);
	if (fraglen != len) {
		/* More fragments follow: the SPMD must request them. */
		if (res->a0 != FFA_MEM_FRAG_RX)
			goto err_unshare;

		if (res->a3 != fraglen)
			goto err_unshare;
	} else if (res->a0 != FFA_SUCCESS) {
		goto err_unshare;
	}

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	if (ret)
		ffa_to_smccc_res(res, ret);
	return;

err_unshare:
	/* SPMD rejected the transfer: undo our stage-2 changes. */
	WARN_ON(ffa_host_unshare_ranges(reg->constituents, nr_ranges));
	goto out_unlock;
}
50243609000SWill Deacon
/*
 * do_ffa_mem_xfer() - Forward an FFA_MEM_SHARE/LEND call from the host,
 * rejecting at compile time any @fid other than the two supported
 * transfer functions.
 *
 * Note: no trailing semicolon after "while (0)". With one, the caller's
 * own ';' would terminate a second, empty statement, breaking uses in
 * unbraced if/else constructs — the standard do-while(0) idiom requires
 * the macro to expand to exactly one statement.
 */
#define do_ffa_mem_xfer(fid, res, ctxt)				\
	do {							\
		BUILD_BUG_ON((fid) != FFA_FN64_MEM_SHARE &&	\
			     (fid) != FFA_FN64_MEM_LEND);	\
		__do_ffa_mem_xfer((fid), (res), (ctxt));	\
	} while (0)
509*d5d6489bSSnehal Koukuntla
/*
 * Handle FFA_MEM_RECLAIM from the host: retrieve the full descriptor for
 * the handle back from the SPMD (reassembling fragments into
 * ffa_desc_buf if needed), issue the reclaim, and on success return the
 * constituent ranges to the host's stage-2.
 */
static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
			       struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, handle_lo, ctxt, 1);
	DECLARE_REG(u32, handle_hi, ctxt, 2);
	DECLARE_REG(u32, flags, ctxt, 3);
	struct ffa_composite_mem_region *reg;
	u32 offset, len, fraglen, fragoff;
	struct ffa_mem_region *buf;
	int ret = 0;
	u64 handle;

	handle = PACK_HANDLE(handle_lo, handle_hi);

	hyp_spin_lock(&host_buffers.lock);

	/* Build a minimal retrieve request identifying the handle. */
	buf = hyp_buffers.tx;
	*buf = (struct ffa_mem_region) {
		.sender_id = HOST_FFA_ID,
		.handle = handle,
	};

	ffa_retrieve_req(res, sizeof(*buf));
	buf = hyp_buffers.rx;	/* The response lands in our RX buffer. */
	if (res->a0 != FFA_MEM_RETRIEVE_RESP)
		goto out_unlock;

	len = res->a1;		/* Total descriptor length... */
	fraglen = res->a2;	/* ...of which this much arrived now. */

	offset = buf->ep_mem_access[0].composite_off;
	/*
	 * We can trust the SPMD to get this right, but let's at least
	 * check that we end up with something that doesn't look _completely_
	 * bogus.
	 */
	if (WARN_ON(offset > len ||
		    fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) {
		ret = FFA_RET_ABORTED;
		goto out_unlock;
	}

	if (len > ffa_desc_buf.len) {
		ret = FFA_RET_NO_MEMORY;
		goto out_unlock;
	}

	/* Accumulate the (possibly fragmented) descriptor in ffa_desc_buf. */
	buf = ffa_desc_buf.buf;
	memcpy(buf, hyp_buffers.rx, fraglen);

	for (fragoff = fraglen; fragoff < len; fragoff += fraglen) {
		ffa_mem_frag_rx(res, handle_lo, handle_hi, fragoff);
		if (res->a0 != FFA_MEM_FRAG_TX) {
			ret = FFA_RET_INVALID_PARAMETERS;
			goto out_unlock;
		}

		fraglen = res->a3;	/* Length of this fragment. */
		memcpy((void *)buf + fragoff, hyp_buffers.rx, fraglen);
	}

	ffa_mem_reclaim(res, handle_lo, handle_hi, flags);
	if (res->a0 != FFA_SUCCESS)
		goto out_unlock;

	reg = (void *)buf + offset;
	/* If the SPMD was happy, then we should be too. */
	WARN_ON(ffa_host_unshare_ranges(reg->constituents,
					reg->addr_range_cnt));
out_unlock:
	hyp_spin_unlock(&host_buffers.lock);

	if (ret)
		ffa_to_smccc_res(res, ret);
}
5850e3bcb49SWill Deacon
/*
 * Is a given FFA function supported, either by forwarding on directly
 * or by handling at EL2? Anything listed below is refused; everything
 * else is assumed safe to proxy or forward.
 */
static bool ffa_call_supported(u64 func_id)
{
	switch (func_id) {
	/* Unsupported memory management calls */
	case FFA_FN64_MEM_RETRIEVE_REQ:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_OP_PAUSE:
	case FFA_MEM_OP_RESUME:
	case FFA_MEM_FRAG_RX:
	case FFA_FN64_MEM_DONATE:
	/* Indirect message passing via RX/TX buffers */
	case FFA_MSG_SEND:
	case FFA_MSG_POLL:
	case FFA_MSG_WAIT:
	/* 32-bit variants of 64-bit calls */
	case FFA_MSG_SEND_DIRECT_REQ:
	case FFA_MSG_SEND_DIRECT_RESP:
	case FFA_RXTX_MAP:
	case FFA_MEM_DONATE:
	case FFA_MEM_RETRIEVE_REQ:
		return false;
	}

	return true;
}
616048be5feSWill Deacon
do_ffa_features(struct arm_smccc_res * res,struct kvm_cpu_context * ctxt)61720936cd1SFuad Tabba static bool do_ffa_features(struct arm_smccc_res *res,
61820936cd1SFuad Tabba struct kvm_cpu_context *ctxt)
61920936cd1SFuad Tabba {
62020936cd1SFuad Tabba DECLARE_REG(u32, id, ctxt, 1);
62120936cd1SFuad Tabba u64 prop = 0;
62220936cd1SFuad Tabba int ret = 0;
62320936cd1SFuad Tabba
62420936cd1SFuad Tabba if (!ffa_call_supported(id)) {
62520936cd1SFuad Tabba ret = FFA_RET_NOT_SUPPORTED;
62620936cd1SFuad Tabba goto out_handled;
62720936cd1SFuad Tabba }
62820936cd1SFuad Tabba
62920936cd1SFuad Tabba switch (id) {
63020936cd1SFuad Tabba case FFA_MEM_SHARE:
63120936cd1SFuad Tabba case FFA_FN64_MEM_SHARE:
63220936cd1SFuad Tabba case FFA_MEM_LEND:
63320936cd1SFuad Tabba case FFA_FN64_MEM_LEND:
63420936cd1SFuad Tabba ret = FFA_RET_SUCCESS;
63520936cd1SFuad Tabba prop = 0; /* No support for dynamic buffers */
63620936cd1SFuad Tabba goto out_handled;
63720936cd1SFuad Tabba default:
63820936cd1SFuad Tabba return false;
63920936cd1SFuad Tabba }
64020936cd1SFuad Tabba
64120936cd1SFuad Tabba out_handled:
64220936cd1SFuad Tabba ffa_to_smccc_res_prop(res, ret, prop);
64320936cd1SFuad Tabba return true;
64420936cd1SFuad Tabba }
64520936cd1SFuad Tabba
kvm_host_ffa_handler(struct kvm_cpu_context * host_ctxt,u32 func_id)646373beef0SJean-Philippe Brucker bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
647048be5feSWill Deacon {
648048be5feSWill Deacon struct arm_smccc_res res;
649048be5feSWill Deacon
650048be5feSWill Deacon /*
651048be5feSWill Deacon * There's no way we can tell what a non-standard SMC call might
652048be5feSWill Deacon * be up to. Ideally, we would terminate these here and return
653048be5feSWill Deacon * an error to the host, but sadly devices make use of custom
654048be5feSWill Deacon * firmware calls for things like power management, debugging,
655048be5feSWill Deacon * RNG access and crash reporting.
656048be5feSWill Deacon *
657048be5feSWill Deacon * Given that the architecture requires us to trust EL3 anyway,
658048be5feSWill Deacon * we forward unrecognised calls on under the assumption that
659048be5feSWill Deacon * the firmware doesn't expose a mechanism to access arbitrary
660048be5feSWill Deacon * non-secure memory. Short of a per-device table of SMCs, this
661048be5feSWill Deacon * is the best we can do.
662048be5feSWill Deacon */
663048be5feSWill Deacon if (!is_ffa_call(func_id))
664048be5feSWill Deacon return false;
665048be5feSWill Deacon
6669d0c6a9aSWill Deacon switch (func_id) {
66720936cd1SFuad Tabba case FFA_FEATURES:
66820936cd1SFuad Tabba if (!do_ffa_features(&res, host_ctxt))
66920936cd1SFuad Tabba return false;
67020936cd1SFuad Tabba goto out_handled;
6719d0c6a9aSWill Deacon /* Memory management */
6729d0c6a9aSWill Deacon case FFA_FN64_RXTX_MAP:
6739d0c6a9aSWill Deacon do_ffa_rxtx_map(&res, host_ctxt);
6749d0c6a9aSWill Deacon goto out_handled;
6759d0c6a9aSWill Deacon case FFA_RXTX_UNMAP:
6769d0c6a9aSWill Deacon do_ffa_rxtx_unmap(&res, host_ctxt);
6779d0c6a9aSWill Deacon goto out_handled;
67843609000SWill Deacon case FFA_MEM_SHARE:
67943609000SWill Deacon case FFA_FN64_MEM_SHARE:
680634d90cfSWill Deacon do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, host_ctxt);
68143609000SWill Deacon goto out_handled;
6820e3bcb49SWill Deacon case FFA_MEM_RECLAIM:
6830e3bcb49SWill Deacon do_ffa_mem_reclaim(&res, host_ctxt);
6840e3bcb49SWill Deacon goto out_handled;
685634d90cfSWill Deacon case FFA_MEM_LEND:
686634d90cfSWill Deacon case FFA_FN64_MEM_LEND:
687634d90cfSWill Deacon do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, host_ctxt);
688634d90cfSWill Deacon goto out_handled;
6890a9f15fdSQuentin Perret case FFA_MEM_FRAG_TX:
6900a9f15fdSQuentin Perret do_ffa_mem_frag_tx(&res, host_ctxt);
6910a9f15fdSQuentin Perret goto out_handled;
6929d0c6a9aSWill Deacon }
6939d0c6a9aSWill Deacon
694048be5feSWill Deacon if (ffa_call_supported(func_id))
695048be5feSWill Deacon return false; /* Pass through */
696048be5feSWill Deacon
697048be5feSWill Deacon ffa_to_smccc_error(&res, FFA_RET_NOT_SUPPORTED);
6989d0c6a9aSWill Deacon out_handled:
699048be5feSWill Deacon ffa_set_retval(host_ctxt, &res);
700048be5feSWill Deacon return true;
701048be5feSWill Deacon }
70212bdce4fSWill Deacon
hyp_ffa_init(void * pages)703bc3888a0SWill Deacon int hyp_ffa_init(void *pages)
70412bdce4fSWill Deacon {
70512bdce4fSWill Deacon struct arm_smccc_res res;
7069d0c6a9aSWill Deacon size_t min_rxtx_sz;
7070a9f15fdSQuentin Perret void *tx, *rx;
70812bdce4fSWill Deacon
70912bdce4fSWill Deacon if (kvm_host_psci_config.smccc_version < ARM_SMCCC_VERSION_1_2)
71012bdce4fSWill Deacon return 0;
71112bdce4fSWill Deacon
71212bdce4fSWill Deacon arm_smccc_1_1_smc(FFA_VERSION, FFA_VERSION_1_0, 0, 0, 0, 0, 0, 0, &res);
71312bdce4fSWill Deacon if (res.a0 == FFA_RET_NOT_SUPPORTED)
71412bdce4fSWill Deacon return 0;
71512bdce4fSWill Deacon
71684f68679SOliver Upton /*
71784f68679SOliver Upton * Firmware returns the maximum supported version of the FF-A
71884f68679SOliver Upton * implementation. Check that the returned version is
71984f68679SOliver Upton * backwards-compatible with the hyp according to the rules in DEN0077A
72084f68679SOliver Upton * v1.1 REL0 13.2.1.
72184f68679SOliver Upton *
72284f68679SOliver Upton * Of course, things are never simple when dealing with firmware. v1.1
72384f68679SOliver Upton * broke ABI with v1.0 on several structures, which is itself
72484f68679SOliver Upton * incompatible with the aforementioned versioning scheme. The
72584f68679SOliver Upton * expectation is that v1.x implementations that do not support the v1.0
72684f68679SOliver Upton * ABI return NOT_SUPPORTED rather than a version number, according to
72784f68679SOliver Upton * DEN0077A v1.1 REL0 18.6.4.
72884f68679SOliver Upton */
72984f68679SOliver Upton if (FFA_MAJOR_VERSION(res.a0) != 1)
73012bdce4fSWill Deacon return -EOPNOTSUPP;
73112bdce4fSWill Deacon
73212bdce4fSWill Deacon arm_smccc_1_1_smc(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0, &res);
73312bdce4fSWill Deacon if (res.a0 != FFA_SUCCESS)
73412bdce4fSWill Deacon return -EOPNOTSUPP;
73512bdce4fSWill Deacon
73612bdce4fSWill Deacon if (res.a2 != HOST_FFA_ID)
73712bdce4fSWill Deacon return -EINVAL;
73812bdce4fSWill Deacon
7399d0c6a9aSWill Deacon arm_smccc_1_1_smc(FFA_FEATURES, FFA_FN64_RXTX_MAP,
7409d0c6a9aSWill Deacon 0, 0, 0, 0, 0, 0, &res);
7419d0c6a9aSWill Deacon if (res.a0 != FFA_SUCCESS)
7429d0c6a9aSWill Deacon return -EOPNOTSUPP;
7439d0c6a9aSWill Deacon
7449d0c6a9aSWill Deacon switch (res.a2) {
7459d0c6a9aSWill Deacon case FFA_FEAT_RXTX_MIN_SZ_4K:
7469d0c6a9aSWill Deacon min_rxtx_sz = SZ_4K;
7479d0c6a9aSWill Deacon break;
7489d0c6a9aSWill Deacon case FFA_FEAT_RXTX_MIN_SZ_16K:
7499d0c6a9aSWill Deacon min_rxtx_sz = SZ_16K;
7509d0c6a9aSWill Deacon break;
7519d0c6a9aSWill Deacon case FFA_FEAT_RXTX_MIN_SZ_64K:
7529d0c6a9aSWill Deacon min_rxtx_sz = SZ_64K;
7539d0c6a9aSWill Deacon break;
7549d0c6a9aSWill Deacon default:
7559d0c6a9aSWill Deacon return -EINVAL;
7569d0c6a9aSWill Deacon }
7579d0c6a9aSWill Deacon
7589d0c6a9aSWill Deacon if (min_rxtx_sz > PAGE_SIZE)
7599d0c6a9aSWill Deacon return -EOPNOTSUPP;
7609d0c6a9aSWill Deacon
7610a9f15fdSQuentin Perret tx = pages;
7620a9f15fdSQuentin Perret pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
7630a9f15fdSQuentin Perret rx = pages;
7640a9f15fdSQuentin Perret pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
7650a9f15fdSQuentin Perret
7660a9f15fdSQuentin Perret ffa_desc_buf = (struct kvm_ffa_descriptor_buffer) {
7670a9f15fdSQuentin Perret .buf = pages,
7680a9f15fdSQuentin Perret .len = PAGE_SIZE *
7690a9f15fdSQuentin Perret (hyp_ffa_proxy_pages() - (2 * KVM_FFA_MBOX_NR_PAGES)),
7700a9f15fdSQuentin Perret };
7710a9f15fdSQuentin Perret
772bc3888a0SWill Deacon hyp_buffers = (struct kvm_ffa_buffers) {
773bc3888a0SWill Deacon .lock = __HYP_SPIN_LOCK_UNLOCKED,
7740a9f15fdSQuentin Perret .tx = tx,
7750a9f15fdSQuentin Perret .rx = rx,
776bc3888a0SWill Deacon };
777bc3888a0SWill Deacon
7789d0c6a9aSWill Deacon host_buffers = (struct kvm_ffa_buffers) {
7799d0c6a9aSWill Deacon .lock = __HYP_SPIN_LOCK_UNLOCKED,
7809d0c6a9aSWill Deacon };
7819d0c6a9aSWill Deacon
78212bdce4fSWill Deacon return 0;
78312bdce4fSWill Deacon }
784