xref: /openbmc/linux/arch/arm64/kvm/hyp/nvhe/ffa.c (revision 373beef0)
1048be5feSWill Deacon // SPDX-License-Identifier: GPL-2.0-only
2048be5feSWill Deacon /*
3048be5feSWill Deacon  * FF-A v1.0 proxy to filter out invalid memory-sharing SMC calls issued by
4048be5feSWill Deacon  * the host. FF-A is a slightly more palatable abbreviation of "Arm Firmware
5048be5feSWill Deacon  * Framework for Arm A-profile", which is specified by Arm in document
6048be5feSWill Deacon  * number DEN0077.
7048be5feSWill Deacon  *
8048be5feSWill Deacon  * Copyright (C) 2022 - Google LLC
9048be5feSWill Deacon  * Author: Andrew Walbran <qwandor@google.com>
10048be5feSWill Deacon  *
11048be5feSWill Deacon  * This driver hooks into the SMC trapping logic for the host and intercepts
12048be5feSWill Deacon  * all calls falling within the FF-A range. Each call is either:
13048be5feSWill Deacon  *
14048be5feSWill Deacon  *	- Forwarded on unmodified to the SPMD at EL3
15048be5feSWill Deacon  *	- Rejected as "unsupported"
16048be5feSWill Deacon  *	- Accompanied by a host stage-2 page-table check/update and reissued
17048be5feSWill Deacon  *
18048be5feSWill Deacon  * Consequently, any attempts by the host to make guest memory pages
19048be5feSWill Deacon  * accessible to the secure world using FF-A will be detected either here
20048be5feSWill Deacon  * (in the case that the memory is already owned by the guest) or during
21048be5feSWill Deacon  * donation to the guest (in the case that the memory was previously shared
22048be5feSWill Deacon  * with the secure world).
23048be5feSWill Deacon  *
24048be5feSWill Deacon  * To allow the rolling-back of page-table updates and FF-A calls in the
25048be5feSWill Deacon  * event of failure, operations involving the RXTX buffers are locked for
26048be5feSWill Deacon  * the duration and are therefore serialised.
27048be5feSWill Deacon  */
28048be5feSWill Deacon 
29048be5feSWill Deacon #include <linux/arm-smccc.h>
30048be5feSWill Deacon #include <linux/arm_ffa.h>
31bc3888a0SWill Deacon #include <asm/kvm_pkvm.h>
32bc3888a0SWill Deacon 
33048be5feSWill Deacon #include <nvhe/ffa.h>
349d0c6a9aSWill Deacon #include <nvhe/mem_protect.h>
359d0c6a9aSWill Deacon #include <nvhe/memory.h>
36048be5feSWill Deacon #include <nvhe/trap_handler.h>
37bc3888a0SWill Deacon #include <nvhe/spinlock.h>
38048be5feSWill Deacon 
3912bdce4fSWill Deacon /*
4012bdce4fSWill Deacon  * "ID value 0 must be returned at the Non-secure physical FF-A instance"
4112bdce4fSWill Deacon  * We share this ID with the host.
4212bdce4fSWill Deacon  */
4312bdce4fSWill Deacon #define HOST_FFA_ID	0
4412bdce4fSWill Deacon 
450a9f15fdSQuentin Perret /*
460a9f15fdSQuentin Perret  * A buffer to hold the maximum descriptor size we can see from the host,
470a9f15fdSQuentin Perret  * which is required when the SPMD returns a fragmented FFA_MEM_RETRIEVE_RESP
480a9f15fdSQuentin Perret  * when resolving the handle on the reclaim path.
490a9f15fdSQuentin Perret  */
struct kvm_ffa_descriptor_buffer {
	void	*buf;	/* hyp VA of the descriptor scratch buffer */
	size_t	len;	/* capacity of @buf in bytes */
};

/* Scratch space for reassembling fragmented retrieve responses. */
static struct kvm_ffa_descriptor_buffer ffa_desc_buf;
560a9f15fdSQuentin Perret 
struct kvm_ffa_buffers {
	hyp_spinlock_t lock;	/* serialises use of the RX/TX pair */
	void *tx;		/* transmit mailbox (hyp VA), NULL if unmapped */
	void *rx;		/* receive mailbox (hyp VA), NULL if unmapped */
};

/*
 * Note that we don't currently lock these buffers explicitly, instead
 * relying on the locking of the host FFA buffers as we only have one
 * client.
 */
static struct kvm_ffa_buffers hyp_buffers;
static struct kvm_ffa_buffers host_buffers;
70bc3888a0SWill Deacon 
/*
 * Fill @res with an FFA_ERROR return: the error code goes in w2 (a2)
 * per the FF-A ABI, and all other result registers are zeroed.
 */
static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno)
{
	struct arm_smccc_res err = {
		.a0	= FFA_ERROR,
		.a2	= ffa_errno,
	};

	*res = err;
}
78048be5feSWill Deacon 
/*
 * Translate an internal return code into an SMCCC result: FFA_SUCCESS
 * carries @prop in a2, anything else becomes an FFA_ERROR.
 */
static void ffa_to_smccc_res_prop(struct arm_smccc_res *res, int ret, u64 prop)
{
	if (ret != FFA_RET_SUCCESS) {
		ffa_to_smccc_error(res, ret);
		return;
	}

	*res = (struct arm_smccc_res) {
		.a0	= FFA_SUCCESS,
		.a2	= prop,
	};
}
889d0c6a9aSWill Deacon 
/* Convenience wrapper: success/error result with no property value. */
static void ffa_to_smccc_res(struct arm_smccc_res *res, int ret)
{
	ffa_to_smccc_res_prop(res, ret, 0);
}
9320936cd1SFuad Tabba 
/*
 * Copy the SMC result registers a0-a3 back into the host's x0-x3 so
 * the trapped caller observes them on return from the exception.
 */
static void ffa_set_retval(struct kvm_cpu_context *ctxt,
			   struct arm_smccc_res *res)
{
	cpu_reg(ctxt, 0) = res->a0;
	cpu_reg(ctxt, 1) = res->a1;
	cpu_reg(ctxt, 2) = res->a2;
	cpu_reg(ctxt, 3) = res->a3;
}
102048be5feSWill Deacon 
is_ffa_call(u64 func_id)103048be5feSWill Deacon static bool is_ffa_call(u64 func_id)
104048be5feSWill Deacon {
105048be5feSWill Deacon 	return ARM_SMCCC_IS_FAST_CALL(func_id) &&
106048be5feSWill Deacon 	       ARM_SMCCC_OWNER_NUM(func_id) == ARM_SMCCC_OWNER_STANDARD &&
107048be5feSWill Deacon 	       ARM_SMCCC_FUNC_NUM(func_id) >= FFA_MIN_FUNC_NUM &&
108048be5feSWill Deacon 	       ARM_SMCCC_FUNC_NUM(func_id) <= FFA_MAX_FUNC_NUM;
109048be5feSWill Deacon }
110048be5feSWill Deacon 
/*
 * Register the hypervisor's own RX/TX pair with the SPMD via
 * FFA_FN64_RXTX_MAP. @ffa_page_count is in FFA_PAGE_SIZE units.
 * Returns FFA_RET_SUCCESS or the FF-A error code from a2.
 */
static int ffa_map_hyp_buffers(u64 ffa_page_count)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_FN64_RXTX_MAP,
			  hyp_virt_to_phys(hyp_buffers.tx),
			  hyp_virt_to_phys(hyp_buffers.rx),
			  ffa_page_count,
			  0, 0, 0, 0,
			  &res);

	return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
}
1249d0c6a9aSWill Deacon 
/*
 * Unregister the hypervisor RX/TX pair from the SPMD (FFA_RXTX_UNMAP,
 * identifying ourselves as HOST_FFA_ID since we share that ID).
 * Returns FFA_RET_SUCCESS or the FF-A error code from a2.
 */
static int ffa_unmap_hyp_buffers(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_RXTX_UNMAP,
			  HOST_FFA_ID,
			  0, 0, 0, 0, 0, 0,
			  &res);

	return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
}
1369d0c6a9aSWill Deacon 
/*
 * Forward one FFA_MEM_FRAG_TX fragment to EL3 for the transaction
 * identified by the {handle_lo, handle_hi} pair.
 */
static void ffa_mem_frag_tx(struct arm_smccc_res *res, u32 handle_lo,
			     u32 handle_hi, u32 fraglen, u32 endpoint_id)
{
	arm_smccc_1_1_smc(FFA_MEM_FRAG_TX,
			  handle_lo, handle_hi, fraglen, endpoint_id,
			  0, 0, 0,
			  res);
}
1450a9f15fdSQuentin Perret 
/*
 * Request the next fragment of a retrieve response from EL3
 * (FFA_MEM_FRAG_RX), starting at byte offset @fragoff.
 */
static void ffa_mem_frag_rx(struct arm_smccc_res *res, u32 handle_lo,
			     u32 handle_hi, u32 fragoff)
{
	arm_smccc_1_1_smc(FFA_MEM_FRAG_RX,
			  handle_lo, handle_hi, fragoff, HOST_FFA_ID,
			  0, 0, 0,
			  res);
}
1540a9f15fdSQuentin Perret 
/*
 * Issue a memory transfer call (@func_id is FFA_FN64_MEM_SHARE or
 * FFA_FN64_MEM_LEND): @len is the total descriptor length, @fraglen
 * the length of the first fragment held in the TX buffer.
 */
static void ffa_mem_xfer(struct arm_smccc_res *res, u64 func_id, u32 len,
			  u32 fraglen)
{
	arm_smccc_1_1_smc(func_id, len, fraglen,
			  0, 0, 0, 0, 0,
			  res);
}
16243609000SWill Deacon 
/*
 * Ask EL3 to reclaim a previously shared/lent region identified by the
 * {handle_lo, handle_hi} pair (FFA_MEM_RECLAIM).
 */
static void ffa_mem_reclaim(struct arm_smccc_res *res, u32 handle_lo,
			     u32 handle_hi, u32 flags)
{
	arm_smccc_1_1_smc(FFA_MEM_RECLAIM,
			  handle_lo, handle_hi, flags,
			  0, 0, 0, 0,
			  res);
}
1710e3bcb49SWill Deacon 
/*
 * Issue FFA_FN64_MEM_RETRIEVE_REQ with a single-fragment request of
 * @len bytes already placed in the hyp TX buffer (total == fragment).
 */
static void ffa_retrieve_req(struct arm_smccc_res *res, u32 len)
{
	arm_smccc_1_1_smc(FFA_FN64_MEM_RETRIEVE_REQ,
			  len, len,
			  0, 0, 0, 0, 0,
			  res);
}
1790e3bcb49SWill Deacon 
/*
 * Handle FFA_FN64_RXTX_MAP from the host: validate the host's proposed
 * RX/TX pair, register our own buffers with the SPMD, then share and pin
 * the host pages into the hyp address space. On any failure the steps
 * already taken are unwound in reverse order via the goto chain below.
 */
static void do_ffa_rxtx_map(struct arm_smccc_res *res,
			    struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(phys_addr_t, tx, ctxt, 1);
	DECLARE_REG(phys_addr_t, rx, ctxt, 2);
	DECLARE_REG(u32, npages, ctxt, 3);
	int ret = 0;
	void *rx_virt, *tx_virt;

	/* The buffer size is fixed; npages is counted in FFA_PAGE_SIZE units. */
	if (npages != (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) / FFA_PAGE_SIZE) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	if (!PAGE_ALIGNED(tx) || !PAGE_ALIGNED(rx)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	/* Only one mapping at a time; reject if already mapped. */
	if (host_buffers.tx) {
		ret = FFA_RET_DENIED;
		goto out_unlock;
	}

	/*
	 * Map our hypervisor buffers into the SPMD before mapping and
	 * pinning the host buffers in our own address space.
	 */
	ret = ffa_map_hyp_buffers(npages);
	if (ret)
		goto out_unlock;

	ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(tx));
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unmap;
	}

	ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(rx));
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unshare_tx;
	}

	/* Pin the shared pages so the host can't unshare them underneath us. */
	tx_virt = hyp_phys_to_virt(tx);
	ret = hyp_pin_shared_mem(tx_virt, tx_virt + 1);
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unshare_rx;
	}

	rx_virt = hyp_phys_to_virt(rx);
	ret = hyp_pin_shared_mem(rx_virt, rx_virt + 1);
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unpin_tx;
	}

	/* Publish the mapping; host_buffers.tx non-NULL means "mapped". */
	host_buffers.tx = tx_virt;
	host_buffers.rx = rx_virt;

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	ffa_to_smccc_res(res, ret);
	return;

	/* Error unwind: undo each successful step in reverse order. */
err_unpin_tx:
	hyp_unpin_shared_mem(tx_virt, tx_virt + 1);
err_unshare_rx:
	__pkvm_host_unshare_hyp(hyp_phys_to_pfn(rx));
err_unshare_tx:
	__pkvm_host_unshare_hyp(hyp_phys_to_pfn(tx));
err_unmap:
	ffa_unmap_hyp_buffers();
	goto out_unlock;
}
2589d0c6a9aSWill Deacon 
/*
 * Handle FFA_RXTX_UNMAP from the host: unpin and unshare the host's
 * RX/TX pages, then unregister our own buffers from the SPMD. The
 * reverse of do_ffa_rxtx_map().
 */
static void do_ffa_rxtx_unmap(struct arm_smccc_res *res,
			      struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, id, ctxt, 1);
	int ret = 0;

	/* We only proxy for the host, which shares our FF-A ID. */
	if (id != HOST_FFA_ID) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	/* Nothing to unmap if the buffers were never mapped. */
	if (!host_buffers.tx) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	hyp_unpin_shared_mem(host_buffers.tx, host_buffers.tx + 1);
	WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.tx)));
	host_buffers.tx = NULL;

	hyp_unpin_shared_mem(host_buffers.rx, host_buffers.rx + 1);
	WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.rx)));
	host_buffers.rx = NULL;

	ffa_unmap_hyp_buffers();

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	ffa_to_smccc_res(res, ret);
}
2919d0c6a9aSWill Deacon 
/*
 * Transition up to @nranges address ranges from host-owned to
 * host-shared-with-FF-A in the host stage-2. Stops at the first range
 * that is not page-aligned or that the share operation rejects.
 * Returns the number of ranges successfully shared.
 */
static u32 __ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
				   u32 nranges)
{
	u32 nshared = 0;

	while (nshared < nranges) {
		struct ffa_mem_region_addr_range *r = &ranges[nshared];
		u64 nbytes = (u64)r->pg_cnt * FFA_PAGE_SIZE;

		if (!PAGE_ALIGNED(nbytes))
			break;

		if (__pkvm_host_share_ffa(hyp_phys_to_pfn(r->address),
					  nbytes / PAGE_SIZE))
			break;

		nshared++;
	}

	return nshared;
}
31143609000SWill Deacon 
/*
 * Transition up to @nranges address ranges back from shared-with-FF-A
 * to host-owned in the host stage-2. Stops at the first range that is
 * not page-aligned or that the unshare operation rejects.
 * Returns the number of ranges successfully unshared.
 */
static u32 __ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
				     u32 nranges)
{
	u32 nunshared = 0;

	while (nunshared < nranges) {
		struct ffa_mem_region_addr_range *r = &ranges[nunshared];
		u64 nbytes = (u64)r->pg_cnt * FFA_PAGE_SIZE;

		if (!PAGE_ALIGNED(nbytes))
			break;

		if (__pkvm_host_unshare_ffa(hyp_phys_to_pfn(r->address),
					    nbytes / PAGE_SIZE))
			break;

		nunshared++;
	}

	return nunshared;
}
33143609000SWill Deacon 
/*
 * All-or-nothing wrapper around __ffa_host_share_ranges(): if any range
 * fails to share, roll back those already shared and fail the whole
 * transaction with FFA_RET_DENIED.
 */
static int ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
				 u32 nranges)
{
	u32 done = __ffa_host_share_ranges(ranges, nranges);

	if (done == nranges)
		return 0;

	/* Partial success: undo what we did; a failed undo is fatal news. */
	WARN_ON(__ffa_host_unshare_ranges(ranges, done) != done);
	return FFA_RET_DENIED;
}
34543609000SWill Deacon 
/*
 * All-or-nothing wrapper around __ffa_host_unshare_ranges(): if any
 * range fails to unshare, re-share those already unshared and fail the
 * whole transaction with FFA_RET_DENIED.
 */
static int ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
				   u32 nranges)
{
	u32 done = __ffa_host_unshare_ranges(ranges, nranges);

	if (done == nranges)
		return 0;

	/* Partial success: restore the shared state we just tore down. */
	WARN_ON(__ffa_host_share_ranges(ranges, done) != done);
	return FFA_RET_DENIED;
}
35943609000SWill Deacon 
/*
 * Handle FFA_MEM_FRAG_TX from the host: copy the next fragment of
 * constituent ranges out of the host TX buffer, update the host
 * stage-2 to share those ranges, then forward the fragment to EL3.
 * A share failure aborts the whole transaction via FFA_MEM_RECLAIM.
 */
static void do_ffa_mem_frag_tx(struct arm_smccc_res *res,
			       struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, handle_lo, ctxt, 1);
	DECLARE_REG(u32, handle_hi, ctxt, 2);
	DECLARE_REG(u32, fraglen, ctxt, 3);
	DECLARE_REG(u32, endpoint_id, ctxt, 4);
	struct ffa_mem_region_addr_range *buf;
	int ret = FFA_RET_INVALID_PARAMETERS;
	u32 nr_ranges;

	/* Fragment must fit in the mailbox ... */
	if (fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)
		goto out;

	/* ... and be a whole number of address-range entries. */
	if (fraglen % sizeof(*buf))
		goto out;

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.tx)
		goto out_unlock;

	/* Snapshot the fragment so the host can't change it under us. */
	buf = hyp_buffers.tx;
	memcpy(buf, host_buffers.tx, fraglen);
	nr_ranges = fraglen / sizeof(*buf);

	ret = ffa_host_share_ranges(buf, nr_ranges);
	if (ret) {
		/*
		 * We're effectively aborting the transaction, so we need
		 * to restore the global state back to what it was prior to
		 * transmission of the first fragment.
		 */
		ffa_mem_reclaim(res, handle_lo, handle_hi, 0);
		WARN_ON(res->a0 != FFA_SUCCESS);
		goto out_unlock;
	}

	ffa_mem_frag_tx(res, handle_lo, handle_hi, fraglen, endpoint_id);
	/* EL3 rejected the fragment: unwind the stage-2 changes we made. */
	if (res->a0 != FFA_SUCCESS && res->a0 != FFA_MEM_FRAG_RX)
		WARN_ON(ffa_host_unshare_ranges(buf, nr_ranges));

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	if (ret)
		ffa_to_smccc_res(res, ret);

	/*
	 * If for any reason this did not succeed, we're in trouble as we have
	 * now lost the content of the previous fragments and we can't rollback
	 * the host stage-2 changes. The pages previously marked as shared will
	 * remain stuck in that state forever, hence preventing the host from
	 * sharing/donating them again and may possibly lead to subsequent
	 * failures, but this will not compromise confidentiality.
	 */
	return;
}
4170a9f15fdSQuentin Perret 
/*
 * Handle FFA_FN64_MEM_SHARE / FFA_FN64_MEM_LEND from the host: validate
 * the first fragment of the memory descriptor, share the constituent
 * ranges in the host stage-2, then reissue the call to EL3. If EL3
 * rejects the transfer, the stage-2 changes are rolled back.
 * __always_inline + BUILD_BUG_ON restricts func_id to the two callers.
 */
static __always_inline void do_ffa_mem_xfer(const u64 func_id,
					    struct arm_smccc_res *res,
					    struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, len, ctxt, 1);
	DECLARE_REG(u32, fraglen, ctxt, 2);
	DECLARE_REG(u64, addr_mbz, ctxt, 3);
	DECLARE_REG(u32, npages_mbz, ctxt, 4);
	struct ffa_composite_mem_region *reg;
	struct ffa_mem_region *buf;
	u32 offset, nr_ranges;
	int ret = 0;

	BUILD_BUG_ON(func_id != FFA_FN64_MEM_SHARE &&
		     func_id != FFA_FN64_MEM_LEND);

	/* x3/x4 must be zero (no dynamic buffers); fragment must fit. */
	if (addr_mbz || npages_mbz || fraglen > len ||
	    fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	/* Need at least the region header plus one endpoint entry. */
	if (fraglen < sizeof(struct ffa_mem_region) +
		      sizeof(struct ffa_mem_region_attributes)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.tx) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	/* Snapshot the descriptor so the host can't change it under us. */
	buf = hyp_buffers.tx;
	memcpy(buf, host_buffers.tx, fraglen);

	/* Single endpoint only, and the sender must be the host. */
	offset = buf->ep_mem_access[0].composite_off;
	if (!offset || buf->ep_count != 1 || buf->sender_id != HOST_FFA_ID) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	if (fraglen < offset + sizeof(struct ffa_composite_mem_region)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	/* Constituent ranges run from the composite region to fragment end. */
	reg = (void *)buf + offset;
	nr_ranges = ((void *)buf + fraglen) - (void *)reg->constituents;
	if (nr_ranges % sizeof(reg->constituents[0])) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	nr_ranges /= sizeof(reg->constituents[0]);
	ret = ffa_host_share_ranges(reg->constituents, nr_ranges);
	if (ret)
		goto out_unlock;

	ffa_mem_xfer(res, func_id, len, fraglen);
	if (fraglen != len) {
		/* Multi-fragment: EL3 must ask for more via FFA_MEM_FRAG_RX. */
		if (res->a0 != FFA_MEM_FRAG_RX)
			goto err_unshare;

		if (res->a3 != fraglen)
			goto err_unshare;
	} else if (res->a0 != FFA_SUCCESS) {
		goto err_unshare;
	}

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	if (ret)
		ffa_to_smccc_res(res, ret);
	return;

err_unshare:
	/* EL3 refused; roll the host stage-2 back to unshared. */
	WARN_ON(ffa_host_unshare_ranges(reg->constituents, nr_ranges));
	goto out_unlock;
}
50043609000SWill Deacon 
/*
 * Handle FFA_MEM_RECLAIM from the host: retrieve the full (possibly
 * fragmented) descriptor for the handle from EL3 so we know which
 * ranges were shared, issue the reclaim, and on success return those
 * ranges to host-owned state in the host stage-2.
 */
static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
			       struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, handle_lo, ctxt, 1);
	DECLARE_REG(u32, handle_hi, ctxt, 2);
	DECLARE_REG(u32, flags, ctxt, 3);
	struct ffa_composite_mem_region *reg;
	u32 offset, len, fraglen, fragoff;
	struct ffa_mem_region *buf;
	int ret = 0;
	u64 handle;

	handle = PACK_HANDLE(handle_lo, handle_hi);

	hyp_spin_lock(&host_buffers.lock);

	/* Build a minimal retrieve request for the handle in our TX buffer. */
	buf = hyp_buffers.tx;
	*buf = (struct ffa_mem_region) {
		.sender_id	= HOST_FFA_ID,
		.handle		= handle,
	};

	ffa_retrieve_req(res, sizeof(*buf));
	buf = hyp_buffers.rx;
	if (res->a0 != FFA_MEM_RETRIEVE_RESP)
		goto out_unlock;

	len = res->a1;	/* total descriptor length */
	fraglen = res->a2;	/* length of this first fragment */

	offset = buf->ep_mem_access[0].composite_off;
	/*
	 * We can trust the SPMD to get this right, but let's at least
	 * check that we end up with something that doesn't look _completely_
	 * bogus.
	 */
	if (WARN_ON(offset > len ||
		    fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) {
		ret = FFA_RET_ABORTED;
		goto out_unlock;
	}

	if (len > ffa_desc_buf.len) {
		ret = FFA_RET_NO_MEMORY;
		goto out_unlock;
	}

	/* Reassemble the full descriptor into the scratch buffer ... */
	buf = ffa_desc_buf.buf;
	memcpy(buf, hyp_buffers.rx, fraglen);

	/* ... pulling any remaining fragments from EL3 one at a time. */
	for (fragoff = fraglen; fragoff < len; fragoff += fraglen) {
		ffa_mem_frag_rx(res, handle_lo, handle_hi, fragoff);
		if (res->a0 != FFA_MEM_FRAG_TX) {
			ret = FFA_RET_INVALID_PARAMETERS;
			goto out_unlock;
		}

		fraglen = res->a3;
		memcpy((void *)buf + fragoff, hyp_buffers.rx, fraglen);
	}

	ffa_mem_reclaim(res, handle_lo, handle_hi, flags);
	if (res->a0 != FFA_SUCCESS)
		goto out_unlock;

	reg = (void *)buf + offset;
	/* If the SPMD was happy, then we should be too. */
	WARN_ON(ffa_host_unshare_ranges(reg->constituents,
					reg->addr_range_cnt));
out_unlock:
	hyp_spin_unlock(&host_buffers.lock);

	if (ret)
		ffa_to_smccc_res(res, ret);
}
5760e3bcb49SWill Deacon 
577048be5feSWill Deacon /*
578048be5feSWill Deacon  * Is a given FFA function supported, either by forwarding on directly
579048be5feSWill Deacon  * or by handling at EL2?
580048be5feSWill Deacon  */
ffa_call_supported(u64 func_id)581048be5feSWill Deacon static bool ffa_call_supported(u64 func_id)
582048be5feSWill Deacon {
583048be5feSWill Deacon 	switch (func_id) {
584048be5feSWill Deacon 	/* Unsupported memory management calls */
585048be5feSWill Deacon 	case FFA_FN64_MEM_RETRIEVE_REQ:
586048be5feSWill Deacon 	case FFA_MEM_RETRIEVE_RESP:
587048be5feSWill Deacon 	case FFA_MEM_RELINQUISH:
588048be5feSWill Deacon 	case FFA_MEM_OP_PAUSE:
589048be5feSWill Deacon 	case FFA_MEM_OP_RESUME:
590048be5feSWill Deacon 	case FFA_MEM_FRAG_RX:
591048be5feSWill Deacon 	case FFA_FN64_MEM_DONATE:
592048be5feSWill Deacon 	/* Indirect message passing via RX/TX buffers */
593048be5feSWill Deacon 	case FFA_MSG_SEND:
594048be5feSWill Deacon 	case FFA_MSG_POLL:
595048be5feSWill Deacon 	case FFA_MSG_WAIT:
596048be5feSWill Deacon 	/* 32-bit variants of 64-bit calls */
597048be5feSWill Deacon 	case FFA_MSG_SEND_DIRECT_REQ:
598048be5feSWill Deacon 	case FFA_MSG_SEND_DIRECT_RESP:
599048be5feSWill Deacon 	case FFA_RXTX_MAP:
600048be5feSWill Deacon 	case FFA_MEM_DONATE:
601048be5feSWill Deacon 	case FFA_MEM_RETRIEVE_REQ:
602048be5feSWill Deacon 		return false;
603048be5feSWill Deacon 	}
604048be5feSWill Deacon 
605048be5feSWill Deacon 	return true;
606048be5feSWill Deacon }
607048be5feSWill Deacon 
do_ffa_features(struct arm_smccc_res * res,struct kvm_cpu_context * ctxt)60820936cd1SFuad Tabba static bool do_ffa_features(struct arm_smccc_res *res,
60920936cd1SFuad Tabba 			    struct kvm_cpu_context *ctxt)
61020936cd1SFuad Tabba {
61120936cd1SFuad Tabba 	DECLARE_REG(u32, id, ctxt, 1);
61220936cd1SFuad Tabba 	u64 prop = 0;
61320936cd1SFuad Tabba 	int ret = 0;
61420936cd1SFuad Tabba 
61520936cd1SFuad Tabba 	if (!ffa_call_supported(id)) {
61620936cd1SFuad Tabba 		ret = FFA_RET_NOT_SUPPORTED;
61720936cd1SFuad Tabba 		goto out_handled;
61820936cd1SFuad Tabba 	}
61920936cd1SFuad Tabba 
62020936cd1SFuad Tabba 	switch (id) {
62120936cd1SFuad Tabba 	case FFA_MEM_SHARE:
62220936cd1SFuad Tabba 	case FFA_FN64_MEM_SHARE:
62320936cd1SFuad Tabba 	case FFA_MEM_LEND:
62420936cd1SFuad Tabba 	case FFA_FN64_MEM_LEND:
62520936cd1SFuad Tabba 		ret = FFA_RET_SUCCESS;
62620936cd1SFuad Tabba 		prop = 0; /* No support for dynamic buffers */
62720936cd1SFuad Tabba 		goto out_handled;
62820936cd1SFuad Tabba 	default:
62920936cd1SFuad Tabba 		return false;
63020936cd1SFuad Tabba 	}
63120936cd1SFuad Tabba 
63220936cd1SFuad Tabba out_handled:
63320936cd1SFuad Tabba 	ffa_to_smccc_res_prop(res, ret, prop);
63420936cd1SFuad Tabba 	return true;
63520936cd1SFuad Tabba }
63620936cd1SFuad Tabba 
kvm_host_ffa_handler(struct kvm_cpu_context * host_ctxt,u32 func_id)637*373beef0SJean-Philippe Brucker bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
638048be5feSWill Deacon {
639048be5feSWill Deacon 	struct arm_smccc_res res;
640048be5feSWill Deacon 
641048be5feSWill Deacon 	/*
642048be5feSWill Deacon 	 * There's no way we can tell what a non-standard SMC call might
643048be5feSWill Deacon 	 * be up to. Ideally, we would terminate these here and return
644048be5feSWill Deacon 	 * an error to the host, but sadly devices make use of custom
645048be5feSWill Deacon 	 * firmware calls for things like power management, debugging,
646048be5feSWill Deacon 	 * RNG access and crash reporting.
647048be5feSWill Deacon 	 *
648048be5feSWill Deacon 	 * Given that the architecture requires us to trust EL3 anyway,
649048be5feSWill Deacon 	 * we forward unrecognised calls on under the assumption that
650048be5feSWill Deacon 	 * the firmware doesn't expose a mechanism to access arbitrary
651048be5feSWill Deacon 	 * non-secure memory. Short of a per-device table of SMCs, this
652048be5feSWill Deacon 	 * is the best we can do.
653048be5feSWill Deacon 	 */
654048be5feSWill Deacon 	if (!is_ffa_call(func_id))
655048be5feSWill Deacon 		return false;
656048be5feSWill Deacon 
6579d0c6a9aSWill Deacon 	switch (func_id) {
65820936cd1SFuad Tabba 	case FFA_FEATURES:
65920936cd1SFuad Tabba 		if (!do_ffa_features(&res, host_ctxt))
66020936cd1SFuad Tabba 			return false;
66120936cd1SFuad Tabba 		goto out_handled;
6629d0c6a9aSWill Deacon 	/* Memory management */
6639d0c6a9aSWill Deacon 	case FFA_FN64_RXTX_MAP:
6649d0c6a9aSWill Deacon 		do_ffa_rxtx_map(&res, host_ctxt);
6659d0c6a9aSWill Deacon 		goto out_handled;
6669d0c6a9aSWill Deacon 	case FFA_RXTX_UNMAP:
6679d0c6a9aSWill Deacon 		do_ffa_rxtx_unmap(&res, host_ctxt);
6689d0c6a9aSWill Deacon 		goto out_handled;
66943609000SWill Deacon 	case FFA_MEM_SHARE:
67043609000SWill Deacon 	case FFA_FN64_MEM_SHARE:
671634d90cfSWill Deacon 		do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, host_ctxt);
67243609000SWill Deacon 		goto out_handled;
6730e3bcb49SWill Deacon 	case FFA_MEM_RECLAIM:
6740e3bcb49SWill Deacon 		do_ffa_mem_reclaim(&res, host_ctxt);
6750e3bcb49SWill Deacon 		goto out_handled;
676634d90cfSWill Deacon 	case FFA_MEM_LEND:
677634d90cfSWill Deacon 	case FFA_FN64_MEM_LEND:
678634d90cfSWill Deacon 		do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, host_ctxt);
679634d90cfSWill Deacon 		goto out_handled;
6800a9f15fdSQuentin Perret 	case FFA_MEM_FRAG_TX:
6810a9f15fdSQuentin Perret 		do_ffa_mem_frag_tx(&res, host_ctxt);
6820a9f15fdSQuentin Perret 		goto out_handled;
6839d0c6a9aSWill Deacon 	}
6849d0c6a9aSWill Deacon 
685048be5feSWill Deacon 	if (ffa_call_supported(func_id))
686048be5feSWill Deacon 		return false; /* Pass through */
687048be5feSWill Deacon 
688048be5feSWill Deacon 	ffa_to_smccc_error(&res, FFA_RET_NOT_SUPPORTED);
6899d0c6a9aSWill Deacon out_handled:
690048be5feSWill Deacon 	ffa_set_retval(host_ctxt, &res);
691048be5feSWill Deacon 	return true;
692048be5feSWill Deacon }
69312bdce4fSWill Deacon 
/*
 * One-time initialisation of the hyp FF-A proxy.
 *
 * @pages: hyp_ffa_proxy_pages() worth of contiguous pages donated to the
 *	   hypervisor, carved up below into the hyp TX mailbox, the hyp RX
 *	   mailbox (KVM_FFA_MBOX_NR_PAGES each) and a scratch descriptor
 *	   buffer (the remainder).
 *
 * Returns 0 on success — including the benign case where FF-A is simply
 * not present, in which case the proxy stays inactive — or a negative
 * error code if the firmware advertises an implementation we cannot
 * work with.
 */
int hyp_ffa_init(void *pages)
{
	struct arm_smccc_res res;
	size_t min_rxtx_sz;
	void *tx, *rx;

	/* The SMC64 FF-A calls used below require SMCCC v1.2 or later. */
	if (kvm_host_psci_config.smccc_version < ARM_SMCCC_VERSION_1_2)
		return 0;

	/* Probe for FF-A: absence is not an error, the proxy just idles. */
	arm_smccc_1_1_smc(FFA_VERSION, FFA_VERSION_1_0, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 == FFA_RET_NOT_SUPPORTED)
		return 0;

	/*
	 * Firmware returns the maximum supported version of the FF-A
	 * implementation. Check that the returned version is
	 * backwards-compatible with the hyp according to the rules in DEN0077A
	 * v1.1 REL0 13.2.1.
	 *
	 * Of course, things are never simple when dealing with firmware. v1.1
	 * broke ABI with v1.0 on several structures, which is itself
	 * incompatible with the aforementioned versioning scheme. The
	 * expectation is that v1.x implementations that do not support the v1.0
	 * ABI return NOT_SUPPORTED rather than a version number, according to
	 * DEN0077A v1.1 REL0 18.6.4.
	 */
	if (FFA_MAJOR_VERSION(res.a0) != 1)
		return -EOPNOTSUPP;

	/*
	 * The proxy issues FF-A calls on the host's behalf, so the ID the
	 * firmware assigned us must be the one we impersonate as HOST_FFA_ID.
	 */
	arm_smccc_1_1_smc(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 != FFA_SUCCESS)
		return -EOPNOTSUPP;

	if (res.a2 != HOST_FFA_ID)
		return -EINVAL;

	/* Query the minimum buffer granule the firmware accepts for RXTX_MAP. */
	arm_smccc_1_1_smc(FFA_FEATURES, FFA_FN64_RXTX_MAP,
			  0, 0, 0, 0, 0, 0, &res);
	if (res.a0 != FFA_SUCCESS)
		return -EOPNOTSUPP;

	switch (res.a2) {
	case FFA_FEAT_RXTX_MIN_SZ_4K:
		min_rxtx_sz = SZ_4K;
		break;
	case FFA_FEAT_RXTX_MIN_SZ_16K:
		min_rxtx_sz = SZ_16K;
		break;
	case FFA_FEAT_RXTX_MIN_SZ_64K:
		min_rxtx_sz = SZ_64K;
		break;
	default:
		return -EINVAL;
	}

	/* Our mailboxes are PAGE_SIZE each; a larger granule can't be met. */
	if (min_rxtx_sz > PAGE_SIZE)
		return -EOPNOTSUPP;

	/* Carve the donated pages up: TX mailbox, RX mailbox, then scratch. */
	tx = pages;
	pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
	rx = pages;
	pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;

	ffa_desc_buf = (struct kvm_ffa_descriptor_buffer) {
		.buf	= pages,
		.len	= PAGE_SIZE *
			  (hyp_ffa_proxy_pages() - (2 * KVM_FFA_MBOX_NR_PAGES)),
	};

	hyp_buffers = (struct kvm_ffa_buffers) {
		.lock	= __HYP_SPIN_LOCK_UNLOCKED,
		.tx	= tx,
		.rx	= rx,
	};

	/* Host mailboxes are registered later, via FFA_FN64_RXTX_MAP. */
	host_buffers = (struct kvm_ffa_buffers) {
		.lock	= __HYP_SPIN_LOCK_UNLOCKED,
	};

	return 0;
}
775