// SPDX-License-Identifier: GPL-2.0-only
/*
 * FF-A v1.0 proxy to filter out invalid memory-sharing SMC calls issued by
 * the host. FF-A is a slightly more palatable abbreviation of "Arm Firmware
 * Framework for Arm A-profile", which is specified by Arm in document
 * number DEN0077.
 *
 * Copyright (C) 2022 - Google LLC
 * Author: Andrew Walbran <qwandor@google.com>
 *
 * This driver hooks into the SMC trapping logic for the host and intercepts
 * all calls falling within the FF-A range. Each call is either:
 *
 * - Forwarded on unmodified to the SPMD at EL3
 * - Rejected as "unsupported"
 * - Accompanied by a host stage-2 page-table check/update and reissued
 *
 * Consequently, any attempts by the host to make guest memory pages
 * accessible to the secure world using FF-A will be detected either here
 * (in the case that the memory is already owned by the guest) or during
 * donation to the guest (in the case that the memory was previously shared
 * with the secure world).
 *
 * To allow the rolling-back of page-table updates and FF-A calls in the
 * event of failure, operations involving the RXTX buffers are locked for
 * the duration and are therefore serialised.
 */

#include <linux/arm-smccc.h>
#include <linux/arm_ffa.h>
#include <asm/kvm_pkvm.h>

#include <nvhe/ffa.h>
#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
#include <nvhe/trap_handler.h>
#include <nvhe/spinlock.h>

/*
 * "ID value 0 must be returned at the Non-secure physical FF-A instance"
 * We share this ID with the host.
 */
#define HOST_FFA_ID	0

struct kvm_ffa_buffers {
	hyp_spinlock_t lock;
	void *tx;
	void *rx;
};

/*
 * Note that we don't currently lock these buffers explicitly, instead
 * relying on the locking of the host FFA buffers as we only have one
 * client.
 */
static struct kvm_ffa_buffers hyp_buffers;
static struct kvm_ffa_buffers host_buffers;

static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno)
{
	*res = (struct arm_smccc_res) {
		.a0 = FFA_ERROR,
		.a2 = ffa_errno,
	};
}

static void ffa_to_smccc_res(struct arm_smccc_res *res, int ret)
{
	if (ret == FFA_RET_SUCCESS) {
		*res = (struct arm_smccc_res) { .a0 = FFA_SUCCESS };
	} else {
		ffa_to_smccc_error(res, ret);
	}
}

static void ffa_set_retval(struct kvm_cpu_context *ctxt,
			   struct arm_smccc_res *res)
{
	cpu_reg(ctxt, 0) = res->a0;
	cpu_reg(ctxt, 1) = res->a1;
	cpu_reg(ctxt, 2) = res->a2;
	cpu_reg(ctxt, 3) = res->a3;
}

static bool is_ffa_call(u64 func_id)
{
	return ARM_SMCCC_IS_FAST_CALL(func_id) &&
	       ARM_SMCCC_OWNER_NUM(func_id) == ARM_SMCCC_OWNER_STANDARD &&
	       ARM_SMCCC_FUNC_NUM(func_id) >= FFA_MIN_FUNC_NUM &&
	       ARM_SMCCC_FUNC_NUM(func_id) <= FFA_MAX_FUNC_NUM;
}

static int ffa_map_hyp_buffers(u64 ffa_page_count)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_FN64_RXTX_MAP,
			  hyp_virt_to_phys(hyp_buffers.tx),
			  hyp_virt_to_phys(hyp_buffers.rx),
			  ffa_page_count,
			  0, 0, 0, 0,
			  &res);

	return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
}

static int ffa_unmap_hyp_buffers(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_RXTX_UNMAP,
			  HOST_FFA_ID,
			  0, 0, 0, 0, 0, 0,
			  &res);

	return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
}
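/*
 * Handle an FFA_FN64_RXTX_MAP call from the host: sanity-check the size
 * and alignment of the advertised buffers, map the hypervisor's own
 * RX/TX pair into the SPMD and then share and pin the host's pages in
 * the hypervisor's address space. Failures unwind in reverse order
 * through the err_* labels.
 */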
static void do_ffa_rxtx_map(struct arm_smccc_res *res,
			    struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(phys_addr_t, tx, ctxt, 1);
	DECLARE_REG(phys_addr_t, rx, ctxt, 2);
	DECLARE_REG(u32, npages, ctxt, 3);
	int ret = 0;
	void *rx_virt, *tx_virt;

	if (npages != (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) / FFA_PAGE_SIZE) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	if (!PAGE_ALIGNED(tx) || !PAGE_ALIGNED(rx)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	if (host_buffers.tx) {
		ret = FFA_RET_DENIED;
		goto out_unlock;
	}

	/*
	 * Map our hypervisor buffers into the SPMD before mapping and
	 * pinning the host buffers in our own address space.
	 */
	ret = ffa_map_hyp_buffers(npages);
	if (ret)
		goto out_unlock;

	ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(tx));
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unmap;
	}

	ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(rx));
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unshare_tx;
	}

	tx_virt = hyp_phys_to_virt(tx);
	ret = hyp_pin_shared_mem(tx_virt, tx_virt + 1);
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unshare_rx;
	}

	rx_virt = hyp_phys_to_virt(rx);
	ret = hyp_pin_shared_mem(rx_virt, rx_virt + 1);
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unpin_tx;
	}

	host_buffers.tx = tx_virt;
	host_buffers.rx = rx_virt;

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	ffa_to_smccc_res(res, ret);
	return;

err_unpin_tx:
	hyp_unpin_shared_mem(tx_virt, tx_virt + 1);
err_unshare_rx:
	__pkvm_host_unshare_hyp(hyp_phys_to_pfn(rx));
err_unshare_tx:
	__pkvm_host_unshare_hyp(hyp_phys_to_pfn(tx));
err_unmap:
	ffa_unmap_hyp_buffers();
	goto out_unlock;
}
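/*
 * Handle an FFA_RXTX_UNMAP call from the host: tear down in the reverse
 * order of do_ffa_rxtx_map(), unpinning and unsharing the host's pages
 * before unmapping the hypervisor's own buffer pair from the SPMD.
 */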
static void do_ffa_rxtx_unmap(struct arm_smccc_res *res,
			      struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, id, ctxt, 1);
	int ret = 0;

	if (id != HOST_FFA_ID) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.tx) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	hyp_unpin_shared_mem(host_buffers.tx, host_buffers.tx + 1);
	WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.tx)));
	host_buffers.tx = NULL;

	hyp_unpin_shared_mem(host_buffers.rx, host_buffers.rx + 1);
	WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.rx)));
	host_buffers.rx = NULL;

	ffa_unmap_hyp_buffers();

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	ffa_to_smccc_res(res, ret);
}

/*
 * Is a given FFA function supported, either by forwarding on directly
 * or by handling at EL2?
 */
static bool ffa_call_supported(u64 func_id)
{
	switch (func_id) {
	/* Unsupported memory management calls */
	case FFA_FN64_MEM_RETRIEVE_REQ:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_OP_PAUSE:
	case FFA_MEM_OP_RESUME:
	case FFA_MEM_FRAG_RX:
	case FFA_FN64_MEM_DONATE:
	/* Indirect message passing via RX/TX buffers */
	case FFA_MSG_SEND:
	case FFA_MSG_POLL:
	case FFA_MSG_WAIT:
	/* 32-bit variants of 64-bit calls */
	case FFA_MSG_SEND_DIRECT_REQ:
	case FFA_MSG_SEND_DIRECT_RESP:
	case FFA_RXTX_MAP:
	case FFA_MEM_DONATE:
	case FFA_MEM_RETRIEVE_REQ:
	/* Don't advertise any features just yet */
	case FFA_FEATURES:
		return false;
	}

	return true;
}

bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, func_id, host_ctxt, 0);
	struct arm_smccc_res res;

	/*
	 * There's no way we can tell what a non-standard SMC call might
	 * be up to. Ideally, we would terminate these here and return
	 * an error to the host, but sadly devices make use of custom
	 * firmware calls for things like power management, debugging,
	 * RNG access and crash reporting.
	 *
	 * Given that the architecture requires us to trust EL3 anyway,
	 * we forward unrecognised calls on under the assumption that
	 * the firmware doesn't expose a mechanism to access arbitrary
	 * non-secure memory. Short of a per-device table of SMCs, this
	 * is the best we can do.
	 */
	if (!is_ffa_call(func_id))
		return false;

	switch (func_id) {
	/* Memory management */
	case FFA_FN64_RXTX_MAP:
		do_ffa_rxtx_map(&res, host_ctxt);
		goto out_handled;
	case FFA_RXTX_UNMAP:
		do_ffa_rxtx_unmap(&res, host_ctxt);
		goto out_handled;
	}

	if (ffa_call_supported(func_id))
		return false; /* Pass through */

	ffa_to_smccc_error(&res, FFA_RET_NOT_SUPPORTED);
out_handled:
	ffa_set_retval(host_ctxt, &res);
	return true;
}

int hyp_ffa_init(void *pages)
{
	struct arm_smccc_res res;
	size_t min_rxtx_sz;

	if (kvm_host_psci_config.smccc_version < ARM_SMCCC_VERSION_1_2)
		return 0;

	arm_smccc_1_1_smc(FFA_VERSION, FFA_VERSION_1_0, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 == FFA_RET_NOT_SUPPORTED)
		return 0;

	if (res.a0 != FFA_VERSION_1_0)
		return -EOPNOTSUPP;

	arm_smccc_1_1_smc(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 != FFA_SUCCESS)
		return -EOPNOTSUPP;

	if (res.a2 != HOST_FFA_ID)
		return -EINVAL;

	arm_smccc_1_1_smc(FFA_FEATURES, FFA_FN64_RXTX_MAP,
			  0, 0, 0, 0, 0, 0, &res);
	if (res.a0 != FFA_SUCCESS)
		return -EOPNOTSUPP;

	switch (res.a2) {
	case FFA_FEAT_RXTX_MIN_SZ_4K:
		min_rxtx_sz = SZ_4K;
		break;
	case FFA_FEAT_RXTX_MIN_SZ_16K:
		min_rxtx_sz = SZ_16K;
		break;
	case FFA_FEAT_RXTX_MIN_SZ_64K:
		min_rxtx_sz = SZ_64K;
		break;
	default:
		return -EINVAL;
	}

	if (min_rxtx_sz > PAGE_SIZE)
		return -EOPNOTSUPP;

	hyp_buffers = (struct kvm_ffa_buffers) {
		.lock = __HYP_SPIN_LOCK_UNLOCKED,
		.tx = pages,
		.rx = pages + (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE),
	};

	host_buffers = (struct kvm_ffa_buffers) {
		.lock = __HYP_SPIN_LOCK_UNLOCKED,
	};

	return 0;
}
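/*
 * Illustrative sketch only (the tx_phys/rx_phys/res names below are
 * hypothetical): the host registers its mailbox with an SMC along these
 * lines, which kvm_host_ffa_handler() intercepts and routes to
 * do_ffa_rxtx_map():
 *
 *	arm_smccc_1_1_smc(FFA_FN64_RXTX_MAP,
 *			  tx_phys, rx_phys,
 *			  (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) / FFA_PAGE_SIZE,
 *			  0, 0, 0, 0, &res);
 *
 * and later tears it down with:
 *
 *	arm_smccc_1_1_smc(FFA_RXTX_UNMAP, HOST_FFA_ID,
 *			  0, 0, 0, 0, 0, 0, &res);
 */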