// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#include <linux/bitops.h>
#include <linux/kvm_host.h>

#define INSN_OPCODE_MASK	0x007c
#define INSN_OPCODE_SHIFT	2
#define INSN_OPCODE_SYSTEM	28

#define INSN_MASK_WFI		0xffffffff
#define INSN_MATCH_WFI		0x10500073

#define INSN_MATCH_CSRRW	0x1073
#define INSN_MASK_CSRRW		0x707f
#define INSN_MATCH_CSRRS	0x2073
#define INSN_MASK_CSRRS		0x707f
#define INSN_MATCH_CSRRC	0x3073
#define INSN_MASK_CSRRC		0x707f
#define INSN_MATCH_CSRRWI	0x5073
#define INSN_MASK_CSRRWI	0x707f
#define INSN_MATCH_CSRRSI	0x6073
#define INSN_MASK_CSRRSI	0x707f
#define INSN_MATCH_CSRRCI	0x7073
#define INSN_MASK_CSRRCI	0x707f

#define INSN_MATCH_LB		0x3
#define INSN_MASK_LB		0x707f
#define INSN_MATCH_LH		0x1003
#define INSN_MASK_LH		0x707f
#define INSN_MATCH_LW		0x2003
#define INSN_MASK_LW		0x707f
#define INSN_MATCH_LD		0x3003
#define INSN_MASK_LD		0x707f
#define INSN_MATCH_LBU		0x4003
#define INSN_MASK_LBU		0x707f
#define INSN_MATCH_LHU		0x5003
#define INSN_MASK_LHU		0x707f
#define INSN_MATCH_LWU		0x6003
#define INSN_MASK_LWU		0x707f
#define INSN_MATCH_SB		0x23
#define INSN_MASK_SB		0x707f
#define INSN_MATCH_SH		0x1023
#define INSN_MASK_SH		0x707f
#define INSN_MATCH_SW		0x2023
#define INSN_MASK_SW		0x707f
#define INSN_MATCH_SD		0x3023
#define INSN_MASK_SD		0x707f

#define INSN_MATCH_C_LD		0x6000
#define INSN_MASK_C_LD		0xe003
#define INSN_MATCH_C_SD		0xe000
#define INSN_MASK_C_SD		0xe003
#define INSN_MATCH_C_LW		0x4000
#define INSN_MASK_C_LW		0xe003
#define INSN_MATCH_C_SW		0xc000
#define INSN_MASK_C_SW		0xe003
#define INSN_MATCH_C_LDSP	0x6002
#define INSN_MASK_C_LDSP	0xe003
#define INSN_MATCH_C_SDSP	0xe002
#define INSN_MASK_C_SDSP	0xe003
#define INSN_MATCH_C_LWSP	0x4002
#define INSN_MASK_C_LWSP	0xe003
#define INSN_MATCH_C_SWSP	0xc002
#define INSN_MASK_C_SWSP	0xe003

#define INSN_16BIT_MASK		0x3

#define INSN_IS_16BIT(insn)	(((insn) & INSN_16BIT_MASK) != INSN_16BIT_MASK)

#define INSN_LEN(insn)		(INSN_IS_16BIT(insn) ? 2 : 4)

#ifdef CONFIG_64BIT
#define LOG_REGBYTES		3
#else
#define LOG_REGBYTES		2
#endif
#define REGBYTES		(1 << LOG_REGBYTES)

#define SH_RD			7
#define SH_RS1			15
#define SH_RS2			20
#define SH_RS2C			2
#define MASK_RX			0x1f

#define RV_X(x, s, n)		(((x) >> (s)) & ((1 << (n)) - 1))
#define RVC_LW_IMM(x)		((RV_X(x, 6, 1) << 2) | \
				 (RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 5, 1) << 6))
#define RVC_LD_IMM(x)		((RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 5, 2) << 6))
#define RVC_LWSP_IMM(x)		((RV_X(x, 4, 3) << 2) | \
				 (RV_X(x, 12, 1) << 5) | \
				 (RV_X(x, 2, 2) << 6))
#define RVC_LDSP_IMM(x)		((RV_X(x, 5, 2) << 3) | \
				 (RV_X(x, 12, 1) << 5) | \
				 (RV_X(x, 2, 3) << 6))
#define RVC_SWSP_IMM(x)		((RV_X(x, 9, 4) << 2) | \
				 (RV_X(x, 7, 2) << 6))
#define RVC_SDSP_IMM(x)		((RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 7, 3) << 6))
#define RVC_RS1S(insn)		(8 + RV_X(insn, SH_RD, 3))
#define RVC_RS2S(insn)		(8 + RV_X(insn, SH_RS2C, 3))
#define RVC_RS2(insn)		RV_X(insn, SH_RS2C, 5)
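
/*
 * REG_OFFSET() converts the 5-bit register number found at bit position
 * "pos" of an instruction into a byte offset into the GPRs saved at the
 * beginning of struct kvm_cpu_context, and REG_PTR() turns that offset
 * into a pointer to the saved register. SHIFT_RIGHT() also copes with
 * "pos" being smaller than LOG_REGBYTES (e.g. GET_SP() and the
 * GET_RS*S() helpers pass a raw register number with pos == 0), in
 * which case the value is shifted left instead of right.
 */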
#define SHIFT_RIGHT(x, y)		\
	((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))

#define REG_MASK			\
	((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))

#define REG_OFFSET(insn, pos)		\
	(SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)

#define REG_PTR(insn, pos, regs)	\
	((ulong *)((ulong)(regs) + REG_OFFSET(insn, pos)))

#define GET_FUNCT3(insn)	(((insn) >> 12) & 7)

#define GET_RS1(insn, regs)	(*REG_PTR(insn, SH_RS1, regs))
#define GET_RS2(insn, regs)	(*REG_PTR(insn, SH_RS2, regs))
#define GET_RS1S(insn, regs)	(*REG_PTR(RVC_RS1S(insn), 0, regs))
#define GET_RS2S(insn, regs)	(*REG_PTR(RVC_RS2S(insn), 0, regs))
#define GET_RS2C(insn, regs)	(*REG_PTR(insn, SH_RS2C, regs))
#define GET_SP(regs)		(*REG_PTR(2, 0, regs))
#define SET_RD(insn, regs, val)	(*REG_PTR(insn, SH_RD, regs) = (val))
#define IMM_I(insn)		((s32)(insn) >> 20)
#define IMM_S(insn)		(((s32)(insn) >> 25 << 5) | \
				 (s32)(((insn) >> 7) & 0x1f))

struct insn_func {
	unsigned long mask;
	unsigned long match;
	/*
	 * Possible return values are as follows:
	 * 1) Returns < 0 for error case
	 * 2) Returns 0 for exit to user-space
	 * 3) Returns 1 to continue with next sepc
	 * 4) Returns 2 to continue with same sepc
	 * 5) Returns 3 to inject illegal instruction trap and continue
	 * 6) Returns 4 to inject virtual instruction trap and continue
	 *
	 * Use enum kvm_insn_return for return values
	 */
	int (*func)(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn);
};

static int truly_illegal_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	struct kvm_cpu_trap utrap = { 0 };

	/* Redirect trap to Guest VCPU */
	utrap.sepc = vcpu->arch.guest_context.sepc;
	utrap.scause = EXC_INST_ILLEGAL;
	utrap.stval = insn;
	utrap.htval = 0;
	utrap.htinst = 0;
	kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);

	return 1;
}

static int truly_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	struct kvm_cpu_trap utrap = { 0 };

	/* Redirect trap to Guest VCPU */
	utrap.sepc = vcpu->arch.guest_context.sepc;
	utrap.scause = EXC_VIRTUAL_INST_FAULT;
	utrap.stval = insn;
	utrap.htval = 0;
	utrap.htinst = 0;
	kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);

	return 1;
}

/**
 * kvm_riscv_vcpu_wfi -- Emulate wait for interrupt (WFI) behaviour
 *
 * @vcpu: The VCPU pointer
 */
void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu)
{
	if (!kvm_arch_vcpu_runnable(vcpu)) {
		kvm_vcpu_srcu_read_unlock(vcpu);
		kvm_vcpu_halt(vcpu);
		kvm_vcpu_srcu_read_lock(vcpu);
	}
}

static int wfi_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
{
	vcpu->stat.wfi_exit_stat++;
	kvm_riscv_vcpu_wfi(vcpu);
	return KVM_INSN_CONTINUE_NEXT_SEPC;
}
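
/*
 * A csr_func entry describes in-kernel emulation for the CSR numbers in
 * [base, base + count): csr_insn() below searches csr_funcs[] for the
 * first entry covering the trapped CSR number. The table is currently
 * empty, so no CSR is emulated in-kernel here.
 */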
struct csr_func {
	unsigned int base;
	unsigned int count;
	/*
	 * Possible return values are the same as the "func" callback in
	 * "struct insn_func".
	 */
	int (*func)(struct kvm_vcpu *vcpu, unsigned int csr_num,
		    unsigned long *val, unsigned long new_val,
		    unsigned long wr_mask);
};

static const struct csr_func csr_funcs[] = { };

/**
 * kvm_riscv_vcpu_csr_return -- Handle CSR read/write after user space
 *				emulation or in-kernel emulation
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the CSR data
 *
 * Returns > 0 upon failure and 0 upon success
 */
int kvm_riscv_vcpu_csr_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	ulong insn;

	if (vcpu->arch.csr_decode.return_handled)
		return 0;
	vcpu->arch.csr_decode.return_handled = 1;

	/* Update destination register for CSR reads */
	insn = vcpu->arch.csr_decode.insn;
	if ((insn >> SH_RD) & MASK_RX)
		SET_RD(insn, &vcpu->arch.guest_context,
		       run->riscv_csr.ret_value);

	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += INSN_LEN(insn);

	return 0;
}

static int csr_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
{
	int i, rc = KVM_INSN_ILLEGAL_TRAP;
	unsigned int csr_num = insn >> SH_RS2;
	unsigned int rs1_num = (insn >> SH_RS1) & MASK_RX;
	ulong rs1_val = GET_RS1(insn, &vcpu->arch.guest_context);
	const struct csr_func *tcfn, *cfn = NULL;
	ulong val = 0, wr_mask = 0, new_val = 0;

	/* Decode the CSR instruction */
	switch (GET_FUNCT3(insn)) {
	case GET_FUNCT3(INSN_MATCH_CSRRW):
		wr_mask = -1UL;
		new_val = rs1_val;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRS):
		wr_mask = rs1_val;
		new_val = -1UL;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRC):
		wr_mask = rs1_val;
		new_val = 0;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRWI):
		wr_mask = -1UL;
		new_val = rs1_num;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRSI):
		wr_mask = rs1_num;
		new_val = -1UL;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRCI):
		wr_mask = rs1_num;
		new_val = 0;
		break;
	default:
		return rc;
	}

	/* Save instruction decode info */
	vcpu->arch.csr_decode.insn = insn;
	vcpu->arch.csr_decode.return_handled = 0;

	/* Update CSR details in kvm_run struct */
	run->riscv_csr.csr_num = csr_num;
	run->riscv_csr.new_value = new_val;
	run->riscv_csr.write_mask = wr_mask;
	run->riscv_csr.ret_value = 0;

	/* Find in-kernel CSR function */
	for (i = 0; i < ARRAY_SIZE(csr_funcs); i++) {
		tcfn = &csr_funcs[i];
		if ((tcfn->base <= csr_num) &&
		    (csr_num < (tcfn->base + tcfn->count))) {
			cfn = tcfn;
			break;
		}
	}

	/* First try in-kernel CSR emulation */
	if (cfn && cfn->func) {
		rc = cfn->func(vcpu, csr_num, &val, new_val, wr_mask);
		if (rc > KVM_INSN_EXIT_TO_USER_SPACE) {
			if (rc == KVM_INSN_CONTINUE_NEXT_SEPC) {
				run->riscv_csr.ret_value = val;
				vcpu->stat.csr_exit_kernel++;
				kvm_riscv_vcpu_csr_return(vcpu, run);
				rc = KVM_INSN_CONTINUE_SAME_SEPC;
			}
			return rc;
		}
	}

	/* Exit to user-space for CSR emulation */
	if (rc <= KVM_INSN_EXIT_TO_USER_SPACE) {
		vcpu->stat.csr_exit_user++;
		run->exit_reason = KVM_EXIT_RISCV_CSR;
	}

	return rc;
}
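
/*
 * Match/mask dispatch table for SYSTEM-opcode instructions that trap as
 * virtual instruction faults: system_opcode_insn() invokes the handler
 * of the first entry for which (insn & mask) == match, i.e. the CSR
 * access instructions and WFI listed below.
 */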
static const struct insn_func system_opcode_funcs[] = {
	{
		.mask = INSN_MASK_CSRRW,
		.match = INSN_MATCH_CSRRW,
		.func = csr_insn,
	},
	{
		.mask = INSN_MASK_CSRRS,
		.match = INSN_MATCH_CSRRS,
		.func = csr_insn,
	},
	{
		.mask = INSN_MASK_CSRRC,
		.match = INSN_MATCH_CSRRC,
		.func = csr_insn,
	},
	{
		.mask = INSN_MASK_CSRRWI,
		.match = INSN_MATCH_CSRRWI,
		.func = csr_insn,
	},
	{
		.mask = INSN_MASK_CSRRSI,
		.match = INSN_MATCH_CSRRSI,
		.func = csr_insn,
	},
	{
		.mask = INSN_MASK_CSRRCI,
		.match = INSN_MATCH_CSRRCI,
		.func = csr_insn,
	},
	{
		.mask = INSN_MASK_WFI,
		.match = INSN_MATCH_WFI,
		.func = wfi_insn,
	},
};

static int system_opcode_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	int i, rc = KVM_INSN_ILLEGAL_TRAP;
	const struct insn_func *ifn;

	for (i = 0; i < ARRAY_SIZE(system_opcode_funcs); i++) {
		ifn = &system_opcode_funcs[i];
		if ((insn & ifn->mask) == ifn->match) {
			rc = ifn->func(vcpu, run, insn);
			break;
		}
	}

	switch (rc) {
	case KVM_INSN_ILLEGAL_TRAP:
		return truly_illegal_insn(vcpu, run, insn);
	case KVM_INSN_VIRTUAL_TRAP:
		return truly_virtual_insn(vcpu, run, insn);
	case KVM_INSN_CONTINUE_NEXT_SEPC:
		vcpu->arch.guest_context.sepc += INSN_LEN(insn);
		break;
	default:
		break;
	}

	return (rc <= 0) ? rc : 1;
}

/**
 * kvm_riscv_vcpu_virtual_insn -- Handle virtual instruction trap
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 * @trap: Trap details
 *
 * Returns > 0 to continue run-loop
 * Returns 0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop
 */
int kvm_riscv_vcpu_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
				struct kvm_cpu_trap *trap)
{
	unsigned long insn = trap->stval;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct;

	if (unlikely(INSN_IS_16BIT(insn))) {
		if (insn == 0) {
			ct = &vcpu->arch.guest_context;
			insn = kvm_riscv_vcpu_unpriv_read(vcpu, true,
							  ct->sepc,
							  &utrap);
			if (utrap.scause) {
				utrap.sepc = ct->sepc;
				kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
				return 1;
			}
		}
		if (INSN_IS_16BIT(insn))
			return truly_illegal_insn(vcpu, run, insn);
	}

	switch ((insn & INSN_OPCODE_MASK) >> INSN_OPCODE_SHIFT) {
	case INSN_OPCODE_SYSTEM:
		return system_opcode_insn(vcpu, run, insn);
	default:
		return truly_illegal_insn(vcpu, run, insn);
	}
}
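
/*
 * kvm_riscv_vcpu_mmio_load() and kvm_riscv_vcpu_mmio_store() below emulate
 * guest loads/stores that fault into MMIO space: the trapped instruction
 * is recovered either from the transformed encoding in htinst or, when
 * htinst carries no value, by reading guest memory at sepc, and the
 * decoded access is then either completed in-kernel via kvm_io_bus_read()
 * or kvm_io_bus_write(), or forwarded to user space as a KVM_EXIT_MMIO
 * exit.
 */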
/**
 * kvm_riscv_vcpu_mmio_load -- Emulate MMIO load instruction
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 * @fault_addr: Guest physical address to load
 * @htinst: Transformed encoding of the load instruction
 *
 * Returns > 0 to continue run-loop
 * Returns 0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop
 */
int kvm_riscv_vcpu_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run,
			     unsigned long fault_addr,
			     unsigned long htinst)
{
	u8 data_buf[8];
	unsigned long insn;
	int shift = 0, len = 0, insn_len = 0;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct = &vcpu->arch.guest_context;

	/* Determine trapped instruction */
	if (htinst & 0x1) {
		/*
		 * Bit[0] == 1 implies trapped instruction value is
		 * transformed instruction or custom instruction.
		 */
		insn = htinst | INSN_16BIT_MASK;
		insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
	} else {
		/*
		 * Bit[0] == 0 implies trapped instruction value is
		 * zero or special value.
		 */
		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
						  &utrap);
		if (utrap.scause) {
			/* Redirect trap if we failed to read instruction */
			utrap.sepc = ct->sepc;
			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
			return 1;
		}
		insn_len = INSN_LEN(insn);
	}

	/* Decode length of MMIO and shift */
	if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LB) == INSN_MATCH_LB) {
		len = 1;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LBU) == INSN_MATCH_LBU) {
		len = 1;
		shift = 8 * (sizeof(ulong) - len);
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
		len = 4;
#endif
	} else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
		len = 2;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) {
		len = 2;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
#endif
	} else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
	} else {
		return -EOPNOTSUPP;
	}

	/* Fault address should be aligned to length of MMIO */
	if (fault_addr & (len - 1))
		return -EIO;

	/* Save instruction decode info */
	vcpu->arch.mmio_decode.insn = insn;
	vcpu->arch.mmio_decode.insn_len = insn_len;
	vcpu->arch.mmio_decode.shift = shift;
	vcpu->arch.mmio_decode.len = len;
	vcpu->arch.mmio_decode.return_handled = 0;

	/* Update MMIO details in kvm_run struct */
	run->mmio.is_write = false;
	run->mmio.phys_addr = fault_addr;
	run->mmio.len = len;

	/* Try to handle MMIO access in the kernel */
	if (!kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_addr, len, data_buf)) {
		/* Successfully handled MMIO access in the kernel so resume */
		memcpy(run->mmio.data, data_buf, len);
		vcpu->stat.mmio_exit_kernel++;
		kvm_riscv_vcpu_mmio_return(vcpu, run);
		return 1;
	}

	/* Exit to userspace for MMIO emulation */
	vcpu->stat.mmio_exit_user++;
	run->exit_reason = KVM_EXIT_MMIO;

	return 0;
}

/**
 * kvm_riscv_vcpu_mmio_store -- Emulate MMIO store instruction
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 * @fault_addr: Guest physical address to store
 * @htinst: Transformed encoding of the store instruction
 *
 * Returns > 0 to continue run-loop
 * Returns 0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop
 */
int kvm_riscv_vcpu_mmio_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      unsigned long fault_addr,
			      unsigned long htinst)
{
	u8 data8;
	u16 data16;
	u32 data32;
	u64 data64;
	ulong data;
	unsigned long insn;
	int len = 0, insn_len = 0;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct = &vcpu->arch.guest_context;

	/* Determine trapped instruction */
	if (htinst & 0x1) {
		/*
		 * Bit[0] == 1 implies trapped instruction value is
		 * transformed instruction or custom instruction.
		 */
		insn = htinst | INSN_16BIT_MASK;
		insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
	} else {
		/*
		 * Bit[0] == 0 implies trapped instruction value is
		 * zero or special value.
		 */
		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
						  &utrap);
		if (utrap.scause) {
			/* Redirect trap if we failed to read instruction */
			utrap.sepc = ct->sepc;
			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
			return 1;
		}
		insn_len = INSN_LEN(insn);
	}

	data = GET_RS2(insn, &vcpu->arch.guest_context);
	data8 = data16 = data32 = data64 = data;

	if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
		len = 4;
	} else if ((insn & INSN_MASK_SB) == INSN_MATCH_SB) {
		len = 1;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
		len = 8;
#endif
	} else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
		len = 2;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
		len = 8;
		data64 = GET_RS2S(insn, &vcpu->arch.guest_context);
	} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		data64 = GET_RS2C(insn, &vcpu->arch.guest_context);
#endif
	} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
		len = 4;
		data32 = GET_RS2S(insn, &vcpu->arch.guest_context);
	} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		data32 = GET_RS2C(insn, &vcpu->arch.guest_context);
	} else {
		return -EOPNOTSUPP;
	}

	/* Fault address should be aligned to length of MMIO */
	if (fault_addr & (len - 1))
		return -EIO;

	/* Save instruction decode info */
	vcpu->arch.mmio_decode.insn = insn;
	vcpu->arch.mmio_decode.insn_len = insn_len;
	vcpu->arch.mmio_decode.shift = 0;
	vcpu->arch.mmio_decode.len = len;
	vcpu->arch.mmio_decode.return_handled = 0;

	/* Copy data to kvm_run instance */
	switch (len) {
	case 1:
		*((u8 *)run->mmio.data) = data8;
		break;
	case 2:
		*((u16 *)run->mmio.data) = data16;
		break;
	case 4:
		*((u32 *)run->mmio.data) = data32;
		break;
	case 8:
		*((u64 *)run->mmio.data) = data64;
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* Update MMIO details in kvm_run struct */
	run->mmio.is_write = true;
	run->mmio.phys_addr = fault_addr;
	run->mmio.len = len;

	/* Try to handle MMIO access in the kernel */
	if (!kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
			      fault_addr, len, run->mmio.data)) {
		/* Successfully handled MMIO access in the kernel so resume */
		vcpu->stat.mmio_exit_kernel++;
		kvm_riscv_vcpu_mmio_return(vcpu, run);
		return 1;
	}

	/* Exit to userspace for MMIO emulation */
	vcpu->stat.mmio_exit_user++;
	run->exit_reason = KVM_EXIT_MMIO;

	return 0;
}

/**
 * kvm_riscv_vcpu_mmio_return -- Handle MMIO loads after user space emulation
 *				 or in-kernel IO emulation
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 */
int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	u8 data8;
	u16 data16;
	u32 data32;
	u64 data64;
	ulong insn;
	int len, shift;

	if (vcpu->arch.mmio_decode.return_handled)
		return 0;

	vcpu->arch.mmio_decode.return_handled = 1;
	insn = vcpu->arch.mmio_decode.insn;

	if (run->mmio.is_write)
		goto done;

	len = vcpu->arch.mmio_decode.len;
	shift = vcpu->arch.mmio_decode.shift;

	switch (len) {
	case 1:
		data8 = *((u8 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data8 << shift >> shift);
		break;
	case 2:
		data16 = *((u16 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data16 << shift >> shift);
		break;
	case 4:
		data32 = *((u32 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data32 << shift >> shift);
		break;
	case 8:
		data64 = *((u64 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data64 << shift >> shift);
		break;
	default:
		return -EOPNOTSUPP;
	}

done:
	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += vcpu->arch.mmio_decode.insn_len;

	return 0;
}