// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#include <linux/bitops.h>
#include <linux/kvm_host.h>

#define INSN_OPCODE_MASK	0x007c
#define INSN_OPCODE_SHIFT	2
#define INSN_OPCODE_SYSTEM	28

#define INSN_MASK_WFI		0xffffffff
#define INSN_MATCH_WFI		0x10500073

#define INSN_MATCH_CSRRW	0x1073
#define INSN_MASK_CSRRW		0x707f
#define INSN_MATCH_CSRRS	0x2073
#define INSN_MASK_CSRRS		0x707f
#define INSN_MATCH_CSRRC	0x3073
#define INSN_MASK_CSRRC		0x707f
#define INSN_MATCH_CSRRWI	0x5073
#define INSN_MASK_CSRRWI	0x707f
#define INSN_MATCH_CSRRSI	0x6073
#define INSN_MASK_CSRRSI	0x707f
#define INSN_MATCH_CSRRCI	0x7073
#define INSN_MASK_CSRRCI	0x707f

#define INSN_MATCH_LB		0x3
#define INSN_MASK_LB		0x707f
#define INSN_MATCH_LH		0x1003
#define INSN_MASK_LH		0x707f
#define INSN_MATCH_LW		0x2003
#define INSN_MASK_LW		0x707f
#define INSN_MATCH_LD		0x3003
#define INSN_MASK_LD		0x707f
#define INSN_MATCH_LBU		0x4003
#define INSN_MASK_LBU		0x707f
#define INSN_MATCH_LHU		0x5003
#define INSN_MASK_LHU		0x707f
#define INSN_MATCH_LWU		0x6003
#define INSN_MASK_LWU		0x707f
#define INSN_MATCH_SB		0x23
#define INSN_MASK_SB		0x707f
#define INSN_MATCH_SH		0x1023
#define INSN_MASK_SH		0x707f
#define INSN_MATCH_SW		0x2023
#define INSN_MASK_SW		0x707f
#define INSN_MATCH_SD		0x3023
#define INSN_MASK_SD		0x707f

#define INSN_MATCH_C_LD		0x6000
#define INSN_MASK_C_LD		0xe003
#define INSN_MATCH_C_SD		0xe000
#define INSN_MASK_C_SD		0xe003
#define INSN_MATCH_C_LW		0x4000
#define INSN_MASK_C_LW		0xe003
#define INSN_MATCH_C_SW		0xc000
#define INSN_MASK_C_SW		0xe003
#define INSN_MATCH_C_LDSP	0x6002
#define INSN_MASK_C_LDSP	0xe003
#define INSN_MATCH_C_SDSP	0xe002
#define INSN_MASK_C_SDSP	0xe003
#define INSN_MATCH_C_LWSP	0x4002
#define INSN_MASK_C_LWSP	0xe003
#define INSN_MATCH_C_SWSP	0xc002
#define INSN_MASK_C_SWSP	0xe003

#define INSN_16BIT_MASK		0x3

#define INSN_IS_16BIT(insn)	(((insn) & INSN_16BIT_MASK) != INSN_16BIT_MASK)

#define INSN_LEN(insn)		(INSN_IS_16BIT(insn) ? 2 : 4)

#ifdef CONFIG_64BIT
#define LOG_REGBYTES		3
#else
#define LOG_REGBYTES		2
#endif
#define REGBYTES		(1 << LOG_REGBYTES)

#define SH_RD			7
#define SH_RS1			15
#define SH_RS2			20
#define SH_RS2C			2
#define MASK_RX			0x1f

#define RV_X(x, s, n)		(((x) >> (s)) & ((1 << (n)) - 1))
#define RVC_LW_IMM(x)		((RV_X(x, 6, 1) << 2) | \
				 (RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 5, 1) << 6))
#define RVC_LD_IMM(x)		((RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 5, 2) << 6))
#define RVC_LWSP_IMM(x)		((RV_X(x, 4, 3) << 2) | \
				 (RV_X(x, 12, 1) << 5) | \
				 (RV_X(x, 2, 2) << 6))
#define RVC_LDSP_IMM(x)		((RV_X(x, 5, 2) << 3) | \
				 (RV_X(x, 12, 1) << 5) | \
				 (RV_X(x, 2, 3) << 6))
#define RVC_SWSP_IMM(x)		((RV_X(x, 9, 4) << 2) | \
				 (RV_X(x, 7, 2) << 6))
#define RVC_SDSP_IMM(x)		((RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 7, 3) << 6))
#define RVC_RS1S(insn)		(8 + RV_X(insn, SH_RD, 3))
#define RVC_RS2S(insn)		(8 + RV_X(insn, SH_RS2C, 3))
#define RVC_RS2(insn)		RV_X(insn, SH_RS2C, 5)
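
/*
 * GPR access helpers: struct kvm_cpu_context starts with the guest's
 * zero/ra/sp/... register array, so a register number extracted from an
 * instruction encoding, scaled by REGBYTES, is a byte offset into the
 * saved guest context. REG_OFFSET() does the extract-and-scale in one
 * shift and REG_PTR() converts the result into a pointer that the
 * GET_RS*() and SET_RD() macros dereference.
 */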
#define SHIFT_RIGHT(x, y)		\
	((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))

#define REG_MASK			\
	((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))

#define REG_OFFSET(insn, pos)		\
	(SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)

#define REG_PTR(insn, pos, regs)	\
	((ulong *)((ulong)(regs) + REG_OFFSET(insn, pos)))

#define GET_FUNCT3(insn)	(((insn) >> 12) & 7)

#define GET_RS1(insn, regs)	(*REG_PTR(insn, SH_RS1, regs))
#define GET_RS2(insn, regs)	(*REG_PTR(insn, SH_RS2, regs))
#define GET_RS1S(insn, regs)	(*REG_PTR(RVC_RS1S(insn), 0, regs))
#define GET_RS2S(insn, regs)	(*REG_PTR(RVC_RS2S(insn), 0, regs))
#define GET_RS2C(insn, regs)	(*REG_PTR(insn, SH_RS2C, regs))
#define GET_SP(regs)		(*REG_PTR(2, 0, regs))
#define SET_RD(insn, regs, val)	(*REG_PTR(insn, SH_RD, regs) = (val))
#define IMM_I(insn)		((s32)(insn) >> 20)
#define IMM_S(insn)		(((s32)(insn) >> 25 << 5) | \
				 (s32)(((insn) >> 7) & 0x1f))

struct insn_func {
	unsigned long mask;
	unsigned long match;
	/*
	 * Possible return values are as follows:
	 * 1) Returns < 0 for error case
	 * 2) Returns 0 for exit to user-space
	 * 3) Returns 1 to continue with next sepc
	 * 4) Returns 2 to continue with same sepc
	 * 5) Returns 3 to inject illegal instruction trap and continue
	 * 6) Returns 4 to inject virtual instruction trap and continue
	 *
	 * Use enum kvm_insn_return for return values
	 */
	int (*func)(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn);
};

static int truly_illegal_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	struct kvm_cpu_trap utrap = { 0 };

	/* Redirect trap to Guest VCPU */
	utrap.sepc = vcpu->arch.guest_context.sepc;
	utrap.scause = EXC_INST_ILLEGAL;
	utrap.stval = insn;
	utrap.htval = 0;
	utrap.htinst = 0;
	kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);

	return 1;
}

static int truly_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	struct kvm_cpu_trap utrap = { 0 };

	/* Redirect trap to Guest VCPU */
	utrap.sepc = vcpu->arch.guest_context.sepc;
	utrap.scause = EXC_VIRTUAL_INST_FAULT;
	utrap.stval = insn;
	utrap.htval = 0;
	utrap.htinst = 0;
	kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);

	return 1;
}

/**
 * kvm_riscv_vcpu_wfi -- Emulate wait for interrupt (WFI) behaviour
 *
 * @vcpu: The VCPU pointer
 */
void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu)
{
	if (!kvm_arch_vcpu_runnable(vcpu)) {
		kvm_vcpu_srcu_read_unlock(vcpu);
		kvm_vcpu_halt(vcpu);
		kvm_vcpu_srcu_read_lock(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
	}
}

static int wfi_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
{
	vcpu->stat.wfi_exit_stat++;
	kvm_riscv_vcpu_wfi(vcpu);
	return KVM_INSN_CONTINUE_NEXT_SEPC;
}
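
/*
 * CSR instruction emulation: csr_insn() below decodes the trapped Zicsr
 * instruction into a (csr_num, new_val, wr_mask) triple, first offers it
 * to any matching in-kernel handler from csr_funcs[], and otherwise exits
 * to user-space with KVM_EXIT_RISCV_CSR. kvm_riscv_vcpu_csr_return() later
 * writes the read value back to the destination register and advances sepc.
 */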
struct csr_func {
	unsigned int base;
	unsigned int count;
	/*
	 * Possible return values are as same as "func" callback in
	 * "struct insn_func".
	 */
	int (*func)(struct kvm_vcpu *vcpu, unsigned int csr_num,
		    unsigned long *val, unsigned long new_val,
		    unsigned long wr_mask);
};

static const struct csr_func csr_funcs[] = { };

/**
 * kvm_riscv_vcpu_csr_return -- Handle CSR read/write after user space
 *				emulation or in-kernel emulation
 *
 * @vcpu: The VCPU pointer
 * @run: The VCPU run struct containing the CSR data
 *
 * Returns > 0 upon failure and 0 upon success
 */
int kvm_riscv_vcpu_csr_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	ulong insn;

	if (vcpu->arch.csr_decode.return_handled)
		return 0;
	vcpu->arch.csr_decode.return_handled = 1;

	/* Update destination register for CSR reads */
	insn = vcpu->arch.csr_decode.insn;
	if ((insn >> SH_RD) & MASK_RX)
		SET_RD(insn, &vcpu->arch.guest_context,
		       run->riscv_csr.ret_value);

	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += INSN_LEN(insn);

	return 0;
}

static int csr_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
{
	int i, rc = KVM_INSN_ILLEGAL_TRAP;
	unsigned int csr_num = insn >> SH_RS2;
	unsigned int rs1_num = (insn >> SH_RS1) & MASK_RX;
	ulong rs1_val = GET_RS1(insn, &vcpu->arch.guest_context);
	const struct csr_func *tcfn, *cfn = NULL;
	ulong val = 0, wr_mask = 0, new_val = 0;

	/* Decode the CSR instruction */
	switch (GET_FUNCT3(insn)) {
	case GET_FUNCT3(INSN_MATCH_CSRRW):
		wr_mask = -1UL;
		new_val = rs1_val;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRS):
		wr_mask = rs1_val;
		new_val = -1UL;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRC):
		wr_mask = rs1_val;
		new_val = 0;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRWI):
		wr_mask = -1UL;
		new_val = rs1_num;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRSI):
		wr_mask = rs1_num;
		new_val = -1UL;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRCI):
		wr_mask = rs1_num;
		new_val = 0;
		break;
	default:
		return rc;
	}

	/* Save instruction decode info */
	vcpu->arch.csr_decode.insn = insn;
	vcpu->arch.csr_decode.return_handled = 0;

	/* Update CSR details in kvm_run struct */
	run->riscv_csr.csr_num = csr_num;
	run->riscv_csr.new_value = new_val;
	run->riscv_csr.write_mask = wr_mask;
	run->riscv_csr.ret_value = 0;

	/* Find in-kernel CSR function */
	for (i = 0; i < ARRAY_SIZE(csr_funcs); i++) {
		tcfn = &csr_funcs[i];
		if ((tcfn->base <= csr_num) &&
		    (csr_num < (tcfn->base + tcfn->count))) {
			cfn = tcfn;
			break;
		}
	}

	/* First try in-kernel CSR emulation */
	if (cfn && cfn->func) {
		rc = cfn->func(vcpu, csr_num, &val, new_val, wr_mask);
		if (rc > KVM_INSN_EXIT_TO_USER_SPACE) {
			if (rc == KVM_INSN_CONTINUE_NEXT_SEPC) {
				run->riscv_csr.ret_value = val;
				vcpu->stat.csr_exit_kernel++;
				kvm_riscv_vcpu_csr_return(vcpu, run);
				rc = KVM_INSN_CONTINUE_SAME_SEPC;
			}
			return rc;
		}
	}

	/* Exit to user-space for CSR emulation */
	if (rc <= KVM_INSN_EXIT_TO_USER_SPACE) {
		vcpu->stat.csr_exit_user++;
		run->exit_reason = KVM_EXIT_RISCV_CSR;
	}

	return rc;
}
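
/*
 * Table of emulated SYSTEM-opcode instructions. system_opcode_insn() scans
 * this table linearly and calls the first entry whose (insn & mask) equals
 * match; the callback's enum kvm_insn_return value then decides whether to
 * advance sepc, retry at the same sepc, exit to user-space, or inject a trap.
 */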
static const struct insn_func system_opcode_funcs[] = {
	{
		.mask  = INSN_MASK_CSRRW,
		.match = INSN_MATCH_CSRRW,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRS,
		.match = INSN_MATCH_CSRRS,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRC,
		.match = INSN_MATCH_CSRRC,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRWI,
		.match = INSN_MATCH_CSRRWI,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRSI,
		.match = INSN_MATCH_CSRRSI,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRCI,
		.match = INSN_MATCH_CSRRCI,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_WFI,
		.match = INSN_MATCH_WFI,
		.func  = wfi_insn,
	},
};

static int system_opcode_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	int i, rc = KVM_INSN_ILLEGAL_TRAP;
	const struct insn_func *ifn;

	for (i = 0; i < ARRAY_SIZE(system_opcode_funcs); i++) {
		ifn = &system_opcode_funcs[i];
		if ((insn & ifn->mask) == ifn->match) {
			rc = ifn->func(vcpu, run, insn);
			break;
		}
	}

	switch (rc) {
	case KVM_INSN_ILLEGAL_TRAP:
		return truly_illegal_insn(vcpu, run, insn);
	case KVM_INSN_VIRTUAL_TRAP:
		return truly_virtual_insn(vcpu, run, insn);
	case KVM_INSN_CONTINUE_NEXT_SEPC:
		vcpu->arch.guest_context.sepc += INSN_LEN(insn);
		break;
	default:
		break;
	}

	return (rc <= 0) ? rc : 1;
}

/**
 * kvm_riscv_vcpu_virtual_insn -- Handle virtual instruction trap
 *
 * @vcpu: The VCPU pointer
 * @run: The VCPU run struct containing the mmio data
 * @trap: Trap details
 *
 * Returns > 0 to continue run-loop
 * Returns 0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop
 */
int kvm_riscv_vcpu_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
				struct kvm_cpu_trap *trap)
{
	unsigned long insn = trap->stval;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct;

	if (unlikely(INSN_IS_16BIT(insn))) {
		if (insn == 0) {
			ct = &vcpu->arch.guest_context;
			insn = kvm_riscv_vcpu_unpriv_read(vcpu, true,
							  ct->sepc,
							  &utrap);
			if (utrap.scause) {
				utrap.sepc = ct->sepc;
				kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
				return 1;
			}
		}
		if (INSN_IS_16BIT(insn))
			return truly_illegal_insn(vcpu, run, insn);
	}

	switch ((insn & INSN_OPCODE_MASK) >> INSN_OPCODE_SHIFT) {
	case INSN_OPCODE_SYSTEM:
		return system_opcode_insn(vcpu, run, insn);
	default:
		return truly_illegal_insn(vcpu, run, insn);
	}
}
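
/*
 * The MMIO load/store emulation below recovers the trapped instruction
 * either from htinst (bit[0] set means the hardware provided a transformed
 * or custom encoding, with bit[1] distinguishing a 32-bit original from a
 * 16-bit one) or, when htinst carries no encoding, by reading guest memory
 * at sepc with kvm_riscv_vcpu_unpriv_read().
 */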
/**
 * kvm_riscv_vcpu_mmio_load -- Emulate MMIO load instruction
 *
 * @vcpu: The VCPU pointer
 * @run: The VCPU run struct containing the mmio data
 * @fault_addr: Guest physical address to load
 * @htinst: Transformed encoding of the load instruction
 *
 * Returns > 0 to continue run-loop
 * Returns 0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop
 */
int kvm_riscv_vcpu_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run,
			     unsigned long fault_addr,
			     unsigned long htinst)
{
	u8 data_buf[8];
	unsigned long insn;
	int shift = 0, len = 0, insn_len = 0;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct = &vcpu->arch.guest_context;

	/* Determine trapped instruction */
	if (htinst & 0x1) {
		/*
		 * Bit[0] == 1 implies trapped instruction value is
		 * transformed instruction or custom instruction.
		 */
		insn = htinst | INSN_16BIT_MASK;
		insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
	} else {
		/*
		 * Bit[0] == 0 implies trapped instruction value is
		 * zero or special value.
		 */
		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
						  &utrap);
		if (utrap.scause) {
			/* Redirect trap if we failed to read instruction */
			utrap.sepc = ct->sepc;
			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
			return 1;
		}
		insn_len = INSN_LEN(insn);
	}

	/* Decode length of MMIO and shift */
	if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LB) == INSN_MATCH_LB) {
		len = 1;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LBU) == INSN_MATCH_LBU) {
		len = 1;
		shift = 8 * (sizeof(ulong) - len);
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
		len = 4;
#endif
	} else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
		len = 2;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) {
		len = 2;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
#endif
	} else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
	} else {
		return -EOPNOTSUPP;
	}

	/* Fault address should be aligned to length of MMIO */
	if (fault_addr & (len - 1))
		return -EIO;

	/* Save instruction decode info */
	vcpu->arch.mmio_decode.insn = insn;
	vcpu->arch.mmio_decode.insn_len = insn_len;
	vcpu->arch.mmio_decode.shift = shift;
	vcpu->arch.mmio_decode.len = len;
	vcpu->arch.mmio_decode.return_handled = 0;

	/* Update MMIO details in kvm_run struct */
	run->mmio.is_write = false;
	run->mmio.phys_addr = fault_addr;
	run->mmio.len = len;

	/* Try to handle MMIO access in the kernel */
	if (!kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_addr, len, data_buf)) {
		/* Successfully handled MMIO access in the kernel so resume */
		memcpy(run->mmio.data, data_buf, len);
		vcpu->stat.mmio_exit_kernel++;
		kvm_riscv_vcpu_mmio_return(vcpu, run);
		return 1;
	}

	/* Exit to userspace for MMIO emulation */
	vcpu->stat.mmio_exit_user++;
	run->exit_reason = KVM_EXIT_MMIO;

	return 0;
}
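
/*
 * MMIO store emulation mirrors the load path: the source register value is
 * narrowed to the access width and copied into run->mmio.data up front, so
 * the in-kernel kvm_io_bus_write() path and the user-space KVM_EXIT_MMIO
 * path both see the same payload.
 */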
/**
 * kvm_riscv_vcpu_mmio_store -- Emulate MMIO store instruction
 *
 * @vcpu: The VCPU pointer
 * @run: The VCPU run struct containing the mmio data
 * @fault_addr: Guest physical address to store
 * @htinst: Transformed encoding of the store instruction
 *
 * Returns > 0 to continue run-loop
 * Returns 0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop
 */
int kvm_riscv_vcpu_mmio_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      unsigned long fault_addr,
			      unsigned long htinst)
{
	u8 data8;
	u16 data16;
	u32 data32;
	u64 data64;
	ulong data;
	unsigned long insn;
	int len = 0, insn_len = 0;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct = &vcpu->arch.guest_context;

	/* Determine trapped instruction */
	if (htinst & 0x1) {
		/*
		 * Bit[0] == 1 implies trapped instruction value is
		 * transformed instruction or custom instruction.
		 */
		insn = htinst | INSN_16BIT_MASK;
		insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
	} else {
		/*
		 * Bit[0] == 0 implies trapped instruction value is
		 * zero or special value.
		 */
		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
						  &utrap);
		if (utrap.scause) {
			/* Redirect trap if we failed to read instruction */
			utrap.sepc = ct->sepc;
			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
			return 1;
		}
		insn_len = INSN_LEN(insn);
	}

	data = GET_RS2(insn, &vcpu->arch.guest_context);
	data8 = data16 = data32 = data64 = data;

	if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
		len = 4;
	} else if ((insn & INSN_MASK_SB) == INSN_MATCH_SB) {
		len = 1;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
		len = 8;
#endif
	} else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
		len = 2;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
		len = 8;
		data64 = GET_RS2S(insn, &vcpu->arch.guest_context);
	} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		data64 = GET_RS2C(insn, &vcpu->arch.guest_context);
#endif
	} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
		len = 4;
		data32 = GET_RS2S(insn, &vcpu->arch.guest_context);
	} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		data32 = GET_RS2C(insn, &vcpu->arch.guest_context);
	} else {
		return -EOPNOTSUPP;
	}

	/* Fault address should be aligned to length of MMIO */
	if (fault_addr & (len - 1))
		return -EIO;

	/* Save instruction decode info */
	vcpu->arch.mmio_decode.insn = insn;
	vcpu->arch.mmio_decode.insn_len = insn_len;
	vcpu->arch.mmio_decode.shift = 0;
	vcpu->arch.mmio_decode.len = len;
	vcpu->arch.mmio_decode.return_handled = 0;

	/* Copy data to kvm_run instance */
	switch (len) {
	case 1:
		*((u8 *)run->mmio.data) = data8;
		break;
	case 2:
		*((u16 *)run->mmio.data) = data16;
		break;
	case 4:
		*((u32 *)run->mmio.data) = data32;
		break;
	case 8:
		*((u64 *)run->mmio.data) = data64;
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* Update MMIO details in kvm_run struct */
	run->mmio.is_write = true;
	run->mmio.phys_addr = fault_addr;
	run->mmio.len = len;

	/* Try to handle MMIO access in the kernel */
	if (!kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
			      fault_addr, len, run->mmio.data)) {
		/* Successfully handled MMIO access in the kernel so resume */
		vcpu->stat.mmio_exit_kernel++;
		kvm_riscv_vcpu_mmio_return(vcpu, run);
		return 1;
	}

	/* Exit to userspace for MMIO emulation */
	vcpu->stat.mmio_exit_user++;
	run->exit_reason = KVM_EXIT_MMIO;

	return 0;
}
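
/*
 * Both MMIO paths above record their decode results in vcpu->arch.mmio_decode
 * so that kvm_riscv_vcpu_mmio_return() can complete the access once the data
 * is available: for loads it copies run->mmio.data into the destination
 * register, and in either case it advances sepc by the trapped instruction's
 * length exactly once, guarded by the return_handled flag.
 */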
/**
 * kvm_riscv_vcpu_mmio_return -- Handle MMIO loads after user space emulation
 *				 or in-kernel IO emulation
 *
 * @vcpu: The VCPU pointer
 * @run: The VCPU run struct containing the mmio data
 */
int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	u8 data8;
	u16 data16;
	u32 data32;
	u64 data64;
	ulong insn;
	int len, shift;

	if (vcpu->arch.mmio_decode.return_handled)
		return 0;

	vcpu->arch.mmio_decode.return_handled = 1;
	insn = vcpu->arch.mmio_decode.insn;

	if (run->mmio.is_write)
		goto done;

	len = vcpu->arch.mmio_decode.len;
	shift = vcpu->arch.mmio_decode.shift;

	switch (len) {
	case 1:
		data8 = *((u8 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data8 << shift >> shift);
		break;
	case 2:
		data16 = *((u16 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data16 << shift >> shift);
		break;
	case 4:
		data32 = *((u32 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data32 << shift >> shift);
		break;
	case 8:
		data64 = *((u64 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data64 << shift >> shift);
		break;
	default:
		return -EOPNOTSUPP;
	}

done:
	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += vcpu->arch.mmio_decode.insn_len;

	return 0;
}