Lines Matching +full:data +full:- +full:shift

1 // SPDX-License-Identifier: GPL-2.0
89 #define RV_X(x, s, n) (((x) >> (s)) & ((1 << (n)) - 1))
110 ((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))
113 ((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))
116 (SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)
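These helper macros do the instruction-field arithmetic used throughout the matches below: RV_X(x, s, n) extracts the n-bit field starting at bit s, SHIFT_RIGHT() degrades to a left shift when the shift amount goes negative, and the SHIFT_RIGHT()/REG_MASK pair converts the 5-bit register number encoded in the instruction into a byte offset into the saved guest registers. A minimal sketch of how they compose, assuming RV64 (LOG_REGBYTES == 3) and the base encoding's rs1 position at bit 15; neither constant appears in the matched lines:

/* Illustration only; 15 (rs1 bit position) and 3 (LOG_REGBYTES on RV64) are assumed. */
unsigned long rs1_num = RV_X(insn, 15, 5);                     /* rs1 field, 0..31 */
unsigned long rs1_off = SHIFT_RIGHT(insn, 15 - 3) & REG_MASK;  /* rs1_num * 8: byte offset into
                                                                * the saved guest registers */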
140 * 2) Returns 0 for exit to user-space
157 utrap.sepc = vcpu->arch.guest_context.sepc; in truly_illegal_insn()
173 utrap.sepc = vcpu->arch.guest_context.sepc; in truly_virtual_insn()
184 * kvm_riscv_vcpu_wfi -- Emulate wait for interrupt (WFI) behaviour
199 vcpu->stat.wfi_exit_stat++; in wfi_insn()
222 * kvm_riscv_vcpu_csr_return -- Handle CSR read/write after user space
223 * emulation or in-kernel emulation
226 * @run: The VCPU run struct containing the CSR data
234 if (vcpu->arch.csr_decode.return_handled) in kvm_riscv_vcpu_csr_return()
236 vcpu->arch.csr_decode.return_handled = 1; in kvm_riscv_vcpu_csr_return()
239 insn = vcpu->arch.csr_decode.insn; in kvm_riscv_vcpu_csr_return()
241 SET_RD(insn, &vcpu->arch.guest_context, in kvm_riscv_vcpu_csr_return()
242 run->riscv_csr.ret_value); in kvm_riscv_vcpu_csr_return()
245 vcpu->arch.guest_context.sepc += INSN_LEN(insn); in kvm_riscv_vcpu_csr_return()
255 ulong rs1_val = GET_RS1(insn, &vcpu->arch.guest_context); in csr_insn()
262 wr_mask = -1UL; in csr_insn()
267 new_val = -1UL; in csr_insn()
274 wr_mask = -1UL; in csr_insn()
279 new_val = -1UL; in csr_insn()
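The -1UL assignments above come from the funct3 decode of the CSR instruction: CSRRW/CSRRWI write every bit (wr_mask = -1UL, new value taken from rs1 or the immediate), while CSRRS/CSRRSI pass the rs1 value or immediate as wr_mask and use new_val = -1UL so only the masked bits get set; CSRRC/CSRRCI do the same with new_val = 0 to clear them. This reading of the surrounding switch is inferred, since the funct3 cases themselves are not among the matched lines; the update that the (new_val, wr_mask) pair describes is:

/* Masked CSR update implied by (new_val, wr_mask); a sketch, not a verbatim excerpt. */
csr_val = (csr_val & ~wr_mask) | (new_val & wr_mask);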
290 vcpu->arch.csr_decode.insn = insn; in csr_insn()
291 vcpu->arch.csr_decode.return_handled = 0; in csr_insn()
294 run->riscv_csr.csr_num = csr_num; in csr_insn()
295 run->riscv_csr.new_value = new_val; in csr_insn()
296 run->riscv_csr.write_mask = wr_mask; in csr_insn()
297 run->riscv_csr.ret_value = 0; in csr_insn()
299 /* Find in-kernel CSR function */ in csr_insn()
302 if ((tcfn->base <= csr_num) && in csr_insn()
303 (csr_num < (tcfn->base + tcfn->count))) { in csr_insn()
309 /* First try in-kernel CSR emulation */ in csr_insn()
310 if (cfn && cfn->func) { in csr_insn()
311 rc = cfn->func(vcpu, csr_num, &val, new_val, wr_mask); in csr_insn()
314 run->riscv_csr.ret_value = val; in csr_insn()
315 vcpu->stat.csr_exit_kernel++; in csr_insn()
323 /* Exit to user-space for CSR emulation */ in csr_insn()
325 vcpu->stat.csr_exit_user++; in csr_insn()
326 run->exit_reason = KVM_EXIT_RISCV_CSR; in csr_insn()
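When no in-kernel csr_fn claims the CSR, the decode filled in above is handed to userspace via KVM_EXIT_RISCV_CSR, and kvm_riscv_vcpu_csr_return() later copies run->riscv_csr.ret_value into rd and advances sepc on the next KVM_RUN. A minimal sketch of the userspace side, where csr_read_emulated() and csr_write_emulated() are hypothetical VMM helpers:

if (run->exit_reason == KVM_EXIT_RISCV_CSR) {
	unsigned long old = csr_read_emulated(run->riscv_csr.csr_num);

	/* apply the guest's write under write_mask, hand back the old value */
	csr_write_emulated(run->riscv_csr.csr_num,
			   (old & ~run->riscv_csr.write_mask) |
			   (run->riscv_csr.new_value & run->riscv_csr.write_mask));
	run->riscv_csr.ret_value = old;
	/* ioctl(vcpu_fd, KVM_RUN, 0) then resumes the guest */
}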
378 if ((insn & ifn->mask) == ifn->match) { in system_opcode_insn()
379 rc = ifn->func(vcpu, run, insn); in system_opcode_insn()
390 vcpu->arch.guest_context.sepc += INSN_LEN(insn); in system_opcode_insn()
400 * kvm_riscv_vcpu_virtual_insn -- Handle virtual instruction trap
403 * @run: The VCPU run struct containing the mmio data
406 * Returns > 0 to continue run-loop
407 * Returns 0 to exit run-loop and handle in user-space.
408 * Returns < 0 to report failure and exit run-loop
413 unsigned long insn = trap->stval; in kvm_riscv_vcpu_virtual_insn()
419 ct = &vcpu->arch.guest_context; in kvm_riscv_vcpu_virtual_insn()
421 ct->sepc, in kvm_riscv_vcpu_virtual_insn()
424 utrap.sepc = ct->sepc; in kvm_riscv_vcpu_virtual_insn()
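The unpriv read here is the fallback for when the trapped instruction cannot be recovered from trap->stval alone: it is re-fetched from guest memory at sepc, and if that fetch itself faults the trap is reflected back into the guest. A sketch of that fault-forwarding pattern, inferred rather than quoted from the matched lines:

insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc, &utrap);
if (utrap.scause) {
	/* the re-fetch faulted: redirect the trap to the guest and resume */
	utrap.sepc = ct->sepc;
	kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
	return 1;
}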
442 * kvm_riscv_vcpu_mmio_load -- Emulate MMIO load instruction
445 * @run: The VCPU run struct containing the mmio data
449 * Returns > 0 to continue run-loop
450 * Returns 0 to exit run-loop and handle in user-space.
451 * Returns < 0 to report failure and exit run-loop
459 int shift = 0, len = 0, insn_len = 0; in kvm_riscv_vcpu_mmio_load() local
461 struct kvm_cpu_context *ct = &vcpu->arch.guest_context; in kvm_riscv_vcpu_mmio_load()
476 insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc, in kvm_riscv_vcpu_mmio_load()
480 utrap.sepc = ct->sepc; in kvm_riscv_vcpu_mmio_load()
487 /* Decode length of MMIO and shift */ in kvm_riscv_vcpu_mmio_load()
490 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
493 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
496 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
500 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
506 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
512 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
517 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
521 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
526 shift = 8 * (sizeof(ulong) - len); in kvm_riscv_vcpu_mmio_load()
528 return -EOPNOTSUPP; in kvm_riscv_vcpu_mmio_load()
532 if (fault_addr & (len - 1)) in kvm_riscv_vcpu_mmio_load()
533 return -EIO; in kvm_riscv_vcpu_mmio_load()
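Every recognised load opcode records its access size in len and a shift of 8 * (sizeof(ulong) - len); kvm_riscv_vcpu_mmio_return() reuses that shift to trim the buffered value back to len bytes, and the fault address must be naturally aligned for the access size. Worked numbers, assuming RV64 (sizeof(ulong) == 8) and a 32-bit load:

len   = 4;
shift = 8 * (sizeof(ulong) - len);	/* 8 * (8 - 4) == 32 */

if (fault_addr & (len - 1))		/* the low two address bits must be clear */
	return -EIO;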
536 vcpu->arch.mmio_decode.insn = insn; in kvm_riscv_vcpu_mmio_load()
537 vcpu->arch.mmio_decode.insn_len = insn_len; in kvm_riscv_vcpu_mmio_load()
538 vcpu->arch.mmio_decode.shift = shift; in kvm_riscv_vcpu_mmio_load()
539 vcpu->arch.mmio_decode.len = len; in kvm_riscv_vcpu_mmio_load()
540 vcpu->arch.mmio_decode.return_handled = 0; in kvm_riscv_vcpu_mmio_load()
543 run->mmio.is_write = false; in kvm_riscv_vcpu_mmio_load()
544 run->mmio.phys_addr = fault_addr; in kvm_riscv_vcpu_mmio_load()
545 run->mmio.len = len; in kvm_riscv_vcpu_mmio_load()
550 memcpy(run->mmio.data, data_buf, len); in kvm_riscv_vcpu_mmio_load()
551 vcpu->stat.mmio_exit_kernel++; in kvm_riscv_vcpu_mmio_load()
557 vcpu->stat.mmio_exit_user++; in kvm_riscv_vcpu_mmio_load()
558 run->exit_reason = KVM_EXIT_MMIO; in kvm_riscv_vcpu_mmio_load()
564 * kvm_riscv_vcpu_mmio_store -- Emulate MMIO store instruction
567 * @run: The VCPU run struct containing the mmio data
571 * Returns > 0 to continue run-loop
572 * Returns 0 to exit run-loop and handle in user-space.
573 * Returns < 0 to report failure and exit run-loop
583 ulong data; in kvm_riscv_vcpu_mmio_store() local
587 struct kvm_cpu_context *ct = &vcpu->arch.guest_context; in kvm_riscv_vcpu_mmio_store()
602 insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc, in kvm_riscv_vcpu_mmio_store()
606 utrap.sepc = ct->sepc; in kvm_riscv_vcpu_mmio_store()
613 data = GET_RS2(insn, &vcpu->arch.guest_context); in kvm_riscv_vcpu_mmio_store()
614 data8 = data16 = data32 = data64 = data; in kvm_riscv_vcpu_mmio_store()
629 data64 = GET_RS2S(insn, &vcpu->arch.guest_context); in kvm_riscv_vcpu_mmio_store()
633 data64 = GET_RS2C(insn, &vcpu->arch.guest_context); in kvm_riscv_vcpu_mmio_store()
637 data32 = GET_RS2S(insn, &vcpu->arch.guest_context); in kvm_riscv_vcpu_mmio_store()
641 data32 = GET_RS2C(insn, &vcpu->arch.guest_context); in kvm_riscv_vcpu_mmio_store()
643 return -EOPNOTSUPP; in kvm_riscv_vcpu_mmio_store()
647 if (fault_addr & (len - 1)) in kvm_riscv_vcpu_mmio_store()
648 return -EIO; in kvm_riscv_vcpu_mmio_store()
651 vcpu->arch.mmio_decode.insn = insn; in kvm_riscv_vcpu_mmio_store()
652 vcpu->arch.mmio_decode.insn_len = insn_len; in kvm_riscv_vcpu_mmio_store()
653 vcpu->arch.mmio_decode.shift = 0; in kvm_riscv_vcpu_mmio_store()
654 vcpu->arch.mmio_decode.len = len; in kvm_riscv_vcpu_mmio_store()
655 vcpu->arch.mmio_decode.return_handled = 0; in kvm_riscv_vcpu_mmio_store()
657 /* Copy data to kvm_run instance */ in kvm_riscv_vcpu_mmio_store()
660 *((u8 *)run->mmio.data) = data8; in kvm_riscv_vcpu_mmio_store()
663 *((u16 *)run->mmio.data) = data16; in kvm_riscv_vcpu_mmio_store()
666 *((u32 *)run->mmio.data) = data32; in kvm_riscv_vcpu_mmio_store()
669 *((u64 *)run->mmio.data) = data64; in kvm_riscv_vcpu_mmio_store()
672 return -EOPNOTSUPP; in kvm_riscv_vcpu_mmio_store()
676 run->mmio.is_write = true; in kvm_riscv_vcpu_mmio_store()
677 run->mmio.phys_addr = fault_addr; in kvm_riscv_vcpu_mmio_store()
678 run->mmio.len = len; in kvm_riscv_vcpu_mmio_store()
682 fault_addr, len, run->mmio.data)) { in kvm_riscv_vcpu_mmio_store()
684 vcpu->stat.mmio_exit_kernel++; in kvm_riscv_vcpu_mmio_store()
690 vcpu->stat.mmio_exit_user++; in kvm_riscv_vcpu_mmio_store()
691 run->exit_reason = KVM_EXIT_MMIO; in kvm_riscv_vcpu_mmio_store()
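Loads and stores that no in-kernel device claims exit to userspace the same way: run->mmio describes the access and the VMM completes it before re-entering the guest. A minimal sketch of that side, with device_read() and device_write() as hypothetical VMM helpers:

if (run->exit_reason == KVM_EXIT_MMIO) {
	if (run->mmio.is_write)
		device_write(run->mmio.phys_addr, run->mmio.data, run->mmio.len);
	else
		device_read(run->mmio.phys_addr, run->mmio.data, run->mmio.len);
	/* on re-entry, kvm_riscv_vcpu_mmio_return() copies run->mmio.data
	 * into the destination register for a load */
}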
697 * kvm_riscv_vcpu_mmio_return -- Handle MMIO loads after user space emulation
698 * or in-kernel IO emulation
701 * @run: The VCPU run struct containing the mmio data
710 int len, shift; in kvm_riscv_vcpu_mmio_return() local
712 if (vcpu->arch.mmio_decode.return_handled) in kvm_riscv_vcpu_mmio_return()
715 vcpu->arch.mmio_decode.return_handled = 1; in kvm_riscv_vcpu_mmio_return()
716 insn = vcpu->arch.mmio_decode.insn; in kvm_riscv_vcpu_mmio_return()
718 if (run->mmio.is_write) in kvm_riscv_vcpu_mmio_return()
721 len = vcpu->arch.mmio_decode.len; in kvm_riscv_vcpu_mmio_return()
722 shift = vcpu->arch.mmio_decode.shift; in kvm_riscv_vcpu_mmio_return()
726 data8 = *((u8 *)run->mmio.data); in kvm_riscv_vcpu_mmio_return()
727 SET_RD(insn, &vcpu->arch.guest_context, in kvm_riscv_vcpu_mmio_return()
728 (ulong)data8 << shift >> shift); in kvm_riscv_vcpu_mmio_return()
731 data16 = *((u16 *)run->mmio.data); in kvm_riscv_vcpu_mmio_return()
732 SET_RD(insn, &vcpu->arch.guest_context, in kvm_riscv_vcpu_mmio_return()
733 (ulong)data16 << shift >> shift); in kvm_riscv_vcpu_mmio_return()
736 data32 = *((u32 *)run->mmio.data); in kvm_riscv_vcpu_mmio_return()
737 SET_RD(insn, &vcpu->arch.guest_context, in kvm_riscv_vcpu_mmio_return()
738 (ulong)data32 << shift >> shift); in kvm_riscv_vcpu_mmio_return()
741 data64 = *((u64 *)run->mmio.data); in kvm_riscv_vcpu_mmio_return()
742 SET_RD(insn, &vcpu->arch.guest_context, in kvm_riscv_vcpu_mmio_return()
743 (ulong)data64 << shift >> shift); in kvm_riscv_vcpu_mmio_return()
746 return -EOPNOTSUPP; in kvm_riscv_vcpu_mmio_return()
751 vcpu->arch.guest_context.sepc += vcpu->arch.mmio_decode.insn_len; in kvm_riscv_vcpu_mmio_return()
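The << shift >> shift pairs above apply the shift saved at decode time so that only the low len bytes of the buffered value reach SET_RD() (the shifts are logical, since the operand is cast to ulong), and sepc is advanced by the recorded insn_len only after the register write, so the guest never re-executes a completed MMIO access. Worked numbers for a 2-byte access on RV64; the concrete values are assumed, not taken from the matches:

int   shift  = 8 * (sizeof(ulong) - 2);		/* 48 */
u16   data16 = 0xbeef;				/* as copied from run->mmio.data */
ulong rd_val = (ulong)data16 << shift >> shift;	/* 0xbeef, upper bits cleared */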