xref: /openbmc/linux/arch/riscv/kvm/vcpu_exit.c (revision 2612e3bbc0386368a850140a6c9b990cd496a5ec)
199cdc6c1SAnup Patel // SPDX-License-Identifier: GPL-2.0
299cdc6c1SAnup Patel /*
399cdc6c1SAnup Patel  * Copyright (C) 2019 Western Digital Corporation or its affiliates.
499cdc6c1SAnup Patel  *
599cdc6c1SAnup Patel  * Authors:
699cdc6c1SAnup Patel  *     Anup Patel <anup.patel@wdc.com>
799cdc6c1SAnup Patel  */
899cdc6c1SAnup Patel 
999cdc6c1SAnup Patel #include <linux/kvm_host.h>
109f701326SAnup Patel #include <asm/csr.h>
1126b73f14SAndrew Jones #include <asm/insn-def.h>
129f701326SAnup Patel 
gstage_page_fault(struct kvm_vcpu * vcpu,struct kvm_run * run,struct kvm_cpu_trap * trap)1326708234SAnup Patel static int gstage_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
149f701326SAnup Patel 			     struct kvm_cpu_trap *trap)
159f701326SAnup Patel {
169f701326SAnup Patel 	struct kvm_memory_slot *memslot;
179f701326SAnup Patel 	unsigned long hva, fault_addr;
186259d2f8SZhang Jiaming 	bool writable;
199f701326SAnup Patel 	gfn_t gfn;
209f701326SAnup Patel 	int ret;
219f701326SAnup Patel 
229f701326SAnup Patel 	fault_addr = (trap->htval << 2) | (trap->stval & 0x3);
239f701326SAnup Patel 	gfn = fault_addr >> PAGE_SHIFT;
249f701326SAnup Patel 	memslot = gfn_to_memslot(vcpu->kvm, gfn);
256259d2f8SZhang Jiaming 	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
269f701326SAnup Patel 
279f701326SAnup Patel 	if (kvm_is_error_hva(hva) ||
286259d2f8SZhang Jiaming 	    (trap->scause == EXC_STORE_GUEST_PAGE_FAULT && !writable)) {
299f701326SAnup Patel 		switch (trap->scause) {
309f701326SAnup Patel 		case EXC_LOAD_GUEST_PAGE_FAULT:
31b91f0e4cSAnup Patel 			return kvm_riscv_vcpu_mmio_load(vcpu, run,
32b91f0e4cSAnup Patel 							fault_addr,
339f701326SAnup Patel 							trap->htinst);
349f701326SAnup Patel 		case EXC_STORE_GUEST_PAGE_FAULT:
35b91f0e4cSAnup Patel 			return kvm_riscv_vcpu_mmio_store(vcpu, run,
36b91f0e4cSAnup Patel 							 fault_addr,
379f701326SAnup Patel 							 trap->htinst);
389f701326SAnup Patel 		default:
399f701326SAnup Patel 			return -EOPNOTSUPP;
409f701326SAnup Patel 		};
419f701326SAnup Patel 	}
429f701326SAnup Patel 
4326708234SAnup Patel 	ret = kvm_riscv_gstage_map(vcpu, memslot, fault_addr, hva,
449f701326SAnup Patel 		(trap->scause == EXC_STORE_GUEST_PAGE_FAULT) ? true : false);
459f701326SAnup Patel 	if (ret < 0)
469f701326SAnup Patel 		return ret;
479f701326SAnup Patel 
489f701326SAnup Patel 	return 1;
499f701326SAnup Patel }
509f701326SAnup Patel 
/**
 * kvm_riscv_vcpu_unpriv_read -- Read machine word from Guest memory
 *
 * @vcpu: The VCPU pointer
 * @read_insn: Flag representing whether we are reading instruction
 * @guest_addr: Guest address to read
 * @trap: Output pointer to trap details
 *
 * Returns the value read from guest memory. If the unprivileged access
 * itself traps, *trap is filled in by the trap handler and the returned
 * value should be considered invalid (callers must check trap->scause).
 */
unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
					 bool read_insn,
					 unsigned long guest_addr,
					 struct kvm_cpu_trap *trap)
{
	/*
	 * a0/a1 are pinned so that __kvm_riscv_unpriv_trap can find the
	 * trap-info pointer if the HLV/HLVX access below faults — presumably
	 * that handler expects the pointer in these registers; confirm
	 * against its assembly implementation.
	 */
	register unsigned long taddr asm("a0") = (unsigned long)trap;
	register unsigned long ttmp asm("a1");
	unsigned long flags, val, tmp, old_stvec, old_hstatus;

	/* No interrupts while stvec/hstatus are temporarily repurposed */
	local_irq_save(flags);

	/*
	 * Borrow the guest's hstatus (so HLV/HLVX target the guest address
	 * space) and point stvec at our private trap handler; both are
	 * restored below.
	 */
	old_hstatus = csr_swap(CSR_HSTATUS, vcpu->arch.guest_context.hstatus);
	old_stvec = csr_swap(CSR_STVEC, (ulong)&__kvm_riscv_unpriv_trap);

	if (read_insn) {
		/*
		 * HLVX.HU instruction
		 * 0110010 00011 rs1 100 rd 1110011
		 *
		 * Instructions are read as one or two 16-bit parcels: if the
		 * low two bits of the first parcel are 11 (a 32-bit insn),
		 * fetch the second parcel and merge it into the high half.
		 */
		asm volatile ("\n"
			".option push\n"
			".option norvc\n"
			"add %[ttmp], %[taddr], 0\n"
			HLVX_HU(%[val], %[addr])
			"andi %[tmp], %[val], 3\n"
			"addi %[tmp], %[tmp], -3\n"
			"bne %[tmp], zero, 2f\n"
			"addi %[addr], %[addr], 2\n"
			HLVX_HU(%[tmp], %[addr])
			"sll %[tmp], %[tmp], 16\n"
			"add %[val], %[val], %[tmp]\n"
			"2:\n"
			".option pop"
		: [val] "=&r" (val), [tmp] "=&r" (tmp),
		  [taddr] "+&r" (taddr), [ttmp] "+&r" (ttmp),
		  [addr] "+&r" (guest_addr) : : "memory");

		/*
		 * A load page fault taken while fetching an instruction is
		 * reported to the caller as an instruction page fault.
		 */
		if (trap->scause == EXC_LOAD_PAGE_FAULT)
			trap->scause = EXC_INST_PAGE_FAULT;
	} else {
		/*
		 * HLV.D instruction
		 * 0110110 00000 rs1 100 rd 1110011
		 *
		 * HLV.W instruction
		 * 0110100 00000 rs1 100 rd 1110011
		 */
		asm volatile ("\n"
			".option push\n"
			".option norvc\n"
			"add %[ttmp], %[taddr], 0\n"
#ifdef CONFIG_64BIT
			HLV_D(%[val], %[addr])
#else
			HLV_W(%[val], %[addr])
#endif
			".option pop"
		: [val] "=&r" (val),
		  [taddr] "+&r" (taddr), [ttmp] "+&r" (ttmp)
		: [addr] "r" (guest_addr) : "memory");
	}

	/* Restore host trap vector and hstatus before re-enabling IRQs */
	csr_write(CSR_STVEC, old_stvec);
	csr_write(CSR_HSTATUS, old_hstatus);

	local_irq_restore(flags);

	return val;
}
1289f701326SAnup Patel 
1299f701326SAnup Patel /**
1309f701326SAnup Patel  * kvm_riscv_vcpu_trap_redirect -- Redirect trap to Guest
1319f701326SAnup Patel  *
1329f701326SAnup Patel  * @vcpu: The VCPU pointer
1339f701326SAnup Patel  * @trap: Trap details
1349f701326SAnup Patel  */
kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu * vcpu,struct kvm_cpu_trap * trap)1359f701326SAnup Patel void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
1369f701326SAnup Patel 				  struct kvm_cpu_trap *trap)
1379f701326SAnup Patel {
1389f701326SAnup Patel 	unsigned long vsstatus = csr_read(CSR_VSSTATUS);
1399f701326SAnup Patel 
1409f701326SAnup Patel 	/* Change Guest SSTATUS.SPP bit */
1419f701326SAnup Patel 	vsstatus &= ~SR_SPP;
1429f701326SAnup Patel 	if (vcpu->arch.guest_context.sstatus & SR_SPP)
1439f701326SAnup Patel 		vsstatus |= SR_SPP;
1449f701326SAnup Patel 
1459f701326SAnup Patel 	/* Change Guest SSTATUS.SPIE bit */
1469f701326SAnup Patel 	vsstatus &= ~SR_SPIE;
1479f701326SAnup Patel 	if (vsstatus & SR_SIE)
1489f701326SAnup Patel 		vsstatus |= SR_SPIE;
1499f701326SAnup Patel 
1509f701326SAnup Patel 	/* Clear Guest SSTATUS.SIE bit */
1519f701326SAnup Patel 	vsstatus &= ~SR_SIE;
1529f701326SAnup Patel 
1539f701326SAnup Patel 	/* Update Guest SSTATUS */
1549f701326SAnup Patel 	csr_write(CSR_VSSTATUS, vsstatus);
1559f701326SAnup Patel 
1569f701326SAnup Patel 	/* Update Guest SCAUSE, STVAL, and SEPC */
1579f701326SAnup Patel 	csr_write(CSR_VSCAUSE, trap->scause);
1589f701326SAnup Patel 	csr_write(CSR_VSTVAL, trap->stval);
1599f701326SAnup Patel 	csr_write(CSR_VSEPC, trap->sepc);
1609f701326SAnup Patel 
1619f701326SAnup Patel 	/* Set Guest PC to Guest exception vector */
1629f701326SAnup Patel 	vcpu->arch.guest_context.sepc = csr_read(CSR_VSTVEC);
16306723e12SAnup Patel 
16406723e12SAnup Patel 	/* Set Guest privilege mode to supervisor */
16506723e12SAnup Patel 	vcpu->arch.guest_context.sstatus |= SR_SPP;
1669f701326SAnup Patel }
16799cdc6c1SAnup Patel 
16899cdc6c1SAnup Patel /*
16999cdc6c1SAnup Patel  * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
17099cdc6c1SAnup Patel  * proper exit to userspace.
17199cdc6c1SAnup Patel  */
kvm_riscv_vcpu_exit(struct kvm_vcpu * vcpu,struct kvm_run * run,struct kvm_cpu_trap * trap)17299cdc6c1SAnup Patel int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
17399cdc6c1SAnup Patel 			struct kvm_cpu_trap *trap)
17499cdc6c1SAnup Patel {
1759f701326SAnup Patel 	int ret;
1769f701326SAnup Patel 
1779f701326SAnup Patel 	/* If we got host interrupt then do nothing */
1789f701326SAnup Patel 	if (trap->scause & CAUSE_IRQ_FLAG)
1799f701326SAnup Patel 		return 1;
1809f701326SAnup Patel 
1819f701326SAnup Patel 	/* Handle guest traps */
1829f701326SAnup Patel 	ret = -EFAULT;
1839f701326SAnup Patel 	run->exit_reason = KVM_EXIT_UNKNOWN;
1849f701326SAnup Patel 	switch (trap->scause) {
185cdeb59bbSAndy Chiu 	case EXC_INST_ILLEGAL:
186*19bff88eSwchen 	case EXC_LOAD_MISALIGNED:
187*19bff88eSwchen 	case EXC_STORE_MISALIGNED:
188cdeb59bbSAndy Chiu 		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV) {
189cdeb59bbSAndy Chiu 			kvm_riscv_vcpu_trap_redirect(vcpu, trap);
190cdeb59bbSAndy Chiu 			ret = 1;
191cdeb59bbSAndy Chiu 		}
192cdeb59bbSAndy Chiu 		break;
1935a5d79acSAnup Patel 	case EXC_VIRTUAL_INST_FAULT:
1945a5d79acSAnup Patel 		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
195b91f0e4cSAnup Patel 			ret = kvm_riscv_vcpu_virtual_insn(vcpu, run, trap);
1965a5d79acSAnup Patel 		break;
1979f701326SAnup Patel 	case EXC_INST_GUEST_PAGE_FAULT:
1989f701326SAnup Patel 	case EXC_LOAD_GUEST_PAGE_FAULT:
1999f701326SAnup Patel 	case EXC_STORE_GUEST_PAGE_FAULT:
2009f701326SAnup Patel 		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
20126708234SAnup Patel 			ret = gstage_page_fault(vcpu, run, trap);
2029f701326SAnup Patel 		break;
203dea8ee31SAtish Patra 	case EXC_SUPERVISOR_SYSCALL:
204dea8ee31SAtish Patra 		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
205dea8ee31SAtish Patra 			ret = kvm_riscv_vcpu_sbi_ecall(vcpu, run);
206dea8ee31SAtish Patra 		break;
2079f701326SAnup Patel 	default:
2089f701326SAnup Patel 		break;
2097b161d9cSran jianping 	}
2109f701326SAnup Patel 
2119f701326SAnup Patel 	/* Print details in-case of error */
2129f701326SAnup Patel 	if (ret < 0) {
2139f701326SAnup Patel 		kvm_err("VCPU exit error %d\n", ret);
2149f701326SAnup Patel 		kvm_err("SEPC=0x%lx SSTATUS=0x%lx HSTATUS=0x%lx\n",
2159f701326SAnup Patel 			vcpu->arch.guest_context.sepc,
2169f701326SAnup Patel 			vcpu->arch.guest_context.sstatus,
2179f701326SAnup Patel 			vcpu->arch.guest_context.hstatus);
2189f701326SAnup Patel 		kvm_err("SCAUSE=0x%lx STVAL=0x%lx HTVAL=0x%lx HTINST=0x%lx\n",
2199f701326SAnup Patel 			trap->scause, trap->stval, trap->htval, trap->htinst);
2209f701326SAnup Patel 	}
2219f701326SAnup Patel 
2229f701326SAnup Patel 	return ret;
22399cdc6c1SAnup Patel }
224