xref: /openbmc/linux/arch/riscv/kvm/vcpu_exit.c (revision 703e7713)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2019 Western Digital Corporation or its affiliates.
4  *
5  * Authors:
6  *     Anup Patel <anup.patel@wdc.com>
7  */
8 
9 #include <linux/kvm_host.h>
10 #include <asm/csr.h>
11 #include <asm/insn-def.h>
12 
13 static int gstage_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
14 			     struct kvm_cpu_trap *trap)
15 {
16 	struct kvm_memory_slot *memslot;
17 	unsigned long hva, fault_addr;
18 	bool writable;
19 	gfn_t gfn;
20 	int ret;
21 
22 	fault_addr = (trap->htval << 2) | (trap->stval & 0x3);
23 	gfn = fault_addr >> PAGE_SHIFT;
24 	memslot = gfn_to_memslot(vcpu->kvm, gfn);
25 	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
26 
27 	if (kvm_is_error_hva(hva) ||
28 	    (trap->scause == EXC_STORE_GUEST_PAGE_FAULT && !writable)) {
29 		switch (trap->scause) {
30 		case EXC_LOAD_GUEST_PAGE_FAULT:
31 			return kvm_riscv_vcpu_mmio_load(vcpu, run,
32 							fault_addr,
33 							trap->htinst);
34 		case EXC_STORE_GUEST_PAGE_FAULT:
35 			return kvm_riscv_vcpu_mmio_store(vcpu, run,
36 							 fault_addr,
37 							 trap->htinst);
38 		default:
39 			return -EOPNOTSUPP;
40 		};
41 	}
42 
43 	ret = kvm_riscv_gstage_map(vcpu, memslot, fault_addr, hva,
44 		(trap->scause == EXC_STORE_GUEST_PAGE_FAULT) ? true : false);
45 	if (ret < 0)
46 		return ret;
47 
48 	return 1;
49 }
50 
/**
 * kvm_riscv_vcpu_unpriv_read -- Read machine word from Guest memory
 *
 * @vcpu: The VCPU pointer
 * @read_insn: Flag representing whether we are reading instruction
 * @guest_addr: Guest address to read
 * @trap: Output pointer to trap details
 *
 * Uses the hypervisor virtual-machine load instructions (HLV/HLVX) so the
 * access is translated and permission-checked exactly as the guest would
 * see it.  If the access faults, __kvm_riscv_unpriv_trap fills @trap and
 * execution continues here; the returned value is then meaningless and
 * the caller must inspect @trap.
 */
unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
					 bool read_insn,
					 unsigned long guest_addr,
					 struct kvm_cpu_trap *trap)
{
	/*
	 * a0/a1 are a fixed-register contract with __kvm_riscv_unpriv_trap:
	 * the handler finds the kvm_cpu_trap pointer in a0 (copied to a1
	 * inside the asm so it survives the load).
	 */
	register unsigned long taddr asm("a0") = (unsigned long)trap;
	register unsigned long ttmp asm("a1");
	unsigned long flags, val, tmp, old_stvec, old_hstatus;

	/* IRQs must stay off while STVEC/HSTATUS are temporarily rewired. */
	local_irq_save(flags);

	/*
	 * Install the guest's HSTATUS (so HLV/HLVX use guest translation
	 * context) and point STVEC at our private trap trampoline.
	 */
	old_hstatus = csr_swap(CSR_HSTATUS, vcpu->arch.guest_context.hstatus);
	old_stvec = csr_swap(CSR_STVEC, (ulong)&__kvm_riscv_unpriv_trap);

	if (read_insn) {
		/*
		 * HLVX.HU instruction
		 * 0110010 00011 rs1 100 rd 1110011
		 *
		 * Fetch the first 16-bit parcel with execute-permission
		 * semantics; if its low two bits are 0b11 the instruction
		 * is 32 bits wide, so fetch and merge the second parcel.
		 */
		asm volatile ("\n"
			".option push\n"
			".option norvc\n"
			"add %[ttmp], %[taddr], 0\n"
			HLVX_HU(%[val], %[addr])
			"andi %[tmp], %[val], 3\n"
			"addi %[tmp], %[tmp], -3\n"
			"bne %[tmp], zero, 2f\n"
			"addi %[addr], %[addr], 2\n"
			HLVX_HU(%[tmp], %[addr])
			"sll %[tmp], %[tmp], 16\n"
			"add %[val], %[val], %[tmp]\n"
			"2:\n"
			".option pop"
		: [val] "=&r" (val), [tmp] "=&r" (tmp),
		  [taddr] "+&r" (taddr), [ttmp] "+&r" (ttmp),
		  [addr] "+&r" (guest_addr) : : "memory");

		/*
		 * We were fetching an instruction, so report a faulting
		 * load as an instruction page fault to the caller.
		 */
		if (trap->scause == EXC_LOAD_PAGE_FAULT)
			trap->scause = EXC_INST_PAGE_FAULT;
	} else {
		/*
		 * HLV.D instruction
		 * 0110110 00000 rs1 100 rd 1110011
		 *
		 * HLV.W instruction
		 * 0110100 00000 rs1 100 rd 1110011
		 *
		 * Plain data read of one XLEN-sized machine word.
		 */
		asm volatile ("\n"
			".option push\n"
			".option norvc\n"
			"add %[ttmp], %[taddr], 0\n"
#ifdef CONFIG_64BIT
			HLV_D(%[val], %[addr])
#else
			HLV_W(%[val], %[addr])
#endif
			".option pop"
		: [val] "=&r" (val),
		  [taddr] "+&r" (taddr), [ttmp] "+&r" (ttmp)
		: [addr] "r" (guest_addr) : "memory");
	}

	/* Restore the CSRs in reverse order of the swaps above. */
	csr_write(CSR_STVEC, old_stvec);
	csr_write(CSR_HSTATUS, old_hstatus);

	local_irq_restore(flags);

	return val;
}
128 
129 /**
130  * kvm_riscv_vcpu_trap_redirect -- Redirect trap to Guest
131  *
132  * @vcpu: The VCPU pointer
133  * @trap: Trap details
134  */
135 void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
136 				  struct kvm_cpu_trap *trap)
137 {
138 	unsigned long vsstatus = csr_read(CSR_VSSTATUS);
139 
140 	/* Change Guest SSTATUS.SPP bit */
141 	vsstatus &= ~SR_SPP;
142 	if (vcpu->arch.guest_context.sstatus & SR_SPP)
143 		vsstatus |= SR_SPP;
144 
145 	/* Change Guest SSTATUS.SPIE bit */
146 	vsstatus &= ~SR_SPIE;
147 	if (vsstatus & SR_SIE)
148 		vsstatus |= SR_SPIE;
149 
150 	/* Clear Guest SSTATUS.SIE bit */
151 	vsstatus &= ~SR_SIE;
152 
153 	/* Update Guest SSTATUS */
154 	csr_write(CSR_VSSTATUS, vsstatus);
155 
156 	/* Update Guest SCAUSE, STVAL, and SEPC */
157 	csr_write(CSR_VSCAUSE, trap->scause);
158 	csr_write(CSR_VSTVAL, trap->stval);
159 	csr_write(CSR_VSEPC, trap->sepc);
160 
161 	/* Set Guest PC to Guest exception vector */
162 	vcpu->arch.guest_context.sepc = csr_read(CSR_VSTVEC);
163 
164 	/* Set Guest privilege mode to supervisor */
165 	vcpu->arch.guest_context.sstatus |= SR_SPP;
166 }
167 
168 /*
169  * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
170  * proper exit to userspace.
171  */
172 int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
173 			struct kvm_cpu_trap *trap)
174 {
175 	int ret;
176 
177 	/* If we got host interrupt then do nothing */
178 	if (trap->scause & CAUSE_IRQ_FLAG)
179 		return 1;
180 
181 	/* Handle guest traps */
182 	ret = -EFAULT;
183 	run->exit_reason = KVM_EXIT_UNKNOWN;
184 	switch (trap->scause) {
185 	case EXC_INST_ILLEGAL:
186 	case EXC_LOAD_MISALIGNED:
187 	case EXC_STORE_MISALIGNED:
188 		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV) {
189 			kvm_riscv_vcpu_trap_redirect(vcpu, trap);
190 			ret = 1;
191 		}
192 		break;
193 	case EXC_VIRTUAL_INST_FAULT:
194 		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
195 			ret = kvm_riscv_vcpu_virtual_insn(vcpu, run, trap);
196 		break;
197 	case EXC_INST_GUEST_PAGE_FAULT:
198 	case EXC_LOAD_GUEST_PAGE_FAULT:
199 	case EXC_STORE_GUEST_PAGE_FAULT:
200 		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
201 			ret = gstage_page_fault(vcpu, run, trap);
202 		break;
203 	case EXC_SUPERVISOR_SYSCALL:
204 		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
205 			ret = kvm_riscv_vcpu_sbi_ecall(vcpu, run);
206 		break;
207 	default:
208 		break;
209 	}
210 
211 	/* Print details in-case of error */
212 	if (ret < 0) {
213 		kvm_err("VCPU exit error %d\n", ret);
214 		kvm_err("SEPC=0x%lx SSTATUS=0x%lx HSTATUS=0x%lx\n",
215 			vcpu->arch.guest_context.sepc,
216 			vcpu->arch.guest_context.sstatus,
217 			vcpu->arch.guest_context.hstatus);
218 		kvm_err("SCAUSE=0x%lx STVAL=0x%lx HTVAL=0x%lx HTINST=0x%lx\n",
219 			trap->scause, trap->stval, trap->htval, trap->htinst);
220 	}
221 
222 	return ret;
223 }
224