/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/sstep.h>
#include "timing.h"
#include "trace.h"

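/*
 * Facility-availability checks: if the guest has not enabled the
 * relevant facility in its MSR, queue the corresponding "unavailable"
 * interrupt for the guest and return true so the caller can stop
 * emulating; the guest is expected to enable the facility and
 * re-execute the instruction.
 */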
#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_core_queue_fpunavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
		kvmppc_core_queue_vsx_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
		kvmppc_core_queue_vec_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_ALTIVEC */

/*
 * XXX to do:
 * lfiwax, lfiwzx
 * vector loads and stores
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 inst;
	int ra, rs, rt;
	enum emulation_result emulated = EMULATE_FAIL;
	int advance = 1;
	struct instruction_op op;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

	ra = get_ra(inst);
	rs = get_rs(inst);
	rt = get_rt(inst);

	/*
	 * Reset the per-instruction MMIO emulation state; these fields
	 * tell the MMIO completion path how to copy data between memory
	 * and the target registers.
	 */
	vcpu->arch.mmio_vsx_copy_nums = 0;
	vcpu->arch.mmio_vsx_offset = 0;
	vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
	vcpu->arch.mmio_sp64_extend = 0;
	vcpu->arch.mmio_sign_extend = 0;
	vcpu->arch.mmio_vmx_copy_nums = 0;
	vcpu->arch.mmio_vmx_offset = 0;
	vcpu->arch.mmio_host_swabbed = 0;

	emulated = EMULATE_FAIL;
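	/*
	 * analyse_instr() decodes against a struct pt_regs snapshot, but
	 * the guest MSR and CR live in separate vcpu fields, so copy
	 * them in first.
	 */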
	vcpu->arch.regs.msr = vcpu->arch.shared->msr;
	vcpu->arch.regs.ccr = vcpu->arch.cr;
	if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
		int type = op.type & INSTR_TYPE_MASK;
		int size = GETSIZE(op.type);

		switch (type) {
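		/*
		 * GPR load: sign extension and byte reversal are flagged
		 * by analyse_instr(); update forms write the effective
		 * address back to the update register unless emulation
		 * failed.
		 */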
		case LOAD: {
			int instr_byte_swap = op.type & BYTEREV;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(run, vcpu,
						op.reg, size, !instr_byte_swap);
			else
				emulated = kvmppc_handle_load(run, vcpu,
						op.reg, size, !instr_byte_swap);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
		}
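		/*
		 * FP load: FPCONV marks a single-precision memory operand;
		 * setting mmio_sp64_extend tells the MMIO completion path
		 * to expand it to double-precision register format.
		 */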
#ifdef CONFIG_PPC_FPU
		case LOAD_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(run, vcpu,
					     KVM_MMIO_REG_FPR|op.reg, size, 1);
			else
				emulated = kvmppc_handle_load(run, vcpu,
					     KVM_MMIO_REG_FPR|op.reg, size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
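		/*
		 * Altivec (VMX) load: the access size selects the copy
		 * type, and a 16-byte lvx is emulated as two 8-byte MMIO
		 * reads. As on real hardware, the low address bits are
		 * masked off instead of raising an alignment fault.
		 */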
#ifdef CONFIG_ALTIVEC
		case LOAD_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (size == 16) { /* lvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* lvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* lvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* lvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else {
				break;
			}

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_load(run,
						vcpu, KVM_MMIO_REG_VMX|op.reg,
						8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_load(run, vcpu,
						KVM_MMIO_REG_VMX|op.reg,
						size, 1);
			}
			break;
#endif
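		/*
		 * VSX load: instructions flagged VSX_CHECK_VEC are the
		 * vector-flavoured forms and require MSR_VEC rather than
		 * MSR_VSX; element_size selects the copy type, and SPLAT
		 * forms replicate the loaded element across the register.
		 */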
#ifdef CONFIG_VSX
		case LOAD_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			} else if (op.element_size == 4) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			} else {
				break;
			}

			if (size < op.element_size) {
				/* precision conversion case: lxsspx, etc. */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* lxvw4x, lxvd2x, etc. */
				vcpu->arch.mmio_vsx_copy_nums =
					size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|op.reg, io_size_each,
					1, op.type & SIGNEXT);
			break;
		}
#endif
		case STORE:
			/* If byte reversal is needed, op.val has already
			 * been reversed by analyse_instr().
			 */
			emulated = kvmppc_handle_store(run, vcpu, op.val,
					size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
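		/*
		 * The FP/VMX/VSX store cases below call giveup_ext(), where
		 * the platform provides it, so that live guest register
		 * state is flushed back to vcpu->arch before being read
		 * out for the store.
		 */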
#ifdef CONFIG_PPC_FPU
		case STORE_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			/* The FP registers need to be flushed so that
			 * kvmppc_handle_store() can read the actual FP
			 * values from vcpu->arch.
			 */
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_FP);

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, op.reg), size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case STORE_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses. */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VEC);
			if (size == 16) { /* stvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* stvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* stvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* stvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else {
				break;
			}

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_store(run,
						vcpu, op.reg, 8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_store(run,
						vcpu, op.reg, size, 1);
			}

			break;
#endif
#ifdef CONFIG_VSX
		case STORE_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VSX);

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			else if (op.element_size == 4)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			else
				break;

			if (size < op.element_size) {
				/* precision conversion case: stxsspx, etc. */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* stxvw4x, stxvd2x, etc. */
				vcpu->arch.mmio_vsx_copy_nums =
						size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_store(run, vcpu,
					op.reg, io_size_each, 1);
			break;
		}
#endif
		case CACHEOP:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence.
			 */
			emulated = EMULATE_DONE;
			break;
		default:
			break;
		}
	}

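	/*
	 * Nothing handled the instruction: don't advance the PC and
	 * queue a program interrupt for the guest instead.
	 */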
	if (emulated == EMULATE_FAIL) {
		advance = 0;
		kvmppc_core_queue_program(vcpu, 0);
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}