/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/sstep.h>
#include "timing.h"
#include "trace.h"

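/*
 * Each helper below checks whether the guest currently has the
 * corresponding facility enabled in its MSR.  If not, the matching
 * "facility unavailable" interrupt is queued for the guest, mirroring
 * what the hardware would do, and the caller bails out so the guest
 * can enable the facility and retry the access.
 */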
#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_core_queue_fpunavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
		kvmppc_core_queue_vsx_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
		kvmppc_core_queue_vec_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_ALTIVEC */

/*
 * XXX to do:
 * lfiwax, lfiwzx
 * vector loads and stores
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 inst;
	enum emulation_result emulated = EMULATE_FAIL;
	int advance = 1;
	struct instruction_op op;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

	/*
	 * Reset any MMIO emulation state left over from a previously
	 * emulated instruction.  For VSX accesses, op.reg carries the
	 * full 6-bit register number: values 0..31 select VSR[0..31]
	 * and values 32..63 select VSR[32..63].
	 */
	vcpu->arch.mmio_vsx_copy_nums = 0;
	vcpu->arch.mmio_vsx_offset = 0;
	vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
	vcpu->arch.mmio_sp64_extend = 0;
	vcpu->arch.mmio_sign_extend = 0;
	vcpu->arch.mmio_vmx_copy_nums = 0;
	vcpu->arch.mmio_vmx_offset = 0;
	vcpu->arch.mmio_host_swabbed = 0;

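	/*
	 * Hand the instruction to the common decoder.  On success, op
	 * describes the access: its type, size in bytes, target
	 * register, effective address, and flag bits, so all that is
	 * left to do here is route it to the matching MMIO handler.
	 */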
	emulated = EMULATE_FAIL;
	vcpu->arch.regs.msr = vcpu->arch.shared->msr;
	if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
		int type = op.type & INSTR_TYPE_MASK;
		int size = GETSIZE(op.type);

		switch (type) {
		case LOAD:  {
			int instr_byte_swap = op.type & BYTEREV;

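			/*
			 * The last argument to the load handlers says
			 * whether the access uses the guest's default
			 * endianness.  Byte-reversed loads (lhbrx,
			 * lwbrx, ...) are the opposite, hence the
			 * negation.
			 */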
			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(run, vcpu,
						op.reg, size, !instr_byte_swap);
			else
				emulated = kvmppc_handle_load(run, vcpu,
						op.reg, size, !instr_byte_swap);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
		}
#ifdef CONFIG_PPC_FPU
		case LOAD_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

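			/*
			 * FPCONV marks single-precision accesses (lfs
			 * and friends): mmio_sp64_extend asks the MMIO
			 * completion code to convert the 4-byte value
			 * to double-precision format before placing it
			 * in the FPR.
			 */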
			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(run, vcpu,
					     KVM_MMIO_REG_FPR|op.reg, size, 1);
			else
				emulated = kvmppc_handle_load(run, vcpu,
					     KVM_MMIO_REG_FPR|op.reg, size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case LOAD_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (size == 16) { /* lvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* lvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* lvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* lvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

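			/*
			 * Index of the accessed element within the
			 * 16-byte quadword, used by the completion code
			 * to place a single loaded element (lvebx,
			 * lvehx, lvewx) in the right slot.
			 */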
			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

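			/*
			 * A full 16-byte lvx is wider than the MMIO
			 * path can move in one go, so it is emulated as
			 * two consecutive 8-byte copies.
			 */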
			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_load(run,
						vcpu, KVM_MMIO_REG_VMX|op.reg,
						8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_load(run, vcpu,
						KVM_MMIO_REG_VMX|op.reg,
						size, 1);
			}
			break;
#endif
#ifdef CONFIG_VSX
		case LOAD_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

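			/*
			 * VSX_SPLAT (lxvdsx and similar) replicates the
			 * single loaded element across the whole VSR;
			 * the *_LOAD_DUMP copy types request that from
			 * the completion code.
			 */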
			if (op.element_size == 8) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			} else if (op.element_size == 4) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			} else
				break;

			if (size < op.element_size) {
				/* precision conversion case: lxsspx etc. */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* lxvw4x, lxvd2x, etc. */
				vcpu->arch.mmio_vsx_copy_nums =
					size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|op.reg, io_size_each,
					1, op.type & SIGNEXT);
			break;
		}
#endif
		case STORE:
			/* If the instruction calls for byte reversal,
			 * op.val has already been byte-swapped by
			 * analyse_instr().
			 */
			emulated = kvmppc_handle_store(run, vcpu, op.val,
					size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#ifdef CONFIG_PPC_FPU
		case STORE_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			/* The FP registers need to be flushed so that
			 * kvmppc_handle_store() can read the current
			 * FP values from vcpu->arch.
			 */
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_FP);

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, op.reg), size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case STORE_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses. */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

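			/* As for STORE_FP, flush the live vector state
			 * back to vcpu->arch so the store reads current
			 * register contents.
			 */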
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VEC);
			if (size == 16) { /* stvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* stvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* stvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* stvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_store(run,
						vcpu, op.reg, 8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_store(run,
						vcpu, op.reg, size, 1);
			}

			break;
#endif
#ifdef CONFIG_VSX
		case STORE_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

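			/* As for STORE_FP, flush the live VSX state
			 * back to vcpu->arch before reading it.
			 */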
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VSX);

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			else if (op.element_size == 4)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			else
				break;

			if (size < op.element_size) {
				/* precision conversion case: stxsspx etc. */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* stxvw4x, stxvd2x, etc. */
				vcpu->arch.mmio_vsx_copy_nums =
						size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_store(run, vcpu,
					op.reg, io_size_each, 1);
			break;
		}
#endif
		case CACHEOP:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled
			 * dcache coherence.
			 */
			emulated = EMULATE_DONE;
			break;
		default:
			break;
		}
	}

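	/*
	 * Emulation failed: leave the PC on the faulting instruction
	 * and reflect a program interrupt to the guest.
	 */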
	if (emulated == EMULATE_FAIL) {
		advance = 0;
		kvmppc_core_queue_program(vcpu, 0);
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}