/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include "timing.h"
#include "trace.h"

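/*
 * If the guest has not enabled MSR[FP], deliver an FP-unavailable
 * interrupt instead of emulating the access, mirroring what the
 * hardware would do for a real FP load or store. The caller bails
 * out without advancing the PC, so the instruction is retried once
 * the guest has enabled FP.
 */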
#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_core_queue_fpunavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_PPC_FPU */

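/*
 * Likewise for VSX: if MSR[VSX] is clear, queue a VSX-unavailable
 * interrupt and let the guest enable VSX before the faulting
 * instruction is re-executed.
 */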
#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
		kvmppc_core_queue_vsx_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_VSX */

/*
 * XXX to do:
 * lfiwax, lfiwzx
 * vector loads and stores
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 inst;
	int ra, rs, rt;
	enum emulation_result emulated;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

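	/*
	 * Pull the RA/RS/RT register fields out of the instruction
	 * image; which of them are meaningful depends on the opcode
	 * decoded below.
	 */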
	ra = get_ra(inst);
	rs = get_rs(inst);
	rt = get_rt(inst);

	/*
	 * If mmio_vsx_tx_sx_enabled == 0, copy data between
	 * VSR[0..31] and memory; if mmio_vsx_tx_sx_enabled == 1,
	 * copy data between VSR[32..63] and memory.
	 */
	vcpu->arch.mmio_vsx_tx_sx_enabled = get_tx_or_sx(inst);
	vcpu->arch.mmio_vsx_copy_nums = 0;
	vcpu->arch.mmio_vsx_offset = 0;
	vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_NONE;
	vcpu->arch.mmio_sp64_extend = 0;
	vcpu->arch.mmio_sign_extend = 0;

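	/*
	 * Dispatch on the primary opcode; opcode 31 covers the indexed
	 * (X-form) instructions and is sub-decoded by its extended
	 * opcode. For kvmppc_handle_load()/kvmppc_handle_store(), the
	 * final argument selects the guest's default byte order (1) or
	 * a byte-reversed access (0).
	 */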
	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_LWZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

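		/*
		 * The update forms ("U"/"UX" suffixes) additionally write
		 * the effective address of the access, saved by the MMIO
		 * path in vcpu->arch.vaddr_accessed, back into rA.
		 */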
		case OP_31_XOP_LWZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LBZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case OP_31_XOP_LBZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STDX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 8, 1);
			break;

		case OP_31_XOP_STDUX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STWX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 4, 1);
			break;

		case OP_31_XOP_STWUX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 4, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STBX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 1, 1);
			break;

		case OP_31_XOP_STBUX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

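		/*
		 * kvmppc_handle_loads() is the sign-extending variant,
		 * used for the algebraic loads (the lha/lwa families).
		 */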
		case OP_31_XOP_LHAX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHAUX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LHZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STHX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 2, 1);
			break;

		case OP_31_XOP_STHUX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_DCBST:
		case OP_31_XOP_DCBF:
		case OP_31_XOP_DCBI:
			/*
			 * Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled
			 * dcache coherence.
			 */
			break;

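		/*
		 * The byte-reversed forms pass is_default_endian == 0 so
		 * the MMIO layer swaps the data relative to the guest's
		 * normal byte order.
		 */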
		case OP_31_XOP_LWBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_STWBRX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 4, 0);
			break;

		case OP_31_XOP_LHBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 2, 0);
			break;

		case OP_31_XOP_LDBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 8, 0);
			break;

		case OP_31_XOP_STDBRX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 8, 0);
			break;

		case OP_31_XOP_LDX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
			break;

		case OP_31_XOP_LDUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LWAX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LWAUX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

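		/*
		 * FP loads and stores target an FPR rather than a GPR; the
		 * MMIO completion path selects the FPR bank via the
		 * KVM_MMIO_REG_FPR bits in the register ID. Single-precision
		 * forms set mmio_sp64_extend so the 32-bit memory value is
		 * converted to/from the 64-bit register format.
		 */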
#ifdef CONFIG_PPC_FPU
		case OP_31_XOP_LFSX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_load(run, vcpu,
				KVM_MMIO_REG_FPR|rt, 4, 1);
			break;

		case OP_31_XOP_LFSUX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_load(run, vcpu,
				KVM_MMIO_REG_FPR|rt, 4, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LFDX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_load(run, vcpu,
				KVM_MMIO_REG_FPR|rt, 8, 1);
			break;

		case OP_31_XOP_LFDUX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_load(run, vcpu,
				KVM_MMIO_REG_FPR|rt, 8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LFIWAX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_loads(run, vcpu,
				KVM_MMIO_REG_FPR|rt, 4, 1);
			break;

		case OP_31_XOP_LFIWZX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_load(run, vcpu,
				KVM_MMIO_REG_FPR|rt, 4, 1);
			break;

		case OP_31_XOP_STFSX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_store(run, vcpu,
				VCPU_FPR(vcpu, rs), 4, 1);
			break;

		case OP_31_XOP_STFSUX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_store(run, vcpu,
				VCPU_FPR(vcpu, rs), 4, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STFDX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_store(run, vcpu,
				VCPU_FPR(vcpu, rs), 8, 1);
			break;

		case OP_31_XOP_STFDUX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_store(run, vcpu,
				VCPU_FPR(vcpu, rs), 8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STFIWX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_store(run, vcpu,
				VCPU_FPR(vcpu, rs), 4, 1);
			break;
#endif

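		/*
		 * VSX accesses are emulated as mmio_vsx_copy_nums copies of
		 * mmio_vsx_copy_type-sized elements, since the underlying
		 * MMIO exit buffer holds at most 8 bytes per access.
		 */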
#ifdef CONFIG_VSX
		case OP_31_XOP_LXSDX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
				KVM_MMIO_REG_VSX|rt, 8, 1, 0);
			break;

		case OP_31_XOP_LXSSPX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
				KVM_MMIO_REG_VSX|rt, 4, 1, 0);
			break;

		case OP_31_XOP_LXSIWAX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
				KVM_MMIO_REG_VSX|rt, 4, 1, 1);
			break;

		case OP_31_XOP_LXSIWZX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
				KVM_MMIO_REG_VSX|rt, 4, 1, 0);
			break;

		case OP_31_XOP_LXVD2X:
		/*
		 * The load/store process for these vector instructions is:
		 *
		 * Step 1: exit the guest via the page-fault interrupt
		 * handler, at which point KVM saves the guest VSRs (see
		 * guest_exit_cont->store_fp_state->SAVE_32VSRS as
		 * reference).
		 *
		 * Step 2: copy the data between memory and the vcpu.
		 * Note: for LXVD2X/STXVD2X/LXVW4X/STXVW4X we use
		 * 2 copies * 8 bytes or 4 copies * 4 bytes to simulate
		 * one 16-byte copy. There is also an endianness issue to
		 * mind in the memory layout: see the LXVD2X_ROT/STXVD2X_ROT
		 * macros as reference. On a little-endian host, KVM applies
		 * XXSWAPD for LXVD2X_ROT/STXVD2X_ROT, so the two doublewords
		 * in memory must be swapped.
		 *
		 * Step 3: on return to the guest, KVM restores the
		 * registers (see kvmppc_hv_entry->load_fp_state->REST_32VSRS
		 * as reference).
		 */
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 2;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
				KVM_MMIO_REG_VSX|rt, 8, 1, 0);
			break;

		case OP_31_XOP_LXVW4X:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 4;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
				KVM_MMIO_REG_VSX|rt, 4, 1, 0);
			break;

		case OP_31_XOP_LXVDSX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type =
				KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
				KVM_MMIO_REG_VSX|rt, 8, 1, 0);
			break;

		case OP_31_XOP_STXSDX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
				rs, 8, 1);
			break;

		case OP_31_XOP_STXSSPX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
				rs, 4, 1);
			break;

		case OP_31_XOP_STXSIWX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_offset = 1;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
				rs, 4, 1);
			break;

		case OP_31_XOP_STXVD2X:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 2;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
				rs, 8, 1);
			break;

		case OP_31_XOP_STXVW4X:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 4;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
				rs, 4, 1);
			break;
#endif /* CONFIG_VSX */
		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;

	case OP_LWZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

#ifdef CONFIG_PPC_FPU
	case OP_STFS:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		vcpu->arch.mmio_sp64_extend = 1;
		emulated = kvmppc_handle_store(run, vcpu,
			VCPU_FPR(vcpu, rs), 4, 1);
		break;

	case OP_STFSU:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		vcpu->arch.mmio_sp64_extend = 1;
		emulated = kvmppc_handle_store(run, vcpu,
			VCPU_FPR(vcpu, rs), 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STFD:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		emulated = kvmppc_handle_store(run, vcpu,
			VCPU_FPR(vcpu, rs), 8, 1);
		break;

	case OP_STFDU:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		emulated = kvmppc_handle_store(run, vcpu,
			VCPU_FPR(vcpu, rs), 8, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;
#endif

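	/*
	 * ld/ldu/lwa share the OP_LD primary opcode (DS form); the two
	 * low-order bits of the instruction distinguish them. std/stdu
	 * under OP_STD below are decoded the same way.
	 */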
	case OP_LD:
		rt = get_rt(inst);
		switch (inst & 3) {
		case 0:	/* ld */
			emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
			break;
		case 1: /* ldu */
			emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;
		case 2:	/* lwa */
			emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
			break;
		default:
			emulated = EMULATE_FAIL;
		}
		break;

	case OP_LWZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LBZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STW:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 4, 1);
		break;

	case OP_STD:
		rs = get_rs(inst);
		switch (inst & 3) {
		case 0:	/* std */
			emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 8, 1);
			break;
		case 1: /* stdu */
			emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;
		default:
			emulated = EMULATE_FAIL;
		}
		break;

	case OP_STWU:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STB:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 1, 1);
		break;

	case OP_STBU:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHA:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		break;

	case OP_LHAU:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STH:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 2, 1);
		break;

	case OP_STHU:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

#ifdef CONFIG_PPC_FPU
	case OP_LFS:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		vcpu->arch.mmio_sp64_extend = 1;
		emulated = kvmppc_handle_load(run, vcpu,
			KVM_MMIO_REG_FPR|rt, 4, 1);
		break;

	case OP_LFSU:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		vcpu->arch.mmio_sp64_extend = 1;
		emulated = kvmppc_handle_load(run, vcpu,
			KVM_MMIO_REG_FPR|rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LFD:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		emulated = kvmppc_handle_load(run, vcpu,
			KVM_MMIO_REG_FPR|rt, 8, 1);
		break;

	case OP_LFDU:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		emulated = kvmppc_handle_load(run, vcpu,
			KVM_MMIO_REG_FPR|rt, 8, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;
#endif

	default:
		emulated = EMULATE_FAIL;
		break;
	}

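	/*
	 * If the instruction could not be emulated, leave the PC on the
	 * faulting instruction and queue a program interrupt so the
	 * guest can deal with the illegal access itself.
	 */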
	if (emulated == EMULATE_FAIL) {
		advance = 0;
		kvmppc_core_queue_program(vcpu, 0);
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}