xref: /openbmc/linux/arch/powerpc/kvm/emulate.c (revision afc98d90)
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include "timing.h"
#include "trace.h"

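/*
 * Emulate a guest write to the decrementer: cancel any pending
 * emulated-DEC timer and, unless the new value means "no interrupt",
 * arm an hrtimer to fire when the guest DEC would reach zero.
 */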
void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec;
	unsigned long long dec_time;

	pr_debug("mtDEC: %x\n", vcpu->arch.dec);
	hrtimer_try_to_cancel(&vcpu->arch.dec_timer);

#ifdef CONFIG_PPC_BOOK3S
	/* mtdec lowers the interrupt line when positive. */
	kvmppc_core_dequeue_dec(vcpu);

	/* POWER4+ triggers a dec interrupt if the value is < 0 */
	if (vcpu->arch.dec & 0x80000000) {
		kvmppc_core_queue_dec(vcpu);
		return;
	}
#endif

#ifdef CONFIG_BOOKE
	/* On BOOKE, DEC = 0 is as good as decrementer not enabled */
	if (vcpu->arch.dec == 0)
		return;
#endif

	/*
	 * The decrementer ticks at the same rate as the timebase, so
	 * that's how we convert the guest DEC value to the number of
	 * host ticks.
	 */

	dec_time = vcpu->arch.dec;
	/*
	 * The guest timebase ticks at the same frequency as the host
	 * decrementer, so reuse the host decrementer clockevent
	 * calculations for decrementer emulation.
	 */
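	/*
	 * decrementer_clockevent's mult/shift pair converts nanoseconds
	 * to timebase ticks (ticks ~= ns * mult >> shift), so inverting
	 * it gives ns ~= (ticks << shift) / mult.  Worked example with
	 * hypothetical numbers: at a 512 MHz timebase, a guest DEC value
	 * of 0x10000000 (2^28 ticks) works out to 2^28 / 512e6 seconds,
	 * i.e. an hrtimer expiry roughly 524 ms from now.
	 */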
	dec_time = dec_time << decrementer_clockevent.shift;
	do_div(dec_time, decrementer_clockevent.mult);
	dec_nsec = do_div(dec_time, NSEC_PER_SEC);
	hrtimer_start(&vcpu->arch.dec_timer,
		ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
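	/*
	 * Despite its name, dec_jiffies holds a timebase snapshot, not
	 * jiffies: it records when the guest last wrote DEC so that
	 * kvmppc_get_dec() can compute the ticks elapsed since then.
	 */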
	vcpu->arch.dec_jiffies = get_tb();
}

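/*
 * Return the value the guest would currently read from DEC: the value
 * it programmed minus the timebase ticks elapsed since it was written.
 * On BookE the emulated DEC stops at zero; on Book3S the result may go
 * negative (top bit set), as the hardware decrementer keeps counting.
 */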
u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
{
	u64 jd = tb - vcpu->arch.dec_jiffies;

#ifdef CONFIG_BOOKE
	if (vcpu->arch.dec < jd)
		return 0;
#endif

	return vcpu->arch.dec - jd;
}

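/*
 * SRR0/SRR1 and the SPRGs live in the shared register area
 * (vcpu->arch.shared), which a paravirtualized guest can map via the
 * magic page and access without trapping; we only get here when the
 * guest actually executes mtspr.
 */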
static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
	enum emulation_result emulated = EMULATE_DONE;
	ulong spr_val = kvmppc_get_gpr(vcpu, rs);

	switch (sprn) {
	case SPRN_SRR0:
		vcpu->arch.shared->srr0 = spr_val;
		break;
	case SPRN_SRR1:
		vcpu->arch.shared->srr1 = spr_val;
		break;

	/* XXX We need to context-switch the timebase for
	 * watchdog and FIT. */
	case SPRN_TBWL: break;
	case SPRN_TBWU: break;

	case SPRN_DEC:
		vcpu->arch.dec = spr_val;
		kvmppc_emulate_dec(vcpu);
		break;

	case SPRN_SPRG0:
		vcpu->arch.shared->sprg0 = spr_val;
		break;
	case SPRN_SPRG1:
		vcpu->arch.shared->sprg1 = spr_val;
		break;
	case SPRN_SPRG2:
		vcpu->arch.shared->sprg2 = spr_val;
		break;
	case SPRN_SPRG3:
		vcpu->arch.shared->sprg3 = spr_val;
		break;

	/* PIR can legally be written, but we ignore it */
	case SPRN_PIR: break;

	default:
		emulated = vcpu->kvm->arch.kvm_ops->emulate_mtspr(vcpu, sprn,
								  spr_val);
		if (emulated == EMULATE_FAIL)
			printk(KERN_INFO "mtspr: unknown spr 0x%x\n", sprn);
		break;
	}

	kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);

	return emulated;
}

static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
	enum emulation_result emulated = EMULATE_DONE;
	ulong spr_val = 0;

	switch (sprn) {
	case SPRN_SRR0:
		spr_val = vcpu->arch.shared->srr0;
		break;
	case SPRN_SRR1:
		spr_val = vcpu->arch.shared->srr1;
		break;
	case SPRN_PVR:
		spr_val = vcpu->arch.pvr;
		break;
	case SPRN_PIR:
		spr_val = vcpu->vcpu_id;
		break;

	/* Note: mftb and TBRL/TBRU are user-accessible, so the guest can
	 * always read the real TB anyway. In fact, we probably won't
	 * ever see these traps. */
	case SPRN_TBWL:
		spr_val = get_tb() >> 32;
		break;
	case SPRN_TBWU:
		spr_val = get_tb();
		break;

	case SPRN_SPRG0:
		spr_val = vcpu->arch.shared->sprg0;
		break;
	case SPRN_SPRG1:
		spr_val = vcpu->arch.shared->sprg1;
		break;
	case SPRN_SPRG2:
		spr_val = vcpu->arch.shared->sprg2;
		break;
	case SPRN_SPRG3:
		spr_val = vcpu->arch.shared->sprg3;
		break;
	/* Note: SPRG4-7 are user-readable, so we don't get
	 * a trap. */

	case SPRN_DEC:
		spr_val = kvmppc_get_dec(vcpu, get_tb());
		break;
	default:
		emulated = vcpu->kvm->arch.kvm_ops->emulate_mfspr(vcpu, sprn,
								  &spr_val);
		if (unlikely(emulated == EMULATE_FAIL))
			printk(KERN_INFO "mfspr: unknown spr 0x%x\n", sprn);
		break;
	}

	if (emulated == EMULATE_DONE)
		kvmppc_set_gpr(vcpu, rt, spr_val);
	kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);

	return emulated;
}

/* XXX to do:
 * lhax
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lha
 * lhau
 * lmw
 * stmw
 */
/* XXX Should probably auto-generate instruction decoding for a particular
 * core from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst = kvmppc_get_last_inst(vcpu);
	int ra = get_ra(inst);
	int rs = get_rs(inst);
	int rt = get_rt(inst);
	int sprn = get_sprn(inst);
	enum emulation_result emulated = EMULATE_DONE;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

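	/*
	 * Worked decode example (values are illustrative): lwz r5, 0(r4)
	 * encodes as 0x80a40000.  get_op() takes the top six bits,
	 * 0x80a40000 >> 26 = 32 = OP_LWZ; get_rt() yields 5 and get_ra()
	 * yields 4, so the OP_LWZ case below loads 4 bytes into GPR5.
	 */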
	switch (get_op(inst)) {
	case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
	case OP_TRAP_64:
		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
		kvmppc_core_queue_program(vcpu,
					  vcpu->arch.shared->esr | ESR_PTR);
#endif
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {

		case OP_31_XOP_TRAP:
#ifdef CONFIG_64BIT
		case OP_31_XOP_TRAP_64:
#endif
#ifdef CONFIG_PPC_BOOK3S
			kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
			kvmppc_core_queue_program(vcpu,
					vcpu->arch.shared->esr | ESR_PTR);
#endif
			advance = 0;
			break;
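
		/*
		 * For the MMIO helpers below, the numeric arguments are
		 * the access size in bytes and an endianness flag (the
		 * byte-reversed forms such as lwbrx pass 0 for the
		 * latter; everything else passes 1).
		 */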
		case OP_31_XOP_LWZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

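		/*
		 * The 'update' forms (lbzux, lhzux, sthux, ...) also write
		 * the effective address of the access back to rA, mirroring
		 * what the hardware instruction does.
		 */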
		case OP_31_XOP_LBZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STWX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       4, 1);
			break;

		case OP_31_XOP_STBX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       1, 1);
			break;

		case OP_31_XOP_STBUX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LHAX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_MFSPR:
			emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
			break;

		case OP_31_XOP_STHX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 1);
			break;

		case OP_31_XOP_STHUX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_MTSPR:
			emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
			break;

		case OP_31_XOP_DCBST:
		case OP_31_XOP_DCBF:
		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

		case OP_31_XOP_LWBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_TLBSYNC:
			break;

		case OP_31_XOP_STWBRX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       4, 0);
			break;

		case OP_31_XOP_LHBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 0);
			break;

		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;

	case OP_LWZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	/* TBD: Add support for other 64-bit load variants like ldu, ldux, ldx etc. */
	case OP_LD:
		emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
		break;

	case OP_LWZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LBZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STW:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       4, 1);
		break;

	/* TBD: Add support for other 64-bit store variants like stdu, stdux, stdx etc. */
	case OP_STD:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       8, 1);
		break;

	case OP_STWU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STB:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       1, 1);
		break;

	case OP_STBU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHA:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		break;

	case OP_LHAU:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STH:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       2, 1);
		break;

	case OP_STHU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	default:
		emulated = EMULATE_FAIL;
	}

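	/*
	 * Fallback chain: anything the generic decoder above couldn't
	 * handle goes to the core-specific back end; if that fails too,
	 * reflect a program interrupt back to the guest.
	 */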
	if (emulated == EMULATE_FAIL) {
		emulated = vcpu->kvm->arch.kvm_ops->emulate_op(run, vcpu, inst,
							       &advance);
		if (emulated == EMULATE_AGAIN) {
			advance = 0;
		} else if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x (op %d xop %d)\n",
			       inst, get_op(inst), get_xop(inst));
			kvmppc_core_queue_program(vcpu, 0);
		}
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_instruction);