/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/xive.h>

#include "book3s.h"
#include "trace.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exits",       VCPU_STAT(sum_exits) },
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "queue_intr",  VCPU_STAT(queue_intr) },
	{ "halt_poll_success_ns",	VCPU_STAT(halt_poll_success_ns) },
	{ "halt_poll_fail_ns",		VCPU_STAT(halt_poll_fail_ns) },
	{ "halt_wait_ns",		VCPU_STAT(halt_wait_ns) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), },
	{ "halt_successful_wait",	VCPU_STAT(halt_successful_wait) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "pf_storage",  VCPU_STAT(pf_storage) },
	{ "sp_storage",  VCPU_STAT(sp_storage) },
	{ "pf_instruc",  VCPU_STAT(pf_instruc) },
	{ "sp_instruc",  VCPU_STAT(sp_instruc) },
	{ "ld",          VCPU_STAT(ld) },
	{ "ld_slow",     VCPU_STAT(ld_slow) },
	{ "st",          VCPU_STAT(st) },
	{ "st_slow",     VCPU_STAT(st_slow) },
	{ "pthru_all",       VCPU_STAT(pthru_all) },
	{ "pthru_host",      VCPU_STAT(pthru_host) },
	{ "pthru_bad_aff",   VCPU_STAT(pthru_bad_aff) },
	{ NULL }
};

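/*
 * Undo the "split real mode" fixup used by the PR backend for guests that
 * run with instruction relocation off but data relocation on: such vcpus
 * execute from an offset scratch region flagged by BOOK3S_HFLAG_SPLIT_HACK.
 * Strip the SPLIT_HACK_OFFS bias from the PC and clear the flag so the
 * guest-visible PC is the real one.
 */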
void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
		ulong pc = kvmppc_get_pc(vcpu);
		if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
			kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
		vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
	}
}
EXPORT_SYMBOL_GPL(kvmppc_unfixup_split_real);

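/*
 * PR guests may relocate their interrupt vectors via HIOR; HV guests take
 * interrupts at the architected offsets, so no bias is applied there.
 */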
static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
	if (!is_kvmppc_hv_enabled(vcpu->kvm))
		return to_book3s(vcpu)->hior;
	return 0;
}

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return;
	if (pending_now)
		kvmppc_set_int_pending(vcpu, 1);
	else if (old_pending)
		kvmppc_set_int_pending(vcpu, 0);
}

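/*
 * With the PR paravirt interface a guest flags a critical section by
 * writing its stack pointer (r1) into the shared page's "critical" field;
 * while the two match and the guest is in supervisor mode, maskable
 * interrupts are held back.  HV guests never use this, hence the early
 * false.
 */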
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw;
	ulong crit_r1;
	bool crit;

	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return false;

	crit_raw = kvmppc_get_critical(vcpu);
	crit_r1 = kvmppc_get_gpr(vcpu, 1);

	/* Truncate crit indicators in 32 bit mode */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);

	return crit;
}

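/*
 * Deliver an interrupt immediately: stash PC/MSR into SRR0/SRR1 (with
 * @flags OR'd into SRR1), branch to the vector (biased by HIOR for PR
 * guests) and let the MMU backend compute the interrupt-entry MSR.
 */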
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	kvmppc_unfixup_split_real(vcpu);
	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
	kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
	vcpu->arch.mmu.reset_msr(vcpu);
}

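/* Map an architected exception vector offset to a delivery priority. */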
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;		break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;	break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;		break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;		break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;		break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;		break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;		break;
	case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL;	break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;		break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;		break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;		break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;		break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;		break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;		break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;		break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;			break;
	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL;		break;
	default:    prio = BOOK3S_IRQPRIO_MAX;			break;
	}

	return prio;
}

void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
					  unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}

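/*
 * Pending exceptions live in a priority bitmap; queueing just sets the
 * bit, actual delivery happens in kvmppc_core_prepare_to_enter().
 */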
void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

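/*
 * For illustration (hypothetical caller, not part of this file): the
 * typical pattern for raising a timer interrupt from outside the vcpu
 * thread is
 *
 *	kvmppc_core_queue_dec(vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 * which is exactly what kvmppc_decrementer_func() below does.
 */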
int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
	unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;

	kvmppc_book3s_queue_irqprio(vcpu, vec);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
				    ulong flags)
{
	kvmppc_set_dar(vcpu, dar);
	kvmppc_set_dsisr(vcpu, flags);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);	/* used by kvm_hv */

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
{
	u64 msr = kvmppc_get_msr(vcpu);
	msr &= ~(SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	msr |= flags & (SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
}

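/*
 * Try to deliver one queued priority.  Returns 1 if the interrupt was
 * injected; decrementer and external interrupts are additionally gated
 * on MSR_EE and on not being inside a paravirt critical section.
 */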
static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
					 unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	bool crit = kvmppc_critical_section(vcpu);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	case BOOK3S_IRQPRIO_FAC_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0);

	return deliver;
}

/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		/* DEC interrupts get cleared by mtdec */
		return false;
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		/* External interrupts get cleared by userspace */
		return false;
	}

	return true;
}

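/*
 * Walk the pending bitmap in priority order and inject the first
 * deliverable exception; its bit is cleared unless clear_irqprio()
 * says it stays pending (DEC, level-triggered externals).
 */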
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);

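/*
 * Translate a guest physical address to a host pfn, special-casing the
 * paravirt magic page: accesses to it resolve to the host page backing
 * vcpu->arch.shared, with an extra page reference taken.
 */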
kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
			bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		kvm_pfn_t pfn;

		pfn = (kvm_pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);

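/*
 * Translate an effective address either through the guest MMU (when the
 * relevant MSR relocation bit is on) or as a real-mode identity mapping
 * clamped to KVM_PAM, honouring the split-real fixup for instruction
 * fetches.
 */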
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	bool data = (xlid == XLATE_DATA);
	bool iswrite = (xlrw == XLATE_WRITE);
	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & KVM_PAM;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;

		if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
		    !data) {
			if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
			    ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
				pte->raddr &= ~SPLIT_HACK_MASK;
		}
	}

	return r;
}

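/*
 * Fetch the instruction the guest last trapped on.  For system calls the
 * saved PC already points past the sc instruction, so back up 4 bytes.
 */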
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
					 u32 *inst)
{
	ulong pc = kvmppc_get_pc(vcpu);
	int r;

	if (type == INST_SC)
		pc -= 4;

	r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
	if (r == EMULATE_DONE)
		return r;
	else
		return EMULATE_AGAIN;
}
EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = kvmppc_get_msr(vcpu);
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
			union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			*val = get_reg_val(id, kvmppc_get_dar(vcpu));
			break;
		case KVM_REG_PPC_DSISR:
			*val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, VCPU_FPR(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			*val = get_reg_val(id, vcpu->arch.fp.fpscr);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
				val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST:
			*val = get_reg_val(id, INS_TW);
			break;
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xive_enabled())
				*val = get_reg_val(id, kvmppc_xive_get_icp(vcpu));
			else
				*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			*val = get_reg_val(id, vcpu->arch.fscr);
			break;
		case KVM_REG_PPC_TAR:
			*val = get_reg_val(id, vcpu->arch.tar);
			break;
		case KVM_REG_PPC_EBBHR:
			*val = get_reg_val(id, vcpu->arch.ebbhr);
			break;
		case KVM_REG_PPC_EBBRR:
			*val = get_reg_val(id, vcpu->arch.ebbrr);
			break;
		case KVM_REG_PPC_BESCR:
			*val = get_reg_val(id, vcpu->arch.bescr);
			break;
		case KVM_REG_PPC_IC:
			*val = get_reg_val(id, vcpu->arch.ic);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
			union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			kvmppc_set_dar(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_DSISR:
			kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(id, *val);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
				vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xive_enabled())
				r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val));
			else
				r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			vcpu->arch.fscr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_TAR:
			vcpu->arch.tar = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBHR:
			vcpu->arch.ebbhr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBRR:
			vcpu->arch.ebbrr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_BESCR:
			vcpu->arch.bescr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_IC:
			vcpu->arch.ic = set_reg_val(id, *val);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

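/*
 * Most of what follows fans out through kvm->arch.kvm_ops, the per-VM ops
 * table installed by either the PR or the HV backend at VM creation time.
 */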
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	vcpu->guest_debug = dbg->control;
	return 0;
}

void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
	kvm->arch.kvm_ops->free_memslot(free, dont);
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return kvm->arch.kvm_ops->create_memslot(slot, npages);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
}
EXPORT_SYMBOL_GPL(kvm_unmap_hva);

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->age_hva(kvm, start, end);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
}

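/*
 * H_LOGICAL_CI_LOAD / H_LOGICAL_CI_STORE: forward a guest's cache-inhibited
 * load or store to the in-kernel MMIO bus.  Only naturally aligned,
 * power-of-2 sizes up to 8 bytes are handled here; anything else returns
 * H_TOO_HARD so the caller can fall back (typically to userspace emulation).
 */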
int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	u64 buf;
	int srcu_idx;
	int ret;

	if (!is_power_of_2(size) || (size > sizeof(buf)))
		return H_TOO_HARD;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	switch (size) {
	case 1:
		kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
		break;

	case 2:
		kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
		break;

	case 4:
		kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
		break;

	case 8:
		kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
		break;

	default:
		BUG();
	}

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);

int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	unsigned long val = kvmppc_get_gpr(vcpu, 6);
	u64 buf;
	int srcu_idx;
	int ret;

	switch (size) {
	case 1:
		*(u8 *)&buf = val;
		break;

	case 2:
		*(__be16 *)&buf = cpu_to_be16(val);
		break;

	case 4:
		*(__be32 *)&buf = cpu_to_be32(val);
		break;

	case 8:
		*(__be64 *)&buf = cpu_to_be64(val);
		break;

	default:
		return H_TOO_HARD;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);

int kvmppc_core_check_processor_compat(void)
{
	/*
	 * We always return 0 for book3s; actual compatibility is
	 * checked when the HV or PR module is loaded.
	 */
	return 0;
}

int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
{
	return kvm->arch.kvm_ops->hcall_implemented(hcall);
}

#ifdef CONFIG_KVM_XICS
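/*
 * Interrupt routing: with an in-kernel XICS/XIVE there is a single flat
 * "irqchip", so a GSI maps 1:1 onto a pin and kvm_set_irq() just picks
 * the active backend.
 */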
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status)
{
	if (xive_enabled())
		return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level,
					   line_status);
	else
		return kvmppc_xics_set_irq(kvm, irq_source_id, irq, level,
					   line_status);
}

int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
			   level, line_status);
}

static int kvmppc_book3s_set_irq(struct kvm_kernel_irq_routing_entry *e,
				 struct kvm *kvm, int irq_source_id, int level,
				 bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
}

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi)
{
	entries->gsi = gsi;
	entries->type = KVM_IRQ_ROUTING_IRQCHIP;
	entries->set = kvmppc_book3s_set_irq;
	entries->irqchip.irqchip = 0;
	entries->irqchip.pin = gsi;
	return 1;
}

int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	return pin;
}

#endif /* CONFIG_KVM_XICS */

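/*
 * Common module init: register with the KVM core first, then the
 * interrupt-controller device ops.  On 32-bit, PR KVM is the only backend
 * and is initialised right here.
 */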
static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	r = kvmppc_book3s_init_pr();
#endif

#ifdef CONFIG_KVM_XICS
#ifdef CONFIG_KVM_XIVE
	if (xive_enabled()) {
		kvmppc_xive_init_module();
		kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
	} else
#endif
		kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
#endif
	return r;
}

static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_XICS
	if (xive_enabled())
		kvmppc_xive_exit_module();
#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif