// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/xive.h>

#include "book3s.h"
#include "trace.h"

/* #define EXIT_DEBUG */

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_ICOUNTER(VM, num_2M_pages),
	STATS_DESC_ICOUNTER(VM, num_1G_pages)
};
static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
		sizeof(struct kvm_vm_stat) / sizeof(u64));

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, sum_exits),
	STATS_DESC_COUNTER(VCPU, mmio_exits),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, light_exits),
	STATS_DESC_COUNTER(VCPU, itlb_real_miss_exits),
	STATS_DESC_COUNTER(VCPU, itlb_virt_miss_exits),
	STATS_DESC_COUNTER(VCPU, dtlb_real_miss_exits),
	STATS_DESC_COUNTER(VCPU, dtlb_virt_miss_exits),
	STATS_DESC_COUNTER(VCPU, syscall_exits),
	STATS_DESC_COUNTER(VCPU, isi_exits),
	STATS_DESC_COUNTER(VCPU, dsi_exits),
	STATS_DESC_COUNTER(VCPU, emulated_inst_exits),
	STATS_DESC_COUNTER(VCPU, dec_exits),
	STATS_DESC_COUNTER(VCPU, ext_intr_exits),
	STATS_DESC_TIME_NSEC(VCPU, halt_wait_ns),
	STATS_DESC_COUNTER(VCPU, halt_successful_wait),
	STATS_DESC_COUNTER(VCPU, dbell_exits),
	STATS_DESC_COUNTER(VCPU, gdbell_exits),
	STATS_DESC_COUNTER(VCPU, ld),
	STATS_DESC_COUNTER(VCPU, st),
	STATS_DESC_COUNTER(VCPU, pf_storage),
	STATS_DESC_COUNTER(VCPU, pf_instruc),
	STATS_DESC_COUNTER(VCPU, sp_storage),
	STATS_DESC_COUNTER(VCPU, sp_instruc),
	STATS_DESC_COUNTER(VCPU, queue_intr),
	STATS_DESC_COUNTER(VCPU, ld_slow),
	STATS_DESC_COUNTER(VCPU, st_slow),
	STATS_DESC_COUNTER(VCPU, pthru_all),
	STATS_DESC_COUNTER(VCPU, pthru_host),
	STATS_DESC_COUNTER(VCPU, pthru_bad_aff)
};
static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
		sizeof(struct kvm_vcpu_stat) / sizeof(u64));

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

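/*
 * Mirror the vcpu's pending-exception state into the shared (magic) page's
 * int_pending field, which a paravirtualized PR guest can consult.  HV
 * guests do not use the magic page, hence the early return.
 */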
static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return;
	if (pending_now)
		kvmppc_set_int_pending(vcpu, 1);
	else if (old_pending)
		kvmppc_set_int_pending(vcpu, 0);
}

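/*
 * A PR guest can mark a critical section by storing its stack pointer (r1)
 * in the shared page's "critical" field; while that value still matches r1
 * and the guest is in supervisor mode, interrupt delivery is deferred.
 * This never applies to HV guests.
 */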
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw;
	ulong crit_r1;
	bool crit;

	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return false;

	crit_raw = kvmppc_get_critical(vcpu);
	crit_r1 = kvmppc_get_gpr(vcpu, 1);

	/* Truncate crit indicators in 32 bit mode */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);

	return crit;
}

void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	vcpu->kvm->arch.kvm_ops->inject_interrupt(vcpu, vec, flags);
}

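/*
 * Map an exception vector offset (e.g. 0x300 for data storage) to the
 * internal delivery priority used as a bit number in pending_exceptions.
 * Unknown vectors map to BOOK3S_IRQPRIO_MAX and are never delivered.
 */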
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;		break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;	break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;		break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;		break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;		break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;		break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;		break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;		break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;		break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;		break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;		break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;		break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;		break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;		break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;			break;
	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL;		break;
	default:    prio = BOOK3S_IRQPRIO_MAX;			break;
	}

	return prio;
}

void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
					  unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);

void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check);

void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu)
{
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_SYSCALL, 0);
}
EXPORT_SYMBOL(kvmppc_core_queue_syscall);

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, 0);
}

void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, 0);
}

void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, 0);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
	/*
	 * This case (KVM_INTERRUPT_SET) should never actually arise for
	 * a pseries guest (because pseries guests expect their interrupt
	 * controllers to continue asserting an external interrupt request
	 * until it is acknowledged at the interrupt controller), but is
	 * included to avoid ABI breakage and potentially for other
	 * sorts of guest.
	 *
	 * There is a subtlety here: HV KVM does not test the
	 * external_oneshot flag in the code that synthesizes
	 * external interrupts for the guest just before entering
	 * the guest.  That is OK even if userspace did do a
	 * KVM_INTERRUPT_SET on a pseries guest vcpu, because the
	 * caller (kvm_vcpu_ioctl_interrupt) does a kvm_vcpu_kick()
	 * which ends up doing a smp_send_reschedule(), which will
	 * pull the guest all the way out to the host, meaning that
	 * we will call kvmppc_core_prepare_to_enter() before entering
	 * the guest again, and that will handle the external_oneshot
	 * flag correctly.
	 */
	if (irq->irq == KVM_INTERRUPT_SET)
		vcpu->arch.external_oneshot = 1;

	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
				    ulong flags)
{
	kvmppc_set_dar(vcpu, dar);
	kvmppc_set_dsisr(vcpu, flags);
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
{
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage);

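/*
 * Attempt to deliver one pending exception to the guest.  Decrementer and
 * external interrupts are gated on MSR_EE being set and the guest not being
 * in a paravirt critical section; the remaining priorities are delivered
 * unconditionally.  Returns 1 if an interrupt was actually injected.
 */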
static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
					 unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	bool crit = kvmppc_critical_section(vcpu);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	case BOOK3S_IRQPRIO_FAC_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0);

	return deliver;
}

/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
	switch (priority) {
		case BOOK3S_IRQPRIO_DECREMENTER:
			/* DEC interrupts get cleared by mtdec */
			return false;
		case BOOK3S_IRQPRIO_EXTERNAL:
			/*
			 * External interrupts get cleared by userspace
			 * except when set by the KVM_INTERRUPT ioctl with
			 * KVM_INTERRUPT_SET (not KVM_INTERRUPT_SET_LEVEL).
			 */
			if (vcpu->arch.external_oneshot) {
				vcpu->arch.external_oneshot = 0;
				return true;
			}
			return false;
	}

	return true;
}

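/*
 * Scan the pending_exceptions bitmap in priority order, injecting whatever
 * is currently deliverable, and then refresh the shared page's int_pending
 * flag so the guest sees what is still outstanding.
 */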
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);

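/*
 * Resolve a guest physical address to a host pfn.  Accesses that hit the
 * guest's magic (shared) page are redirected to the host page backing
 * vcpu->arch.shared; everything else goes through the memslots via
 * gfn_to_pfn_prot().
 */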
kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
			bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		kvm_pfn_t pfn;

		pfn = (kvm_pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);

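/*
 * Translate an effective address for an instruction or data access: through
 * the guest MMU when the corresponding MSR relocation bit (IR/DR) is on,
 * otherwise with a real-mode 1:1 mapping clamped to the KVM_PAM range.
 */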
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	bool data = (xlid == XLATE_DATA);
	bool iswrite = (xlrw == XLATE_WRITE);
	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & KVM_PAM;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;

		if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
		    !data) {
			if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
			    ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
				pte->raddr &= ~SPLIT_HACK_MASK;
		}
	}

	return r;
}

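/*
 * Re-fetch the instruction the guest was executing.  For a system call the
 * saved PC already points past the sc instruction, so fetch from PC - 4 in
 * that case.
 */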
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
		enum instruction_fetch_type type, u32 *inst)
{
	ulong pc = kvmppc_get_pc(vcpu);
	int r;

	if (type == INST_SC)
		pc -= 4;

	r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
	if (r == EMULATE_DONE)
		return r;
	else
		return EMULATE_AGAIN;
}
EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
	vcpu_put(vcpu);

	return ret;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
	vcpu_put(vcpu);

	return ret;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = kvmppc_get_msr(vcpu);
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EOPNOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EOPNOTSUPP;
}

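/*
 * ONE_REG accessors: the backend (HV or PR) gets first chance via
 * kvm_ops->get_one_reg()/set_one_reg(); any register it does not recognise
 * (-EINVAL) is handled by the common code below.
 */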
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
			union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			*val = get_reg_val(id, kvmppc_get_dar(vcpu));
			break;
		case KVM_REG_PPC_DSISR:
			*val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, VCPU_FPR(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			*val = get_reg_val(id, vcpu->arch.fp.fpscr);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
				val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST:
			*val = get_reg_val(id, INS_TW);
			break;
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xics_on_xive())
				*val = get_reg_val(id, kvmppc_xive_get_icp(vcpu));
			else
				*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
		case KVM_REG_PPC_VP_STATE:
			if (!vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xive_enabled())
				r = kvmppc_xive_native_get_vp(vcpu, val);
			else
				r = -ENXIO;
			break;
#endif /* CONFIG_KVM_XIVE */
		case KVM_REG_PPC_FSCR:
			*val = get_reg_val(id, vcpu->arch.fscr);
			break;
		case KVM_REG_PPC_TAR:
			*val = get_reg_val(id, vcpu->arch.tar);
			break;
		case KVM_REG_PPC_EBBHR:
			*val = get_reg_val(id, vcpu->arch.ebbhr);
			break;
		case KVM_REG_PPC_EBBRR:
			*val = get_reg_val(id, vcpu->arch.ebbrr);
			break;
		case KVM_REG_PPC_BESCR:
			*val = get_reg_val(id, vcpu->arch.bescr);
			break;
		case KVM_REG_PPC_IC:
			*val = get_reg_val(id, vcpu->arch.ic);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
			union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			kvmppc_set_dar(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_DSISR:
			kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(id, *val);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
				vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xics_on_xive())
				r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val));
			else
				r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val));
			break;
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
		case KVM_REG_PPC_VP_STATE:
			if (!vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xive_enabled())
				r = kvmppc_xive_native_set_vp(vcpu, val);
			else
				r = -ENXIO;
			break;
#endif /* CONFIG_KVM_XIVE */
		case KVM_REG_PPC_FSCR:
			vcpu->arch.fscr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_TAR:
			vcpu->arch.tar = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBHR:
			vcpu->arch.ebbhr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBRR:
			vcpu->arch.ebbrr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_BESCR:
			vcpu->arch.bescr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_IC:
			vcpu->arch.ic = set_reg_val(id, *val);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

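/*
 * The callbacks below are thin wrappers that simply forward to the HV or PR
 * implementation through kvm->arch.kvm_ops.
 */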
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(vcpu);
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	vcpu_load(vcpu);
	vcpu->guest_debug = dbg->control;
	vcpu_put(vcpu);
	return 0;
}

void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}

int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_create(vcpu);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{

}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvm->arch.kvm_ops->free_memslot(slot);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem,
							change);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
}

bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm->arch.kvm_ops->unmap_gfn_range(kvm, range);
}

bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm->arch.kvm_ops->age_gfn(kvm, range);
}

bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm->arch.kvm_ops->test_age_gfn(kvm, range);
}

bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	return kvm->arch.kvm_ops->set_spte_gfn(kvm, range);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{

#ifdef CONFIG_PPC64
	INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
	mutex_init(&kvm->arch.rtas_token_lock);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

#ifdef CONFIG_KVM_XICS
	/*
	 * Free the XIVE and XICS devices which are not directly freed by the
	 * device 'release' method
	 */
	kfree(kvm->arch.xive_devices.native);
	kvm->arch.xive_devices.native = NULL;
	kfree(kvm->arch.xive_devices.xics_on_xive);
	kvm->arch.xive_devices.xics_on_xive = NULL;
	kfree(kvm->arch.xics_device);
	kvm->arch.xics_device = NULL;
#endif /* CONFIG_KVM_XICS */
}

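/*
 * H_LOGICAL_CI_LOAD: cache-inhibited load of 1, 2, 4 or 8 bytes done on
 * behalf of the guest by forwarding the access to the in-kernel MMIO bus.
 * The value read is converted from big endian and returned in r4; anything
 * the kernel cannot handle returns H_TOO_HARD so userspace can complete it.
 */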
int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	u64 buf;
	int srcu_idx;
	int ret;

	if (!is_power_of_2(size) || (size > sizeof(buf)))
		return H_TOO_HARD;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	switch (size) {
	case 1:
		kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
		break;

	case 2:
		kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
		break;

	case 4:
		kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
		break;

	case 8:
		kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
		break;

	default:
		BUG();
	}

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);

int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	unsigned long val = kvmppc_get_gpr(vcpu, 6);
	u64 buf;
	int srcu_idx;
	int ret;

	switch (size) {
	case 1:
		*(u8 *)&buf = val;
		break;

	case 2:
		*(__be16 *)&buf = cpu_to_be16(val);
		break;

	case 4:
		*(__be32 *)&buf = cpu_to_be32(val);
		break;

	case 8:
		*(__be64 *)&buf = cpu_to_be64(val);
		break;

	default:
		return H_TOO_HARD;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);

int kvmppc_core_check_processor_compat(void)
{
	/*
	 * We always return 0 for book3s. We check
	 * for compatibility while loading the HV
	 * or PR module.
	 */
	return 0;
}

int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
{
	return kvm->arch.kvm_ops->hcall_implemented(hcall);
}

#ifdef CONFIG_KVM_XICS
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status)
{
	if (xics_on_xive())
		return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level,
					   line_status);
	else
		return kvmppc_xics_set_irq(kvm, irq_source_id, irq, level,
					   line_status);
}

int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
			   level, line_status);
}

static int kvmppc_book3s_set_irq(struct kvm_kernel_irq_routing_entry *e,
				 struct kvm *kvm, int irq_source_id, int level,
				 bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
}

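/*
 * Interrupt routing: GSIs map 1:1 onto pins of a single virtual irqchip
 * (number 0), so each GSI produces exactly one routing entry handled by
 * kvmppc_book3s_set_irq().
 */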
int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi)
{
	entries->gsi = gsi;
	entries->type = KVM_IRQ_ROUTING_IRQCHIP;
	entries->set = kvmppc_book3s_set_irq;
	entries->irqchip.irqchip = 0;
	entries->irqchip.pin = gsi;
	return 1;
}

int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	return pin;
}

#endif /* CONFIG_KVM_XICS */

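/*
 * Module init: register with the core KVM framework, pull in the PR backend
 * when it is built into this module, then advertise the in-kernel interrupt
 * controller devices: XICS-on-XIVE (and, where supported, native XIVE) on
 * XIVE hosts, otherwise the emulated XICS.
 */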
static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	r = kvmppc_book3s_init_pr();
#endif

#ifdef CONFIG_KVM_XICS
#ifdef CONFIG_KVM_XIVE
	if (xics_on_xive()) {
		kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
		if (kvmppc_xive_native_supported())
			kvm_register_device_ops(&kvm_xive_native_ops,
						KVM_DEV_TYPE_XIVE);
	} else
#endif
		kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
#endif
	return r;
}

static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif