/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/xive.h>

#include "book3s.h"
#include "trace.h"

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */

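/*
 * VM- and vCPU-level statistics exported through KVM's generic debugfs
 * interface.
 */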
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exits",       VCPU_STAT(sum_exits) },
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "queue_intr",  VCPU_STAT(queue_intr) },
	{ "halt_poll_success_ns",	VCPU_STAT(halt_poll_success_ns) },
	{ "halt_poll_fail_ns",		VCPU_STAT(halt_poll_fail_ns) },
	{ "halt_wait_ns",		VCPU_STAT(halt_wait_ns) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), },
	{ "halt_successful_wait",	VCPU_STAT(halt_successful_wait) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "pf_storage",  VCPU_STAT(pf_storage) },
	{ "sp_storage",  VCPU_STAT(sp_storage) },
	{ "pf_instruc",  VCPU_STAT(pf_instruc) },
	{ "sp_instruc",  VCPU_STAT(sp_instruc) },
	{ "ld",          VCPU_STAT(ld) },
	{ "ld_slow",     VCPU_STAT(ld_slow) },
	{ "st",          VCPU_STAT(st) },
	{ "st_slow",     VCPU_STAT(st_slow) },
	{ "pthru_all",       VCPU_STAT(pthru_all) },
	{ "pthru_host",      VCPU_STAT(pthru_host) },
	{ "pthru_bad_aff",   VCPU_STAT(pthru_bad_aff) },
	{ "largepages_2M",    VM_STAT(num_2M_pages) },
	{ "largepages_1G",    VM_STAT(num_1G_pages) },
	{ NULL }
};

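/*
 * Undo the split real mode address fixup: if the guest PC/LR were offset
 * into the SPLIT_HACK region, strip that offset again and clear the hflag.
 */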
void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
		ulong pc = kvmppc_get_pc(vcpu);
		ulong lr = kvmppc_get_lr(vcpu);
		if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
			kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
		if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
			kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK);
		vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
	}
}
EXPORT_SYMBOL_GPL(kvmppc_unfixup_split_real);

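/* PR guests take interrupts relative to HIOR; HV guests use offset 0. */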
static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
	if (!is_kvmppc_hv_enabled(vcpu->kvm))
		return to_book3s(vcpu)->hior;
	return 0;
}

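/*
 * For PR guests, mirror whether any exception is still pending into the
 * shared-page int_pending field so the guest can see it.
 */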
static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return;
	if (pending_now)
		kvmppc_set_int_pending(vcpu, 1);
	else if (old_pending)
		kvmppc_set_int_pending(vcpu, 0);
}

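/*
 * A PR guest is in a "critical section" (and should not see certain
 * asynchronous interrupts) when the critical field in the shared page
 * equals r1 and the guest is in supervisor mode.  HV guests never use
 * this mechanism.
 */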
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw;
	ulong crit_r1;
	bool crit;

	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return false;

	crit_raw = kvmppc_get_critical(vcpu);
	crit_r1 = kvmppc_get_gpr(vcpu, 1);

	/* Truncate crit indicators in 32 bit mode */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);

	return crit;
}

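/*
 * Deliver an interrupt to the guest right away: save PC/MSR into
 * SRR0/SRR1, branch to the vector (plus HIOR for PR guests) and let the
 * MMU callback compute the new MSR.
 */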
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	kvmppc_unfixup_split_real(vcpu);
	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
	kvmppc_set_srr1(vcpu, (kvmppc_get_msr(vcpu) & ~0x783f0000ul) | flags);
	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
	vcpu->arch.mmu.reset_msr(vcpu);
}

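/* Map an exception vector offset to its internal delivery priority. */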
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;		break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;	break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;		break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;		break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;		break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;		break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;		break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;		break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;		break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;		break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;		break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;		break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;		break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;		break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;			break;
	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL;		break;
	default:    prio = BOOK3S_IRQPRIO_MAX;			break;
	}

	return prio;
}

void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
					  unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);

void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_MACHINE_CHECK, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check);

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, 0);
}

void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, 0);
}

void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, 0);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
	/*
	 * This case (KVM_INTERRUPT_SET) should never actually arise for
	 * a pseries guest (because pseries guests expect their interrupt
	 * controllers to continue asserting an external interrupt request
	 * until it is acknowledged at the interrupt controller), but is
	 * included to avoid ABI breakage and potentially for other
	 * sorts of guest.
	 *
	 * There is a subtlety here: HV KVM does not test the
	 * external_oneshot flag in the code that synthesizes
	 * external interrupts for the guest just before entering
	 * the guest.  That is OK even if userspace did do a
	 * KVM_INTERRUPT_SET on a pseries guest vcpu, because the
	 * caller (kvm_vcpu_ioctl_interrupt) does a kvm_vcpu_kick()
	 * which ends up doing a smp_send_reschedule(), which will
	 * pull the guest all the way out to the host, meaning that
	 * we will call kvmppc_core_prepare_to_enter() before entering
	 * the guest again, and that will handle the external_oneshot
	 * flag correctly.
	 */
	if (irq->irq == KVM_INTERRUPT_SET)
		vcpu->arch.external_oneshot = 1;

	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
				    ulong flags)
{
	kvmppc_set_dar(vcpu, dar);
	kvmppc_set_dsisr(vcpu, flags);
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_data_storage);

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
{
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_inst_storage);

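/*
 * Try to deliver one pending exception.  Decrementer and external
 * interrupts are only delivered when MSR_EE is set and the guest is not
 * in a critical section; everything else is delivered unconditionally.
 * Returns 1 if the interrupt was injected.
 */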
static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
					 unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	bool crit = kvmppc_critical_section(vcpu);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	case BOOK3S_IRQPRIO_FAC_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0);

	return deliver;
}

/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
	switch (priority) {
		case BOOK3S_IRQPRIO_DECREMENTER:
			/* DEC interrupts get cleared by mtdec */
			return false;
		case BOOK3S_IRQPRIO_EXTERNAL:
			/*
			 * External interrupts get cleared by userspace
			 * except when set by the KVM_INTERRUPT ioctl with
			 * KVM_INTERRUPT_SET (not KVM_INTERRUPT_SET_LEVEL).
			 */
			if (vcpu->arch.external_oneshot) {
				vcpu->arch.external_oneshot = 0;
				return true;
			}
			return false;
	}

	return true;
}

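/*
 * Scan the pending exception bitmap in priority order and inject what
 * can currently be taken.  One-shot exceptions are cleared once
 * injected; DEC and level-triggered external interrupts stay pending
 * until dismissed elsewhere.  The resulting pending state is then
 * reflected back to the guest.
 */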
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);

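/*
 * Translate a guest physical address to a host pfn.  The paravirt magic
 * page is special-cased: it is backed by the vcpu's shared page rather
 * than by a memslot.
 */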
kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
			bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		kvm_pfn_t pfn;

		pfn = (kvm_pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);

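/*
 * Translate an effective address either through the guest MMU or, when
 * address translation is off, 1:1 as a real-mode access (with the split
 * real mode fixup undone for instruction fetches).
 */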
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	bool data = (xlid == XLATE_DATA);
	bool iswrite = (xlrw == XLATE_WRITE);
	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & KVM_PAM;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;

		if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
		    !data) {
			if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
			    ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
				pte->raddr &= ~SPLIT_HACK_MASK;
		}
	}

	return r;
}

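/*
 * Fetch the instruction the guest was executing (for a system call exit,
 * the sc instruction just before the new PC), typically so it can be
 * emulated.
 */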
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
		enum instruction_fetch_type type, u32 *inst)
{
	ulong pc = kvmppc_get_pc(vcpu);
	int r;

	if (type == INST_SC)
		pc -= 4;

	r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
	if (r == EMULATE_DONE)
		return r;
	else
		return EMULATE_AGAIN;
}
EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
	vcpu_put(vcpu);

	return ret;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);
	ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
	vcpu_put(vcpu);

	return ret;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = kvmppc_get_msr(vcpu);
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

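/*
 * ONE_REG accessors: the HV/PR backend gets first pick; any register it
 * does not recognise (-EINVAL) falls through to the common Book3S
 * handling below.
 */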
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
			union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			*val = get_reg_val(id, kvmppc_get_dar(vcpu));
			break;
		case KVM_REG_PPC_DSISR:
			*val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, VCPU_FPR(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			*val = get_reg_val(id, vcpu->arch.fp.fpscr);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
				val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST:
			*val = get_reg_val(id, INS_TW);
			break;
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xics_on_xive())
				*val = get_reg_val(id, kvmppc_xive_get_icp(vcpu));
			else
				*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			*val = get_reg_val(id, vcpu->arch.fscr);
			break;
		case KVM_REG_PPC_TAR:
			*val = get_reg_val(id, vcpu->arch.tar);
			break;
		case KVM_REG_PPC_EBBHR:
			*val = get_reg_val(id, vcpu->arch.ebbhr);
			break;
		case KVM_REG_PPC_EBBRR:
			*val = get_reg_val(id, vcpu->arch.ebbrr);
			break;
		case KVM_REG_PPC_BESCR:
			*val = get_reg_val(id, vcpu->arch.bescr);
			break;
		case KVM_REG_PPC_IC:
			*val = get_reg_val(id, vcpu->arch.ic);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
			union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			kvmppc_set_dar(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_DSISR:
			kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(id, *val);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
				vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
				r = -ENXIO;
				break;
			}
			if (xics_on_xive())
				r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val));
			else
				r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			vcpu->arch.fscr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_TAR:
			vcpu->arch.tar = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBHR:
			vcpu->arch.ebbhr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBRR:
			vcpu->arch.ebbrr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_BESCR:
			vcpu->arch.bescr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_IC:
			vcpu->arch.ic = set_reg_val(id, *val);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	vcpu_load(vcpu);
	vcpu->guest_debug = dbg->control;
	vcpu_put(vcpu);
	return 0;
}

void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
	kvm->arch.kvm_ops->free_memslot(free, dont);
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return kvm->arch.kvm_ops->create_memslot(slot, npages);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->age_hva(kvm, start, end);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
}

int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
	return 0;
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

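/*
 * VM-wide setup/teardown: the sPAPR TCE table and RTAS token lists are
 * managed here for both backends; everything else is delegated to the
 * HV or PR kvm_ops.
 */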
int kvmppc_core_init_vm(struct kvm *kvm)
{

#ifdef CONFIG_PPC64
	INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
}

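/*
 * H_LOGICAL_CI_LOAD: cache-inhibited load of 1, 2, 4 or 8 bytes from a
 * device on the KVM MMIO bus, returned in r4.  Accesses that cannot be
 * handled here are failed with H_TOO_HARD so the caller can fall back
 * to other handling.
 */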
int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	u64 buf;
	int srcu_idx;
	int ret;

	if (!is_power_of_2(size) || (size > sizeof(buf)))
		return H_TOO_HARD;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	switch (size) {
	case 1:
		kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
		break;

	case 2:
		kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
		break;

	case 4:
		kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
		break;

	case 8:
		kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
		break;

	default:
		BUG();
	}

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);

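/*
 * H_LOGICAL_CI_STORE: the store-side counterpart of
 * kvmppc_h_logical_ci_load(), writing the value in r6 to the KVM MMIO
 * bus.
 */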
int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	unsigned long val = kvmppc_get_gpr(vcpu, 6);
	u64 buf;
	int srcu_idx;
	int ret;

	switch (size) {
	case 1:
		*(u8 *)&buf = val;
		break;

	case 2:
		*(__be16 *)&buf = cpu_to_be16(val);
		break;

	case 4:
		*(__be32 *)&buf = cpu_to_be32(val);
		break;

	case 8:
		*(__be64 *)&buf = cpu_to_be64(val);
		break;

	default:
		return H_TOO_HARD;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	if (ret != 0)
		return H_TOO_HARD;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);

int kvmppc_core_check_processor_compat(void)
{
	/*
	 * We always return 0 for book3s. We check
	 * for compatibility while loading the HV
	 * or PR module.
	 */
	return 0;
}

int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
{
	return kvm->arch.kvm_ops->hcall_implemented(hcall);
}

#ifdef CONFIG_KVM_XICS
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status)
{
	if (xics_on_xive())
		return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level,
					   line_status);
	else
		return kvmppc_xics_set_irq(kvm, irq_source_id, irq, level,
					   line_status);
}

int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
			   level, line_status);
}

static int kvmppc_book3s_set_irq(struct kvm_kernel_irq_routing_entry *e,
				 struct kvm *kvm, int irq_source_id, int level,
				 bool line_status)
{
	return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
}

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi)
{
	entries->gsi = gsi;
	entries->type = KVM_IRQ_ROUTING_IRQCHIP;
	entries->set = kvmppc_book3s_set_irq;
	entries->irqchip.irqchip = 0;
	entries->irqchip.pin = gsi;
	return 1;
}

int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	return pin;
}

#endif /* CONFIG_KVM_XICS */

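/*
 * Common module init/exit: register with the KVM core, pull in the PR
 * backend when it is built into this module (32-bit), and register the
 * in-kernel XICS/XIVE interrupt controller device ops.
 */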
static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	r = kvmppc_book3s_init_pr();
#endif

#ifdef CONFIG_KVM_XICS
#ifdef CONFIG_KVM_XIVE
	if (xics_on_xive()) {
		kvmppc_xive_init_module();
		kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
	} else
#endif
		kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
#endif
	return r;
}

static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_XICS
	if (xics_on_xive())
		kvmppc_xive_exit_module();
#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif