/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include "timing.h"
#include <asm/cacheflush.h>

#include "booke.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

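/* Exit statistics exported through debugfs; every entry here is a per-vcpu
 * counter (see the VCPU_STAT() helper above). */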
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio",       VCPU_STAT(mmio_exits) },
	{ "dcr",        VCPU_STAT(dcr_exits) },
	{ "sig",        VCPU_STAT(signal_exits) },
	{ "itlb_r",     VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v",     VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r",     VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v",     VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc",       VCPU_STAT(syscall_exits) },
	{ "isi",        VCPU_STAT(isi_exits) },
	{ "dsi",        VCPU_STAT(dsi_exits) },
	{ "inst_emu",   VCPU_STAT(emulated_inst_exits) },
	{ "dec",        VCPU_STAT(dec_exits) },
	{ "ext_intr",   VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc:   %08lx msr:  %08lx\n", vcpu->arch.pc, vcpu->arch.msr);
	printk("lr:   %08lx ctr:  %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08lx srr1: %08lx\n", vcpu->arch.srr0, vcpu->arch.srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       vcpu->arch.gpr[i],
		       vcpu->arch.gpr[i+1],
		       vcpu->arch.gpr[i+2],
		       vcpu->arch.gpr[i+3]);
	}
}

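/* Pending exceptions are tracked as a bitmap of BOOKE_IRQPRIO_* values in
 * vcpu->arch.pending_exceptions; queueing only sets the priority bit, and
 * the actual injection happens later in kvmppc_core_deliver_interrupts(). */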
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                       unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_EXTERNAL);
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask;

	switch (priority) {
	case BOOKE_IRQPRIO_PROGRAM:
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_DATA_STORAGE:
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
	case BOOKE_IRQPRIO_ALIGNMENT:
		allowed = 1;
		msr_mask = MSR_CE|MSR_ME|MSR_DE;
		break;
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_WATCHDOG:
		allowed = vcpu->arch.msr & MSR_CE;
		msr_mask = MSR_ME;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.msr & MSR_ME;
		msr_mask = 0;
		break;
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		allowed = vcpu->arch.msr & MSR_EE;
		msr_mask = MSR_CE|MSR_ME|MSR_DE;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.msr & MSR_DE;
		msr_mask = MSR_ME;
		break;
	}

	if (allowed) {
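		/* Save the interrupted PC/MSR in SRR0/SRR1, redirect execution
		 * to IVPR|IVOR[priority], and narrow the new MSR to msr_mask
		 * (so, e.g., non-critical interrupts run with MSR[EE] clear). */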
		vcpu->arch.srr0 = vcpu->arch.pc;
		vcpu->arch.srr1 = vcpu->arch.msr;
		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask);

		clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

	return allowed;
}

/* Check pending exceptions and deliver one, if possible. */
void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

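	/* Walk the pending bitmap from the lowest-numbered bit upward and stop
	 * after the first exception that can actually be delivered. */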
	priority = __ffs(*pending);
	while (priority <= BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
		                         BITS_PER_BYTE * sizeof(*pending),
		                         priority + 1);
	}
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
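 *
 * RESUME_GUEST re-enters the guest, RESUME_GUEST_NV additionally forces the
 * non-volatile registers to be reloaded first, and RESUME_HOST returns to the
 * host with run->exit_reason describing what userspace must handle.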
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
	enum emulation_result er;
	int r = RESUME_HOST;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	local_irq_enable();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		if (need_resched())
			cond_resched();
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		/* Since we switched IVPR back to the host's value, the host
		 * handled this interrupt the moment we enabled interrupts.
		 * Now we just offer it a chance to reschedule the guest. */
		kvmppc_account_exit(vcpu, DEC_EXITS);
		if (need_resched())
			cond_resched();
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.msr & MSR_PR) {
			/* Program traps generated by user-level software must be handled
			 * by the guest kernel. */
			vcpu->arch.esr = vcpu->arch.fault_esr;
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			/* don't overwrite subtypes, just account kvm_stats */
			kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
			/* Future optimization: only reload non-volatiles if
			 * they were actually modified by emulation. */
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_DO_DCR:
			run->exit_reason = KVM_EXIT_DCR;
			r = RESUME_HOST;
			break;
		case EMULATE_FAIL:
			/* XXX Deliver Program interrupt to guest. */
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
			/* For debugging, encode the failing instruction and
			 * report it to userspace. */
			run->hw.hardware_exit_reason = ~0ULL << 32;
			run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
			r = RESUME_HOST;
			break;
		default:
			BUG();
		}
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DATA_STORAGE:
		vcpu->arch.dear = vcpu->arch.fault_dear;
		vcpu->arch.esr = vcpu->arch.fault_esr;
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		vcpu->arch.esr = vcpu->arch.fault_esr;
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SYSCALL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
			vcpu->arch.dear = vcpu->arch.fault_dear;
			vcpu->arch.esr = vcpu->arch.fault_esr;
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		u32 dbsr;

		vcpu->arch.pc = mfspr(SPRN_CSRR0);

		/* clear IAC events in DBSR register */
		dbsr = mfspr(SPRN_DBSR);
		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
		mtspr(SPRN_DBSR, dbsr);

		run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		r = RESUME_HOST;
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

	local_irq_disable();

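	/* Deliver a pending guest exception, if one is allowed, before we
	 * resume the guest. */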
	kvmppc_core_deliver_interrupts(vcpu);

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
		}
	}

	return r;
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pc = 0;
	vcpu->arch.msr = 0;
	vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */

	vcpu->arch.shadow_pid = 1;

	/* Eye-catching number so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR. */
	vcpu->arch.ivpr = 0x55550000;

	kvmppc_init_timing_stats(vcpu);

	return kvmppc_core_vcpu_setup(vcpu);
}

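/* Copy guest register state out to userspace (KVM_GET_REGS ioctl). */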
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = vcpu->arch.cr;
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = vcpu->arch.xer;
	regs->msr = vcpu->arch.msr;
	regs->srr0 = vcpu->arch.srr0;
	regs->srr1 = vcpu->arch.srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.sprg0;
	regs->sprg1 = vcpu->arch.sprg1;
	regs->sprg2 = vcpu->arch.sprg2;
	regs->sprg3 = vcpu->arch.sprg3;
	regs->sprg4 = vcpu->arch.sprg4;
	regs->sprg5 = vcpu->arch.sprg5;
	regs->sprg6 = vcpu->arch.sprg6;
	regs->sprg7 = vcpu->arch.sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = vcpu->arch.gpr[i];

	return 0;
}

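/* Load guest register state from userspace (KVM_SET_REGS ioctl). The MSR is
 * written via kvmppc_set_msr() so any side effects of the mode change apply. */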
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	vcpu->arch.cr = regs->cr;
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	vcpu->arch.xer = regs->xer;
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.srr0 = regs->srr0;
	vcpu->arch.srr1 = regs->srr1;
	vcpu->arch.sprg0 = regs->sprg0;
	vcpu->arch.sprg1 = regs->sprg1;
	vcpu->arch.sprg2 = regs->sprg2;
	vcpu->arch.sprg3 = regs->sprg3;
	vcpu->arch.sprg4 = regs->sprg4;
	vcpu->arch.sprg5 = regs->sprg5;
	vcpu->arch.sprg6 = regs->sprg6;
	vcpu->arch.sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
		vcpu->arch.gpr[i] = regs->gpr[i];

	return 0;
}

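/* The sregs and FPU state ioctls are not implemented for Book E yet; they
 * report -ENOTSUPP. */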
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
	return kvmppc_core_vcpu_translate(vcpu, tr);
}

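/* Module init: allocate a 64KB block for KVM's guest interrupt handlers and
 * copy each handler to the same offset as the corresponding host IVOR, so
 * only IVPR has to change on guest entry and exit. */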
int __init kvmppc_booke_init(void)
{
	unsigned long ivor[16];
	unsigned long max_ivor = 0;
	int i;

	/* We install our own exception handlers by hijacking IVPR. IVPR holds
	 * only the upper 16 bits of each vector address, so the handler block
	 * must be 64KB aligned, hence the 64KB allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
	                                         VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		if (ivor[i] > max_ivor)
			max_ivor = ivor[i];

		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       kvmppc_handlers_start + i * kvmppc_handler_len,
		       kvmppc_handler_len);
	}
	flush_icache_range(kvmppc_booke_handlers,
	                   kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);

	return 0;
}

void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
	kvm_exit();
}