/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>

#include "booke.h"
#include "44x_tlb.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

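/* Exit counters exported through debugfs. The generic KVM code walks this
 * table and creates one debugfs file per entry; VM_STAT()/VCPU_STAT() expand
 * to the counter's offset within struct kvm or struct kvm_vcpu plus the
 * matching KVM_STAT_* type. */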
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "dcr",         VCPU_STAT(dcr_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "itlb_r",      VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v",      VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r",      VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v",      VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "isi",         VCPU_STAT(isi_exits) },
	{ "dsi",         VCPU_STAT(dsi_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc:   %08lx msr:  %08lx\n", vcpu->arch.pc, vcpu->arch.msr);
	printk("lr:   %08lx ctr:  %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08lx srr1: %08lx\n", vcpu->arch.srr0, vcpu->arch.srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       vcpu->arch.gpr[i],
		       vcpu->arch.gpr[i+1],
		       vcpu->arch.gpr[i+2],
		       vcpu->arch.gpr[i+3]);
	}
}

static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                       unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_EXTERNAL);
}

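/* For each priority, 'allowed' tests the MSR enable bit that gates delivery
 * right now (EE, CE, ME or DE; the first group is always deliverable), and
 * msr_mask is the set of MSR bits the guest keeps once the interrupt is
 * taken -- everything else is cleared, mirroring what the hardware does. */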
/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask;

	switch (priority) {
	case BOOKE_IRQPRIO_PROGRAM:
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_DATA_STORAGE:
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
	case BOOKE_IRQPRIO_ALIGNMENT:
		allowed = 1;
		msr_mask = MSR_CE|MSR_ME|MSR_DE;
		break;
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_WATCHDOG:
		allowed = vcpu->arch.msr & MSR_CE;
		msr_mask = MSR_ME;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.msr & MSR_ME;
		msr_mask = 0;
		break;
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		allowed = vcpu->arch.msr & MSR_EE;
		msr_mask = MSR_CE|MSR_ME|MSR_DE;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.msr & MSR_DE;
		msr_mask = MSR_ME;
		break;
	}

	if (allowed) {
		vcpu->arch.srr0 = vcpu->arch.pc;
		vcpu->arch.srr1 = vcpu->arch.msr;
		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask);

		clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

	return allowed;
}

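/* pending_exceptions is a bitmap indexed by BOOKE_IRQPRIO_* value, where a
 * lower bit number means a higher delivery priority. __ffs() therefore finds
 * the highest-priority pending interrupt; at most one interrupt is delivered
 * per call, and lower-priority ones stay queued for a later exit. */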
/* Check pending exceptions and deliver one, if possible. */
void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority <= BOOKE_MAX_INTERRUPT) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
		                         BITS_PER_BYTE * sizeof(*pending),
		                         priority + 1);
	}
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
	enum emulation_result er;
	int r = RESUME_HOST;

	local_irq_enable();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		if (need_resched())
			cond_resched();
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		/* Since we switched IVPR back to the host's value, the host
		 * handled this interrupt the moment we enabled interrupts.
		 * Now we just offer it a chance to reschedule the guest. */

		/* XXX At this point the TLB still holds our shadow TLB, so if
		 * we do reschedule the host will fault over it. Perhaps we
		 * should politely restore the host's entries to minimize
		 * misses before ceding control. */
		vcpu->stat.dec_exits++;
		if (need_resched())
			cond_resched();
		r = RESUME_GUEST;
		break;

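	/* Program interrupts raised while the guest was in user mode (MSR[PR]
	 * set) are simply reflected back for the guest kernel to handle. When
	 * the guest kernel itself traps, it is most likely a privileged
	 * instruction that we need to emulate on its behalf, since the guest
	 * kernel runs without real supervisor privileges under KVM. */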
	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.msr & MSR_PR) {
			/* Program traps generated by user-level software must
			 * be handled by the guest kernel. */
			vcpu->arch.esr = vcpu->arch.fault_esr;
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
			r = RESUME_GUEST;
			break;
		}

		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			/* Future optimization: only reload non-volatiles if
			 * they were actually modified by emulation. */
			vcpu->stat.emulated_inst_exits++;
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_DO_DCR:
			run->exit_reason = KVM_EXIT_DCR;
			vcpu->stat.dcr_exits++;
			r = RESUME_HOST;
			break;
		case EMULATE_FAIL:
			/* XXX Deliver Program interrupt to guest. */
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
			/* For debugging, encode the failing instruction and
			 * report it to userspace. */
			run->hw.hardware_exit_reason = ~0ULL << 32;
			run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
			r = RESUME_HOST;
			break;
		default:
			BUG();
		}
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DATA_STORAGE:
		vcpu->arch.dear = vcpu->arch.fault_dear;
		vcpu->arch.esr = vcpu->arch.fault_esr;
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
		vcpu->stat.dsi_exits++;
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		vcpu->arch.esr = vcpu->arch.fault_esr;
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
		vcpu->stat.isi_exits++;
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SYSCALL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		vcpu->stat.syscall_exits++;
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DTLB_MISS: {
		struct kvmppc_44x_tlbe *gtlbe;
		unsigned long eaddr = vcpu->arch.fault_dear;
		gfn_t gfn;

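		/* Two-level TLB handling: a miss in the guest-visible TLB is
		 * reflected back to the guest as its own DTLB miss; a hit
		 * means only our shadow (host) TLB lacks the translation, so
		 * we either map the page ourselves or treat the access as
		 * emulated MMIO if it does not target guest RAM. */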
		/* Check the guest TLB. */
		gtlbe = kvmppc_44x_dtlb_search(vcpu, eaddr);
		if (!gtlbe) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
			vcpu->arch.dear = vcpu->arch.fault_dear;
			vcpu->arch.esr = vcpu->arch.fault_esr;
			vcpu->stat.dtlb_real_miss_exits++;
			r = RESUME_GUEST;
			break;
		}

		vcpu->arch.paddr_accessed = tlb_xlate(gtlbe, eaddr);
		gfn = vcpu->arch.paddr_accessed >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, vcpu->arch.paddr_accessed,
			               gtlbe->tid, gtlbe->word2,
			               get_tlb_bytes(gtlbe));
			vcpu->stat.dtlb_virt_miss_exits++;
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			r = kvmppc_emulate_mmio(run, vcpu);
			vcpu->stat.mmio_exits++;
		}

		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		struct kvmppc_44x_tlbe *gtlbe;
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlbe = kvmppc_44x_itlb_search(vcpu, eaddr);
		if (!gtlbe) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			vcpu->stat.itlb_real_miss_exits++;
			break;
		}

		vcpu->stat.itlb_virt_miss_exits++;

		gpaddr = tlb_xlate(gtlbe, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

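		/* kvm_is_visible_gfn() tells us whether the guest frame lies
		 * within a memory slot registered by userspace, i.e. whether
		 * it is backed by guest RAM at all. */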
		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlbe->tid,
			               gtlbe->word2, get_tlb_bytes(gtlbe));
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		u32 dbsr;

		vcpu->arch.pc = mfspr(SPRN_CSRR0);

		/* clear IAC events in DBSR register */
		dbsr = mfspr(SPRN_DBSR);
		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
		mtspr(SPRN_DBSR, dbsr);

		run->exit_reason = KVM_EXIT_DEBUG;
		r = RESUME_HOST;
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

	local_irq_disable();

	kvmppc_core_deliver_interrupts(vcpu);

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
			vcpu->stat.signal_exits++;
		}
	}

	return r;
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pc = 0;
	vcpu->arch.msr = 0;
	vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */

	vcpu->arch.shadow_pid = 1;

	/* Eye-catching number so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR. */
	vcpu->arch.ivpr = 0x55550000;

	return kvmppc_core_vcpu_setup(vcpu);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = vcpu->arch.cr;
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = vcpu->arch.xer;
	regs->msr = vcpu->arch.msr;
	regs->srr0 = vcpu->arch.srr0;
	regs->srr1 = vcpu->arch.srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.sprg0;
	regs->sprg1 = vcpu->arch.sprg1;
	regs->sprg2 = vcpu->arch.sprg2;
	regs->sprg3 = vcpu->arch.sprg3;
	regs->sprg4 = vcpu->arch.sprg4;
	regs->sprg5 = vcpu->arch.sprg5;
	regs->sprg6 = vcpu->arch.sprg6;
	regs->sprg7 = vcpu->arch.sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = vcpu->arch.gpr[i];

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	vcpu->arch.cr = regs->cr;
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	vcpu->arch.xer = regs->xer;
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.srr0 = regs->srr0;
	vcpu->arch.srr1 = regs->srr1;
	vcpu->arch.sprg0 = regs->sprg0;
	vcpu->arch.sprg1 = regs->sprg1;
	vcpu->arch.sprg2 = regs->sprg2;
	vcpu->arch.sprg3 = regs->sprg3;
	vcpu->arch.sprg4 = regs->sprg4;
	vcpu->arch.sprg5 = regs->sprg5;
	vcpu->arch.sprg6 = regs->sprg6;
	vcpu->arch.sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
		vcpu->arch.gpr[i] = regs->gpr[i];

	return 0;
}

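/* The sregs and FPU ioctls are not implemented for Book E; address
 * translation requests are handed to the core-specific code below. */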
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
	return kvmppc_core_vcpu_translate(vcpu, tr);
}

int kvmppc_booke_init(void)
{
	unsigned long ivor[16];
	unsigned long max_ivor = 0;
	int i;

	/* We install our own exception handlers by hijacking IVPR. IVPR holds
	 * only the top 16 bits of the handler base address, so the handlers
	 * must live in a 64KB-aligned region, which is why we need a 64KB
	 * allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
	                                         VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

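	/* Each handler stub is kvmppc_handler_len bytes long, and stub i is
	 * placed at offset IVOR[i] within the kvmppc_booke_handlers block, so
	 * pointing IVPR at that block is enough to redirect every exception
	 * to KVM's handlers. */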
	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		if (ivor[i] > max_ivor)
			max_ivor = ivor[i];

		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       kvmppc_handlers_start + i * kvmppc_handler_len,
		       kvmppc_handler_len);
	}
	flush_icache_range(kvmppc_booke_handlers,
	                   kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);

	return 0;
}

void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
	kvm_exit();
}