/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !(v->arch.shared->msr & MSR_WE) ||
	       !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

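	/*
	 * Hypercall calling convention (see also the instruction template in
	 * kvm_vm_ioctl_get_pvinfo() below): the hypercall number arrives in
	 * r11 and up to four parameters in r3-r6.  The second return value is
	 * placed in r4 here; the primary status is this function's return
	 * value, which the exit handler normally hands back in r3.
	 */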
	switch (nr) {
	case HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE:
	{
		vcpu->arch.magic_page_pa = param1;
		vcpu->arch.magic_page_ea = param2;

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = HC_EV_SUCCESS;
		break;
	}
	case HC_VENDOR_KVM | KVM_HC_FEATURES:
		r = HC_EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500)
		/* XXX Missing magic page on 44x */
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	default:
		r = HC_EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

#ifdef CONFIG_KVM_BOOK3S_64_HV
	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled)
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
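		/*
		 * The access itself is carried out by userspace; for a load,
		 * the value is written back into the guest register by
		 * kvmppc_complete_mmio_load() when the vcpu re-enters via
		 * KVM_RUN (see kvm_arch_vcpu_ioctl_run() below).
		 */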
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       kvmppc_get_last_inst(vcpu));
		r = RESUME_HOST;
		break;
	default:
		BUG();
	}

	return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	if (type)
		return -EINVAL;

	return kvmppc_core_init_vm(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
		r = 1;
		break;
#ifndef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#ifdef CONFIG_KVM_E500
	case KVM_CAP_SW_TLB:
#endif
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_SPAPR_TCE:
		r = 1;
		break;
	case KVM_CAP_PPC_SMT:
		r = threads_per_core;
		break;
	case KVM_CAP_PPC_RMA:
		r = 1;
		/* PPC970 requires an RMA */
		if (cpu_has_feature(CPU_FTR_ARCH_201))
			r = 2;
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;

}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
	return kvmppc_core_prepare_memory_region(kvm, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   struct kvm_memory_slot old,
                                   int user_alloc)
{
	kvmppc_core_commit_memory_region(kvm, mem);
}


void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	vcpu->arch.wqp = &vcpu->wq;
	if (!IS_ERR(vcpu))
		kvmppc_create_vcpu_debugfs(vcpu, id);
	return vcpu;
}
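/*
 * Teardown mirrors creation: the decrementer hrtimer and its tasklet are
 * stopped before the core code releases the vcpu, so neither can fire once
 * the structure is gone.
 */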
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);
	tasklet_kill(&vcpu->arch.tasklet);

	kvmppc_remove_vcpu_debugfs(vcpu);
	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif

	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
}
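/*
 * Scheduler hooks: load/put keep vcpu->cpu tracking which physical CPU the
 * vcpu last ran on, and on booke they save/restore VRSAVE by hand because
 * the host kernel does not use that register itself.
 */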
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
	vcpu->cpu = smp_processor_id();
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
	vcpu->cpu = -1;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_REG_EXT_MASK) {
	case KVM_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_REG_FPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
		break;
	case KVM_REG_FQPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	return EMULATE_DO_MMIO;
}
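/*
 * kvmppc_handle_load()/kvmppc_handle_loads()/kvmppc_handle_store() only fill
 * in kvm_run->mmio and return EMULATE_DO_MMIO; the exit is then passed to
 * userspace, which performs the access, and kvmppc_complete_mmio_load()
 * above finishes a load on the next KVM_RUN.
 */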
/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes, int is_bigendian)
{
	int r;

	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
	vcpu->arch.mmio_sign_extend = 1;

	return r;
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_bigendian)
{
	void *data = run->mmio.data;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	return EMULATE_DO_MMIO;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

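		/*
		 * Userspace has handled a PAPR hypercall: propagate its
		 * return status into r3 and the nine return arguments into
		 * r4-r12 before resuming the guest.
		 */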
		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;

	me = get_cpu();
	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(vcpu->arch.wqp);
		vcpu->stat.halt_wakeup++;
	} else if (cpu != me && cpu != -1) {
		smp_send_reschedule(vcpu->cpu);
	}
	put_cpu();
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu, irq);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);
	kvm_vcpu_kick(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
#ifdef CONFIG_KVM_E500
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

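	/*
	 * Some capabilities change which configurations are valid (e.g. PAPR
	 * mode only makes sense on Book3S 64), so re-run the vcpu sanity
	 * check after flipping the flag.
	 */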
	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

#ifdef CONFIG_KVM_E500
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif

	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_nop = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

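	/*
	 * Userspace retrieves this template via KVM_PPC_GET_PVINFO and
	 * typically advertises it to the guest (e.g. through the device
	 * tree) so the guest can patch in hypercall sites.
	 */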
	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
	pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
	pvinfo->hcall[2] = inst_sc;
	pvinfo->hcall[3] = inst_nop;

	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm *kvm = filp->private_data;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
		goto out;
	}

	case KVM_ALLOCATE_RMA: {
		struct kvm *kvm = filp->private_data;
		struct kvm_allocate_rma rma;

		r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
		if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
			r = -EFAULT;
		break;
	}
#endif /* CONFIG_KVM_BOOK3S_64_HV */

	default:
		r = -ENOTTY;
	}

out:
	return r;
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}