/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}
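/*
 * Walk all guest frames of a memory slot and transfer the per-page dirty
 * state tracked by the gmap into the KVM dirty bitmap.
 */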
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_mem_control(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	return -ENXIO;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
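/*
 * Create the architecture specific parts of a VM: the SCA (system control
 * area), the s390 debug feature, the floating interrupt list and, unless
 * this is a ucontrol VM, the guest address space (gmap).
 */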
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, -1UL);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		vcpu->arch.gmap->private = vcpu->kvm;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	return 0;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xD1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
				      ICTL_TPROT;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return rc;
}
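/*
 * Allocate a new vcpu together with its SIE control block and, for
 * non-ucontrol guests, register the SIE block in the SCA of the VM.
 */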
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_cpu_has_interrupt(vcpu);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
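/*
 * The floating point control value provided by user space is validated
 * with test_fp_ctl() before the guest floating point state is loaded.
 */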
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}

bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
	if (!MACHINE_IS_LPAR)
		return false;
	/* only enable for z10 and later */
	if (!MACHINE_HAS_EDAT1)
		return false;
	if (!kvm->arch.use_cmma)
		return false;
	return true;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}
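/*
 * Process pending vcpu requests before entering SIE: re-arm the ipte
 * notifier for the prefix pages, handle a requested TLB flush and enable
 * or disable IBS as requested.
 */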
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	inti.parm64 = token;

	if (start_token) {
		inti.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}
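/*
 * Arm an asynchronous pfault for the faulting guest address, but only if
 * the guest has set up the pfault handshake and is currently able to take
 * the pfault-init interrupt.
 */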
gpa_to_gfn(current->thread.gmap_addr)); 114381480cc1SHeiko Carstens hva += current->thread.gmap_addr & ~PAGE_MASK; 114481480cc1SHeiko Carstens if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8)) 11453c038e6bSDominik Dingel return 0; 11463c038e6bSDominik Dingel 11473c038e6bSDominik Dingel rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch); 11483c038e6bSDominik Dingel return rc; 11493c038e6bSDominik Dingel } 11503c038e6bSDominik Dingel 11513fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu) 1152b0c632dbSHeiko Carstens { 11533fb4c40fSThomas Huth int rc, cpuflags; 1154e168bf8dSCarsten Otte 11553c038e6bSDominik Dingel /* 11563c038e6bSDominik Dingel * On s390 notifications for arriving pages will be delivered directly 11573c038e6bSDominik Dingel * to the guest but the house keeping for completed pfaults is 11583c038e6bSDominik Dingel * handled outside the worker. 11593c038e6bSDominik Dingel */ 11603c038e6bSDominik Dingel kvm_check_async_pf_completion(vcpu); 11613c038e6bSDominik Dingel 11625a32c1afSChristian Borntraeger memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16); 1163b0c632dbSHeiko Carstens 1164b0c632dbSHeiko Carstens if (need_resched()) 1165b0c632dbSHeiko Carstens schedule(); 1166b0c632dbSHeiko Carstens 1167d3a73acbSMartin Schwidefsky if (test_cpu_flag(CIF_MCCK_PENDING)) 116871cde587SChristian Borntraeger s390_handle_mcck(); 116971cde587SChristian Borntraeger 117079395031SJens Freimann if (!kvm_is_ucontrol(vcpu->kvm)) { 117179395031SJens Freimann rc = kvm_s390_deliver_pending_interrupts(vcpu); 117279395031SJens Freimann if (rc) 117379395031SJens Freimann return rc; 117479395031SJens Freimann } 11750ff31867SCarsten Otte 11762c70fe44SChristian Borntraeger rc = kvm_s390_handle_requests(vcpu); 11772c70fe44SChristian Borntraeger if (rc) 11782c70fe44SChristian Borntraeger return rc; 11792c70fe44SChristian Borntraeger 118027291e21SDavid Hildenbrand if (guestdbg_enabled(vcpu)) { 118127291e21SDavid Hildenbrand kvm_s390_backup_guest_per_regs(vcpu); 118227291e21SDavid Hildenbrand kvm_s390_patch_guest_per_regs(vcpu); 118327291e21SDavid Hildenbrand } 118427291e21SDavid Hildenbrand 1185b0c632dbSHeiko Carstens vcpu->arch.sie_block->icptcode = 0; 11863fb4c40fSThomas Huth cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); 11873fb4c40fSThomas Huth VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags); 11883fb4c40fSThomas Huth trace_kvm_s390_sie_enter(vcpu, cpuflags); 11892b29a9fdSDominik Dingel 11903fb4c40fSThomas Huth return 0; 11913fb4c40fSThomas Huth } 11923fb4c40fSThomas Huth 11933fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) 11943fb4c40fSThomas Huth { 119524eb3a82SDominik Dingel int rc = -1; 11962b29a9fdSDominik Dingel 11972b29a9fdSDominik Dingel VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", 11982b29a9fdSDominik Dingel vcpu->arch.sie_block->icptcode); 11992b29a9fdSDominik Dingel trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); 12002b29a9fdSDominik Dingel 120127291e21SDavid Hildenbrand if (guestdbg_enabled(vcpu)) 120227291e21SDavid Hildenbrand kvm_s390_restore_guest_per_regs(vcpu); 120327291e21SDavid Hildenbrand 12043fb4c40fSThomas Huth if (exit_reason >= 0) { 12057c470539SMartin Schwidefsky rc = 0; 1206210b1607SThomas Huth } else if (kvm_is_ucontrol(vcpu->kvm)) { 1207210b1607SThomas Huth vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL; 1208210b1607SThomas Huth vcpu->run->s390_ucontrol.trans_exc_code = 1209210b1607SThomas Huth current->thread.gmap_addr; 
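/* descriptive note: pgm_code 0x10 is the segment-translation exception program interruption code; for ucontrol guests the fault (address in trans_exc_code) is handed to userspace to resolve */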
1210210b1607SThomas Huth vcpu->run->s390_ucontrol.pgm_code = 0x10; 1211210b1607SThomas Huth rc = -EREMOTE; 121224eb3a82SDominik Dingel 121324eb3a82SDominik Dingel } else if (current->thread.gmap_pfault) { 12143c038e6bSDominik Dingel trace_kvm_s390_major_guest_pfault(vcpu); 121524eb3a82SDominik Dingel current->thread.gmap_pfault = 0; 1216fa576c58SThomas Huth if (kvm_arch_setup_async_pf(vcpu)) { 121724eb3a82SDominik Dingel rc = 0; 1218fa576c58SThomas Huth } else { 1219fa576c58SThomas Huth gpa_t gpa = current->thread.gmap_addr; 1220fa576c58SThomas Huth rc = kvm_arch_fault_in_page(vcpu, gpa, 1); 1221fa576c58SThomas Huth } 122224eb3a82SDominik Dingel } 122324eb3a82SDominik Dingel 122424eb3a82SDominik Dingel if (rc == -1) { 1225699bde3bSChristian Borntraeger VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); 1226699bde3bSChristian Borntraeger trace_kvm_s390_sie_fault(vcpu); 1227699bde3bSChristian Borntraeger rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 12281f0d0f09SCarsten Otte } 1229b0c632dbSHeiko Carstens 12305a32c1afSChristian Borntraeger memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); 12313fb4c40fSThomas Huth 1232a76ccff6SThomas Huth if (rc == 0) { 1233a76ccff6SThomas Huth if (kvm_is_ucontrol(vcpu->kvm)) 12342955c83fSChristian Borntraeger /* Don't exit for host interrupts. */ 12352955c83fSChristian Borntraeger rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0; 1236a76ccff6SThomas Huth else 1237a76ccff6SThomas Huth rc = kvm_handle_sie_intercept(vcpu); 1238a76ccff6SThomas Huth } 1239a76ccff6SThomas Huth 12403fb4c40fSThomas Huth return rc; 12413fb4c40fSThomas Huth } 12423fb4c40fSThomas Huth 12433fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu) 12443fb4c40fSThomas Huth { 12453fb4c40fSThomas Huth int rc, exit_reason; 12463fb4c40fSThomas Huth 1247800c1065SThomas Huth /* 1248800c1065SThomas Huth * We try to hold kvm->srcu during most of vcpu_run (except when run- 1249800c1065SThomas Huth * ning the guest), so that memslots (and other stuff) are protected 1250800c1065SThomas Huth */ 1251800c1065SThomas Huth vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 1252800c1065SThomas Huth 1253a76ccff6SThomas Huth do { 12543fb4c40fSThomas Huth rc = vcpu_pre_run(vcpu); 12553fb4c40fSThomas Huth if (rc) 1256a76ccff6SThomas Huth break; 12573fb4c40fSThomas Huth 1258800c1065SThomas Huth srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 12593fb4c40fSThomas Huth /* 1260a76ccff6SThomas Huth * As PF_VCPU will be used in fault handler, between 1261a76ccff6SThomas Huth * guest_enter and guest_exit should be no uaccess. 
12623fb4c40fSThomas Huth */ 12633fb4c40fSThomas Huth preempt_disable(); 12643fb4c40fSThomas Huth kvm_guest_enter(); 12653fb4c40fSThomas Huth preempt_enable(); 1266a76ccff6SThomas Huth exit_reason = sie64a(vcpu->arch.sie_block, 1267a76ccff6SThomas Huth vcpu->run->s.regs.gprs); 12683fb4c40fSThomas Huth kvm_guest_exit(); 1269800c1065SThomas Huth vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 12703fb4c40fSThomas Huth 12713fb4c40fSThomas Huth rc = vcpu_post_run(vcpu, exit_reason); 127227291e21SDavid Hildenbrand } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc); 12733fb4c40fSThomas Huth 1274800c1065SThomas Huth srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 1275e168bf8dSCarsten Otte return rc; 1276b0c632dbSHeiko Carstens } 1277b0c632dbSHeiko Carstens 1278b028ee3eSDavid Hildenbrand static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1279b028ee3eSDavid Hildenbrand { 1280b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; 1281b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; 1282b028ee3eSDavid Hildenbrand if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) 1283b028ee3eSDavid Hildenbrand kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix); 1284b028ee3eSDavid Hildenbrand if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) { 1285b028ee3eSDavid Hildenbrand memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128); 1286d3d692c8SDavid Hildenbrand /* some control register changes require a tlb flush */ 1287d3d692c8SDavid Hildenbrand kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 1288b028ee3eSDavid Hildenbrand } 1289b028ee3eSDavid Hildenbrand if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) { 1290b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm; 1291b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc; 1292b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr; 1293b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->pp = kvm_run->s.regs.pp; 1294b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea; 1295b028ee3eSDavid Hildenbrand } 1296b028ee3eSDavid Hildenbrand if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) { 1297b028ee3eSDavid Hildenbrand vcpu->arch.pfault_token = kvm_run->s.regs.pft; 1298b028ee3eSDavid Hildenbrand vcpu->arch.pfault_select = kvm_run->s.regs.pfs; 1299b028ee3eSDavid Hildenbrand vcpu->arch.pfault_compare = kvm_run->s.regs.pfc; 1300b028ee3eSDavid Hildenbrand } 1301b028ee3eSDavid Hildenbrand kvm_run->kvm_dirty_regs = 0; 1302b028ee3eSDavid Hildenbrand } 1303b028ee3eSDavid Hildenbrand 1304b028ee3eSDavid Hildenbrand static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1305b028ee3eSDavid Hildenbrand { 1306b028ee3eSDavid Hildenbrand kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; 1307b028ee3eSDavid Hildenbrand kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; 1308b028ee3eSDavid Hildenbrand kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu); 1309b028ee3eSDavid Hildenbrand memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); 1310b028ee3eSDavid Hildenbrand kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm; 1311b028ee3eSDavid Hildenbrand kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc; 1312b028ee3eSDavid Hildenbrand kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr; 1313b028ee3eSDavid Hildenbrand kvm_run->s.regs.pp = vcpu->arch.sie_block->pp; 1314b028ee3eSDavid Hildenbrand kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea; 1315b028ee3eSDavid Hildenbrand kvm_run->s.regs.pft = 
vcpu->arch.pfault_token; 1316b028ee3eSDavid Hildenbrand kvm_run->s.regs.pfs = vcpu->arch.pfault_select; 1317b028ee3eSDavid Hildenbrand kvm_run->s.regs.pfc = vcpu->arch.pfault_compare; 1318b028ee3eSDavid Hildenbrand } 1319b028ee3eSDavid Hildenbrand 1320b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1321b0c632dbSHeiko Carstens { 13228f2abe6aSChristian Borntraeger int rc; 1323b0c632dbSHeiko Carstens sigset_t sigsaved; 1324b0c632dbSHeiko Carstens 132527291e21SDavid Hildenbrand if (guestdbg_exit_pending(vcpu)) { 132627291e21SDavid Hildenbrand kvm_s390_prepare_debug_exit(vcpu); 132727291e21SDavid Hildenbrand return 0; 132827291e21SDavid Hildenbrand } 132927291e21SDavid Hildenbrand 1330b0c632dbSHeiko Carstens if (vcpu->sigset_active) 1331b0c632dbSHeiko Carstens sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); 1332b0c632dbSHeiko Carstens 13336352e4d2SDavid Hildenbrand if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { 13346852d7b6SDavid Hildenbrand kvm_s390_vcpu_start(vcpu); 13356352e4d2SDavid Hildenbrand } else if (is_vcpu_stopped(vcpu)) { 13366352e4d2SDavid Hildenbrand pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n", 13376352e4d2SDavid Hildenbrand vcpu->vcpu_id); 13386352e4d2SDavid Hildenbrand return -EINVAL; 13396352e4d2SDavid Hildenbrand } 1340b0c632dbSHeiko Carstens 1341b028ee3eSDavid Hildenbrand sync_regs(vcpu, kvm_run); 1342d7b0b5ebSCarsten Otte 1343dab4079dSHeiko Carstens might_fault(); 1344e168bf8dSCarsten Otte rc = __vcpu_run(vcpu); 13459ace903dSChristian Ehrhardt 1346b1d16c49SChristian Ehrhardt if (signal_pending(current) && !rc) { 1347b1d16c49SChristian Ehrhardt kvm_run->exit_reason = KVM_EXIT_INTR; 13488f2abe6aSChristian Borntraeger rc = -EINTR; 1349b1d16c49SChristian Ehrhardt } 13508f2abe6aSChristian Borntraeger 135127291e21SDavid Hildenbrand if (guestdbg_exit_pending(vcpu) && !rc) { 135227291e21SDavid Hildenbrand kvm_s390_prepare_debug_exit(vcpu); 135327291e21SDavid Hildenbrand rc = 0; 135427291e21SDavid Hildenbrand } 135527291e21SDavid Hildenbrand 1356b8e660b8SHeiko Carstens if (rc == -EOPNOTSUPP) { 13578f2abe6aSChristian Borntraeger /* intercept cannot be handled in-kernel, prepare kvm-run */ 13588f2abe6aSChristian Borntraeger kvm_run->exit_reason = KVM_EXIT_S390_SIEIC; 13598f2abe6aSChristian Borntraeger kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; 13608f2abe6aSChristian Borntraeger kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; 13618f2abe6aSChristian Borntraeger kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; 13628f2abe6aSChristian Borntraeger rc = 0; 13638f2abe6aSChristian Borntraeger } 13648f2abe6aSChristian Borntraeger 13658f2abe6aSChristian Borntraeger if (rc == -EREMOTE) { 13668f2abe6aSChristian Borntraeger /* intercept was handled, but userspace support is needed 13678f2abe6aSChristian Borntraeger * kvm_run has been prepared by the handler */ 13688f2abe6aSChristian Borntraeger rc = 0; 13698f2abe6aSChristian Borntraeger } 13708f2abe6aSChristian Borntraeger 1371b028ee3eSDavid Hildenbrand store_regs(vcpu, kvm_run); 1372d7b0b5ebSCarsten Otte 1373b0c632dbSHeiko Carstens if (vcpu->sigset_active) 1374b0c632dbSHeiko Carstens sigprocmask(SIG_SETMASK, &sigsaved, NULL); 1375b0c632dbSHeiko Carstens 1376b0c632dbSHeiko Carstens vcpu->stat.exit_userspace++; 13777e8e6ab4SHeiko Carstens return rc; 1378b0c632dbSHeiko Carstens } 1379b0c632dbSHeiko Carstens 1380b0c632dbSHeiko Carstens /* 1381b0c632dbSHeiko Carstens * store status at address 1382b0c632dbSHeiko Carstens * we have two special cases:
1383b0c632dbSHeiko Carstens * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit 1384b0c632dbSHeiko Carstens * KVM_S390_STORE_STATUS_PREFIXED: -> prefix 1385b0c632dbSHeiko Carstens */ 1386d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa) 1387b0c632dbSHeiko Carstens { 1388092670cdSCarsten Otte unsigned char archmode = 1; 1389fda902cbSMichael Mueller unsigned int px; 1390178bd789SThomas Huth u64 clkcomp; 1391d0bce605SHeiko Carstens int rc; 1392b0c632dbSHeiko Carstens 1393d0bce605SHeiko Carstens if (gpa == KVM_S390_STORE_STATUS_NOADDR) { 1394d0bce605SHeiko Carstens if (write_guest_abs(vcpu, 163, &archmode, 1)) 1395b0c632dbSHeiko Carstens return -EFAULT; 1396d0bce605SHeiko Carstens gpa = SAVE_AREA_BASE; 1397d0bce605SHeiko Carstens } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) { 1398d0bce605SHeiko Carstens if (write_guest_real(vcpu, 163, &archmode, 1)) 1399b0c632dbSHeiko Carstens return -EFAULT; 1400d0bce605SHeiko Carstens gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE); 1401d0bce605SHeiko Carstens } 1402d0bce605SHeiko Carstens rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs), 1403d0bce605SHeiko Carstens vcpu->arch.guest_fpregs.fprs, 128); 1404d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs), 1405d0bce605SHeiko Carstens vcpu->run->s.regs.gprs, 128); 1406d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw), 1407d0bce605SHeiko Carstens &vcpu->arch.sie_block->gpsw, 16); 1408fda902cbSMichael Mueller px = kvm_s390_get_prefix(vcpu); 1409d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg), 1410fda902cbSMichael Mueller &px, 4); 1411d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, 1412d0bce605SHeiko Carstens gpa + offsetof(struct save_area, fp_ctrl_reg), 1413d0bce605SHeiko Carstens &vcpu->arch.guest_fpregs.fpc, 4); 1414d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg), 1415d0bce605SHeiko Carstens &vcpu->arch.sie_block->todpr, 4); 1416d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer), 1417d0bce605SHeiko Carstens &vcpu->arch.sie_block->cputm, 8); 1418178bd789SThomas Huth clkcomp = vcpu->arch.sie_block->ckc >> 8; 1419d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp), 1420d0bce605SHeiko Carstens &clkcomp, 8); 1421d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs), 1422d0bce605SHeiko Carstens &vcpu->run->s.regs.acrs, 64); 1423d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs), 1424d0bce605SHeiko Carstens &vcpu->arch.sie_block->gcr, 128); 1425d0bce605SHeiko Carstens return rc ? -EFAULT : 0; 1426b0c632dbSHeiko Carstens } 1427b0c632dbSHeiko Carstens 1428e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) 1429e879892cSThomas Huth { 1430e879892cSThomas Huth /* 1431e879892cSThomas Huth * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy 1432e879892cSThomas Huth * copying in vcpu load/put. 
Let's update our copies before we save 1433e879892cSThomas Huth * it into the save area 1434e879892cSThomas Huth */ 1435e879892cSThomas Huth save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); 1436e879892cSThomas Huth save_fp_regs(vcpu->arch.guest_fpregs.fprs); 1437e879892cSThomas Huth save_access_regs(vcpu->run->s.regs.acrs); 1438e879892cSThomas Huth 1439e879892cSThomas Huth return kvm_s390_store_status_unloaded(vcpu, addr); 1440e879892cSThomas Huth } 1441e879892cSThomas Huth 14428ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) 14438ad35755SDavid Hildenbrand { 14448ad35755SDavid Hildenbrand kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu); 14458ad35755SDavid Hildenbrand kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu); 14468ad35755SDavid Hildenbrand exit_sie_sync(vcpu); 14478ad35755SDavid Hildenbrand } 14488ad35755SDavid Hildenbrand 14498ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm) 14508ad35755SDavid Hildenbrand { 14518ad35755SDavid Hildenbrand unsigned int i; 14528ad35755SDavid Hildenbrand struct kvm_vcpu *vcpu; 14538ad35755SDavid Hildenbrand 14548ad35755SDavid Hildenbrand kvm_for_each_vcpu(i, vcpu, kvm) { 14558ad35755SDavid Hildenbrand __disable_ibs_on_vcpu(vcpu); 14568ad35755SDavid Hildenbrand } 14578ad35755SDavid Hildenbrand } 14588ad35755SDavid Hildenbrand 14598ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu) 14608ad35755SDavid Hildenbrand { 14618ad35755SDavid Hildenbrand kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu); 14628ad35755SDavid Hildenbrand kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu); 14638ad35755SDavid Hildenbrand exit_sie_sync(vcpu); 14648ad35755SDavid Hildenbrand } 14658ad35755SDavid Hildenbrand 14666852d7b6SDavid Hildenbrand void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) 14676852d7b6SDavid Hildenbrand { 14688ad35755SDavid Hildenbrand int i, online_vcpus, started_vcpus = 0; 14698ad35755SDavid Hildenbrand 14708ad35755SDavid Hildenbrand if (!is_vcpu_stopped(vcpu)) 14718ad35755SDavid Hildenbrand return; 14728ad35755SDavid Hildenbrand 14736852d7b6SDavid Hildenbrand trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1); 14748ad35755SDavid Hildenbrand /* Only one cpu at a time may enter/leave the STOPPED state. */ 1475433b9ee4SDavid Hildenbrand spin_lock(&vcpu->kvm->arch.start_stop_lock); 14768ad35755SDavid Hildenbrand online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); 14778ad35755SDavid Hildenbrand 14788ad35755SDavid Hildenbrand for (i = 0; i < online_vcpus; i++) { 14798ad35755SDavid Hildenbrand if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) 14808ad35755SDavid Hildenbrand started_vcpus++; 14818ad35755SDavid Hildenbrand } 14828ad35755SDavid Hildenbrand 14838ad35755SDavid Hildenbrand if (started_vcpus == 0) { 14848ad35755SDavid Hildenbrand /* we're the only active VCPU -> speed it up */ 14858ad35755SDavid Hildenbrand __enable_ibs_on_vcpu(vcpu); 14868ad35755SDavid Hildenbrand } else if (started_vcpus == 1) { 14878ad35755SDavid Hildenbrand /* 14888ad35755SDavid Hildenbrand * As we are starting a second VCPU, we have to disable 14898ad35755SDavid Hildenbrand * the IBS facility on all VCPUs to remove potentially 14908ad35755SDavid Hildenbrand * outstanding ENABLE requests.
14918ad35755SDavid Hildenbrand */ 14928ad35755SDavid Hildenbrand __disable_ibs_on_all_vcpus(vcpu->kvm); 14938ad35755SDavid Hildenbrand } 14948ad35755SDavid Hildenbrand 14956852d7b6SDavid Hildenbrand atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); 14968ad35755SDavid Hildenbrand /* 14978ad35755SDavid Hildenbrand * Another VCPU might have used IBS while we were offline. 14988ad35755SDavid Hildenbrand * Let's play safe and flush the VCPU at startup. 14998ad35755SDavid Hildenbrand */ 1500d3d692c8SDavid Hildenbrand kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 1501433b9ee4SDavid Hildenbrand spin_unlock(&vcpu->kvm->arch.start_stop_lock); 15028ad35755SDavid Hildenbrand return; 15036852d7b6SDavid Hildenbrand } 15046852d7b6SDavid Hildenbrand 15056852d7b6SDavid Hildenbrand void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) 15066852d7b6SDavid Hildenbrand { 15078ad35755SDavid Hildenbrand int i, online_vcpus, started_vcpus = 0; 15088ad35755SDavid Hildenbrand struct kvm_vcpu *started_vcpu = NULL; 15098ad35755SDavid Hildenbrand 15108ad35755SDavid Hildenbrand if (is_vcpu_stopped(vcpu)) 15118ad35755SDavid Hildenbrand return; 15128ad35755SDavid Hildenbrand 15136852d7b6SDavid Hildenbrand trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0); 15148ad35755SDavid Hildenbrand /* Only one cpu at a time may enter/leave the STOPPED state. */ 1515433b9ee4SDavid Hildenbrand spin_lock(&vcpu->kvm->arch.start_stop_lock); 15168ad35755SDavid Hildenbrand online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); 15178ad35755SDavid Hildenbrand 151832f5ff63SDavid Hildenbrand /* Need to lock access to action_bits to avoid a SIGP race condition */ 15194ae3c081SDavid Hildenbrand spin_lock(&vcpu->arch.local_int.lock); 15206852d7b6SDavid Hildenbrand atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); 152132f5ff63SDavid Hildenbrand 152232f5ff63SDavid Hildenbrand /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */ 152332f5ff63SDavid Hildenbrand vcpu->arch.local_int.action_bits &= 152432f5ff63SDavid Hildenbrand ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP); 15254ae3c081SDavid Hildenbrand spin_unlock(&vcpu->arch.local_int.lock); 152632f5ff63SDavid Hildenbrand 15278ad35755SDavid Hildenbrand __disable_ibs_on_vcpu(vcpu); 15288ad35755SDavid Hildenbrand 15298ad35755SDavid Hildenbrand for (i = 0; i < online_vcpus; i++) { 15308ad35755SDavid Hildenbrand if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) { 15318ad35755SDavid Hildenbrand started_vcpus++; 15328ad35755SDavid Hildenbrand started_vcpu = vcpu->kvm->vcpus[i]; 15338ad35755SDavid Hildenbrand } 15348ad35755SDavid Hildenbrand } 15358ad35755SDavid Hildenbrand 15368ad35755SDavid Hildenbrand if (started_vcpus == 1) { 15378ad35755SDavid Hildenbrand /* 15388ad35755SDavid Hildenbrand * As we only have one VCPU left, we want to enable the 15398ad35755SDavid Hildenbrand * IBS facility for that VCPU to speed it up. 
15408ad35755SDavid Hildenbrand */ 15418ad35755SDavid Hildenbrand __enable_ibs_on_vcpu(started_vcpu); 15428ad35755SDavid Hildenbrand } 15438ad35755SDavid Hildenbrand 1544433b9ee4SDavid Hildenbrand spin_unlock(&vcpu->kvm->arch.start_stop_lock); 15458ad35755SDavid Hildenbrand return; 15466852d7b6SDavid Hildenbrand } 15476852d7b6SDavid Hildenbrand 1548d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, 1549d6712df9SCornelia Huck struct kvm_enable_cap *cap) 1550d6712df9SCornelia Huck { 1551d6712df9SCornelia Huck int r; 1552d6712df9SCornelia Huck 1553d6712df9SCornelia Huck if (cap->flags) 1554d6712df9SCornelia Huck return -EINVAL; 1555d6712df9SCornelia Huck 1556d6712df9SCornelia Huck switch (cap->cap) { 1557fa6b7fe9SCornelia Huck case KVM_CAP_S390_CSS_SUPPORT: 1558fa6b7fe9SCornelia Huck if (!vcpu->kvm->arch.css_support) { 1559fa6b7fe9SCornelia Huck vcpu->kvm->arch.css_support = 1; 1560fa6b7fe9SCornelia Huck trace_kvm_s390_enable_css(vcpu->kvm); 1561fa6b7fe9SCornelia Huck } 1562fa6b7fe9SCornelia Huck r = 0; 1563fa6b7fe9SCornelia Huck break; 1564d6712df9SCornelia Huck default: 1565d6712df9SCornelia Huck r = -EINVAL; 1566d6712df9SCornelia Huck break; 1567d6712df9SCornelia Huck } 1568d6712df9SCornelia Huck return r; 1569d6712df9SCornelia Huck } 1570d6712df9SCornelia Huck 1571b0c632dbSHeiko Carstens long kvm_arch_vcpu_ioctl(struct file *filp, 1572b0c632dbSHeiko Carstens unsigned int ioctl, unsigned long arg) 1573b0c632dbSHeiko Carstens { 1574b0c632dbSHeiko Carstens struct kvm_vcpu *vcpu = filp->private_data; 1575b0c632dbSHeiko Carstens void __user *argp = (void __user *)arg; 1576800c1065SThomas Huth int idx; 1577bc923cc9SAvi Kivity long r; 1578b0c632dbSHeiko Carstens 157993736624SAvi Kivity switch (ioctl) { 158093736624SAvi Kivity case KVM_S390_INTERRUPT: { 1581ba5c1e9bSCarsten Otte struct kvm_s390_interrupt s390int; 1582ba5c1e9bSCarsten Otte 158393736624SAvi Kivity r = -EFAULT; 1584ba5c1e9bSCarsten Otte if (copy_from_user(&s390int, argp, sizeof(s390int))) 158593736624SAvi Kivity break; 158693736624SAvi Kivity r = kvm_s390_inject_vcpu(vcpu, &s390int); 158793736624SAvi Kivity break; 1588ba5c1e9bSCarsten Otte } 1589b0c632dbSHeiko Carstens case KVM_S390_STORE_STATUS: 1590800c1065SThomas Huth idx = srcu_read_lock(&vcpu->kvm->srcu); 1591bc923cc9SAvi Kivity r = kvm_s390_vcpu_store_status(vcpu, arg); 1592800c1065SThomas Huth srcu_read_unlock(&vcpu->kvm->srcu, idx); 1593bc923cc9SAvi Kivity break; 1594b0c632dbSHeiko Carstens case KVM_S390_SET_INITIAL_PSW: { 1595b0c632dbSHeiko Carstens psw_t psw; 1596b0c632dbSHeiko Carstens 1597bc923cc9SAvi Kivity r = -EFAULT; 1598b0c632dbSHeiko Carstens if (copy_from_user(&psw, argp, sizeof(psw))) 1599bc923cc9SAvi Kivity break; 1600bc923cc9SAvi Kivity r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw); 1601bc923cc9SAvi Kivity break; 1602b0c632dbSHeiko Carstens } 1603b0c632dbSHeiko Carstens case KVM_S390_INITIAL_RESET: 1604bc923cc9SAvi Kivity r = kvm_arch_vcpu_ioctl_initial_reset(vcpu); 1605bc923cc9SAvi Kivity break; 160614eebd91SCarsten Otte case KVM_SET_ONE_REG: 160714eebd91SCarsten Otte case KVM_GET_ONE_REG: { 160814eebd91SCarsten Otte struct kvm_one_reg reg; 160914eebd91SCarsten Otte r = -EFAULT; 161014eebd91SCarsten Otte if (copy_from_user(®, argp, sizeof(reg))) 161114eebd91SCarsten Otte break; 161214eebd91SCarsten Otte if (ioctl == KVM_SET_ONE_REG) 161314eebd91SCarsten Otte r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, ®); 161414eebd91SCarsten Otte else 161514eebd91SCarsten Otte r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, ®); 161614eebd91SCarsten 
Otte break; 161714eebd91SCarsten Otte } 161827e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL 161927e0393fSCarsten Otte case KVM_S390_UCAS_MAP: { 162027e0393fSCarsten Otte struct kvm_s390_ucas_mapping ucasmap; 162127e0393fSCarsten Otte 162227e0393fSCarsten Otte if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) { 162327e0393fSCarsten Otte r = -EFAULT; 162427e0393fSCarsten Otte break; 162527e0393fSCarsten Otte } 162627e0393fSCarsten Otte 162727e0393fSCarsten Otte if (!kvm_is_ucontrol(vcpu->kvm)) { 162827e0393fSCarsten Otte r = -EINVAL; 162927e0393fSCarsten Otte break; 163027e0393fSCarsten Otte } 163127e0393fSCarsten Otte 163227e0393fSCarsten Otte r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr, 163327e0393fSCarsten Otte ucasmap.vcpu_addr, ucasmap.length); 163427e0393fSCarsten Otte break; 163527e0393fSCarsten Otte } 163627e0393fSCarsten Otte case KVM_S390_UCAS_UNMAP: { 163727e0393fSCarsten Otte struct kvm_s390_ucas_mapping ucasmap; 163827e0393fSCarsten Otte 163927e0393fSCarsten Otte if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) { 164027e0393fSCarsten Otte r = -EFAULT; 164127e0393fSCarsten Otte break; 164227e0393fSCarsten Otte } 164327e0393fSCarsten Otte 164427e0393fSCarsten Otte if (!kvm_is_ucontrol(vcpu->kvm)) { 164527e0393fSCarsten Otte r = -EINVAL; 164627e0393fSCarsten Otte break; 164727e0393fSCarsten Otte } 164827e0393fSCarsten Otte 164927e0393fSCarsten Otte r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr, 165027e0393fSCarsten Otte ucasmap.length); 165127e0393fSCarsten Otte break; 165227e0393fSCarsten Otte } 165327e0393fSCarsten Otte #endif 1654ccc7910fSCarsten Otte case KVM_S390_VCPU_FAULT: { 1655527e30b4SMartin Schwidefsky r = gmap_fault(vcpu->arch.gmap, arg, 0); 1656ccc7910fSCarsten Otte break; 1657ccc7910fSCarsten Otte } 1658d6712df9SCornelia Huck case KVM_ENABLE_CAP: 1659d6712df9SCornelia Huck { 1660d6712df9SCornelia Huck struct kvm_enable_cap cap; 1661d6712df9SCornelia Huck r = -EFAULT; 1662d6712df9SCornelia Huck if (copy_from_user(&cap, argp, sizeof(cap))) 1663d6712df9SCornelia Huck break; 1664d6712df9SCornelia Huck r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); 1665d6712df9SCornelia Huck break; 1666d6712df9SCornelia Huck } 1667b0c632dbSHeiko Carstens default: 16683e6afcf1SCarsten Otte r = -ENOTTY; 1669b0c632dbSHeiko Carstens } 1670bc923cc9SAvi Kivity return r; 1671b0c632dbSHeiko Carstens } 1672b0c632dbSHeiko Carstens 16735b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) 16745b1c1493SCarsten Otte { 16755b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL 16765b1c1493SCarsten Otte if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET) 16775b1c1493SCarsten Otte && (kvm_is_ucontrol(vcpu->kvm))) { 16785b1c1493SCarsten Otte vmf->page = virt_to_page(vcpu->arch.sie_block); 16795b1c1493SCarsten Otte get_page(vmf->page); 16805b1c1493SCarsten Otte return 0; 16815b1c1493SCarsten Otte } 16825b1c1493SCarsten Otte #endif 16835b1c1493SCarsten Otte return VM_FAULT_SIGBUS; 16845b1c1493SCarsten Otte } 16855b1c1493SCarsten Otte 16865587027cSAneesh Kumar K.V int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, 16875587027cSAneesh Kumar K.V unsigned long npages) 1688db3fe4ebSTakuya Yoshikawa { 1689db3fe4ebSTakuya Yoshikawa return 0; 1690db3fe4ebSTakuya Yoshikawa } 1691db3fe4ebSTakuya Yoshikawa 1692b0c632dbSHeiko Carstens /* Section: memory related */ 1693f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm, 1694f7784b8eSMarcelo Tosatti struct kvm_memory_slot *memslot, 16957b6195a9STakuya Yoshikawa 
struct kvm_userspace_memory_region *mem, 16967b6195a9STakuya Yoshikawa enum kvm_mr_change change) 1697b0c632dbSHeiko Carstens { 1698dd2887e7SNick Wang /* A few sanity checks. We can have memory slots which have to be 1699dd2887e7SNick Wang located/ended at a segment boundary (1MB). The memory in userland is 1700dd2887e7SNick Wang ok to be fragmented into various different vmas. It is okay to mmap() 1701dd2887e7SNick Wang and munmap() stuff in this slot after doing this call at any time */ 1702b0c632dbSHeiko Carstens 1703598841caSCarsten Otte if (mem->userspace_addr & 0xffffful) 1704b0c632dbSHeiko Carstens return -EINVAL; 1705b0c632dbSHeiko Carstens 1706598841caSCarsten Otte if (mem->memory_size & 0xffffful) 1707b0c632dbSHeiko Carstens return -EINVAL; 1708b0c632dbSHeiko Carstens 1709f7784b8eSMarcelo Tosatti return 0; 1710f7784b8eSMarcelo Tosatti } 1711f7784b8eSMarcelo Tosatti 1712f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm, 1713f7784b8eSMarcelo Tosatti struct kvm_userspace_memory_region *mem, 17148482644aSTakuya Yoshikawa const struct kvm_memory_slot *old, 17158482644aSTakuya Yoshikawa enum kvm_mr_change change) 1716f7784b8eSMarcelo Tosatti { 1717f7850c92SCarsten Otte int rc; 1718f7784b8eSMarcelo Tosatti 17192cef4debSChristian Borntraeger /* If the basics of the memslot do not change, we do not want 17202cef4debSChristian Borntraeger * to update the gmap. Every update causes several unnecessary 17212cef4debSChristian Borntraeger * segment translation exceptions. This is usually handled just 17222cef4debSChristian Borntraeger * fine by the normal fault handler + gmap, but it will also 17232cef4debSChristian Borntraeger * cause faults on the prefix page of running guest CPUs. 17242cef4debSChristian Borntraeger */ 17252cef4debSChristian Borntraeger if (old->userspace_addr == mem->userspace_addr && 17262cef4debSChristian Borntraeger old->base_gfn * PAGE_SIZE == mem->guest_phys_addr && 17272cef4debSChristian Borntraeger old->npages * PAGE_SIZE == mem->memory_size) 17282cef4debSChristian Borntraeger return; 1729598841caSCarsten Otte 1730598841caSCarsten Otte rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, 1731598841caSCarsten Otte mem->guest_phys_addr, mem->memory_size); 1732598841caSCarsten Otte if (rc) 1733f7850c92SCarsten Otte printk(KERN_WARNING "kvm-s390: failed to commit memory region\n"); 1734598841caSCarsten Otte return; 1735b0c632dbSHeiko Carstens } 1736b0c632dbSHeiko Carstens 1737b0c632dbSHeiko Carstens static int __init kvm_s390_init(void) 1738b0c632dbSHeiko Carstens { 1739ef50f7acSChristian Borntraeger int ret; 17400ee75beaSAvi Kivity ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); 1741ef50f7acSChristian Borntraeger if (ret) 1742ef50f7acSChristian Borntraeger return ret; 1743ef50f7acSChristian Borntraeger 1744ef50f7acSChristian Borntraeger /* 1745ef50f7acSChristian Borntraeger * guests can ask for up to 255+1 double words, we need a full page 174625985edcSLucas De Marchi * to hold the maximum amount of facilities. On the other hand, we 1747ef50f7acSChristian Borntraeger * only set facilities that are known to work in KVM. 
1748ef50f7acSChristian Borntraeger */ 174978c4b59fSMichael Mueller vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA); 175078c4b59fSMichael Mueller if (!vfacilities) { 1751ef50f7acSChristian Borntraeger kvm_exit(); 1752ef50f7acSChristian Borntraeger return -ENOMEM; 1753ef50f7acSChristian Borntraeger } 175478c4b59fSMichael Mueller memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16); 1755d208c79dSThomas Huth vfacilities[0] &= 0xff82fff3f4fc2000UL; 17567feb6bb8SMichael Mueller vfacilities[1] &= 0x005c000000000000UL; 1757ef50f7acSChristian Borntraeger return 0; 1758b0c632dbSHeiko Carstens } 1759b0c632dbSHeiko Carstens 1760b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void) 1761b0c632dbSHeiko Carstens { 176278c4b59fSMichael Mueller free_page((unsigned long) vfacilities); 1763b0c632dbSHeiko Carstens kvm_exit(); 1764b0c632dbSHeiko Carstens } 1765b0c632dbSHeiko Carstens 1766b0c632dbSHeiko Carstens module_init(kvm_s390_init); 1767b0c632dbSHeiko Carstens module_exit(kvm_s390_exit); 1768566af940SCornelia Huck 1769566af940SCornelia Huck /* 1770566af940SCornelia Huck * Enable autoloading of the kvm module. 1771566af940SCornelia Huck * Note that we add the module alias here instead of virt/kvm/kvm_main.c 1772566af940SCornelia Huck * since x86 takes a different approach. 1773566af940SCornelia Huck */ 1774566af940SCornelia Huck #include <linux/miscdevice.h> 1775566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR); 1776566af940SCornelia Huck MODULE_ALIAS("devname:kvm"); 1777