/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}
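
/*
 * Dirty log synchronization: scan every page of the memslot and transfer
 * the per-page dirty state reported by gmap_test_and_clear_dirty() into
 * the KVM dirty bitmap.
 */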
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
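
/*
 * KVM_S390_VM_MEM_CTRL attributes: CMMA can only be enabled as long as
 * no vcpu has been created yet; KVM_S390_VM_MEM_CLR_CMMA resets the page
 * states (PGSTEs) for the whole user address space.
 */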
static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_mem_control(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	return -ENXIO;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
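
/*
 * A VM is either a regular guest backed by a gmap of the creating process,
 * or (with KVM_VM_S390_UCONTROL, which requires CAP_SYS_ADMIN) a
 * user-controlled VM without a VM-wide gmap.
 */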
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		vcpu->arch.gmap->private = vcpu->kvm;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xD1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
				      ICTL_TPROT;
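
	/*
	 * With CMMA enabled for this guest, allocate the CMM block origin
	 * (cbrlo) page and set the corresponding ecb2 bits (see
	 * kvm_s390_vcpu_setup_cmma()).
	 */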
	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_cpu_has_interrupt(vcpu);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
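
/*
 * The ONE_REG interface exposes registers that are not covered by the
 * synced register set of kvm_run: TOD programmable register, epoch
 * difference, CPU timer, clock comparator, the pfault parameters, pp
 * and gbea.
 */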
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}
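
/*
 * MP state handling: only STOPPED and OPERATING are implemented; setting
 * an MP state also switches the VM to user-controlled vcpu state handling
 * (arch.user_cpu_state_ctrl).
 */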
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}

bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
	if (!MACHINE_IS_LPAR)
		return false;
	/* only enable for z10 and later */
	if (!MACHINE_HAS_EDAT1)
		return false;
	if (!kvm->arch.use_cmma)
		return false;
	return true;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	return 0;
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	struct mm_struct *mm = current->mm;
	hva_t hva;
	long rc;

	hva = gmap_fault(gpa, vcpu->arch.gmap);
	if (IS_ERR_VALUE(hva))
		return (long)hva;
	down_read(&mm->mmap_sem);
	rc = get_user_pages(current, mm, hva, 1, writable, 0, NULL, NULL);
	up_read(&mm->mmap_sem);

	return rc < 0 ? rc : 0;
}
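
/*
 * Pseudo-page-fault notification: inject either a PFAULT_INIT interrupt
 * on the vcpu (start of an async fault) or a PFAULT_DONE interrupt on the
 * VM (completion), carrying the pfault token in parm64.
 */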
rc : 0; 109824eb3a82SDominik Dingel } 109924eb3a82SDominik Dingel 11003c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token, 11013c038e6bSDominik Dingel unsigned long token) 11023c038e6bSDominik Dingel { 11033c038e6bSDominik Dingel struct kvm_s390_interrupt inti; 11043c038e6bSDominik Dingel inti.parm64 = token; 11053c038e6bSDominik Dingel 11063c038e6bSDominik Dingel if (start_token) { 11073c038e6bSDominik Dingel inti.type = KVM_S390_INT_PFAULT_INIT; 11083c038e6bSDominik Dingel WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti)); 11093c038e6bSDominik Dingel } else { 11103c038e6bSDominik Dingel inti.type = KVM_S390_INT_PFAULT_DONE; 11113c038e6bSDominik Dingel WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); 11123c038e6bSDominik Dingel } 11133c038e6bSDominik Dingel } 11143c038e6bSDominik Dingel 11153c038e6bSDominik Dingel void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, 11163c038e6bSDominik Dingel struct kvm_async_pf *work) 11173c038e6bSDominik Dingel { 11183c038e6bSDominik Dingel trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token); 11193c038e6bSDominik Dingel __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token); 11203c038e6bSDominik Dingel } 11213c038e6bSDominik Dingel 11223c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, 11233c038e6bSDominik Dingel struct kvm_async_pf *work) 11243c038e6bSDominik Dingel { 11253c038e6bSDominik Dingel trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token); 11263c038e6bSDominik Dingel __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token); 11273c038e6bSDominik Dingel } 11283c038e6bSDominik Dingel 11293c038e6bSDominik Dingel void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, 11303c038e6bSDominik Dingel struct kvm_async_pf *work) 11313c038e6bSDominik Dingel { 11323c038e6bSDominik Dingel /* s390 will always inject the page directly */ 11333c038e6bSDominik Dingel } 11343c038e6bSDominik Dingel 11353c038e6bSDominik Dingel bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) 11363c038e6bSDominik Dingel { 11373c038e6bSDominik Dingel /* 11383c038e6bSDominik Dingel * s390 will always inject the page directly, 11393c038e6bSDominik Dingel * but we still want check_async_completion to cleanup 11403c038e6bSDominik Dingel */ 11413c038e6bSDominik Dingel return true; 11423c038e6bSDominik Dingel } 11433c038e6bSDominik Dingel 11443c038e6bSDominik Dingel static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu) 11453c038e6bSDominik Dingel { 11463c038e6bSDominik Dingel hva_t hva; 11473c038e6bSDominik Dingel struct kvm_arch_async_pf arch; 11483c038e6bSDominik Dingel int rc; 11493c038e6bSDominik Dingel 11503c038e6bSDominik Dingel if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) 11513c038e6bSDominik Dingel return 0; 11523c038e6bSDominik Dingel if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) != 11533c038e6bSDominik Dingel vcpu->arch.pfault_compare) 11543c038e6bSDominik Dingel return 0; 11553c038e6bSDominik Dingel if (psw_extint_disabled(vcpu)) 11563c038e6bSDominik Dingel return 0; 11573c038e6bSDominik Dingel if (kvm_cpu_has_interrupt(vcpu)) 11583c038e6bSDominik Dingel return 0; 11593c038e6bSDominik Dingel if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul)) 11603c038e6bSDominik Dingel return 0; 11613c038e6bSDominik Dingel if (!vcpu->arch.gmap->pfault_enabled) 11623c038e6bSDominik Dingel return 0; 11633c038e6bSDominik Dingel 116481480cc1SHeiko Carstens hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr)); 
116581480cc1SHeiko Carstens hva += current->thread.gmap_addr & ~PAGE_MASK; 116681480cc1SHeiko Carstens if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8)) 11673c038e6bSDominik Dingel return 0; 11683c038e6bSDominik Dingel 11693c038e6bSDominik Dingel rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch); 11703c038e6bSDominik Dingel return rc; 11713c038e6bSDominik Dingel } 11723c038e6bSDominik Dingel 11733fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu) 1174b0c632dbSHeiko Carstens { 11753fb4c40fSThomas Huth int rc, cpuflags; 1176e168bf8dSCarsten Otte 11773c038e6bSDominik Dingel /* 11783c038e6bSDominik Dingel * On s390 notifications for arriving pages will be delivered directly 11793c038e6bSDominik Dingel * to the guest but the housekeeping for completed pfaults is 11803c038e6bSDominik Dingel * handled outside the worker. 11813c038e6bSDominik Dingel */ 11823c038e6bSDominik Dingel kvm_check_async_pf_completion(vcpu); 11833c038e6bSDominik Dingel 11845a32c1afSChristian Borntraeger memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16); 1185b0c632dbSHeiko Carstens 1186b0c632dbSHeiko Carstens if (need_resched()) 1187b0c632dbSHeiko Carstens schedule(); 1188b0c632dbSHeiko Carstens 1189d3a73acbSMartin Schwidefsky if (test_cpu_flag(CIF_MCCK_PENDING)) 119071cde587SChristian Borntraeger s390_handle_mcck(); 119171cde587SChristian Borntraeger 1192d6b6d166SCarsten Otte if (!kvm_is_ucontrol(vcpu->kvm)) 11930ff31867SCarsten Otte kvm_s390_deliver_pending_interrupts(vcpu); 11940ff31867SCarsten Otte 11952c70fe44SChristian Borntraeger rc = kvm_s390_handle_requests(vcpu); 11962c70fe44SChristian Borntraeger if (rc) 11972c70fe44SChristian Borntraeger return rc; 11982c70fe44SChristian Borntraeger 119927291e21SDavid Hildenbrand if (guestdbg_enabled(vcpu)) { 120027291e21SDavid Hildenbrand kvm_s390_backup_guest_per_regs(vcpu); 120127291e21SDavid Hildenbrand kvm_s390_patch_guest_per_regs(vcpu); 120227291e21SDavid Hildenbrand } 120327291e21SDavid Hildenbrand 1204b0c632dbSHeiko Carstens vcpu->arch.sie_block->icptcode = 0; 12053fb4c40fSThomas Huth cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); 12063fb4c40fSThomas Huth VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags); 12073fb4c40fSThomas Huth trace_kvm_s390_sie_enter(vcpu, cpuflags); 12082b29a9fdSDominik Dingel 12093fb4c40fSThomas Huth return 0; 12103fb4c40fSThomas Huth } 12113fb4c40fSThomas Huth 12123fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) 12133fb4c40fSThomas Huth { 121424eb3a82SDominik Dingel int rc = -1; 12152b29a9fdSDominik Dingel 12162b29a9fdSDominik Dingel VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", 12172b29a9fdSDominik Dingel vcpu->arch.sie_block->icptcode); 12182b29a9fdSDominik Dingel trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); 12192b29a9fdSDominik Dingel 122027291e21SDavid Hildenbrand if (guestdbg_enabled(vcpu)) 122127291e21SDavid Hildenbrand kvm_s390_restore_guest_per_regs(vcpu); 122227291e21SDavid Hildenbrand 12233fb4c40fSThomas Huth if (exit_reason >= 0) { 12247c470539SMartin Schwidefsky rc = 0; 1225210b1607SThomas Huth } else if (kvm_is_ucontrol(vcpu->kvm)) { 1226210b1607SThomas Huth vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL; 1227210b1607SThomas Huth vcpu->run->s390_ucontrol.trans_exc_code = 1228210b1607SThomas Huth current->thread.gmap_addr; 1229210b1607SThomas Huth vcpu->run->s390_ucontrol.pgm_code = 0x10; 1230210b1607SThomas Huth rc = -EREMOTE; 123124eb3a82SDominik Dingel 123224eb3a82SDominik Dingel }
else if (current->thread.gmap_pfault) { 12333c038e6bSDominik Dingel trace_kvm_s390_major_guest_pfault(vcpu); 123424eb3a82SDominik Dingel current->thread.gmap_pfault = 0; 1235fa576c58SThomas Huth if (kvm_arch_setup_async_pf(vcpu)) { 123624eb3a82SDominik Dingel rc = 0; 1237fa576c58SThomas Huth } else { 1238fa576c58SThomas Huth gpa_t gpa = current->thread.gmap_addr; 1239fa576c58SThomas Huth rc = kvm_arch_fault_in_page(vcpu, gpa, 1); 1240fa576c58SThomas Huth } 124124eb3a82SDominik Dingel } 124224eb3a82SDominik Dingel 124324eb3a82SDominik Dingel if (rc == -1) { 1244699bde3bSChristian Borntraeger VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); 1245699bde3bSChristian Borntraeger trace_kvm_s390_sie_fault(vcpu); 1246699bde3bSChristian Borntraeger rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 12471f0d0f09SCarsten Otte } 1248b0c632dbSHeiko Carstens 12495a32c1afSChristian Borntraeger memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); 12503fb4c40fSThomas Huth 1251a76ccff6SThomas Huth if (rc == 0) { 1252a76ccff6SThomas Huth if (kvm_is_ucontrol(vcpu->kvm)) 12532955c83fSChristian Borntraeger /* Don't exit for host interrupts. */ 12542955c83fSChristian Borntraeger rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0; 1255a76ccff6SThomas Huth else 1256a76ccff6SThomas Huth rc = kvm_handle_sie_intercept(vcpu); 1257a76ccff6SThomas Huth } 1258a76ccff6SThomas Huth 12593fb4c40fSThomas Huth return rc; 12603fb4c40fSThomas Huth } 12613fb4c40fSThomas Huth 12623fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu) 12633fb4c40fSThomas Huth { 12643fb4c40fSThomas Huth int rc, exit_reason; 12653fb4c40fSThomas Huth 1266800c1065SThomas Huth /* 1267800c1065SThomas Huth * We try to hold kvm->srcu during most of vcpu_run (except when run- 1268800c1065SThomas Huth * ning the guest), so that memslots (and other stuff) are protected 1269800c1065SThomas Huth */ 1270800c1065SThomas Huth vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 1271800c1065SThomas Huth 1272a76ccff6SThomas Huth do { 12733fb4c40fSThomas Huth rc = vcpu_pre_run(vcpu); 12743fb4c40fSThomas Huth if (rc) 1275a76ccff6SThomas Huth break; 12763fb4c40fSThomas Huth 1277800c1065SThomas Huth srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 12783fb4c40fSThomas Huth /* 1279a76ccff6SThomas Huth * As PF_VCPU will be used in fault handler, between 1280a76ccff6SThomas Huth * guest_enter and guest_exit there should be no uaccess.
12813fb4c40fSThomas Huth */ 12823fb4c40fSThomas Huth preempt_disable(); 12833fb4c40fSThomas Huth kvm_guest_enter(); 12843fb4c40fSThomas Huth preempt_enable(); 1285a76ccff6SThomas Huth exit_reason = sie64a(vcpu->arch.sie_block, 1286a76ccff6SThomas Huth vcpu->run->s.regs.gprs); 12873fb4c40fSThomas Huth kvm_guest_exit(); 1288800c1065SThomas Huth vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 12893fb4c40fSThomas Huth 12903fb4c40fSThomas Huth rc = vcpu_post_run(vcpu, exit_reason); 129127291e21SDavid Hildenbrand } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc); 12923fb4c40fSThomas Huth 1293800c1065SThomas Huth srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 1294e168bf8dSCarsten Otte return rc; 1295b0c632dbSHeiko Carstens } 1296b0c632dbSHeiko Carstens 1297b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1298b0c632dbSHeiko Carstens { 12998f2abe6aSChristian Borntraeger int rc; 1300b0c632dbSHeiko Carstens sigset_t sigsaved; 1301b0c632dbSHeiko Carstens 130227291e21SDavid Hildenbrand if (guestdbg_exit_pending(vcpu)) { 130327291e21SDavid Hildenbrand kvm_s390_prepare_debug_exit(vcpu); 130427291e21SDavid Hildenbrand return 0; 130527291e21SDavid Hildenbrand } 130627291e21SDavid Hildenbrand 1307b0c632dbSHeiko Carstens if (vcpu->sigset_active) 1308b0c632dbSHeiko Carstens sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); 1309b0c632dbSHeiko Carstens 1310*6352e4d2SDavid Hildenbrand if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { 13116852d7b6SDavid Hildenbrand kvm_s390_vcpu_start(vcpu); 1312*6352e4d2SDavid Hildenbrand } else if (is_vcpu_stopped(vcpu)) { 1313*6352e4d2SDavid Hildenbrand pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n", 1314*6352e4d2SDavid Hildenbrand vcpu->vcpu_id); 1315*6352e4d2SDavid Hildenbrand return -EINVAL; 1316*6352e4d2SDavid Hildenbrand } 1317b0c632dbSHeiko Carstens 13188f2abe6aSChristian Borntraeger switch (kvm_run->exit_reason) { 13198f2abe6aSChristian Borntraeger case KVM_EXIT_S390_SIEIC: 13208f2abe6aSChristian Borntraeger case KVM_EXIT_UNKNOWN: 13219ace903dSChristian Ehrhardt case KVM_EXIT_INTR: 13228f2abe6aSChristian Borntraeger case KVM_EXIT_S390_RESET: 1323e168bf8dSCarsten Otte case KVM_EXIT_S390_UCONTROL: 1324fa6b7fe9SCornelia Huck case KVM_EXIT_S390_TSCH: 132527291e21SDavid Hildenbrand case KVM_EXIT_DEBUG: 13268f2abe6aSChristian Borntraeger break; 13278f2abe6aSChristian Borntraeger default: 13288f2abe6aSChristian Borntraeger BUG(); 13298f2abe6aSChristian Borntraeger } 13308f2abe6aSChristian Borntraeger 1331d7b0b5ebSCarsten Otte vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; 1332d7b0b5ebSCarsten Otte vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; 133360b413c9SChristian Borntraeger if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) { 133460b413c9SChristian Borntraeger kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX; 133560b413c9SChristian Borntraeger kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix); 133660b413c9SChristian Borntraeger } 13379eed0735SChristian Borntraeger if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) { 13389eed0735SChristian Borntraeger kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS; 13399eed0735SChristian Borntraeger memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128); 13409eed0735SChristian Borntraeger kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix); 13419eed0735SChristian Borntraeger } 1342d7b0b5ebSCarsten Otte 1343dab4079dSHeiko Carstens might_fault(); 1344e168bf8dSCarsten Otte rc = __vcpu_run(vcpu); 13459ace903dSChristian Ehrhardt 1346b1d16c49SChristian 
Ehrhardt if (signal_pending(current) && !rc) { 1347b1d16c49SChristian Ehrhardt kvm_run->exit_reason = KVM_EXIT_INTR; 13488f2abe6aSChristian Borntraeger rc = -EINTR; 1349b1d16c49SChristian Ehrhardt } 13508f2abe6aSChristian Borntraeger 135127291e21SDavid Hildenbrand if (guestdbg_exit_pending(vcpu) && !rc) { 135227291e21SDavid Hildenbrand kvm_s390_prepare_debug_exit(vcpu); 135327291e21SDavid Hildenbrand rc = 0; 135427291e21SDavid Hildenbrand } 135527291e21SDavid Hildenbrand 1356b8e660b8SHeiko Carstens if (rc == -EOPNOTSUPP) { 13578f2abe6aSChristian Borntraeger /* intercept cannot be handled in-kernel, prepare kvm-run */ 13588f2abe6aSChristian Borntraeger kvm_run->exit_reason = KVM_EXIT_S390_SIEIC; 13598f2abe6aSChristian Borntraeger kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; 13608f2abe6aSChristian Borntraeger kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; 13618f2abe6aSChristian Borntraeger kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; 13628f2abe6aSChristian Borntraeger rc = 0; 13638f2abe6aSChristian Borntraeger } 13648f2abe6aSChristian Borntraeger 13658f2abe6aSChristian Borntraeger if (rc == -EREMOTE) { 13668f2abe6aSChristian Borntraeger /* intercept was handled, but userspace support is needed; 13678f2abe6aSChristian Borntraeger * kvm_run has been prepared by the handler */ 13688f2abe6aSChristian Borntraeger rc = 0; 13698f2abe6aSChristian Borntraeger } 13708f2abe6aSChristian Borntraeger 1371d7b0b5ebSCarsten Otte kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; 1372d7b0b5ebSCarsten Otte kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; 1373fda902cbSMichael Mueller kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu); 13749eed0735SChristian Borntraeger memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); 1375d7b0b5ebSCarsten Otte 1376b0c632dbSHeiko Carstens if (vcpu->sigset_active) 1377b0c632dbSHeiko Carstens sigprocmask(SIG_SETMASK, &sigsaved, NULL); 1378b0c632dbSHeiko Carstens 1379b0c632dbSHeiko Carstens vcpu->stat.exit_userspace++; 13807e8e6ab4SHeiko Carstens return rc; 1381b0c632dbSHeiko Carstens } 1382b0c632dbSHeiko Carstens 1383b0c632dbSHeiko Carstens /* 1384b0c632dbSHeiko Carstens * store status at address 1385b0c632dbSHeiko Carstens * we have two special cases: 1386b0c632dbSHeiko Carstens * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit 1387b0c632dbSHeiko Carstens * KVM_S390_STORE_STATUS_PREFIXED: -> prefix 1388b0c632dbSHeiko Carstens */ 1389d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa) 1390b0c632dbSHeiko Carstens { 1391092670cdSCarsten Otte unsigned char archmode = 1; 1392fda902cbSMichael Mueller unsigned int px; 1393178bd789SThomas Huth u64 clkcomp; 1394d0bce605SHeiko Carstens int rc; 1395b0c632dbSHeiko Carstens 1396d0bce605SHeiko Carstens if (gpa == KVM_S390_STORE_STATUS_NOADDR) { 1397d0bce605SHeiko Carstens if (write_guest_abs(vcpu, 163, &archmode, 1)) 1398b0c632dbSHeiko Carstens return -EFAULT; 1399d0bce605SHeiko Carstens gpa = SAVE_AREA_BASE; 1400d0bce605SHeiko Carstens } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) { 1401d0bce605SHeiko Carstens if (write_guest_real(vcpu, 163, &archmode, 1)) 1402b0c632dbSHeiko Carstens return -EFAULT; 1403d0bce605SHeiko Carstens gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE); 1404d0bce605SHeiko Carstens } 1405d0bce605SHeiko Carstens rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs), 1406d0bce605SHeiko Carstens vcpu->arch.guest_fpregs.fprs, 128); 1407d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa +
offsetof(struct save_area, gp_regs), 1408d0bce605SHeiko Carstens vcpu->run->s.regs.gprs, 128); 1409d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw), 1410d0bce605SHeiko Carstens &vcpu->arch.sie_block->gpsw, 16); 1411fda902cbSMichael Mueller px = kvm_s390_get_prefix(vcpu); 1412d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg), 1413fda902cbSMichael Mueller &px, 4); 1414d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, 1415d0bce605SHeiko Carstens gpa + offsetof(struct save_area, fp_ctrl_reg), 1416d0bce605SHeiko Carstens &vcpu->arch.guest_fpregs.fpc, 4); 1417d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg), 1418d0bce605SHeiko Carstens &vcpu->arch.sie_block->todpr, 4); 1419d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer), 1420d0bce605SHeiko Carstens &vcpu->arch.sie_block->cputm, 8); 1421178bd789SThomas Huth clkcomp = vcpu->arch.sie_block->ckc >> 8; 1422d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp), 1423d0bce605SHeiko Carstens &clkcomp, 8); 1424d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs), 1425d0bce605SHeiko Carstens &vcpu->run->s.regs.acrs, 64); 1426d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs), 1427d0bce605SHeiko Carstens &vcpu->arch.sie_block->gcr, 128); 1428d0bce605SHeiko Carstens return rc ? -EFAULT : 0; 1429b0c632dbSHeiko Carstens } 1430b0c632dbSHeiko Carstens 1431e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) 1432e879892cSThomas Huth { 1433e879892cSThomas Huth /* 1434e879892cSThomas Huth * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy 1435e879892cSThomas Huth * copying in vcpu load/put. 
Let's update our copies before we save 1436e879892cSThomas Huth * it into the save area 1437e879892cSThomas Huth */ 1438e879892cSThomas Huth save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); 1439e879892cSThomas Huth save_fp_regs(vcpu->arch.guest_fpregs.fprs); 1440e879892cSThomas Huth save_access_regs(vcpu->run->s.regs.acrs); 1441e879892cSThomas Huth 1442e879892cSThomas Huth return kvm_s390_store_status_unloaded(vcpu, addr); 1443e879892cSThomas Huth } 1444e879892cSThomas Huth 14458ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) 14468ad35755SDavid Hildenbrand { 14478ad35755SDavid Hildenbrand kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu); 14488ad35755SDavid Hildenbrand kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu); 14498ad35755SDavid Hildenbrand exit_sie_sync(vcpu); 14508ad35755SDavid Hildenbrand } 14518ad35755SDavid Hildenbrand 14528ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm) 14538ad35755SDavid Hildenbrand { 14548ad35755SDavid Hildenbrand unsigned int i; 14558ad35755SDavid Hildenbrand struct kvm_vcpu *vcpu; 14568ad35755SDavid Hildenbrand 14578ad35755SDavid Hildenbrand kvm_for_each_vcpu(i, vcpu, kvm) { 14588ad35755SDavid Hildenbrand __disable_ibs_on_vcpu(vcpu); 14598ad35755SDavid Hildenbrand } 14608ad35755SDavid Hildenbrand } 14618ad35755SDavid Hildenbrand 14628ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu) 14638ad35755SDavid Hildenbrand { 14648ad35755SDavid Hildenbrand kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu); 14658ad35755SDavid Hildenbrand kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu); 14668ad35755SDavid Hildenbrand exit_sie_sync(vcpu); 14678ad35755SDavid Hildenbrand } 14688ad35755SDavid Hildenbrand 14696852d7b6SDavid Hildenbrand void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) 14706852d7b6SDavid Hildenbrand { 14718ad35755SDavid Hildenbrand int i, online_vcpus, started_vcpus = 0; 14728ad35755SDavid Hildenbrand 14738ad35755SDavid Hildenbrand if (!is_vcpu_stopped(vcpu)) 14748ad35755SDavid Hildenbrand return; 14758ad35755SDavid Hildenbrand 14766852d7b6SDavid Hildenbrand trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1); 14778ad35755SDavid Hildenbrand /* Only one cpu at a time may enter/leave the STOPPED state. */ 14788ad35755SDavid Hildenbrand spin_lock_bh(&vcpu->kvm->arch.start_stop_lock); 14798ad35755SDavid Hildenbrand online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); 14808ad35755SDavid Hildenbrand 14818ad35755SDavid Hildenbrand for (i = 0; i < online_vcpus; i++) { 14828ad35755SDavid Hildenbrand if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) 14838ad35755SDavid Hildenbrand started_vcpus++; 14848ad35755SDavid Hildenbrand } 14858ad35755SDavid Hildenbrand 14868ad35755SDavid Hildenbrand if (started_vcpus == 0) { 14878ad35755SDavid Hildenbrand /* we're the only active VCPU -> speed it up */ 14888ad35755SDavid Hildenbrand __enable_ibs_on_vcpu(vcpu); 14898ad35755SDavid Hildenbrand } else if (started_vcpus == 1) { 14908ad35755SDavid Hildenbrand /* 14918ad35755SDavid Hildenbrand * As we are starting a second VCPU, we have to disable 14928ad35755SDavid Hildenbrand * the IBS facility on all VCPUs to remove potentially 14938ad35755SDavid Hildenbrand * outstanding ENABLE requests.
14948ad35755SDavid Hildenbrand */ 14958ad35755SDavid Hildenbrand __disable_ibs_on_all_vcpus(vcpu->kvm); 14968ad35755SDavid Hildenbrand } 14978ad35755SDavid Hildenbrand 14986852d7b6SDavid Hildenbrand atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); 14998ad35755SDavid Hildenbrand /* 15008ad35755SDavid Hildenbrand * Another VCPU might have used IBS while we were offline. 15018ad35755SDavid Hildenbrand * Let's play safe and flush the VCPU at startup. 15028ad35755SDavid Hildenbrand */ 15038ad35755SDavid Hildenbrand vcpu->arch.sie_block->ihcpu = 0xffff; 15048ad35755SDavid Hildenbrand spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock); 15058ad35755SDavid Hildenbrand return; 15066852d7b6SDavid Hildenbrand } 15076852d7b6SDavid Hildenbrand 15086852d7b6SDavid Hildenbrand void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) 15096852d7b6SDavid Hildenbrand { 15108ad35755SDavid Hildenbrand int i, online_vcpus, started_vcpus = 0; 15118ad35755SDavid Hildenbrand struct kvm_vcpu *started_vcpu = NULL; 15128ad35755SDavid Hildenbrand 15138ad35755SDavid Hildenbrand if (is_vcpu_stopped(vcpu)) 15148ad35755SDavid Hildenbrand return; 15158ad35755SDavid Hildenbrand 15166852d7b6SDavid Hildenbrand trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0); 15178ad35755SDavid Hildenbrand /* Only one cpu at a time may enter/leave the STOPPED state. */ 15188ad35755SDavid Hildenbrand spin_lock_bh(&vcpu->kvm->arch.start_stop_lock); 15198ad35755SDavid Hildenbrand online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); 15208ad35755SDavid Hildenbrand 152132f5ff63SDavid Hildenbrand /* Need to lock access to action_bits to avoid a SIGP race condition */ 152232f5ff63SDavid Hildenbrand spin_lock_bh(&vcpu->arch.local_int.lock); 15236852d7b6SDavid Hildenbrand atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); 152432f5ff63SDavid Hildenbrand 152532f5ff63SDavid Hildenbrand /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */ 152632f5ff63SDavid Hildenbrand vcpu->arch.local_int.action_bits &= 152732f5ff63SDavid Hildenbrand ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP); 152832f5ff63SDavid Hildenbrand spin_unlock_bh(&vcpu->arch.local_int.lock); 152932f5ff63SDavid Hildenbrand 15308ad35755SDavid Hildenbrand __disable_ibs_on_vcpu(vcpu); 15318ad35755SDavid Hildenbrand 15328ad35755SDavid Hildenbrand for (i = 0; i < online_vcpus; i++) { 15338ad35755SDavid Hildenbrand if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) { 15348ad35755SDavid Hildenbrand started_vcpus++; 15358ad35755SDavid Hildenbrand started_vcpu = vcpu->kvm->vcpus[i]; 15368ad35755SDavid Hildenbrand } 15378ad35755SDavid Hildenbrand } 15388ad35755SDavid Hildenbrand 15398ad35755SDavid Hildenbrand if (started_vcpus == 1) { 15408ad35755SDavid Hildenbrand /* 15418ad35755SDavid Hildenbrand * As we only have one VCPU left, we want to enable the 15428ad35755SDavid Hildenbrand * IBS facility for that VCPU to speed it up. 
15438ad35755SDavid Hildenbrand */ 15448ad35755SDavid Hildenbrand __enable_ibs_on_vcpu(started_vcpu); 15458ad35755SDavid Hildenbrand } 15468ad35755SDavid Hildenbrand 15478ad35755SDavid Hildenbrand spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock); 15488ad35755SDavid Hildenbrand return; 15496852d7b6SDavid Hildenbrand } 15506852d7b6SDavid Hildenbrand 1551d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, 1552d6712df9SCornelia Huck struct kvm_enable_cap *cap) 1553d6712df9SCornelia Huck { 1554d6712df9SCornelia Huck int r; 1555d6712df9SCornelia Huck 1556d6712df9SCornelia Huck if (cap->flags) 1557d6712df9SCornelia Huck return -EINVAL; 1558d6712df9SCornelia Huck 1559d6712df9SCornelia Huck switch (cap->cap) { 1560fa6b7fe9SCornelia Huck case KVM_CAP_S390_CSS_SUPPORT: 1561fa6b7fe9SCornelia Huck if (!vcpu->kvm->arch.css_support) { 1562fa6b7fe9SCornelia Huck vcpu->kvm->arch.css_support = 1; 1563fa6b7fe9SCornelia Huck trace_kvm_s390_enable_css(vcpu->kvm); 1564fa6b7fe9SCornelia Huck } 1565fa6b7fe9SCornelia Huck r = 0; 1566fa6b7fe9SCornelia Huck break; 1567d6712df9SCornelia Huck default: 1568d6712df9SCornelia Huck r = -EINVAL; 1569d6712df9SCornelia Huck break; 1570d6712df9SCornelia Huck } 1571d6712df9SCornelia Huck return r; 1572d6712df9SCornelia Huck } 1573d6712df9SCornelia Huck 1574b0c632dbSHeiko Carstens long kvm_arch_vcpu_ioctl(struct file *filp, 1575b0c632dbSHeiko Carstens unsigned int ioctl, unsigned long arg) 1576b0c632dbSHeiko Carstens { 1577b0c632dbSHeiko Carstens struct kvm_vcpu *vcpu = filp->private_data; 1578b0c632dbSHeiko Carstens void __user *argp = (void __user *)arg; 1579800c1065SThomas Huth int idx; 1580bc923cc9SAvi Kivity long r; 1581b0c632dbSHeiko Carstens 158293736624SAvi Kivity switch (ioctl) { 158393736624SAvi Kivity case KVM_S390_INTERRUPT: { 1584ba5c1e9bSCarsten Otte struct kvm_s390_interrupt s390int; 1585ba5c1e9bSCarsten Otte 158693736624SAvi Kivity r = -EFAULT; 1587ba5c1e9bSCarsten Otte if (copy_from_user(&s390int, argp, sizeof(s390int))) 158893736624SAvi Kivity break; 158993736624SAvi Kivity r = kvm_s390_inject_vcpu(vcpu, &s390int); 159093736624SAvi Kivity break; 1591ba5c1e9bSCarsten Otte } 1592b0c632dbSHeiko Carstens case KVM_S390_STORE_STATUS: 1593800c1065SThomas Huth idx = srcu_read_lock(&vcpu->kvm->srcu); 1594bc923cc9SAvi Kivity r = kvm_s390_vcpu_store_status(vcpu, arg); 1595800c1065SThomas Huth srcu_read_unlock(&vcpu->kvm->srcu, idx); 1596bc923cc9SAvi Kivity break; 1597b0c632dbSHeiko Carstens case KVM_S390_SET_INITIAL_PSW: { 1598b0c632dbSHeiko Carstens psw_t psw; 1599b0c632dbSHeiko Carstens 1600bc923cc9SAvi Kivity r = -EFAULT; 1601b0c632dbSHeiko Carstens if (copy_from_user(&psw, argp, sizeof(psw))) 1602bc923cc9SAvi Kivity break; 1603bc923cc9SAvi Kivity r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw); 1604bc923cc9SAvi Kivity break; 1605b0c632dbSHeiko Carstens } 1606b0c632dbSHeiko Carstens case KVM_S390_INITIAL_RESET: 1607bc923cc9SAvi Kivity r = kvm_arch_vcpu_ioctl_initial_reset(vcpu); 1608bc923cc9SAvi Kivity break; 160914eebd91SCarsten Otte case KVM_SET_ONE_REG: 161014eebd91SCarsten Otte case KVM_GET_ONE_REG: { 161114eebd91SCarsten Otte struct kvm_one_reg reg; 161214eebd91SCarsten Otte r = -EFAULT; 161314eebd91SCarsten Otte if (copy_from_user(&reg, argp, sizeof(reg))) 161414eebd91SCarsten Otte break; 161514eebd91SCarsten Otte if (ioctl == KVM_SET_ONE_REG) 161614eebd91SCarsten Otte r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg); 161714eebd91SCarsten Otte else 161814eebd91SCarsten Otte r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
161914eebd91SCarsten Otte break; 162014eebd91SCarsten Otte } 162127e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL 162227e0393fSCarsten Otte case KVM_S390_UCAS_MAP: { 162327e0393fSCarsten Otte struct kvm_s390_ucas_mapping ucasmap; 162427e0393fSCarsten Otte 162527e0393fSCarsten Otte if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) { 162627e0393fSCarsten Otte r = -EFAULT; 162727e0393fSCarsten Otte break; 162827e0393fSCarsten Otte } 162927e0393fSCarsten Otte 163027e0393fSCarsten Otte if (!kvm_is_ucontrol(vcpu->kvm)) { 163127e0393fSCarsten Otte r = -EINVAL; 163227e0393fSCarsten Otte break; 163327e0393fSCarsten Otte } 163427e0393fSCarsten Otte 163527e0393fSCarsten Otte r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr, 163627e0393fSCarsten Otte ucasmap.vcpu_addr, ucasmap.length); 163727e0393fSCarsten Otte break; 163827e0393fSCarsten Otte } 163927e0393fSCarsten Otte case KVM_S390_UCAS_UNMAP: { 164027e0393fSCarsten Otte struct kvm_s390_ucas_mapping ucasmap; 164127e0393fSCarsten Otte 164227e0393fSCarsten Otte if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) { 164327e0393fSCarsten Otte r = -EFAULT; 164427e0393fSCarsten Otte break; 164527e0393fSCarsten Otte } 164627e0393fSCarsten Otte 164727e0393fSCarsten Otte if (!kvm_is_ucontrol(vcpu->kvm)) { 164827e0393fSCarsten Otte r = -EINVAL; 164927e0393fSCarsten Otte break; 165027e0393fSCarsten Otte } 165127e0393fSCarsten Otte 165227e0393fSCarsten Otte r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr, 165327e0393fSCarsten Otte ucasmap.length); 165427e0393fSCarsten Otte break; 165527e0393fSCarsten Otte } 165627e0393fSCarsten Otte #endif 1657ccc7910fSCarsten Otte case KVM_S390_VCPU_FAULT: { 1658ccc7910fSCarsten Otte r = gmap_fault(arg, vcpu->arch.gmap); 1659ccc7910fSCarsten Otte if (!IS_ERR_VALUE(r)) 1660ccc7910fSCarsten Otte r = 0; 1661ccc7910fSCarsten Otte break; 1662ccc7910fSCarsten Otte } 1663d6712df9SCornelia Huck case KVM_ENABLE_CAP: 1664d6712df9SCornelia Huck { 1665d6712df9SCornelia Huck struct kvm_enable_cap cap; 1666d6712df9SCornelia Huck r = -EFAULT; 1667d6712df9SCornelia Huck if (copy_from_user(&cap, argp, sizeof(cap))) 1668d6712df9SCornelia Huck break; 1669d6712df9SCornelia Huck r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); 1670d6712df9SCornelia Huck break; 1671d6712df9SCornelia Huck } 1672b0c632dbSHeiko Carstens default: 16733e6afcf1SCarsten Otte r = -ENOTTY; 1674b0c632dbSHeiko Carstens } 1675bc923cc9SAvi Kivity return r; 1676b0c632dbSHeiko Carstens } 1677b0c632dbSHeiko Carstens 16785b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) 16795b1c1493SCarsten Otte { 16805b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL 16815b1c1493SCarsten Otte if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET) 16825b1c1493SCarsten Otte && (kvm_is_ucontrol(vcpu->kvm))) { 16835b1c1493SCarsten Otte vmf->page = virt_to_page(vcpu->arch.sie_block); 16845b1c1493SCarsten Otte get_page(vmf->page); 16855b1c1493SCarsten Otte return 0; 16865b1c1493SCarsten Otte } 16875b1c1493SCarsten Otte #endif 16885b1c1493SCarsten Otte return VM_FAULT_SIGBUS; 16895b1c1493SCarsten Otte } 16905b1c1493SCarsten Otte 16915587027cSAneesh Kumar K.V void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, 1692db3fe4ebSTakuya Yoshikawa struct kvm_memory_slot *dont) 1693db3fe4ebSTakuya Yoshikawa { 1694db3fe4ebSTakuya Yoshikawa } 1695db3fe4ebSTakuya Yoshikawa 16965587027cSAneesh Kumar K.V int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, 16975587027cSAneesh Kumar K.V unsigned long npages) 
1698db3fe4ebSTakuya Yoshikawa { 1699db3fe4ebSTakuya Yoshikawa return 0; 1700db3fe4ebSTakuya Yoshikawa } 1701db3fe4ebSTakuya Yoshikawa 1702e59dbe09STakuya Yoshikawa void kvm_arch_memslots_updated(struct kvm *kvm) 1703e59dbe09STakuya Yoshikawa { 1704e59dbe09STakuya Yoshikawa } 1705e59dbe09STakuya Yoshikawa 1706b0c632dbSHeiko Carstens /* Section: memory related */ 1707f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm, 1708f7784b8eSMarcelo Tosatti struct kvm_memory_slot *memslot, 17097b6195a9STakuya Yoshikawa struct kvm_userspace_memory_region *mem, 17107b6195a9STakuya Yoshikawa enum kvm_mr_change change) 1711b0c632dbSHeiko Carstens { 1712dd2887e7SNick Wang /* A few sanity checks. We can have memory slots which have to be 1713dd2887e7SNick Wang located/ended at a segment boundary (1MB). The memory in userland is 1714dd2887e7SNick Wang ok to be fragmented into various different vmas. It is okay to mmap() 1715dd2887e7SNick Wang and munmap() stuff in this slot after doing this call at any time */ 1716b0c632dbSHeiko Carstens 1717598841caSCarsten Otte if (mem->userspace_addr & 0xffffful) 1718b0c632dbSHeiko Carstens return -EINVAL; 1719b0c632dbSHeiko Carstens 1720598841caSCarsten Otte if (mem->memory_size & 0xffffful) 1721b0c632dbSHeiko Carstens return -EINVAL; 1722b0c632dbSHeiko Carstens 1723f7784b8eSMarcelo Tosatti return 0; 1724f7784b8eSMarcelo Tosatti } 1725f7784b8eSMarcelo Tosatti 1726f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm, 1727f7784b8eSMarcelo Tosatti struct kvm_userspace_memory_region *mem, 17288482644aSTakuya Yoshikawa const struct kvm_memory_slot *old, 17298482644aSTakuya Yoshikawa enum kvm_mr_change change) 1730f7784b8eSMarcelo Tosatti { 1731f7850c92SCarsten Otte int rc; 1732f7784b8eSMarcelo Tosatti 17332cef4debSChristian Borntraeger /* If the basics of the memslot do not change, we do not want 17342cef4debSChristian Borntraeger * to update the gmap. Every update causes several unnecessary 17352cef4debSChristian Borntraeger * segment translation exceptions. This is usually handled just 17362cef4debSChristian Borntraeger * fine by the normal fault handler + gmap, but it will also 17372cef4debSChristian Borntraeger * cause faults on the prefix page of running guest CPUs. 
17382cef4debSChristian Borntraeger */ 17392cef4debSChristian Borntraeger if (old->userspace_addr == mem->userspace_addr && 17402cef4debSChristian Borntraeger old->base_gfn * PAGE_SIZE == mem->guest_phys_addr && 17412cef4debSChristian Borntraeger old->npages * PAGE_SIZE == mem->memory_size) 17422cef4debSChristian Borntraeger return; 1743598841caSCarsten Otte 1744598841caSCarsten Otte rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, 1745598841caSCarsten Otte mem->guest_phys_addr, mem->memory_size); 1746598841caSCarsten Otte if (rc) 1747f7850c92SCarsten Otte printk(KERN_WARNING "kvm-s390: failed to commit memory region\n"); 1748598841caSCarsten Otte return; 1749b0c632dbSHeiko Carstens } 1750b0c632dbSHeiko Carstens 17512df72e9bSMarcelo Tosatti void kvm_arch_flush_shadow_all(struct kvm *kvm) 17522df72e9bSMarcelo Tosatti { 17532df72e9bSMarcelo Tosatti } 17542df72e9bSMarcelo Tosatti 17552df72e9bSMarcelo Tosatti void kvm_arch_flush_shadow_memslot(struct kvm *kvm, 17562df72e9bSMarcelo Tosatti struct kvm_memory_slot *slot) 175734d4cb8fSMarcelo Tosatti { 175834d4cb8fSMarcelo Tosatti } 175934d4cb8fSMarcelo Tosatti 1760b0c632dbSHeiko Carstens static int __init kvm_s390_init(void) 1761b0c632dbSHeiko Carstens { 1762ef50f7acSChristian Borntraeger int ret; 17630ee75beaSAvi Kivity ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); 1764ef50f7acSChristian Borntraeger if (ret) 1765ef50f7acSChristian Borntraeger return ret; 1766ef50f7acSChristian Borntraeger 1767ef50f7acSChristian Borntraeger /* 1768ef50f7acSChristian Borntraeger * guests can ask for up to 255+1 double words, we need a full page 176925985edcSLucas De Marchi * to hold the maximum amount of facilities. On the other hand, we 1770ef50f7acSChristian Borntraeger * only set facilities that are known to work in KVM. 1771ef50f7acSChristian Borntraeger */ 177278c4b59fSMichael Mueller vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA); 177378c4b59fSMichael Mueller if (!vfacilities) { 1774ef50f7acSChristian Borntraeger kvm_exit(); 1775ef50f7acSChristian Borntraeger return -ENOMEM; 1776ef50f7acSChristian Borntraeger } 177778c4b59fSMichael Mueller memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16); 1778d208c79dSThomas Huth vfacilities[0] &= 0xff82fff3f4fc2000UL; 17797feb6bb8SMichael Mueller vfacilities[1] &= 0x005c000000000000UL; 1780ef50f7acSChristian Borntraeger return 0; 1781b0c632dbSHeiko Carstens } 1782b0c632dbSHeiko Carstens 1783b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void) 1784b0c632dbSHeiko Carstens { 178578c4b59fSMichael Mueller free_page((unsigned long) vfacilities); 1786b0c632dbSHeiko Carstens kvm_exit(); 1787b0c632dbSHeiko Carstens } 1788b0c632dbSHeiko Carstens 1789b0c632dbSHeiko Carstens module_init(kvm_s390_init); 1790b0c632dbSHeiko Carstens module_exit(kvm_s390_exit); 1791566af940SCornelia Huck 1792566af940SCornelia Huck /* 1793566af940SCornelia Huck * Enable autoloading of the kvm module. 1794566af940SCornelia Huck * Note that we add the module alias here instead of virt/kvm/kvm_main.c 1795566af940SCornelia Huck * since x86 takes a different approach. 1796566af940SCornelia Huck */ 1797566af940SCornelia Huck #include <linux/miscdevice.h> 1798566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR); 1799566af940SCornelia Huck MODULE_ALIAS("devname:kvm"); 1800
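/*
 * A minimal userspace sketch (not part of this module) of how a VMM could
 * drive the KVM_GET_MP_STATE/KVM_SET_MP_STATE interface implemented above:
 * the first KVM_SET_MP_STATE call flips kvm->arch.user_cpu_state_ctrl, after
 * which user space is responsible for stopping and resuming VCPUs. The
 * vcpu_fd parameter is assumed to be a file descriptor obtained earlier via
 * the KVM_CREATE_VCPU ioctl.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	static int set_vcpu_mp_state(int vcpu_fd, __u32 state)
 *	{
 *		struct kvm_mp_state mp_state = { .mp_state = state };
 *
 *		return ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp_state);
 *	}
 *
 * Typical usage: set_vcpu_mp_state(vcpu_fd, KVM_MP_STATE_STOPPED) to stop a
 * VCPU and set_vcpu_mp_state(vcpu_fd, KVM_MP_STATE_OPERATING) to resume it.
 * KVM_MP_STATE_LOAD and KVM_MP_STATE_CHECK_STOP are rejected with -ENXIO by
 * kvm_arch_vcpu_ioctl_set_mpstate() above.
 */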