/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_VM_ATTRIBUTES:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
			struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_mem_control(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	return -ENXIO;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);

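	/* the sprintf view makes VM_EVENT/VCPU_EVENT entries readable via the s390 debug feature */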
	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		vcpu->arch.gmap->private = vcpu->kvm;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing todo */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_cpu_has_interrupt(vcpu);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	/* validate the requested flags, not the just-cleared guest_debug field */
	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
	if (!MACHINE_IS_LPAR)
		return false;
	/* only enable for z10 and later */
	if (!MACHINE_HAS_EDAT1)
		return false;
	if (!kvm->arch.use_cmma)
		return false;
	return true;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      vcpu->arch.sie_block->prefix,
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	return 0;
}

static long kvm_arch_fault_in_sync(struct kvm_vcpu *vcpu)
{
	long rc;
	hva_t fault = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
	struct mm_struct *mm = current->mm;
	down_read(&mm->mmap_sem);
	rc = get_user_pages(current, mm, fault, 1, 1, 0, NULL, NULL);
	up_read(&mm->mmap_sem);
	return rc;
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	inti.parm64 = token;

	if (start_token) {
		inti.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}

static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
11403c038e6bSDominik Dingel */ 11413c038e6bSDominik Dingel kvm_check_async_pf_completion(vcpu); 11423c038e6bSDominik Dingel 11435a32c1afSChristian Borntraeger memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16); 1144b0c632dbSHeiko Carstens 1145b0c632dbSHeiko Carstens if (need_resched()) 1146b0c632dbSHeiko Carstens schedule(); 1147b0c632dbSHeiko Carstens 114871cde587SChristian Borntraeger if (test_thread_flag(TIF_MCCK_PENDING)) 114971cde587SChristian Borntraeger s390_handle_mcck(); 115071cde587SChristian Borntraeger 1151d6b6d166SCarsten Otte if (!kvm_is_ucontrol(vcpu->kvm)) 11520ff31867SCarsten Otte kvm_s390_deliver_pending_interrupts(vcpu); 11530ff31867SCarsten Otte 11542c70fe44SChristian Borntraeger rc = kvm_s390_handle_requests(vcpu); 11552c70fe44SChristian Borntraeger if (rc) 11562c70fe44SChristian Borntraeger return rc; 11572c70fe44SChristian Borntraeger 115827291e21SDavid Hildenbrand if (guestdbg_enabled(vcpu)) { 115927291e21SDavid Hildenbrand kvm_s390_backup_guest_per_regs(vcpu); 116027291e21SDavid Hildenbrand kvm_s390_patch_guest_per_regs(vcpu); 116127291e21SDavid Hildenbrand } 116227291e21SDavid Hildenbrand 1163b0c632dbSHeiko Carstens vcpu->arch.sie_block->icptcode = 0; 11643fb4c40fSThomas Huth cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); 11653fb4c40fSThomas Huth VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags); 11663fb4c40fSThomas Huth trace_kvm_s390_sie_enter(vcpu, cpuflags); 11672b29a9fdSDominik Dingel 11683fb4c40fSThomas Huth return 0; 11693fb4c40fSThomas Huth } 11703fb4c40fSThomas Huth 11713fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) 11723fb4c40fSThomas Huth { 117324eb3a82SDominik Dingel int rc = -1; 11742b29a9fdSDominik Dingel 11752b29a9fdSDominik Dingel VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", 11762b29a9fdSDominik Dingel vcpu->arch.sie_block->icptcode); 11772b29a9fdSDominik Dingel trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); 11782b29a9fdSDominik Dingel 117927291e21SDavid Hildenbrand if (guestdbg_enabled(vcpu)) 118027291e21SDavid Hildenbrand kvm_s390_restore_guest_per_regs(vcpu); 118127291e21SDavid Hildenbrand 11823fb4c40fSThomas Huth if (exit_reason >= 0) { 11837c470539SMartin Schwidefsky rc = 0; 1184210b1607SThomas Huth } else if (kvm_is_ucontrol(vcpu->kvm)) { 1185210b1607SThomas Huth vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL; 1186210b1607SThomas Huth vcpu->run->s390_ucontrol.trans_exc_code = 1187210b1607SThomas Huth current->thread.gmap_addr; 1188210b1607SThomas Huth vcpu->run->s390_ucontrol.pgm_code = 0x10; 1189210b1607SThomas Huth rc = -EREMOTE; 119024eb3a82SDominik Dingel 119124eb3a82SDominik Dingel } else if (current->thread.gmap_pfault) { 11923c038e6bSDominik Dingel trace_kvm_s390_major_guest_pfault(vcpu); 119324eb3a82SDominik Dingel current->thread.gmap_pfault = 0; 11943c038e6bSDominik Dingel if (kvm_arch_setup_async_pf(vcpu) || 11953c038e6bSDominik Dingel (kvm_arch_fault_in_sync(vcpu) >= 0)) 119624eb3a82SDominik Dingel rc = 0; 119724eb3a82SDominik Dingel } 119824eb3a82SDominik Dingel 119924eb3a82SDominik Dingel if (rc == -1) { 1200699bde3bSChristian Borntraeger VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); 1201699bde3bSChristian Borntraeger trace_kvm_s390_sie_fault(vcpu); 1202699bde3bSChristian Borntraeger rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 12031f0d0f09SCarsten Otte } 1204b0c632dbSHeiko Carstens 12055a32c1afSChristian Borntraeger memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); 12063fb4c40fSThomas Huth 
1207a76ccff6SThomas Huth if (rc == 0) { 1208a76ccff6SThomas Huth if (kvm_is_ucontrol(vcpu->kvm)) 12092955c83fSChristian Borntraeger /* Don't exit for host interrupts. */ 12102955c83fSChristian Borntraeger rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0; 1211a76ccff6SThomas Huth else 1212a76ccff6SThomas Huth rc = kvm_handle_sie_intercept(vcpu); 1213a76ccff6SThomas Huth } 1214a76ccff6SThomas Huth 12153fb4c40fSThomas Huth return rc; 12163fb4c40fSThomas Huth } 12173fb4c40fSThomas Huth 12183fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu) 12193fb4c40fSThomas Huth { 12203fb4c40fSThomas Huth int rc, exit_reason; 12213fb4c40fSThomas Huth 1222800c1065SThomas Huth /* 1223800c1065SThomas Huth * We try to hold kvm->srcu during most of vcpu_run (except when run- 1224800c1065SThomas Huth * ning the guest), so that memslots (and other stuff) are protected 1225800c1065SThomas Huth */ 1226800c1065SThomas Huth vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 1227800c1065SThomas Huth 1228a76ccff6SThomas Huth do { 12293fb4c40fSThomas Huth rc = vcpu_pre_run(vcpu); 12303fb4c40fSThomas Huth if (rc) 1231a76ccff6SThomas Huth break; 12323fb4c40fSThomas Huth 1233800c1065SThomas Huth srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 12343fb4c40fSThomas Huth /* 1235a76ccff6SThomas Huth * As PF_VCPU will be used in fault handler, between 1236a76ccff6SThomas Huth * guest_enter and guest_exit should be no uaccess. 12373fb4c40fSThomas Huth */ 12383fb4c40fSThomas Huth preempt_disable(); 12393fb4c40fSThomas Huth kvm_guest_enter(); 12403fb4c40fSThomas Huth preempt_enable(); 1241a76ccff6SThomas Huth exit_reason = sie64a(vcpu->arch.sie_block, 1242a76ccff6SThomas Huth vcpu->run->s.regs.gprs); 12433fb4c40fSThomas Huth kvm_guest_exit(); 1244800c1065SThomas Huth vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 12453fb4c40fSThomas Huth 12463fb4c40fSThomas Huth rc = vcpu_post_run(vcpu, exit_reason); 124727291e21SDavid Hildenbrand } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc); 12483fb4c40fSThomas Huth 1249800c1065SThomas Huth srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 1250e168bf8dSCarsten Otte return rc; 1251b0c632dbSHeiko Carstens } 1252b0c632dbSHeiko Carstens 1253b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1254b0c632dbSHeiko Carstens { 12558f2abe6aSChristian Borntraeger int rc; 1256b0c632dbSHeiko Carstens sigset_t sigsaved; 1257b0c632dbSHeiko Carstens 125827291e21SDavid Hildenbrand if (guestdbg_exit_pending(vcpu)) { 125927291e21SDavid Hildenbrand kvm_s390_prepare_debug_exit(vcpu); 126027291e21SDavid Hildenbrand return 0; 126127291e21SDavid Hildenbrand } 126227291e21SDavid Hildenbrand 1263b0c632dbSHeiko Carstens if (vcpu->sigset_active) 1264b0c632dbSHeiko Carstens sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); 1265b0c632dbSHeiko Carstens 12666852d7b6SDavid Hildenbrand kvm_s390_vcpu_start(vcpu); 1267b0c632dbSHeiko Carstens 12688f2abe6aSChristian Borntraeger switch (kvm_run->exit_reason) { 12698f2abe6aSChristian Borntraeger case KVM_EXIT_S390_SIEIC: 12708f2abe6aSChristian Borntraeger case KVM_EXIT_UNKNOWN: 12719ace903dSChristian Ehrhardt case KVM_EXIT_INTR: 12728f2abe6aSChristian Borntraeger case KVM_EXIT_S390_RESET: 1273e168bf8dSCarsten Otte case KVM_EXIT_S390_UCONTROL: 1274fa6b7fe9SCornelia Huck case KVM_EXIT_S390_TSCH: 127527291e21SDavid Hildenbrand case KVM_EXIT_DEBUG: 12768f2abe6aSChristian Borntraeger break; 12778f2abe6aSChristian Borntraeger default: 12788f2abe6aSChristian Borntraeger BUG(); 
12798f2abe6aSChristian Borntraeger } 12808f2abe6aSChristian Borntraeger 1281d7b0b5ebSCarsten Otte vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; 1282d7b0b5ebSCarsten Otte vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; 128360b413c9SChristian Borntraeger if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) { 128460b413c9SChristian Borntraeger kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX; 128560b413c9SChristian Borntraeger kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix); 128660b413c9SChristian Borntraeger } 12879eed0735SChristian Borntraeger if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) { 12889eed0735SChristian Borntraeger kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS; 12899eed0735SChristian Borntraeger memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128); 12909eed0735SChristian Borntraeger kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix); 12919eed0735SChristian Borntraeger } 1292d7b0b5ebSCarsten Otte 1293dab4079dSHeiko Carstens might_fault(); 1294e168bf8dSCarsten Otte rc = __vcpu_run(vcpu); 12959ace903dSChristian Ehrhardt 1296b1d16c49SChristian Ehrhardt if (signal_pending(current) && !rc) { 1297b1d16c49SChristian Ehrhardt kvm_run->exit_reason = KVM_EXIT_INTR; 12988f2abe6aSChristian Borntraeger rc = -EINTR; 1299b1d16c49SChristian Ehrhardt } 13008f2abe6aSChristian Borntraeger 130127291e21SDavid Hildenbrand if (guestdbg_exit_pending(vcpu) && !rc) { 130227291e21SDavid Hildenbrand kvm_s390_prepare_debug_exit(vcpu); 130327291e21SDavid Hildenbrand rc = 0; 130427291e21SDavid Hildenbrand } 130527291e21SDavid Hildenbrand 1306b8e660b8SHeiko Carstens if (rc == -EOPNOTSUPP) { 13078f2abe6aSChristian Borntraeger /* intercept cannot be handled in-kernel, prepare kvm-run */ 13088f2abe6aSChristian Borntraeger kvm_run->exit_reason = KVM_EXIT_S390_SIEIC; 13098f2abe6aSChristian Borntraeger kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; 13108f2abe6aSChristian Borntraeger kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; 13118f2abe6aSChristian Borntraeger kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; 13128f2abe6aSChristian Borntraeger rc = 0; 13138f2abe6aSChristian Borntraeger } 13148f2abe6aSChristian Borntraeger 13158f2abe6aSChristian Borntraeger if (rc == -EREMOTE) { 13168f2abe6aSChristian Borntraeger /* intercept was handled, but userspace support is needed 13178f2abe6aSChristian Borntraeger * kvm_run has been prepared by the handler */ 13188f2abe6aSChristian Borntraeger rc = 0; 13198f2abe6aSChristian Borntraeger } 13208f2abe6aSChristian Borntraeger 1321d7b0b5ebSCarsten Otte kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; 1322d7b0b5ebSCarsten Otte kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; 132360b413c9SChristian Borntraeger kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix; 13249eed0735SChristian Borntraeger memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); 1325d7b0b5ebSCarsten Otte 1326b0c632dbSHeiko Carstens if (vcpu->sigset_active) 1327b0c632dbSHeiko Carstens sigprocmask(SIG_SETMASK, &sigsaved, NULL); 1328b0c632dbSHeiko Carstens 1329b0c632dbSHeiko Carstens vcpu->stat.exit_userspace++; 13307e8e6ab4SHeiko Carstens return rc; 1331b0c632dbSHeiko Carstens } 1332b0c632dbSHeiko Carstens 1333b0c632dbSHeiko Carstens /* 1334b0c632dbSHeiko Carstens * store status at address 1335b0c632dbSHeiko Carstens * we have two special cases: 1336b0c632dbSHeiko Carstens * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit 1337b0c632dbSHeiko Carstens * KVM_S390_STORE_STATUS_PREFIXED: -> prefix 1338b0c632dbSHeiko Carstens */ 1339d0bce605SHeiko Carstens int
kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa) 1340b0c632dbSHeiko Carstens { 1341092670cdSCarsten Otte unsigned char archmode = 1; 1342178bd789SThomas Huth u64 clkcomp; 1343d0bce605SHeiko Carstens int rc; 1344b0c632dbSHeiko Carstens 1345d0bce605SHeiko Carstens if (gpa == KVM_S390_STORE_STATUS_NOADDR) { 1346d0bce605SHeiko Carstens if (write_guest_abs(vcpu, 163, &archmode, 1)) 1347b0c632dbSHeiko Carstens return -EFAULT; 1348d0bce605SHeiko Carstens gpa = SAVE_AREA_BASE; 1349d0bce605SHeiko Carstens } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) { 1350d0bce605SHeiko Carstens if (write_guest_real(vcpu, 163, &archmode, 1)) 1351b0c632dbSHeiko Carstens return -EFAULT; 1352d0bce605SHeiko Carstens gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE); 1353d0bce605SHeiko Carstens } 1354d0bce605SHeiko Carstens rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs), 1355d0bce605SHeiko Carstens vcpu->arch.guest_fpregs.fprs, 128); 1356d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs), 1357d0bce605SHeiko Carstens vcpu->run->s.regs.gprs, 128); 1358d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw), 1359d0bce605SHeiko Carstens &vcpu->arch.sie_block->gpsw, 16); 1360d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg), 1361d0bce605SHeiko Carstens &vcpu->arch.sie_block->prefix, 4); 1362d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, 1363d0bce605SHeiko Carstens gpa + offsetof(struct save_area, fp_ctrl_reg), 1364d0bce605SHeiko Carstens &vcpu->arch.guest_fpregs.fpc, 4); 1365d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg), 1366d0bce605SHeiko Carstens &vcpu->arch.sie_block->todpr, 4); 1367d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer), 1368d0bce605SHeiko Carstens &vcpu->arch.sie_block->cputm, 8); 1369178bd789SThomas Huth clkcomp = vcpu->arch.sie_block->ckc >> 8; 1370d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp), 1371d0bce605SHeiko Carstens &clkcomp, 8); 1372d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs), 1373d0bce605SHeiko Carstens &vcpu->run->s.regs.acrs, 64); 1374d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs), 1375d0bce605SHeiko Carstens &vcpu->arch.sie_block->gcr, 128); 1376d0bce605SHeiko Carstens return rc ? -EFAULT : 0; 1377b0c632dbSHeiko Carstens } 1378b0c632dbSHeiko Carstens 1379e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) 1380e879892cSThomas Huth { 1381e879892cSThomas Huth /* 1382e879892cSThomas Huth * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy 1383e879892cSThomas Huth * copying in vcpu load/put. 
Let's update our copies before we save 1384e879892cSThomas Huth * it into the save area 1385e879892cSThomas Huth */ 1386e879892cSThomas Huth save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); 1387e879892cSThomas Huth save_fp_regs(vcpu->arch.guest_fpregs.fprs); 1388e879892cSThomas Huth save_access_regs(vcpu->run->s.regs.acrs); 1389e879892cSThomas Huth 1390e879892cSThomas Huth return kvm_s390_store_status_unloaded(vcpu, addr); 1391e879892cSThomas Huth } 1392e879892cSThomas Huth 1393*8ad35755SDavid Hildenbrand static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu) 1394*8ad35755SDavid Hildenbrand { 1395*8ad35755SDavid Hildenbrand return atomic_read(&(vcpu)->arch.sie_block->cpuflags) & CPUSTAT_STOPPED; 1396*8ad35755SDavid Hildenbrand } 1397*8ad35755SDavid Hildenbrand 1398*8ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) 1399*8ad35755SDavid Hildenbrand { 1400*8ad35755SDavid Hildenbrand kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu); 1401*8ad35755SDavid Hildenbrand kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu); 1402*8ad35755SDavid Hildenbrand exit_sie_sync(vcpu); 1403*8ad35755SDavid Hildenbrand } 1404*8ad35755SDavid Hildenbrand 1405*8ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm) 1406*8ad35755SDavid Hildenbrand { 1407*8ad35755SDavid Hildenbrand unsigned int i; 1408*8ad35755SDavid Hildenbrand struct kvm_vcpu *vcpu; 1409*8ad35755SDavid Hildenbrand 1410*8ad35755SDavid Hildenbrand kvm_for_each_vcpu(i, vcpu, kvm) { 1411*8ad35755SDavid Hildenbrand __disable_ibs_on_vcpu(vcpu); 1412*8ad35755SDavid Hildenbrand } 1413*8ad35755SDavid Hildenbrand } 1414*8ad35755SDavid Hildenbrand 1415*8ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu) 1416*8ad35755SDavid Hildenbrand { 1417*8ad35755SDavid Hildenbrand kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu); 1418*8ad35755SDavid Hildenbrand kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu); 1419*8ad35755SDavid Hildenbrand exit_sie_sync(vcpu); 1420*8ad35755SDavid Hildenbrand } 1421*8ad35755SDavid Hildenbrand 14226852d7b6SDavid Hildenbrand void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) 14236852d7b6SDavid Hildenbrand { 1424*8ad35755SDavid Hildenbrand int i, online_vcpus, started_vcpus = 0; 1425*8ad35755SDavid Hildenbrand 1426*8ad35755SDavid Hildenbrand if (!is_vcpu_stopped(vcpu)) 1427*8ad35755SDavid Hildenbrand return; 1428*8ad35755SDavid Hildenbrand 14296852d7b6SDavid Hildenbrand trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1); 1430*8ad35755SDavid Hildenbrand /* Only one cpu at a time may enter/leave the STOPPED state.
*/ 1431*8ad35755SDavid Hildenbrand spin_lock_bh(&vcpu->kvm->arch.start_stop_lock); 1432*8ad35755SDavid Hildenbrand online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); 1433*8ad35755SDavid Hildenbrand 1434*8ad35755SDavid Hildenbrand for (i = 0; i < online_vcpus; i++) { 1435*8ad35755SDavid Hildenbrand if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) 1436*8ad35755SDavid Hildenbrand started_vcpus++; 1437*8ad35755SDavid Hildenbrand } 1438*8ad35755SDavid Hildenbrand 1439*8ad35755SDavid Hildenbrand if (started_vcpus == 0) { 1440*8ad35755SDavid Hildenbrand /* we're the only active VCPU -> speed it up */ 1441*8ad35755SDavid Hildenbrand __enable_ibs_on_vcpu(vcpu); 1442*8ad35755SDavid Hildenbrand } else if (started_vcpus == 1) { 1443*8ad35755SDavid Hildenbrand /* 1444*8ad35755SDavid Hildenbrand * As we are starting a second VCPU, we have to disable 1445*8ad35755SDavid Hildenbrand * the IBS facility on all VCPUs to remove potentially 1446*8ad35755SDavid Hildenbrand * outstanding ENABLE requests. 1447*8ad35755SDavid Hildenbrand */ 1448*8ad35755SDavid Hildenbrand __disable_ibs_on_all_vcpus(vcpu->kvm); 1449*8ad35755SDavid Hildenbrand } 1450*8ad35755SDavid Hildenbrand 14516852d7b6SDavid Hildenbrand atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); 1452*8ad35755SDavid Hildenbrand /* 1453*8ad35755SDavid Hildenbrand * Another VCPU might have used IBS while we were offline. 1454*8ad35755SDavid Hildenbrand * Let's play safe and flush the VCPU at startup. 1455*8ad35755SDavid Hildenbrand */ 1456*8ad35755SDavid Hildenbrand vcpu->arch.sie_block->ihcpu = 0xffff; 1457*8ad35755SDavid Hildenbrand spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock); 1458*8ad35755SDavid Hildenbrand return; 14596852d7b6SDavid Hildenbrand } 14606852d7b6SDavid Hildenbrand 14616852d7b6SDavid Hildenbrand void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) 14626852d7b6SDavid Hildenbrand { 1463*8ad35755SDavid Hildenbrand int i, online_vcpus, started_vcpus = 0; 1464*8ad35755SDavid Hildenbrand struct kvm_vcpu *started_vcpu = NULL; 1465*8ad35755SDavid Hildenbrand 1466*8ad35755SDavid Hildenbrand if (is_vcpu_stopped(vcpu)) 1467*8ad35755SDavid Hildenbrand return; 1468*8ad35755SDavid Hildenbrand 14696852d7b6SDavid Hildenbrand trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0); 1470*8ad35755SDavid Hildenbrand /* Only one cpu at a time may enter/leave the STOPPED state. */ 1471*8ad35755SDavid Hildenbrand spin_lock_bh(&vcpu->kvm->arch.start_stop_lock); 1472*8ad35755SDavid Hildenbrand online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); 1473*8ad35755SDavid Hildenbrand 14746852d7b6SDavid Hildenbrand atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); 1475*8ad35755SDavid Hildenbrand __disable_ibs_on_vcpu(vcpu); 1476*8ad35755SDavid Hildenbrand 1477*8ad35755SDavid Hildenbrand for (i = 0; i < online_vcpus; i++) { 1478*8ad35755SDavid Hildenbrand if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) { 1479*8ad35755SDavid Hildenbrand started_vcpus++; 1480*8ad35755SDavid Hildenbrand started_vcpu = vcpu->kvm->vcpus[i]; 1481*8ad35755SDavid Hildenbrand } 1482*8ad35755SDavid Hildenbrand } 1483*8ad35755SDavid Hildenbrand 1484*8ad35755SDavid Hildenbrand if (started_vcpus == 1) { 1485*8ad35755SDavid Hildenbrand /* 1486*8ad35755SDavid Hildenbrand * As we only have one VCPU left, we want to enable the 1487*8ad35755SDavid Hildenbrand * IBS facility for that VCPU to speed it up.
1488*8ad35755SDavid Hildenbrand */ 1489*8ad35755SDavid Hildenbrand __enable_ibs_on_vcpu(started_vcpu); 1490*8ad35755SDavid Hildenbrand } 1491*8ad35755SDavid Hildenbrand 1492*8ad35755SDavid Hildenbrand spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock); 1493*8ad35755SDavid Hildenbrand return; 14946852d7b6SDavid Hildenbrand } 14956852d7b6SDavid Hildenbrand 1496d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, 1497d6712df9SCornelia Huck struct kvm_enable_cap *cap) 1498d6712df9SCornelia Huck { 1499d6712df9SCornelia Huck int r; 1500d6712df9SCornelia Huck 1501d6712df9SCornelia Huck if (cap->flags) 1502d6712df9SCornelia Huck return -EINVAL; 1503d6712df9SCornelia Huck 1504d6712df9SCornelia Huck switch (cap->cap) { 1505fa6b7fe9SCornelia Huck case KVM_CAP_S390_CSS_SUPPORT: 1506fa6b7fe9SCornelia Huck if (!vcpu->kvm->arch.css_support) { 1507fa6b7fe9SCornelia Huck vcpu->kvm->arch.css_support = 1; 1508fa6b7fe9SCornelia Huck trace_kvm_s390_enable_css(vcpu->kvm); 1509fa6b7fe9SCornelia Huck } 1510fa6b7fe9SCornelia Huck r = 0; 1511fa6b7fe9SCornelia Huck break; 1512d6712df9SCornelia Huck default: 1513d6712df9SCornelia Huck r = -EINVAL; 1514d6712df9SCornelia Huck break; 1515d6712df9SCornelia Huck } 1516d6712df9SCornelia Huck return r; 1517d6712df9SCornelia Huck } 1518d6712df9SCornelia Huck 1519b0c632dbSHeiko Carstens long kvm_arch_vcpu_ioctl(struct file *filp, 1520b0c632dbSHeiko Carstens unsigned int ioctl, unsigned long arg) 1521b0c632dbSHeiko Carstens { 1522b0c632dbSHeiko Carstens struct kvm_vcpu *vcpu = filp->private_data; 1523b0c632dbSHeiko Carstens void __user *argp = (void __user *)arg; 1524800c1065SThomas Huth int idx; 1525bc923cc9SAvi Kivity long r; 1526b0c632dbSHeiko Carstens 152793736624SAvi Kivity switch (ioctl) { 152893736624SAvi Kivity case KVM_S390_INTERRUPT: { 1529ba5c1e9bSCarsten Otte struct kvm_s390_interrupt s390int; 1530ba5c1e9bSCarsten Otte 153193736624SAvi Kivity r = -EFAULT; 1532ba5c1e9bSCarsten Otte if (copy_from_user(&s390int, argp, sizeof(s390int))) 153393736624SAvi Kivity break; 153493736624SAvi Kivity r = kvm_s390_inject_vcpu(vcpu, &s390int); 153593736624SAvi Kivity break; 1536ba5c1e9bSCarsten Otte } 1537b0c632dbSHeiko Carstens case KVM_S390_STORE_STATUS: 1538800c1065SThomas Huth idx = srcu_read_lock(&vcpu->kvm->srcu); 1539bc923cc9SAvi Kivity r = kvm_s390_vcpu_store_status(vcpu, arg); 1540800c1065SThomas Huth srcu_read_unlock(&vcpu->kvm->srcu, idx); 1541bc923cc9SAvi Kivity break; 1542b0c632dbSHeiko Carstens case KVM_S390_SET_INITIAL_PSW: { 1543b0c632dbSHeiko Carstens psw_t psw; 1544b0c632dbSHeiko Carstens 1545bc923cc9SAvi Kivity r = -EFAULT; 1546b0c632dbSHeiko Carstens if (copy_from_user(&psw, argp, sizeof(psw))) 1547bc923cc9SAvi Kivity break; 1548bc923cc9SAvi Kivity r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw); 1549bc923cc9SAvi Kivity break; 1550b0c632dbSHeiko Carstens } 1551b0c632dbSHeiko Carstens case KVM_S390_INITIAL_RESET: 1552bc923cc9SAvi Kivity r = kvm_arch_vcpu_ioctl_initial_reset(vcpu); 1553bc923cc9SAvi Kivity break; 155414eebd91SCarsten Otte case KVM_SET_ONE_REG: 155514eebd91SCarsten Otte case KVM_GET_ONE_REG: { 155614eebd91SCarsten Otte struct kvm_one_reg reg; 155714eebd91SCarsten Otte r = -EFAULT; 155814eebd91SCarsten Otte if (copy_from_user(&reg, argp, sizeof(reg))) 155914eebd91SCarsten Otte break; 156014eebd91SCarsten Otte if (ioctl == KVM_SET_ONE_REG) 156114eebd91SCarsten Otte r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg); 156214eebd91SCarsten Otte else 156314eebd91SCarsten Otte r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
156414eebd91SCarsten Otte break; 156514eebd91SCarsten Otte } 156627e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL 156727e0393fSCarsten Otte case KVM_S390_UCAS_MAP: { 156827e0393fSCarsten Otte struct kvm_s390_ucas_mapping ucasmap; 156927e0393fSCarsten Otte 157027e0393fSCarsten Otte if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) { 157127e0393fSCarsten Otte r = -EFAULT; 157227e0393fSCarsten Otte break; 157327e0393fSCarsten Otte } 157427e0393fSCarsten Otte 157527e0393fSCarsten Otte if (!kvm_is_ucontrol(vcpu->kvm)) { 157627e0393fSCarsten Otte r = -EINVAL; 157727e0393fSCarsten Otte break; 157827e0393fSCarsten Otte } 157927e0393fSCarsten Otte 158027e0393fSCarsten Otte r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr, 158127e0393fSCarsten Otte ucasmap.vcpu_addr, ucasmap.length); 158227e0393fSCarsten Otte break; 158327e0393fSCarsten Otte } 158427e0393fSCarsten Otte case KVM_S390_UCAS_UNMAP: { 158527e0393fSCarsten Otte struct kvm_s390_ucas_mapping ucasmap; 158627e0393fSCarsten Otte 158727e0393fSCarsten Otte if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) { 158827e0393fSCarsten Otte r = -EFAULT; 158927e0393fSCarsten Otte break; 159027e0393fSCarsten Otte } 159127e0393fSCarsten Otte 159227e0393fSCarsten Otte if (!kvm_is_ucontrol(vcpu->kvm)) { 159327e0393fSCarsten Otte r = -EINVAL; 159427e0393fSCarsten Otte break; 159527e0393fSCarsten Otte } 159627e0393fSCarsten Otte 159727e0393fSCarsten Otte r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr, 159827e0393fSCarsten Otte ucasmap.length); 159927e0393fSCarsten Otte break; 160027e0393fSCarsten Otte } 160127e0393fSCarsten Otte #endif 1602ccc7910fSCarsten Otte case KVM_S390_VCPU_FAULT: { 1603ccc7910fSCarsten Otte r = gmap_fault(arg, vcpu->arch.gmap); 1604ccc7910fSCarsten Otte if (!IS_ERR_VALUE(r)) 1605ccc7910fSCarsten Otte r = 0; 1606ccc7910fSCarsten Otte break; 1607ccc7910fSCarsten Otte } 1608d6712df9SCornelia Huck case KVM_ENABLE_CAP: 1609d6712df9SCornelia Huck { 1610d6712df9SCornelia Huck struct kvm_enable_cap cap; 1611d6712df9SCornelia Huck r = -EFAULT; 1612d6712df9SCornelia Huck if (copy_from_user(&cap, argp, sizeof(cap))) 1613d6712df9SCornelia Huck break; 1614d6712df9SCornelia Huck r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); 1615d6712df9SCornelia Huck break; 1616d6712df9SCornelia Huck } 1617b0c632dbSHeiko Carstens default: 16183e6afcf1SCarsten Otte r = -ENOTTY; 1619b0c632dbSHeiko Carstens } 1620bc923cc9SAvi Kivity return r; 1621b0c632dbSHeiko Carstens } 1622b0c632dbSHeiko Carstens 16235b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) 16245b1c1493SCarsten Otte { 16255b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL 16265b1c1493SCarsten Otte if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET) 16275b1c1493SCarsten Otte && (kvm_is_ucontrol(vcpu->kvm))) { 16285b1c1493SCarsten Otte vmf->page = virt_to_page(vcpu->arch.sie_block); 16295b1c1493SCarsten Otte get_page(vmf->page); 16305b1c1493SCarsten Otte return 0; 16315b1c1493SCarsten Otte } 16325b1c1493SCarsten Otte #endif 16335b1c1493SCarsten Otte return VM_FAULT_SIGBUS; 16345b1c1493SCarsten Otte } 16355b1c1493SCarsten Otte 16365587027cSAneesh Kumar K.V void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, 1637db3fe4ebSTakuya Yoshikawa struct kvm_memory_slot *dont) 1638db3fe4ebSTakuya Yoshikawa { 1639db3fe4ebSTakuya Yoshikawa } 1640db3fe4ebSTakuya Yoshikawa 16415587027cSAneesh Kumar K.V int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, 16425587027cSAneesh Kumar K.V unsigned long npages) 
1643db3fe4ebSTakuya Yoshikawa { 1644db3fe4ebSTakuya Yoshikawa return 0; 1645db3fe4ebSTakuya Yoshikawa } 1646db3fe4ebSTakuya Yoshikawa 1647e59dbe09STakuya Yoshikawa void kvm_arch_memslots_updated(struct kvm *kvm) 1648e59dbe09STakuya Yoshikawa { 1649e59dbe09STakuya Yoshikawa } 1650e59dbe09STakuya Yoshikawa 1651b0c632dbSHeiko Carstens /* Section: memory related */ 1652f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm, 1653f7784b8eSMarcelo Tosatti struct kvm_memory_slot *memslot, 16547b6195a9STakuya Yoshikawa struct kvm_userspace_memory_region *mem, 16557b6195a9STakuya Yoshikawa enum kvm_mr_change change) 1656b0c632dbSHeiko Carstens { 1657dd2887e7SNick Wang /* A few sanity checks. We can have memory slots which have to be 1658dd2887e7SNick Wang located/ended at a segment boundary (1MB). The memory in userland is 1659dd2887e7SNick Wang ok to be fragmented into various different vmas. It is okay to mmap() 1660dd2887e7SNick Wang and munmap() stuff in this slot after doing this call at any time */ 1661b0c632dbSHeiko Carstens 1662598841caSCarsten Otte if (mem->userspace_addr & 0xffffful) 1663b0c632dbSHeiko Carstens return -EINVAL; 1664b0c632dbSHeiko Carstens 1665598841caSCarsten Otte if (mem->memory_size & 0xffffful) 1666b0c632dbSHeiko Carstens return -EINVAL; 1667b0c632dbSHeiko Carstens 1668f7784b8eSMarcelo Tosatti return 0; 1669f7784b8eSMarcelo Tosatti } 1670f7784b8eSMarcelo Tosatti 1671f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm, 1672f7784b8eSMarcelo Tosatti struct kvm_userspace_memory_region *mem, 16738482644aSTakuya Yoshikawa const struct kvm_memory_slot *old, 16748482644aSTakuya Yoshikawa enum kvm_mr_change change) 1675f7784b8eSMarcelo Tosatti { 1676f7850c92SCarsten Otte int rc; 1677f7784b8eSMarcelo Tosatti 16782cef4debSChristian Borntraeger /* If the basics of the memslot do not change, we do not want 16792cef4debSChristian Borntraeger * to update the gmap. Every update causes several unnecessary 16802cef4debSChristian Borntraeger * segment translation exceptions. This is usually handled just 16812cef4debSChristian Borntraeger * fine by the normal fault handler + gmap, but it will also 16822cef4debSChristian Borntraeger * cause faults on the prefix page of running guest CPUs. 
16832cef4debSChristian Borntraeger */ 16842cef4debSChristian Borntraeger if (old->userspace_addr == mem->userspace_addr && 16852cef4debSChristian Borntraeger old->base_gfn * PAGE_SIZE == mem->guest_phys_addr && 16862cef4debSChristian Borntraeger old->npages * PAGE_SIZE == mem->memory_size) 16872cef4debSChristian Borntraeger return; 1688598841caSCarsten Otte 1689598841caSCarsten Otte rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, 1690598841caSCarsten Otte mem->guest_phys_addr, mem->memory_size); 1691598841caSCarsten Otte if (rc) 1692f7850c92SCarsten Otte printk(KERN_WARNING "kvm-s390: failed to commit memory region\n"); 1693598841caSCarsten Otte return; 1694b0c632dbSHeiko Carstens } 1695b0c632dbSHeiko Carstens 16962df72e9bSMarcelo Tosatti void kvm_arch_flush_shadow_all(struct kvm *kvm) 16972df72e9bSMarcelo Tosatti { 16982df72e9bSMarcelo Tosatti } 16992df72e9bSMarcelo Tosatti 17002df72e9bSMarcelo Tosatti void kvm_arch_flush_shadow_memslot(struct kvm *kvm, 17012df72e9bSMarcelo Tosatti struct kvm_memory_slot *slot) 170234d4cb8fSMarcelo Tosatti { 170334d4cb8fSMarcelo Tosatti } 170434d4cb8fSMarcelo Tosatti 1705b0c632dbSHeiko Carstens static int __init kvm_s390_init(void) 1706b0c632dbSHeiko Carstens { 1707ef50f7acSChristian Borntraeger int ret; 17080ee75beaSAvi Kivity ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); 1709ef50f7acSChristian Borntraeger if (ret) 1710ef50f7acSChristian Borntraeger return ret; 1711ef50f7acSChristian Borntraeger 1712ef50f7acSChristian Borntraeger /* 1713ef50f7acSChristian Borntraeger * guests can ask for up to 255+1 double words, we need a full page 171425985edcSLucas De Marchi * to hold the maximum amount of facilities. On the other hand, we 1715ef50f7acSChristian Borntraeger * only set facilities that are known to work in KVM. 1716ef50f7acSChristian Borntraeger */ 171778c4b59fSMichael Mueller vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA); 171878c4b59fSMichael Mueller if (!vfacilities) { 1719ef50f7acSChristian Borntraeger kvm_exit(); 1720ef50f7acSChristian Borntraeger return -ENOMEM; 1721ef50f7acSChristian Borntraeger } 172278c4b59fSMichael Mueller memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16); 1723d208c79dSThomas Huth vfacilities[0] &= 0xff82fff3f4fc2000UL; 17247feb6bb8SMichael Mueller vfacilities[1] &= 0x005c000000000000UL; 1725ef50f7acSChristian Borntraeger return 0; 1726b0c632dbSHeiko Carstens } 1727b0c632dbSHeiko Carstens 1728b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void) 1729b0c632dbSHeiko Carstens { 173078c4b59fSMichael Mueller free_page((unsigned long) vfacilities); 1731b0c632dbSHeiko Carstens kvm_exit(); 1732b0c632dbSHeiko Carstens } 1733b0c632dbSHeiko Carstens 1734b0c632dbSHeiko Carstens module_init(kvm_s390_init); 1735b0c632dbSHeiko Carstens module_exit(kvm_s390_exit); 1736566af940SCornelia Huck 1737566af940SCornelia Huck /* 1738566af940SCornelia Huck * Enable autoloading of the kvm module. 1739566af940SCornelia Huck * Note that we add the module alias here instead of virt/kvm/kvm_main.c 1740566af940SCornelia Huck * since x86 takes a different approach. 1741566af940SCornelia Huck */ 1742566af940SCornelia Huck #include <linux/miscdevice.h> 1743566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR); 1744566af940SCornelia Huck MODULE_ALIAS("devname:kvm"); 1745