/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}
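
/*
 * Transfer the dirty state tracked by the gmap into KVM's dirty bitmap
 * for every page of the given memory slot.
 */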
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
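
/*
 * Handle KVM_ENABLE_CAP on the VM file descriptor; only the in-kernel
 * irqchip (KVM_CAP_S390_IRQCHIP) can be enabled here.
 */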
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_mem_control(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	return -ENXIO;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_vfacility(76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
				  CRYCB_FORMAT1;

	return 0;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_crypto;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_nogmap:
	kfree(kvm->arch.crypto.crycb);
out_crypto:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
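
/* Destroy all vcpus of a VM and reset the online vcpu counter. */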
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		vcpu->arch.gmap->private = vcpu->kvm;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	return 0;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}
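
/*
 * Counterpart to kvm_arch_vcpu_load(): save the guest floating point and
 * access registers, restore the host context and disable the gmap when
 * the vcpu is scheduled out.
 */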
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_vfacility(76))
		return;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}
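
/*
 * Initial setup of the SIE control block: CPU state flags, execution
 * control bits, facility list, interception controls, CMMA (if enabled)
 * and the clock comparator wakeup timer.
 */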
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xD1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
				      ICTL_TPROT;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_cpu_has_interrupt(vcpu);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}
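
/*
 * The mp_state interface only distinguishes STOPPED and OPERATING;
 * setting a state also switches the VM to user controlled vcpu state
 * handling (user_cpu_state_ctrl).
 */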
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}

bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
	if (!MACHINE_IS_LPAR)
		return false;
	/* only enable for z10 and later */
	if (!MACHINE_HAS_EDAT1)
		return false;
	if (!kvm->arch.use_cmma)
		return false;
	return true;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}
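
/*
 * Inject a pfault init/done interrupt carrying the given token, used to
 * signal asynchronous page fault start and completion to the guest.
 */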
FAULT_FLAG_WRITE : 0); 111124eb3a82SDominik Dingel } 111224eb3a82SDominik Dingel 11133c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token, 11143c038e6bSDominik Dingel unsigned long token) 11153c038e6bSDominik Dingel { 11163c038e6bSDominik Dingel struct kvm_s390_interrupt inti; 11173c038e6bSDominik Dingel inti.parm64 = token; 11183c038e6bSDominik Dingel 11193c038e6bSDominik Dingel if (start_token) { 11203c038e6bSDominik Dingel inti.type = KVM_S390_INT_PFAULT_INIT; 11213c038e6bSDominik Dingel WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti)); 11223c038e6bSDominik Dingel } else { 11233c038e6bSDominik Dingel inti.type = KVM_S390_INT_PFAULT_DONE; 11243c038e6bSDominik Dingel WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); 11253c038e6bSDominik Dingel } 11263c038e6bSDominik Dingel } 11273c038e6bSDominik Dingel 11283c038e6bSDominik Dingel void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, 11293c038e6bSDominik Dingel struct kvm_async_pf *work) 11303c038e6bSDominik Dingel { 11313c038e6bSDominik Dingel trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token); 11323c038e6bSDominik Dingel __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token); 11333c038e6bSDominik Dingel } 11343c038e6bSDominik Dingel 11353c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, 11363c038e6bSDominik Dingel struct kvm_async_pf *work) 11373c038e6bSDominik Dingel { 11383c038e6bSDominik Dingel trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token); 11393c038e6bSDominik Dingel __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token); 11403c038e6bSDominik Dingel } 11413c038e6bSDominik Dingel 11423c038e6bSDominik Dingel void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, 11433c038e6bSDominik Dingel struct kvm_async_pf *work) 11443c038e6bSDominik Dingel { 11453c038e6bSDominik Dingel /* s390 will always inject the page directly */ 11463c038e6bSDominik Dingel } 11473c038e6bSDominik Dingel 11483c038e6bSDominik Dingel bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) 11493c038e6bSDominik Dingel { 11503c038e6bSDominik Dingel /* 11513c038e6bSDominik Dingel * s390 will always inject the page directly, 11523c038e6bSDominik Dingel * but we still want check_async_completion to cleanup 11533c038e6bSDominik Dingel */ 11543c038e6bSDominik Dingel return true; 11553c038e6bSDominik Dingel } 11563c038e6bSDominik Dingel 11573c038e6bSDominik Dingel static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu) 11583c038e6bSDominik Dingel { 11593c038e6bSDominik Dingel hva_t hva; 11603c038e6bSDominik Dingel struct kvm_arch_async_pf arch; 11613c038e6bSDominik Dingel int rc; 11623c038e6bSDominik Dingel 11633c038e6bSDominik Dingel if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) 11643c038e6bSDominik Dingel return 0; 11653c038e6bSDominik Dingel if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) != 11663c038e6bSDominik Dingel vcpu->arch.pfault_compare) 11673c038e6bSDominik Dingel return 0; 11683c038e6bSDominik Dingel if (psw_extint_disabled(vcpu)) 11693c038e6bSDominik Dingel return 0; 11703c038e6bSDominik Dingel if (kvm_cpu_has_interrupt(vcpu)) 11713c038e6bSDominik Dingel return 0; 11723c038e6bSDominik Dingel if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul)) 11733c038e6bSDominik Dingel return 0; 11743c038e6bSDominik Dingel if (!vcpu->arch.gmap->pfault_enabled) 11753c038e6bSDominik Dingel return 0; 11763c038e6bSDominik Dingel 117781480cc1SHeiko Carstens hva = gfn_to_hva(vcpu->kvm, 
gpa_to_gfn(current->thread.gmap_addr)); 117881480cc1SHeiko Carstens hva += current->thread.gmap_addr & ~PAGE_MASK; 117981480cc1SHeiko Carstens if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8)) 11803c038e6bSDominik Dingel return 0; 11813c038e6bSDominik Dingel 11823c038e6bSDominik Dingel rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch); 11833c038e6bSDominik Dingel return rc; 11843c038e6bSDominik Dingel } 11853c038e6bSDominik Dingel 11863fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu) 1187b0c632dbSHeiko Carstens { 11883fb4c40fSThomas Huth int rc, cpuflags; 1189e168bf8dSCarsten Otte 11903c038e6bSDominik Dingel /* 11913c038e6bSDominik Dingel * On s390 notifications for arriving pages will be delivered directly 11923c038e6bSDominik Dingel * to the guest but the house keeping for completed pfaults is 11933c038e6bSDominik Dingel * handled outside the worker. 11943c038e6bSDominik Dingel */ 11953c038e6bSDominik Dingel kvm_check_async_pf_completion(vcpu); 11963c038e6bSDominik Dingel 11975a32c1afSChristian Borntraeger memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16); 1198b0c632dbSHeiko Carstens 1199b0c632dbSHeiko Carstens if (need_resched()) 1200b0c632dbSHeiko Carstens schedule(); 1201b0c632dbSHeiko Carstens 1202d3a73acbSMartin Schwidefsky if (test_cpu_flag(CIF_MCCK_PENDING)) 120371cde587SChristian Borntraeger s390_handle_mcck(); 120471cde587SChristian Borntraeger 120579395031SJens Freimann if (!kvm_is_ucontrol(vcpu->kvm)) { 120679395031SJens Freimann rc = kvm_s390_deliver_pending_interrupts(vcpu); 120779395031SJens Freimann if (rc) 120879395031SJens Freimann return rc; 120979395031SJens Freimann } 12100ff31867SCarsten Otte 12112c70fe44SChristian Borntraeger rc = kvm_s390_handle_requests(vcpu); 12122c70fe44SChristian Borntraeger if (rc) 12132c70fe44SChristian Borntraeger return rc; 12142c70fe44SChristian Borntraeger 121527291e21SDavid Hildenbrand if (guestdbg_enabled(vcpu)) { 121627291e21SDavid Hildenbrand kvm_s390_backup_guest_per_regs(vcpu); 121727291e21SDavid Hildenbrand kvm_s390_patch_guest_per_regs(vcpu); 121827291e21SDavid Hildenbrand } 121927291e21SDavid Hildenbrand 1220b0c632dbSHeiko Carstens vcpu->arch.sie_block->icptcode = 0; 12213fb4c40fSThomas Huth cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); 12223fb4c40fSThomas Huth VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags); 12233fb4c40fSThomas Huth trace_kvm_s390_sie_enter(vcpu, cpuflags); 12242b29a9fdSDominik Dingel 12253fb4c40fSThomas Huth return 0; 12263fb4c40fSThomas Huth } 12273fb4c40fSThomas Huth 12283fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) 12293fb4c40fSThomas Huth { 123024eb3a82SDominik Dingel int rc = -1; 12312b29a9fdSDominik Dingel 12322b29a9fdSDominik Dingel VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", 12332b29a9fdSDominik Dingel vcpu->arch.sie_block->icptcode); 12342b29a9fdSDominik Dingel trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); 12352b29a9fdSDominik Dingel 123627291e21SDavid Hildenbrand if (guestdbg_enabled(vcpu)) 123727291e21SDavid Hildenbrand kvm_s390_restore_guest_per_regs(vcpu); 123827291e21SDavid Hildenbrand 12393fb4c40fSThomas Huth if (exit_reason >= 0) { 12407c470539SMartin Schwidefsky rc = 0; 1241210b1607SThomas Huth } else if (kvm_is_ucontrol(vcpu->kvm)) { 1242210b1607SThomas Huth vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL; 1243210b1607SThomas Huth vcpu->run->s390_ucontrol.trans_exc_code = 1244210b1607SThomas Huth current->thread.gmap_addr; 
1245210b1607SThomas Huth vcpu->run->s390_ucontrol.pgm_code = 0x10; 1246210b1607SThomas Huth rc = -EREMOTE; 124724eb3a82SDominik Dingel 124824eb3a82SDominik Dingel } else if (current->thread.gmap_pfault) { 12493c038e6bSDominik Dingel trace_kvm_s390_major_guest_pfault(vcpu); 125024eb3a82SDominik Dingel current->thread.gmap_pfault = 0; 1251fa576c58SThomas Huth if (kvm_arch_setup_async_pf(vcpu)) { 125224eb3a82SDominik Dingel rc = 0; 1253fa576c58SThomas Huth } else { 1254fa576c58SThomas Huth gpa_t gpa = current->thread.gmap_addr; 1255fa576c58SThomas Huth rc = kvm_arch_fault_in_page(vcpu, gpa, 1); 1256fa576c58SThomas Huth } 125724eb3a82SDominik Dingel } 125824eb3a82SDominik Dingel 125924eb3a82SDominik Dingel if (rc == -1) { 1260699bde3bSChristian Borntraeger VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); 1261699bde3bSChristian Borntraeger trace_kvm_s390_sie_fault(vcpu); 1262699bde3bSChristian Borntraeger rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 12631f0d0f09SCarsten Otte } 1264b0c632dbSHeiko Carstens 12655a32c1afSChristian Borntraeger memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); 12663fb4c40fSThomas Huth 1267a76ccff6SThomas Huth if (rc == 0) { 1268a76ccff6SThomas Huth if (kvm_is_ucontrol(vcpu->kvm)) 12692955c83fSChristian Borntraeger /* Don't exit for host interrupts. */ 12702955c83fSChristian Borntraeger rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0; 1271a76ccff6SThomas Huth else 1272a76ccff6SThomas Huth rc = kvm_handle_sie_intercept(vcpu); 1273a76ccff6SThomas Huth } 1274a76ccff6SThomas Huth 12753fb4c40fSThomas Huth return rc; 12763fb4c40fSThomas Huth } 12773fb4c40fSThomas Huth 12783fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu) 12793fb4c40fSThomas Huth { 12803fb4c40fSThomas Huth int rc, exit_reason; 12813fb4c40fSThomas Huth 1282800c1065SThomas Huth /* 1283800c1065SThomas Huth * We try to hold kvm->srcu during most of vcpu_run (except when run- 1284800c1065SThomas Huth * ning the guest), so that memslots (and other stuff) are protected 1285800c1065SThomas Huth */ 1286800c1065SThomas Huth vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 1287800c1065SThomas Huth 1288a76ccff6SThomas Huth do { 12893fb4c40fSThomas Huth rc = vcpu_pre_run(vcpu); 12903fb4c40fSThomas Huth if (rc) 1291a76ccff6SThomas Huth break; 12923fb4c40fSThomas Huth 1293800c1065SThomas Huth srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 12943fb4c40fSThomas Huth /* 1295a76ccff6SThomas Huth * As PF_VCPU will be used in fault handler, between 1296a76ccff6SThomas Huth * guest_enter and guest_exit should be no uaccess. 
12973fb4c40fSThomas Huth */ 12983fb4c40fSThomas Huth preempt_disable(); 12993fb4c40fSThomas Huth kvm_guest_enter(); 13003fb4c40fSThomas Huth preempt_enable(); 1301a76ccff6SThomas Huth exit_reason = sie64a(vcpu->arch.sie_block, 1302a76ccff6SThomas Huth vcpu->run->s.regs.gprs); 13033fb4c40fSThomas Huth kvm_guest_exit(); 1304800c1065SThomas Huth vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 13053fb4c40fSThomas Huth 13063fb4c40fSThomas Huth rc = vcpu_post_run(vcpu, exit_reason); 130727291e21SDavid Hildenbrand } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc); 13083fb4c40fSThomas Huth 1309800c1065SThomas Huth srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 1310e168bf8dSCarsten Otte return rc; 1311b0c632dbSHeiko Carstens } 1312b0c632dbSHeiko Carstens 1313b028ee3eSDavid Hildenbrand static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1314b028ee3eSDavid Hildenbrand { 1315b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; 1316b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; 1317b028ee3eSDavid Hildenbrand if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) 1318b028ee3eSDavid Hildenbrand kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix); 1319b028ee3eSDavid Hildenbrand if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) { 1320b028ee3eSDavid Hildenbrand memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128); 1321d3d692c8SDavid Hildenbrand /* some control register changes require a tlb flush */ 1322d3d692c8SDavid Hildenbrand kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 1323b028ee3eSDavid Hildenbrand } 1324b028ee3eSDavid Hildenbrand if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) { 1325b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm; 1326b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc; 1327b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr; 1328b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->pp = kvm_run->s.regs.pp; 1329b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea; 1330b028ee3eSDavid Hildenbrand } 1331b028ee3eSDavid Hildenbrand if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) { 1332b028ee3eSDavid Hildenbrand vcpu->arch.pfault_token = kvm_run->s.regs.pft; 1333b028ee3eSDavid Hildenbrand vcpu->arch.pfault_select = kvm_run->s.regs.pfs; 1334b028ee3eSDavid Hildenbrand vcpu->arch.pfault_compare = kvm_run->s.regs.pfc; 1335b028ee3eSDavid Hildenbrand } 1336b028ee3eSDavid Hildenbrand kvm_run->kvm_dirty_regs = 0; 1337b028ee3eSDavid Hildenbrand } 1338b028ee3eSDavid Hildenbrand 1339b028ee3eSDavid Hildenbrand static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1340b028ee3eSDavid Hildenbrand { 1341b028ee3eSDavid Hildenbrand kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; 1342b028ee3eSDavid Hildenbrand kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; 1343b028ee3eSDavid Hildenbrand kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu); 1344b028ee3eSDavid Hildenbrand memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); 1345b028ee3eSDavid Hildenbrand kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm; 1346b028ee3eSDavid Hildenbrand kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc; 1347b028ee3eSDavid Hildenbrand kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr; 1348b028ee3eSDavid Hildenbrand kvm_run->s.regs.pp = vcpu->arch.sie_block->pp; 1349b028ee3eSDavid Hildenbrand kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea; 1350b028ee3eSDavid Hildenbrand kvm_run->s.regs.pft = 
vcpu->arch.pfault_token; 1351b028ee3eSDavid Hildenbrand kvm_run->s.regs.pfs = vcpu->arch.pfault_select; 1352b028ee3eSDavid Hildenbrand kvm_run->s.regs.pfc = vcpu->arch.pfault_compare; 1353b028ee3eSDavid Hildenbrand } 1354b028ee3eSDavid Hildenbrand 1355b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1356b0c632dbSHeiko Carstens { 13578f2abe6aSChristian Borntraeger int rc; 1358b0c632dbSHeiko Carstens sigset_t sigsaved; 1359b0c632dbSHeiko Carstens 136027291e21SDavid Hildenbrand if (guestdbg_exit_pending(vcpu)) { 136127291e21SDavid Hildenbrand kvm_s390_prepare_debug_exit(vcpu); 136227291e21SDavid Hildenbrand return 0; 136327291e21SDavid Hildenbrand } 136427291e21SDavid Hildenbrand 1365b0c632dbSHeiko Carstens if (vcpu->sigset_active) 1366b0c632dbSHeiko Carstens sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); 1367b0c632dbSHeiko Carstens 13686352e4d2SDavid Hildenbrand if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { 13696852d7b6SDavid Hildenbrand kvm_s390_vcpu_start(vcpu); 13706352e4d2SDavid Hildenbrand } else if (is_vcpu_stopped(vcpu)) { 13716352e4d2SDavid Hildenbrand pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n", 13726352e4d2SDavid Hildenbrand vcpu->vcpu_id); 13736352e4d2SDavid Hildenbrand return -EINVAL; 13746352e4d2SDavid Hildenbrand } 1375b0c632dbSHeiko Carstens 1376b028ee3eSDavid Hildenbrand sync_regs(vcpu, kvm_run); 1377d7b0b5ebSCarsten Otte 1378dab4079dSHeiko Carstens might_fault(); 1379e168bf8dSCarsten Otte rc = __vcpu_run(vcpu); 13809ace903dSChristian Ehrhardt 1381b1d16c49SChristian Ehrhardt if (signal_pending(current) && !rc) { 1382b1d16c49SChristian Ehrhardt kvm_run->exit_reason = KVM_EXIT_INTR; 13838f2abe6aSChristian Borntraeger rc = -EINTR; 1384b1d16c49SChristian Ehrhardt } 13858f2abe6aSChristian Borntraeger 138627291e21SDavid Hildenbrand if (guestdbg_exit_pending(vcpu) && !rc) { 138727291e21SDavid Hildenbrand kvm_s390_prepare_debug_exit(vcpu); 138827291e21SDavid Hildenbrand rc = 0; 138927291e21SDavid Hildenbrand } 139027291e21SDavid Hildenbrand 1391b8e660b8SHeiko Carstens if (rc == -EOPNOTSUPP) { 13928f2abe6aSChristian Borntraeger /* intercept cannot be handled in-kernel, prepare kvm-run */ 13938f2abe6aSChristian Borntraeger kvm_run->exit_reason = KVM_EXIT_S390_SIEIC; 13948f2abe6aSChristian Borntraeger kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; 13958f2abe6aSChristian Borntraeger kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; 13968f2abe6aSChristian Borntraeger kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; 13978f2abe6aSChristian Borntraeger rc = 0; 13988f2abe6aSChristian Borntraeger } 13998f2abe6aSChristian Borntraeger 14008f2abe6aSChristian Borntraeger if (rc == -EREMOTE) { 14018f2abe6aSChristian Borntraeger /* intercept was handled, but userspace support is needed 14028f2abe6aSChristian Borntraeger * kvm_run has been prepared by the handler */ 14038f2abe6aSChristian Borntraeger rc = 0; 14048f2abe6aSChristian Borntraeger } 14058f2abe6aSChristian Borntraeger 1406b028ee3eSDavid Hildenbrand store_regs(vcpu, kvm_run); 1407d7b0b5ebSCarsten Otte 1408b0c632dbSHeiko Carstens if (vcpu->sigset_active) 1409b0c632dbSHeiko Carstens sigprocmask(SIG_SETMASK, &sigsaved, NULL); 1410b0c632dbSHeiko Carstens 1411b0c632dbSHeiko Carstens vcpu->stat.exit_userspace++; 14127e8e6ab4SHeiko Carstens return rc; 1413b0c632dbSHeiko Carstens } 1414b0c632dbSHeiko Carstens 1415b0c632dbSHeiko Carstens /* 1416b0c632dbSHeiko Carstens * store status at address 1417b0c632dbSHeiko Carstens * we have two special cases
1418b0c632dbSHeiko Carstens * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit 1419b0c632dbSHeiko Carstens * KVM_S390_STORE_STATUS_PREFIXED: -> prefix 1420b0c632dbSHeiko Carstens */ 1421d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa) 1422b0c632dbSHeiko Carstens { 1423092670cdSCarsten Otte unsigned char archmode = 1; 1424fda902cbSMichael Mueller unsigned int px; 1425178bd789SThomas Huth u64 clkcomp; 1426d0bce605SHeiko Carstens int rc; 1427b0c632dbSHeiko Carstens 1428d0bce605SHeiko Carstens if (gpa == KVM_S390_STORE_STATUS_NOADDR) { 1429d0bce605SHeiko Carstens if (write_guest_abs(vcpu, 163, &archmode, 1)) 1430b0c632dbSHeiko Carstens return -EFAULT; 1431d0bce605SHeiko Carstens gpa = SAVE_AREA_BASE; 1432d0bce605SHeiko Carstens } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) { 1433d0bce605SHeiko Carstens if (write_guest_real(vcpu, 163, &archmode, 1)) 1434b0c632dbSHeiko Carstens return -EFAULT; 1435d0bce605SHeiko Carstens gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE); 1436d0bce605SHeiko Carstens } 1437d0bce605SHeiko Carstens rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs), 1438d0bce605SHeiko Carstens vcpu->arch.guest_fpregs.fprs, 128); 1439d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs), 1440d0bce605SHeiko Carstens vcpu->run->s.regs.gprs, 128); 1441d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw), 1442d0bce605SHeiko Carstens &vcpu->arch.sie_block->gpsw, 16); 1443fda902cbSMichael Mueller px = kvm_s390_get_prefix(vcpu); 1444d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg), 1445fda902cbSMichael Mueller &px, 4); 1446d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, 1447d0bce605SHeiko Carstens gpa + offsetof(struct save_area, fp_ctrl_reg), 1448d0bce605SHeiko Carstens &vcpu->arch.guest_fpregs.fpc, 4); 1449d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg), 1450d0bce605SHeiko Carstens &vcpu->arch.sie_block->todpr, 4); 1451d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer), 1452d0bce605SHeiko Carstens &vcpu->arch.sie_block->cputm, 8); 1453178bd789SThomas Huth clkcomp = vcpu->arch.sie_block->ckc >> 8; 1454d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp), 1455d0bce605SHeiko Carstens &clkcomp, 8); 1456d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs), 1457d0bce605SHeiko Carstens &vcpu->run->s.regs.acrs, 64); 1458d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs), 1459d0bce605SHeiko Carstens &vcpu->arch.sie_block->gcr, 128); 1460d0bce605SHeiko Carstens return rc ? -EFAULT : 0; 1461b0c632dbSHeiko Carstens } 1462b0c632dbSHeiko Carstens 1463e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) 1464e879892cSThomas Huth { 1465e879892cSThomas Huth /* 1466e879892cSThomas Huth * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy 1467e879892cSThomas Huth * copying in vcpu load/put. 
Let's update our copies before we save 1468e879892cSThomas Huth * it into the save area 1469e879892cSThomas Huth */ 1470e879892cSThomas Huth save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); 1471e879892cSThomas Huth save_fp_regs(vcpu->arch.guest_fpregs.fprs); 1472e879892cSThomas Huth save_access_regs(vcpu->run->s.regs.acrs); 1473e879892cSThomas Huth 1474e879892cSThomas Huth return kvm_s390_store_status_unloaded(vcpu, addr); 1475e879892cSThomas Huth } 1476e879892cSThomas Huth 14778ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) 14788ad35755SDavid Hildenbrand { 14798ad35755SDavid Hildenbrand kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu); 14808ad35755SDavid Hildenbrand kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu); 14818ad35755SDavid Hildenbrand exit_sie_sync(vcpu); 14828ad35755SDavid Hildenbrand } 14838ad35755SDavid Hildenbrand 14848ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm) 14858ad35755SDavid Hildenbrand { 14868ad35755SDavid Hildenbrand unsigned int i; 14878ad35755SDavid Hildenbrand struct kvm_vcpu *vcpu; 14888ad35755SDavid Hildenbrand 14898ad35755SDavid Hildenbrand kvm_for_each_vcpu(i, vcpu, kvm) { 14908ad35755SDavid Hildenbrand __disable_ibs_on_vcpu(vcpu); 14918ad35755SDavid Hildenbrand } 14928ad35755SDavid Hildenbrand } 14938ad35755SDavid Hildenbrand 14948ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu) 14958ad35755SDavid Hildenbrand { 14968ad35755SDavid Hildenbrand kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu); 14978ad35755SDavid Hildenbrand kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu); 14988ad35755SDavid Hildenbrand exit_sie_sync(vcpu); 14998ad35755SDavid Hildenbrand } 15008ad35755SDavid Hildenbrand 15016852d7b6SDavid Hildenbrand void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) 15026852d7b6SDavid Hildenbrand { 15038ad35755SDavid Hildenbrand int i, online_vcpus, started_vcpus = 0; 15048ad35755SDavid Hildenbrand 15058ad35755SDavid Hildenbrand if (!is_vcpu_stopped(vcpu)) 15068ad35755SDavid Hildenbrand return; 15078ad35755SDavid Hildenbrand 15086852d7b6SDavid Hildenbrand trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1); 15098ad35755SDavid Hildenbrand /* Only one cpu at a time may enter/leave the STOPPED state. */ 1510433b9ee4SDavid Hildenbrand spin_lock(&vcpu->kvm->arch.start_stop_lock); 15118ad35755SDavid Hildenbrand online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); 15128ad35755SDavid Hildenbrand 15138ad35755SDavid Hildenbrand for (i = 0; i < online_vcpus; i++) { 15148ad35755SDavid Hildenbrand if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) 15158ad35755SDavid Hildenbrand started_vcpus++; 15168ad35755SDavid Hildenbrand } 15178ad35755SDavid Hildenbrand 15188ad35755SDavid Hildenbrand if (started_vcpus == 0) { 15198ad35755SDavid Hildenbrand /* we're the only active VCPU -> speed it up */ 15208ad35755SDavid Hildenbrand __enable_ibs_on_vcpu(vcpu); 15218ad35755SDavid Hildenbrand } else if (started_vcpus == 1) { 15228ad35755SDavid Hildenbrand /* 15238ad35755SDavid Hildenbrand * As we are starting a second VCPU, we have to disable 15248ad35755SDavid Hildenbrand * the IBS facility on all VCPUs to remove potentially 15258ad35755SDavid Hildenbrand * outstanding ENABLE requests.
15268ad35755SDavid Hildenbrand */ 15278ad35755SDavid Hildenbrand __disable_ibs_on_all_vcpus(vcpu->kvm); 15288ad35755SDavid Hildenbrand } 15298ad35755SDavid Hildenbrand 15306852d7b6SDavid Hildenbrand atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); 15318ad35755SDavid Hildenbrand /* 15328ad35755SDavid Hildenbrand * Another VCPU might have used IBS while we were offline. 15338ad35755SDavid Hildenbrand * Let's play safe and flush the VCPU at startup. 15348ad35755SDavid Hildenbrand */ 1535d3d692c8SDavid Hildenbrand kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 1536433b9ee4SDavid Hildenbrand spin_unlock(&vcpu->kvm->arch.start_stop_lock); 15378ad35755SDavid Hildenbrand return; 15386852d7b6SDavid Hildenbrand } 15396852d7b6SDavid Hildenbrand 15406852d7b6SDavid Hildenbrand void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) 15416852d7b6SDavid Hildenbrand { 15428ad35755SDavid Hildenbrand int i, online_vcpus, started_vcpus = 0; 15438ad35755SDavid Hildenbrand struct kvm_vcpu *started_vcpu = NULL; 15448ad35755SDavid Hildenbrand 15458ad35755SDavid Hildenbrand if (is_vcpu_stopped(vcpu)) 15468ad35755SDavid Hildenbrand return; 15478ad35755SDavid Hildenbrand 15486852d7b6SDavid Hildenbrand trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0); 15498ad35755SDavid Hildenbrand /* Only one cpu at a time may enter/leave the STOPPED state. */ 1550433b9ee4SDavid Hildenbrand spin_lock(&vcpu->kvm->arch.start_stop_lock); 15518ad35755SDavid Hildenbrand online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); 15528ad35755SDavid Hildenbrand 155332f5ff63SDavid Hildenbrand /* Need to lock access to action_bits to avoid a SIGP race condition */ 15544ae3c081SDavid Hildenbrand spin_lock(&vcpu->arch.local_int.lock); 15556852d7b6SDavid Hildenbrand atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); 155632f5ff63SDavid Hildenbrand 155732f5ff63SDavid Hildenbrand /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */ 155832f5ff63SDavid Hildenbrand vcpu->arch.local_int.action_bits &= 155932f5ff63SDavid Hildenbrand ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP); 15604ae3c081SDavid Hildenbrand spin_unlock(&vcpu->arch.local_int.lock); 156132f5ff63SDavid Hildenbrand 15628ad35755SDavid Hildenbrand __disable_ibs_on_vcpu(vcpu); 15638ad35755SDavid Hildenbrand 15648ad35755SDavid Hildenbrand for (i = 0; i < online_vcpus; i++) { 15658ad35755SDavid Hildenbrand if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) { 15668ad35755SDavid Hildenbrand started_vcpus++; 15678ad35755SDavid Hildenbrand started_vcpu = vcpu->kvm->vcpus[i]; 15688ad35755SDavid Hildenbrand } 15698ad35755SDavid Hildenbrand } 15708ad35755SDavid Hildenbrand 15718ad35755SDavid Hildenbrand if (started_vcpus == 1) { 15728ad35755SDavid Hildenbrand /* 15738ad35755SDavid Hildenbrand * As we only have one VCPU left, we want to enable the 15748ad35755SDavid Hildenbrand * IBS facility for that VCPU to speed it up. 
15758ad35755SDavid Hildenbrand */ 15768ad35755SDavid Hildenbrand __enable_ibs_on_vcpu(started_vcpu); 15778ad35755SDavid Hildenbrand } 15788ad35755SDavid Hildenbrand 1579433b9ee4SDavid Hildenbrand spin_unlock(&vcpu->kvm->arch.start_stop_lock); 15808ad35755SDavid Hildenbrand return; 15816852d7b6SDavid Hildenbrand } 15826852d7b6SDavid Hildenbrand 1583d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, 1584d6712df9SCornelia Huck struct kvm_enable_cap *cap) 1585d6712df9SCornelia Huck { 1586d6712df9SCornelia Huck int r; 1587d6712df9SCornelia Huck 1588d6712df9SCornelia Huck if (cap->flags) 1589d6712df9SCornelia Huck return -EINVAL; 1590d6712df9SCornelia Huck 1591d6712df9SCornelia Huck switch (cap->cap) { 1592fa6b7fe9SCornelia Huck case KVM_CAP_S390_CSS_SUPPORT: 1593fa6b7fe9SCornelia Huck if (!vcpu->kvm->arch.css_support) { 1594fa6b7fe9SCornelia Huck vcpu->kvm->arch.css_support = 1; 1595fa6b7fe9SCornelia Huck trace_kvm_s390_enable_css(vcpu->kvm); 1596fa6b7fe9SCornelia Huck } 1597fa6b7fe9SCornelia Huck r = 0; 1598fa6b7fe9SCornelia Huck break; 1599d6712df9SCornelia Huck default: 1600d6712df9SCornelia Huck r = -EINVAL; 1601d6712df9SCornelia Huck break; 1602d6712df9SCornelia Huck } 1603d6712df9SCornelia Huck return r; 1604d6712df9SCornelia Huck } 1605d6712df9SCornelia Huck 1606b0c632dbSHeiko Carstens long kvm_arch_vcpu_ioctl(struct file *filp, 1607b0c632dbSHeiko Carstens unsigned int ioctl, unsigned long arg) 1608b0c632dbSHeiko Carstens { 1609b0c632dbSHeiko Carstens struct kvm_vcpu *vcpu = filp->private_data; 1610b0c632dbSHeiko Carstens void __user *argp = (void __user *)arg; 1611800c1065SThomas Huth int idx; 1612bc923cc9SAvi Kivity long r; 1613b0c632dbSHeiko Carstens 161493736624SAvi Kivity switch (ioctl) { 161593736624SAvi Kivity case KVM_S390_INTERRUPT: { 1616ba5c1e9bSCarsten Otte struct kvm_s390_interrupt s390int; 1617ba5c1e9bSCarsten Otte 161893736624SAvi Kivity r = -EFAULT; 1619ba5c1e9bSCarsten Otte if (copy_from_user(&s390int, argp, sizeof(s390int))) 162093736624SAvi Kivity break; 162193736624SAvi Kivity r = kvm_s390_inject_vcpu(vcpu, &s390int); 162293736624SAvi Kivity break; 1623ba5c1e9bSCarsten Otte } 1624b0c632dbSHeiko Carstens case KVM_S390_STORE_STATUS: 1625800c1065SThomas Huth idx = srcu_read_lock(&vcpu->kvm->srcu); 1626bc923cc9SAvi Kivity r = kvm_s390_vcpu_store_status(vcpu, arg); 1627800c1065SThomas Huth srcu_read_unlock(&vcpu->kvm->srcu, idx); 1628bc923cc9SAvi Kivity break; 1629b0c632dbSHeiko Carstens case KVM_S390_SET_INITIAL_PSW: { 1630b0c632dbSHeiko Carstens psw_t psw; 1631b0c632dbSHeiko Carstens 1632bc923cc9SAvi Kivity r = -EFAULT; 1633b0c632dbSHeiko Carstens if (copy_from_user(&psw, argp, sizeof(psw))) 1634bc923cc9SAvi Kivity break; 1635bc923cc9SAvi Kivity r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw); 1636bc923cc9SAvi Kivity break; 1637b0c632dbSHeiko Carstens } 1638b0c632dbSHeiko Carstens case KVM_S390_INITIAL_RESET: 1639bc923cc9SAvi Kivity r = kvm_arch_vcpu_ioctl_initial_reset(vcpu); 1640bc923cc9SAvi Kivity break; 164114eebd91SCarsten Otte case KVM_SET_ONE_REG: 164214eebd91SCarsten Otte case KVM_GET_ONE_REG: { 164314eebd91SCarsten Otte struct kvm_one_reg reg; 164414eebd91SCarsten Otte r = -EFAULT; 164514eebd91SCarsten Otte if (copy_from_user(®, argp, sizeof(reg))) 164614eebd91SCarsten Otte break; 164714eebd91SCarsten Otte if (ioctl == KVM_SET_ONE_REG) 164814eebd91SCarsten Otte r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, ®); 164914eebd91SCarsten Otte else 165014eebd91SCarsten Otte r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, ®); 165114eebd91SCarsten 
Otte break; 165214eebd91SCarsten Otte } 165327e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL 165427e0393fSCarsten Otte case KVM_S390_UCAS_MAP: { 165527e0393fSCarsten Otte struct kvm_s390_ucas_mapping ucasmap; 165627e0393fSCarsten Otte 165727e0393fSCarsten Otte if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) { 165827e0393fSCarsten Otte r = -EFAULT; 165927e0393fSCarsten Otte break; 166027e0393fSCarsten Otte } 166127e0393fSCarsten Otte 166227e0393fSCarsten Otte if (!kvm_is_ucontrol(vcpu->kvm)) { 166327e0393fSCarsten Otte r = -EINVAL; 166427e0393fSCarsten Otte break; 166527e0393fSCarsten Otte } 166627e0393fSCarsten Otte 166727e0393fSCarsten Otte r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr, 166827e0393fSCarsten Otte ucasmap.vcpu_addr, ucasmap.length); 166927e0393fSCarsten Otte break; 167027e0393fSCarsten Otte } 167127e0393fSCarsten Otte case KVM_S390_UCAS_UNMAP: { 167227e0393fSCarsten Otte struct kvm_s390_ucas_mapping ucasmap; 167327e0393fSCarsten Otte 167427e0393fSCarsten Otte if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) { 167527e0393fSCarsten Otte r = -EFAULT; 167627e0393fSCarsten Otte break; 167727e0393fSCarsten Otte } 167827e0393fSCarsten Otte 167927e0393fSCarsten Otte if (!kvm_is_ucontrol(vcpu->kvm)) { 168027e0393fSCarsten Otte r = -EINVAL; 168127e0393fSCarsten Otte break; 168227e0393fSCarsten Otte } 168327e0393fSCarsten Otte 168427e0393fSCarsten Otte r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr, 168527e0393fSCarsten Otte ucasmap.length); 168627e0393fSCarsten Otte break; 168727e0393fSCarsten Otte } 168827e0393fSCarsten Otte #endif 1689ccc7910fSCarsten Otte case KVM_S390_VCPU_FAULT: { 1690527e30b4SMartin Schwidefsky r = gmap_fault(vcpu->arch.gmap, arg, 0); 1691ccc7910fSCarsten Otte break; 1692ccc7910fSCarsten Otte } 1693d6712df9SCornelia Huck case KVM_ENABLE_CAP: 1694d6712df9SCornelia Huck { 1695d6712df9SCornelia Huck struct kvm_enable_cap cap; 1696d6712df9SCornelia Huck r = -EFAULT; 1697d6712df9SCornelia Huck if (copy_from_user(&cap, argp, sizeof(cap))) 1698d6712df9SCornelia Huck break; 1699d6712df9SCornelia Huck r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); 1700d6712df9SCornelia Huck break; 1701d6712df9SCornelia Huck } 1702b0c632dbSHeiko Carstens default: 17033e6afcf1SCarsten Otte r = -ENOTTY; 1704b0c632dbSHeiko Carstens } 1705bc923cc9SAvi Kivity return r; 1706b0c632dbSHeiko Carstens } 1707b0c632dbSHeiko Carstens 17085b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) 17095b1c1493SCarsten Otte { 17105b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL 17115b1c1493SCarsten Otte if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET) 17125b1c1493SCarsten Otte && (kvm_is_ucontrol(vcpu->kvm))) { 17135b1c1493SCarsten Otte vmf->page = virt_to_page(vcpu->arch.sie_block); 17145b1c1493SCarsten Otte get_page(vmf->page); 17155b1c1493SCarsten Otte return 0; 17165b1c1493SCarsten Otte } 17175b1c1493SCarsten Otte #endif 17185b1c1493SCarsten Otte return VM_FAULT_SIGBUS; 17195b1c1493SCarsten Otte } 17205b1c1493SCarsten Otte 17215587027cSAneesh Kumar K.V int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, 17225587027cSAneesh Kumar K.V unsigned long npages) 1723db3fe4ebSTakuya Yoshikawa { 1724db3fe4ebSTakuya Yoshikawa return 0; 1725db3fe4ebSTakuya Yoshikawa } 1726db3fe4ebSTakuya Yoshikawa 1727b0c632dbSHeiko Carstens /* Section: memory related */ 1728f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm, 1729f7784b8eSMarcelo Tosatti struct kvm_memory_slot *memslot, 17307b6195a9STakuya Yoshikawa 
struct kvm_userspace_memory_region *mem, 17317b6195a9STakuya Yoshikawa enum kvm_mr_change change) 1732b0c632dbSHeiko Carstens { 1733dd2887e7SNick Wang /* A few sanity checks. We can have memory slots which have to be 1734dd2887e7SNick Wang located/ended at a segment boundary (1MB). The memory in userland is 1735dd2887e7SNick Wang ok to be fragmented into various different vmas. It is okay to mmap() 1736dd2887e7SNick Wang and munmap() stuff in this slot after doing this call at any time */ 1737b0c632dbSHeiko Carstens 1738598841caSCarsten Otte if (mem->userspace_addr & 0xffffful) 1739b0c632dbSHeiko Carstens return -EINVAL; 1740b0c632dbSHeiko Carstens 1741598841caSCarsten Otte if (mem->memory_size & 0xffffful) 1742b0c632dbSHeiko Carstens return -EINVAL; 1743b0c632dbSHeiko Carstens 1744f7784b8eSMarcelo Tosatti return 0; 1745f7784b8eSMarcelo Tosatti } 1746f7784b8eSMarcelo Tosatti 1747f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm, 1748f7784b8eSMarcelo Tosatti struct kvm_userspace_memory_region *mem, 17498482644aSTakuya Yoshikawa const struct kvm_memory_slot *old, 17508482644aSTakuya Yoshikawa enum kvm_mr_change change) 1751f7784b8eSMarcelo Tosatti { 1752f7850c92SCarsten Otte int rc; 1753f7784b8eSMarcelo Tosatti 17542cef4debSChristian Borntraeger /* If the basics of the memslot do not change, we do not want 17552cef4debSChristian Borntraeger * to update the gmap. Every update causes several unnecessary 17562cef4debSChristian Borntraeger * segment translation exceptions. This is usually handled just 17572cef4debSChristian Borntraeger * fine by the normal fault handler + gmap, but it will also 17582cef4debSChristian Borntraeger * cause faults on the prefix page of running guest CPUs. 17592cef4debSChristian Borntraeger */ 17602cef4debSChristian Borntraeger if (old->userspace_addr == mem->userspace_addr && 17612cef4debSChristian Borntraeger old->base_gfn * PAGE_SIZE == mem->guest_phys_addr && 17622cef4debSChristian Borntraeger old->npages * PAGE_SIZE == mem->memory_size) 17632cef4debSChristian Borntraeger return; 1764598841caSCarsten Otte 1765598841caSCarsten Otte rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, 1766598841caSCarsten Otte mem->guest_phys_addr, mem->memory_size); 1767598841caSCarsten Otte if (rc) 1768f7850c92SCarsten Otte printk(KERN_WARNING "kvm-s390: failed to commit memory region\n"); 1769598841caSCarsten Otte return; 1770b0c632dbSHeiko Carstens } 1771b0c632dbSHeiko Carstens 1772b0c632dbSHeiko Carstens static int __init kvm_s390_init(void) 1773b0c632dbSHeiko Carstens { 1774ef50f7acSChristian Borntraeger int ret; 17750ee75beaSAvi Kivity ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); 1776ef50f7acSChristian Borntraeger if (ret) 1777ef50f7acSChristian Borntraeger return ret; 1778ef50f7acSChristian Borntraeger 1779ef50f7acSChristian Borntraeger /* 1780ef50f7acSChristian Borntraeger * guests can ask for up to 255+1 double words, we need a full page 178125985edcSLucas De Marchi * to hold the maximum amount of facilities. On the other hand, we 1782ef50f7acSChristian Borntraeger * only set facilities that are known to work in KVM. 
1783ef50f7acSChristian Borntraeger */ 178478c4b59fSMichael Mueller vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA); 178578c4b59fSMichael Mueller if (!vfacilities) { 1786ef50f7acSChristian Borntraeger kvm_exit(); 1787ef50f7acSChristian Borntraeger return -ENOMEM; 1788ef50f7acSChristian Borntraeger } 178978c4b59fSMichael Mueller memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16); 17907be81a46SChristian Borntraeger vfacilities[0] &= 0xff82fffbf47c2000UL; 17917feb6bb8SMichael Mueller vfacilities[1] &= 0x005c000000000000UL; 1792ef50f7acSChristian Borntraeger return 0; 1793b0c632dbSHeiko Carstens } 1794b0c632dbSHeiko Carstens 1795b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void) 1796b0c632dbSHeiko Carstens { 179778c4b59fSMichael Mueller free_page((unsigned long) vfacilities); 1798b0c632dbSHeiko Carstens kvm_exit(); 1799b0c632dbSHeiko Carstens } 1800b0c632dbSHeiko Carstens 1801b0c632dbSHeiko Carstens module_init(kvm_s390_init); 1802b0c632dbSHeiko Carstens module_exit(kvm_s390_exit); 1803566af940SCornelia Huck 1804566af940SCornelia Huck /* 1805566af940SCornelia Huck * Enable autoloading of the kvm module. 1806566af940SCornelia Huck * Note that we add the module alias here instead of virt/kvm/kvm_main.c 1807566af940SCornelia Huck * since x86 takes a different approach. 1808566af940SCornelia Huck */ 1809566af940SCornelia Huck #include <linux/miscdevice.h> 1810566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR); 1811566af940SCornelia Huck MODULE_ALIAS("devname:kvm"); 1812
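/*
 * Editor's illustrative sketch, not part of kvm-s390.c: a minimal user-space
 * caller for the MP-state interface implemented by
 * kvm_arch_vcpu_ioctl_get_mpstate()/kvm_arch_vcpu_ioctl_set_mpstate() above.
 * "vcpu_fd" is assumed to be a vcpu file descriptor obtained via
 * KVM_CREATE_VCPU; VM setup and error reporting are omitted. As the code
 * above shows, the first KVM_SET_MP_STATE call also sets
 * kvm->arch.user_cpu_state_ctrl, so user space takes over start/stop control.
 */
#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

static int stop_and_query_vcpu(int vcpu_fd)
{
	struct kvm_mp_state mp = { .mp_state = KVM_MP_STATE_STOPPED };

	/* Stop the vcpu; handled by kvm_s390_vcpu_stop() in the code above. */
	if (ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp) < 0)
		return -1;

	/* Read the state back; only STOPPED and OPERATING are reported. */
	if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp) < 0)
		return -1;

	printf("vcpu mp_state: %u\n", mp.mp_state);
	return 0;
}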