/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
"diagnose_10", VCPU_STAT(diagnose_10) }, 97e28acfeaSChristian Borntraeger { "diagnose_44", VCPU_STAT(diagnose_44) }, 9841628d33SKonstantin Weitz { "diagnose_9c", VCPU_STAT(diagnose_9c) }, 99b0c632dbSHeiko Carstens { NULL } 100b0c632dbSHeiko Carstens }; 101b0c632dbSHeiko Carstens 1029d8d5786SMichael Mueller /* upper facilities limit for kvm */ 1039d8d5786SMichael Mueller unsigned long kvm_s390_fac_list_mask[] = { 1049d8d5786SMichael Mueller 0xff82fffbf4fc2000UL, 1059d8d5786SMichael Mueller 0x005c000000000000UL, 1069d8d5786SMichael Mueller }; 107b0c632dbSHeiko Carstens 1089d8d5786SMichael Mueller unsigned long kvm_s390_fac_list_mask_size(void) 10978c4b59fSMichael Mueller { 1109d8d5786SMichael Mueller BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64); 1119d8d5786SMichael Mueller return ARRAY_SIZE(kvm_s390_fac_list_mask); 11278c4b59fSMichael Mueller } 11378c4b59fSMichael Mueller 1149d8d5786SMichael Mueller static struct gmap_notifier gmap_notifier; 1159d8d5786SMichael Mueller 116b0c632dbSHeiko Carstens /* Section: not file related */ 11713a34e06SRadim Krčmář int kvm_arch_hardware_enable(void) 118b0c632dbSHeiko Carstens { 119b0c632dbSHeiko Carstens /* every s390 is virtualization enabled ;-) */ 12010474ae8SAlexander Graf return 0; 121b0c632dbSHeiko Carstens } 122b0c632dbSHeiko Carstens 1232c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address); 1242c70fe44SChristian Borntraeger 125b0c632dbSHeiko Carstens int kvm_arch_hardware_setup(void) 126b0c632dbSHeiko Carstens { 1272c70fe44SChristian Borntraeger gmap_notifier.notifier_call = kvm_gmap_notifier; 1282c70fe44SChristian Borntraeger gmap_register_ipte_notifier(&gmap_notifier); 129b0c632dbSHeiko Carstens return 0; 130b0c632dbSHeiko Carstens } 131b0c632dbSHeiko Carstens 132b0c632dbSHeiko Carstens void kvm_arch_hardware_unsetup(void) 133b0c632dbSHeiko Carstens { 1342c70fe44SChristian Borntraeger gmap_unregister_ipte_notifier(&gmap_notifier); 135b0c632dbSHeiko Carstens } 136b0c632dbSHeiko Carstens 137b0c632dbSHeiko Carstens int kvm_arch_init(void *opaque) 138b0c632dbSHeiko Carstens { 13984877d93SCornelia Huck /* Register floating interrupt controller interface. 
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_USER_SIGP:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	default:
		r = 0;
	}
	return r;
}

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		kvm->arch.use_vectors = MACHINE_HAS_VX;
		r = MACHINE_HAS_VX ? 0 : -EINVAL;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

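/*
 * KVM_S390_VM_MEM_CTRL attribute handlers: the get side below only
 * reports the current guest memory limit (the gmap's asce_end), while
 * the set side toggles CMMA and changes the limit; both enabling CMMA
 * and replacing the limit are only allowed as long as no VCPUs have
 * been created.
 */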
static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (new_limit > kvm->arch.gmap->asce_end)
			return -E2BIG;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
					   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *cur_vcpu;
	unsigned int vcpu_idx;
	u64 host_tod, gtod;
	int r;

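	/*
	 * The guest TOD is kept as a delta against the host TOD clock:
	 * kvm->arch.epoch = guest_tod - host_tod, and the same value is
	 * copied into every VCPU's SIE block so that the hardware
	 * presents host_tod + epoch to the guest.
	 */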
	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	kvm->arch.epoch = gtod - host_tod;
	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		exit_sie(cur_vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
					 sizeof(gtod_high)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 host_tod, gtod;
	int r;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	gtod = host_tod + kvm->arch.epoch;
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
		       sizeof(struct cpuid));
		kvm->arch.model.ibc = proc->ibc;
		memcpy(kvm->arch.model.fac->list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

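/*
 * While the PROCESSOR attribute above reflects the CPU model currently
 * applied to this guest (kvm->arch.model), the MACHINE attribute below
 * reports what the host offers: the real CPU id, the SCLP IBC value,
 * the facility mask used by KVM and the raw host facility list.
 */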
static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp_get_ibc();
	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

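/*
 * The helper above issues PQAP(QCI) with the function code in GR0 and
 * the address of a 128-byte info block in GR2, returning the condition
 * code.  kvm_s390_apxa_installed() only attempts the query when the
 * required facilities are present and treats bit 0x40 of the first
 * byte of the returned info block as the APXA indication.
 */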
static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(2) && test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
	get_cpu_id(cpu_id);
	cpu_id->version = 0xff;
}

static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));

	return 0;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
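	/*
	 * Stagger the SCA of consecutively created VMs within its page
	 * in 16-byte steps (wrapping below 2k), presumably so that the
	 * hot SCA fields of different guests do not all share the same
	 * cache lines.
	 */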
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	/*
	 * The architectural maximum amount of facilities is 16 kbit. To store
	 * this amount, 2 kbyte of memory is required. Thus we need a full
	 * page to hold the guest facility list (arch.model.fac->list) and the
	 * facility mask (arch.model.fac->mask). Its address size has to be
	 * 31 bits and word aligned.
	 */
	kvm->arch.model.fac =
		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.model.fac)
		goto out_nofac;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac->mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
	kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_crypto;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.use_vectors = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_nogmap:
	kfree(kvm->arch.crypto.crycb);
out_crypto:
	free_page((unsigned long)kvm->arch.model.fac);
out_nofac:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)kvm->arch.model.fac);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	if (test_kvm_facility(vcpu->kvm, 129))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

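/*
 * On load/put the host floating point or vector state is swapped with
 * the guest state: when the VM uses vector registers the guest copy
 * lives in the sync regs (vcpu->run->s.regs), otherwise the classic
 * FP registers in vcpu->arch.guest_fpregs are used.
 */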
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	if (vcpu->kvm->arch.use_vectors)
		save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
	else
		save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	if (vcpu->kvm->arch.use_vectors) {
		restore_fp_ctl(&vcpu->run->s.regs.fpc);
		restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
	} else {
		restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
		restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	}
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	if (vcpu->kvm->arch.use_vectors) {
		save_fp_ctl(&vcpu->run->s.regs.fpc);
		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
	} else {
		save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
		save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	}
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	if (vcpu->kvm->arch.use_vectors)
		restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
	else
		restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.cpu_id = model->cpu_id;
	vcpu->arch.sie_block->ibc = model->ibc;
	vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	kvm_s390_vcpu_setup_model(vcpu);

	vcpu->arch.sie_block->ecb = 6;
	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;
Mueller 118269d0d3a3SChristian Borntraeger vcpu->arch.sie_block->ecb2 = 8; 1183ea5f4969SDavid Hildenbrand vcpu->arch.sie_block->eca = 0xC1002000U; 1184217a4406SHeiko Carstens if (sclp_has_siif()) 1185217a4406SHeiko Carstens vcpu->arch.sie_block->eca |= 1; 1186ea5f4969SDavid Hildenbrand if (sclp_has_sigpif()) 1187ea5f4969SDavid Hildenbrand vcpu->arch.sie_block->eca |= 0x10000000U; 1188492d8642SThomas Huth vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; 11895a5e6536SMatthew Rosato 1190b31605c1SDominik Dingel if (kvm_s390_cmma_enabled(vcpu->kvm)) { 1191b31605c1SDominik Dingel rc = kvm_s390_vcpu_setup_cmma(vcpu); 1192b31605c1SDominik Dingel if (rc) 1193b31605c1SDominik Dingel return rc; 1194b31288faSKonstantin Weitz } 11950ac96cafSDavid Hildenbrand hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 1196ca872302SChristian Borntraeger vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; 11979d8d5786SMichael Mueller 11985102ee87STony Krowiak kvm_s390_vcpu_crypto_setup(vcpu); 11995102ee87STony Krowiak 1200b31605c1SDominik Dingel return rc; 1201b0c632dbSHeiko Carstens } 1202b0c632dbSHeiko Carstens 1203b0c632dbSHeiko Carstens struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, 1204b0c632dbSHeiko Carstens unsigned int id) 1205b0c632dbSHeiko Carstens { 12064d47555aSCarsten Otte struct kvm_vcpu *vcpu; 12077feb6bb8SMichael Mueller struct sie_page *sie_page; 12084d47555aSCarsten Otte int rc = -EINVAL; 1209b0c632dbSHeiko Carstens 12104d47555aSCarsten Otte if (id >= KVM_MAX_VCPUS) 12114d47555aSCarsten Otte goto out; 12124d47555aSCarsten Otte 12134d47555aSCarsten Otte rc = -ENOMEM; 12144d47555aSCarsten Otte 1215b110feafSMichael Mueller vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); 1216b0c632dbSHeiko Carstens if (!vcpu) 12174d47555aSCarsten Otte goto out; 1218b0c632dbSHeiko Carstens 12197feb6bb8SMichael Mueller sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL); 12207feb6bb8SMichael Mueller if (!sie_page) 1221b0c632dbSHeiko Carstens goto out_free_cpu; 1222b0c632dbSHeiko Carstens 12237feb6bb8SMichael Mueller vcpu->arch.sie_block = &sie_page->sie_block; 12247feb6bb8SMichael Mueller vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb; 1225*68c55750SEric Farman vcpu->arch.host_vregs = &sie_page->vregs; 12267feb6bb8SMichael Mueller 1227b0c632dbSHeiko Carstens vcpu->arch.sie_block->icpua = id; 122858f9460bSCarsten Otte if (!kvm_is_ucontrol(kvm)) { 122958f9460bSCarsten Otte if (!kvm->arch.sca) { 123058f9460bSCarsten Otte WARN_ON_ONCE(1); 123158f9460bSCarsten Otte goto out_free_cpu; 123258f9460bSCarsten Otte } 1233abf4a71eSCarsten Otte if (!kvm->arch.sca->cpu[id].sda) 123458f9460bSCarsten Otte kvm->arch.sca->cpu[id].sda = 123558f9460bSCarsten Otte (__u64) vcpu->arch.sie_block; 123658f9460bSCarsten Otte vcpu->arch.sie_block->scaoh = 123758f9460bSCarsten Otte (__u32)(((__u64)kvm->arch.sca) >> 32); 1238b0c632dbSHeiko Carstens vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; 1239fc34531dSChristian Borntraeger set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); 124058f9460bSCarsten Otte } 1241b0c632dbSHeiko Carstens 1242ba5c1e9bSCarsten Otte spin_lock_init(&vcpu->arch.local_int.lock); 1243ba5c1e9bSCarsten Otte vcpu->arch.local_int.float_int = &kvm->arch.float_int; 1244d0321a24SChristian Borntraeger vcpu->arch.local_int.wq = &vcpu->wq; 12455288fbf0SChristian Borntraeger vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags; 1246ba5c1e9bSCarsten Otte 1247b0c632dbSHeiko Carstens rc = kvm_vcpu_init(vcpu, kvm, id); 1248b0c632dbSHeiko 
Carstens if (rc) 12497b06bf2fSWei Yongjun goto out_free_sie_block; 1250b0c632dbSHeiko Carstens VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu, 1251b0c632dbSHeiko Carstens vcpu->arch.sie_block); 1252ade38c31SCornelia Huck trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block); 1253b0c632dbSHeiko Carstens 1254b0c632dbSHeiko Carstens return vcpu; 12557b06bf2fSWei Yongjun out_free_sie_block: 12567b06bf2fSWei Yongjun free_page((unsigned long)(vcpu->arch.sie_block)); 1257b0c632dbSHeiko Carstens out_free_cpu: 1258b110feafSMichael Mueller kmem_cache_free(kvm_vcpu_cache, vcpu); 12594d47555aSCarsten Otte out: 1260b0c632dbSHeiko Carstens return ERR_PTR(rc); 1261b0c632dbSHeiko Carstens } 1262b0c632dbSHeiko Carstens 1263b0c632dbSHeiko Carstens int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) 1264b0c632dbSHeiko Carstens { 12659a022067SDavid Hildenbrand return kvm_s390_vcpu_has_irq(vcpu, 0); 1266b0c632dbSHeiko Carstens } 1267b0c632dbSHeiko Carstens 126849b99e1eSChristian Borntraeger void s390_vcpu_block(struct kvm_vcpu *vcpu) 126949b99e1eSChristian Borntraeger { 127049b99e1eSChristian Borntraeger atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); 127149b99e1eSChristian Borntraeger } 127249b99e1eSChristian Borntraeger 127349b99e1eSChristian Borntraeger void s390_vcpu_unblock(struct kvm_vcpu *vcpu) 127449b99e1eSChristian Borntraeger { 127549b99e1eSChristian Borntraeger atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); 127649b99e1eSChristian Borntraeger } 127749b99e1eSChristian Borntraeger 127849b99e1eSChristian Borntraeger /* 127949b99e1eSChristian Borntraeger * Kick a guest cpu out of SIE and wait until SIE is not running. 128049b99e1eSChristian Borntraeger * If the CPU is not running (e.g. waiting as idle) the function will 128149b99e1eSChristian Borntraeger * return immediately. 
*/ 128249b99e1eSChristian Borntraeger void exit_sie(struct kvm_vcpu *vcpu) 128349b99e1eSChristian Borntraeger { 128449b99e1eSChristian Borntraeger atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags); 128549b99e1eSChristian Borntraeger while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) 128649b99e1eSChristian Borntraeger cpu_relax(); 128749b99e1eSChristian Borntraeger } 128849b99e1eSChristian Borntraeger 128949b99e1eSChristian Borntraeger /* Kick a guest cpu out of SIE and prevent SIE-reentry */ 129049b99e1eSChristian Borntraeger void exit_sie_sync(struct kvm_vcpu *vcpu) 129149b99e1eSChristian Borntraeger { 129249b99e1eSChristian Borntraeger s390_vcpu_block(vcpu); 129349b99e1eSChristian Borntraeger exit_sie(vcpu); 129449b99e1eSChristian Borntraeger } 129549b99e1eSChristian Borntraeger 12962c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address) 12972c70fe44SChristian Borntraeger { 12982c70fe44SChristian Borntraeger int i; 12992c70fe44SChristian Borntraeger struct kvm *kvm = gmap->private; 13002c70fe44SChristian Borntraeger struct kvm_vcpu *vcpu; 13012c70fe44SChristian Borntraeger 13022c70fe44SChristian Borntraeger kvm_for_each_vcpu(i, vcpu, kvm) { 13032c70fe44SChristian Borntraeger /* match against both prefix pages */ 1304fda902cbSMichael Mueller if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) { 13052c70fe44SChristian Borntraeger VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address); 13062c70fe44SChristian Borntraeger kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); 13072c70fe44SChristian Borntraeger exit_sie_sync(vcpu); 13082c70fe44SChristian Borntraeger } 13092c70fe44SChristian Borntraeger } 13102c70fe44SChristian Borntraeger } 13112c70fe44SChristian Borntraeger 1312b6d33834SChristoffer Dall int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) 1313b6d33834SChristoffer Dall { 1314b6d33834SChristoffer Dall /* kvm common code refers to this, but never calls it */ 1315b6d33834SChristoffer Dall BUG(); 1316b6d33834SChristoffer Dall return 0; 1317b6d33834SChristoffer Dall } 1318b6d33834SChristoffer Dall 131914eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, 132014eebd91SCarsten Otte struct kvm_one_reg *reg) 132114eebd91SCarsten Otte { 132214eebd91SCarsten Otte int r = -EINVAL; 132314eebd91SCarsten Otte 132414eebd91SCarsten Otte switch (reg->id) { 132529b7c71bSCarsten Otte case KVM_REG_S390_TODPR: 132629b7c71bSCarsten Otte r = put_user(vcpu->arch.sie_block->todpr, 132729b7c71bSCarsten Otte (u32 __user *)reg->addr); 132829b7c71bSCarsten Otte break; 132929b7c71bSCarsten Otte case KVM_REG_S390_EPOCHDIFF: 133029b7c71bSCarsten Otte r = put_user(vcpu->arch.sie_block->epoch, 133129b7c71bSCarsten Otte (u64 __user *)reg->addr); 133229b7c71bSCarsten Otte break; 133346a6dd1cSJason J. herne case KVM_REG_S390_CPU_TIMER: 133446a6dd1cSJason J. herne r = put_user(vcpu->arch.sie_block->cputm, 133546a6dd1cSJason J. herne (u64 __user *)reg->addr); 133646a6dd1cSJason J. herne break; 133746a6dd1cSJason J. herne case KVM_REG_S390_CLOCK_COMP: 133846a6dd1cSJason J. herne r = put_user(vcpu->arch.sie_block->ckc, 133946a6dd1cSJason J. herne (u64 __user *)reg->addr); 134046a6dd1cSJason J. 
herne break; 1341536336c2SDominik Dingel case KVM_REG_S390_PFTOKEN: 1342536336c2SDominik Dingel r = put_user(vcpu->arch.pfault_token, 1343536336c2SDominik Dingel (u64 __user *)reg->addr); 1344536336c2SDominik Dingel break; 1345536336c2SDominik Dingel case KVM_REG_S390_PFCOMPARE: 1346536336c2SDominik Dingel r = put_user(vcpu->arch.pfault_compare, 1347536336c2SDominik Dingel (u64 __user *)reg->addr); 1348536336c2SDominik Dingel break; 1349536336c2SDominik Dingel case KVM_REG_S390_PFSELECT: 1350536336c2SDominik Dingel r = put_user(vcpu->arch.pfault_select, 1351536336c2SDominik Dingel (u64 __user *)reg->addr); 1352536336c2SDominik Dingel break; 1353672550fbSChristian Borntraeger case KVM_REG_S390_PP: 1354672550fbSChristian Borntraeger r = put_user(vcpu->arch.sie_block->pp, 1355672550fbSChristian Borntraeger (u64 __user *)reg->addr); 1356672550fbSChristian Borntraeger break; 1357afa45ff5SChristian Borntraeger case KVM_REG_S390_GBEA: 1358afa45ff5SChristian Borntraeger r = put_user(vcpu->arch.sie_block->gbea, 1359afa45ff5SChristian Borntraeger (u64 __user *)reg->addr); 1360afa45ff5SChristian Borntraeger break; 136114eebd91SCarsten Otte default: 136214eebd91SCarsten Otte break; 136314eebd91SCarsten Otte } 136414eebd91SCarsten Otte 136514eebd91SCarsten Otte return r; 136614eebd91SCarsten Otte } 136714eebd91SCarsten Otte 136814eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, 136914eebd91SCarsten Otte struct kvm_one_reg *reg) 137014eebd91SCarsten Otte { 137114eebd91SCarsten Otte int r = -EINVAL; 137214eebd91SCarsten Otte 137314eebd91SCarsten Otte switch (reg->id) { 137429b7c71bSCarsten Otte case KVM_REG_S390_TODPR: 137529b7c71bSCarsten Otte r = get_user(vcpu->arch.sie_block->todpr, 137629b7c71bSCarsten Otte (u32 __user *)reg->addr); 137729b7c71bSCarsten Otte break; 137829b7c71bSCarsten Otte case KVM_REG_S390_EPOCHDIFF: 137929b7c71bSCarsten Otte r = get_user(vcpu->arch.sie_block->epoch, 138029b7c71bSCarsten Otte (u64 __user *)reg->addr); 138129b7c71bSCarsten Otte break; 138246a6dd1cSJason J. herne case KVM_REG_S390_CPU_TIMER: 138346a6dd1cSJason J. herne r = get_user(vcpu->arch.sie_block->cputm, 138446a6dd1cSJason J. herne (u64 __user *)reg->addr); 138546a6dd1cSJason J. herne break; 138646a6dd1cSJason J. herne case KVM_REG_S390_CLOCK_COMP: 138746a6dd1cSJason J. herne r = get_user(vcpu->arch.sie_block->ckc, 138846a6dd1cSJason J. herne (u64 __user *)reg->addr); 138946a6dd1cSJason J. 
herne break; 1390536336c2SDominik Dingel case KVM_REG_S390_PFTOKEN: 1391536336c2SDominik Dingel r = get_user(vcpu->arch.pfault_token, 1392536336c2SDominik Dingel (u64 __user *)reg->addr); 13939fbd8082SDavid Hildenbrand if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) 13949fbd8082SDavid Hildenbrand kvm_clear_async_pf_completion_queue(vcpu); 1395536336c2SDominik Dingel break; 1396536336c2SDominik Dingel case KVM_REG_S390_PFCOMPARE: 1397536336c2SDominik Dingel r = get_user(vcpu->arch.pfault_compare, 1398536336c2SDominik Dingel (u64 __user *)reg->addr); 1399536336c2SDominik Dingel break; 1400536336c2SDominik Dingel case KVM_REG_S390_PFSELECT: 1401536336c2SDominik Dingel r = get_user(vcpu->arch.pfault_select, 1402536336c2SDominik Dingel (u64 __user *)reg->addr); 1403536336c2SDominik Dingel break; 1404672550fbSChristian Borntraeger case KVM_REG_S390_PP: 1405672550fbSChristian Borntraeger r = get_user(vcpu->arch.sie_block->pp, 1406672550fbSChristian Borntraeger (u64 __user *)reg->addr); 1407672550fbSChristian Borntraeger break; 1408afa45ff5SChristian Borntraeger case KVM_REG_S390_GBEA: 1409afa45ff5SChristian Borntraeger r = get_user(vcpu->arch.sie_block->gbea, 1410afa45ff5SChristian Borntraeger (u64 __user *)reg->addr); 1411afa45ff5SChristian Borntraeger break; 141214eebd91SCarsten Otte default: 141314eebd91SCarsten Otte break; 141414eebd91SCarsten Otte } 141514eebd91SCarsten Otte 141614eebd91SCarsten Otte return r; 141714eebd91SCarsten Otte } 1418b6d33834SChristoffer Dall 1419b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu) 1420b0c632dbSHeiko Carstens { 1421b0c632dbSHeiko Carstens kvm_s390_vcpu_initial_reset(vcpu); 1422b0c632dbSHeiko Carstens return 0; 1423b0c632dbSHeiko Carstens } 1424b0c632dbSHeiko Carstens 1425b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 1426b0c632dbSHeiko Carstens { 14275a32c1afSChristian Borntraeger memcpy(&vcpu->run->s.regs.gprs, ®s->gprs, sizeof(regs->gprs)); 1428b0c632dbSHeiko Carstens return 0; 1429b0c632dbSHeiko Carstens } 1430b0c632dbSHeiko Carstens 1431b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 1432b0c632dbSHeiko Carstens { 14335a32c1afSChristian Borntraeger memcpy(®s->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs)); 1434b0c632dbSHeiko Carstens return 0; 1435b0c632dbSHeiko Carstens } 1436b0c632dbSHeiko Carstens 1437b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 1438b0c632dbSHeiko Carstens struct kvm_sregs *sregs) 1439b0c632dbSHeiko Carstens { 144059674c1aSChristian Borntraeger memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs)); 1441b0c632dbSHeiko Carstens memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); 144259674c1aSChristian Borntraeger restore_access_regs(vcpu->run->s.regs.acrs); 1443b0c632dbSHeiko Carstens return 0; 1444b0c632dbSHeiko Carstens } 1445b0c632dbSHeiko Carstens 1446b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, 1447b0c632dbSHeiko Carstens struct kvm_sregs *sregs) 1448b0c632dbSHeiko Carstens { 144959674c1aSChristian Borntraeger memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs)); 1450b0c632dbSHeiko Carstens memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs)); 1451b0c632dbSHeiko Carstens return 0; 1452b0c632dbSHeiko Carstens } 1453b0c632dbSHeiko Carstens 1454b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct 
kvm_fpu *fpu) 1455b0c632dbSHeiko Carstens { 14564725c860SMartin Schwidefsky if (test_fp_ctl(fpu->fpc)) 14574725c860SMartin Schwidefsky return -EINVAL; 1458b0c632dbSHeiko Carstens memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs)); 14594725c860SMartin Schwidefsky vcpu->arch.guest_fpregs.fpc = fpu->fpc; 14604725c860SMartin Schwidefsky restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc); 14614725c860SMartin Schwidefsky restore_fp_regs(vcpu->arch.guest_fpregs.fprs); 1462b0c632dbSHeiko Carstens return 0; 1463b0c632dbSHeiko Carstens } 1464b0c632dbSHeiko Carstens 1465b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 1466b0c632dbSHeiko Carstens { 1467b0c632dbSHeiko Carstens memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs)); 1468b0c632dbSHeiko Carstens fpu->fpc = vcpu->arch.guest_fpregs.fpc; 1469b0c632dbSHeiko Carstens return 0; 1470b0c632dbSHeiko Carstens } 1471b0c632dbSHeiko Carstens 1472b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw) 1473b0c632dbSHeiko Carstens { 1474b0c632dbSHeiko Carstens int rc = 0; 1475b0c632dbSHeiko Carstens 14767a42fdc2SDavid Hildenbrand if (!is_vcpu_stopped(vcpu)) 1477b0c632dbSHeiko Carstens rc = -EBUSY; 1478d7b0b5ebSCarsten Otte else { 1479d7b0b5ebSCarsten Otte vcpu->run->psw_mask = psw.mask; 1480d7b0b5ebSCarsten Otte vcpu->run->psw_addr = psw.addr; 1481d7b0b5ebSCarsten Otte } 1482b0c632dbSHeiko Carstens return rc; 1483b0c632dbSHeiko Carstens } 1484b0c632dbSHeiko Carstens 1485b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, 1486b0c632dbSHeiko Carstens struct kvm_translation *tr) 1487b0c632dbSHeiko Carstens { 1488b0c632dbSHeiko Carstens return -EINVAL; /* not implemented yet */ 1489b0c632dbSHeiko Carstens } 1490b0c632dbSHeiko Carstens 149127291e21SDavid Hildenbrand #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \ 149227291e21SDavid Hildenbrand KVM_GUESTDBG_USE_HW_BP | \ 149327291e21SDavid Hildenbrand KVM_GUESTDBG_ENABLE) 149427291e21SDavid Hildenbrand 1495d0bfb940SJan Kiszka int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 1496d0bfb940SJan Kiszka struct kvm_guest_debug *dbg) 1497b0c632dbSHeiko Carstens { 149827291e21SDavid Hildenbrand int rc = 0; 149927291e21SDavid Hildenbrand 150027291e21SDavid Hildenbrand vcpu->guest_debug = 0; 150127291e21SDavid Hildenbrand kvm_s390_clear_bp_data(vcpu); 150227291e21SDavid Hildenbrand 15032de3bfc2SDavid Hildenbrand if (dbg->control & ~VALID_GUESTDBG_FLAGS) 150427291e21SDavid Hildenbrand return -EINVAL; 150527291e21SDavid Hildenbrand 150627291e21SDavid Hildenbrand if (dbg->control & KVM_GUESTDBG_ENABLE) { 150727291e21SDavid Hildenbrand vcpu->guest_debug = dbg->control; 150827291e21SDavid Hildenbrand /* enforce guest PER */ 150927291e21SDavid Hildenbrand atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); 151027291e21SDavid Hildenbrand 151127291e21SDavid Hildenbrand if (dbg->control & KVM_GUESTDBG_USE_HW_BP) 151227291e21SDavid Hildenbrand rc = kvm_s390_import_bp_data(vcpu, dbg); 151327291e21SDavid Hildenbrand } else { 151427291e21SDavid Hildenbrand atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); 151527291e21SDavid Hildenbrand vcpu->arch.guestdbg.last_bp = 0; 151627291e21SDavid Hildenbrand } 151727291e21SDavid Hildenbrand 151827291e21SDavid Hildenbrand if (rc) { 151927291e21SDavid Hildenbrand vcpu->guest_debug = 0; 152027291e21SDavid Hildenbrand kvm_s390_clear_bp_data(vcpu); 152127291e21SDavid Hildenbrand 
atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); 152227291e21SDavid Hildenbrand } 152327291e21SDavid Hildenbrand 152427291e21SDavid Hildenbrand return rc; 1525b0c632dbSHeiko Carstens } 1526b0c632dbSHeiko Carstens 152762d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 152862d9f0dbSMarcelo Tosatti struct kvm_mp_state *mp_state) 152962d9f0dbSMarcelo Tosatti { 15306352e4d2SDavid Hildenbrand /* CHECK_STOP and LOAD are not supported yet */ 15316352e4d2SDavid Hildenbrand return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED : 15326352e4d2SDavid Hildenbrand KVM_MP_STATE_OPERATING; 153362d9f0dbSMarcelo Tosatti } 153462d9f0dbSMarcelo Tosatti 153562d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, 153662d9f0dbSMarcelo Tosatti struct kvm_mp_state *mp_state) 153762d9f0dbSMarcelo Tosatti { 15386352e4d2SDavid Hildenbrand int rc = 0; 15396352e4d2SDavid Hildenbrand 15406352e4d2SDavid Hildenbrand /* user space knows about this interface - let it control the state */ 15416352e4d2SDavid Hildenbrand vcpu->kvm->arch.user_cpu_state_ctrl = 1; 15426352e4d2SDavid Hildenbrand 15436352e4d2SDavid Hildenbrand switch (mp_state->mp_state) { 15446352e4d2SDavid Hildenbrand case KVM_MP_STATE_STOPPED: 15456352e4d2SDavid Hildenbrand kvm_s390_vcpu_stop(vcpu); 15466352e4d2SDavid Hildenbrand break; 15476352e4d2SDavid Hildenbrand case KVM_MP_STATE_OPERATING: 15486352e4d2SDavid Hildenbrand kvm_s390_vcpu_start(vcpu); 15496352e4d2SDavid Hildenbrand break; 15506352e4d2SDavid Hildenbrand case KVM_MP_STATE_LOAD: 15516352e4d2SDavid Hildenbrand case KVM_MP_STATE_CHECK_STOP: 15526352e4d2SDavid Hildenbrand /* fall through - CHECK_STOP and LOAD are not supported yet */ 15536352e4d2SDavid Hildenbrand default: 15546352e4d2SDavid Hildenbrand rc = -ENXIO; 15556352e4d2SDavid Hildenbrand } 15566352e4d2SDavid Hildenbrand 15576352e4d2SDavid Hildenbrand return rc; 155862d9f0dbSMarcelo Tosatti } 155962d9f0dbSMarcelo Tosatti 1560b31605c1SDominik Dingel bool kvm_s390_cmma_enabled(struct kvm *kvm) 1561b31605c1SDominik Dingel { 1562b31605c1SDominik Dingel if (!MACHINE_IS_LPAR) 1563b31605c1SDominik Dingel return false; 1564b31605c1SDominik Dingel /* only enable for z10 and later */ 1565b31605c1SDominik Dingel if (!MACHINE_HAS_EDAT1) 1566b31605c1SDominik Dingel return false; 1567b31605c1SDominik Dingel if (!kvm->arch.use_cmma) 1568b31605c1SDominik Dingel return false; 1569b31605c1SDominik Dingel return true; 1570b31605c1SDominik Dingel } 1571b31605c1SDominik Dingel 15728ad35755SDavid Hildenbrand static bool ibs_enabled(struct kvm_vcpu *vcpu) 15738ad35755SDavid Hildenbrand { 15748ad35755SDavid Hildenbrand return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS; 15758ad35755SDavid Hildenbrand } 15768ad35755SDavid Hildenbrand 15772c70fe44SChristian Borntraeger static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) 15782c70fe44SChristian Borntraeger { 15798ad35755SDavid Hildenbrand retry: 15808ad35755SDavid Hildenbrand s390_vcpu_unblock(vcpu); 15812c70fe44SChristian Borntraeger /* 15822c70fe44SChristian Borntraeger * We use MMU_RELOAD just to re-arm the ipte notifier for the 15832c70fe44SChristian Borntraeger * guest prefix page. gmap_ipte_notify will wait on the ptl lock. 15842c70fe44SChristian Borntraeger * This ensures that the ipte instruction for this request has 15852c70fe44SChristian Borntraeger * already finished. We might race against a second unmapper that 15862c70fe44SChristian Borntraeger * wants to set the blocking bit. Lets just retry the request loop. 
15872c70fe44SChristian Borntraeger */ 15888ad35755SDavid Hildenbrand if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) { 15892c70fe44SChristian Borntraeger int rc; 15902c70fe44SChristian Borntraeger rc = gmap_ipte_notify(vcpu->arch.gmap, 1591fda902cbSMichael Mueller kvm_s390_get_prefix(vcpu), 15922c70fe44SChristian Borntraeger PAGE_SIZE * 2); 15932c70fe44SChristian Borntraeger if (rc) 15942c70fe44SChristian Borntraeger return rc; 15958ad35755SDavid Hildenbrand goto retry; 15962c70fe44SChristian Borntraeger } 15978ad35755SDavid Hildenbrand 1598d3d692c8SDavid Hildenbrand if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { 1599d3d692c8SDavid Hildenbrand vcpu->arch.sie_block->ihcpu = 0xffff; 1600d3d692c8SDavid Hildenbrand goto retry; 1601d3d692c8SDavid Hildenbrand } 1602d3d692c8SDavid Hildenbrand 16038ad35755SDavid Hildenbrand if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) { 16048ad35755SDavid Hildenbrand if (!ibs_enabled(vcpu)) { 16058ad35755SDavid Hildenbrand trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1); 16068ad35755SDavid Hildenbrand atomic_set_mask(CPUSTAT_IBS, 16078ad35755SDavid Hildenbrand &vcpu->arch.sie_block->cpuflags); 16088ad35755SDavid Hildenbrand } 16098ad35755SDavid Hildenbrand goto retry; 16108ad35755SDavid Hildenbrand } 16118ad35755SDavid Hildenbrand 16128ad35755SDavid Hildenbrand if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) { 16138ad35755SDavid Hildenbrand if (ibs_enabled(vcpu)) { 16148ad35755SDavid Hildenbrand trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0); 16158ad35755SDavid Hildenbrand atomic_clear_mask(CPUSTAT_IBS, 16168ad35755SDavid Hildenbrand &vcpu->arch.sie_block->cpuflags); 16178ad35755SDavid Hildenbrand } 16188ad35755SDavid Hildenbrand goto retry; 16198ad35755SDavid Hildenbrand } 16208ad35755SDavid Hildenbrand 16210759d068SDavid Hildenbrand /* nothing to do, just clear the request */ 16220759d068SDavid Hildenbrand clear_bit(KVM_REQ_UNHALT, &vcpu->requests); 16230759d068SDavid Hildenbrand 16242c70fe44SChristian Borntraeger return 0; 16252c70fe44SChristian Borntraeger } 16262c70fe44SChristian Borntraeger 1627fa576c58SThomas Huth /** 1628fa576c58SThomas Huth * kvm_arch_fault_in_page - fault-in guest page if necessary 1629fa576c58SThomas Huth * @vcpu: The corresponding virtual cpu 1630fa576c58SThomas Huth * @gpa: Guest physical address 1631fa576c58SThomas Huth * @writable: Whether the page should be writable or not 1632fa576c58SThomas Huth * 1633fa576c58SThomas Huth * Make sure that a guest page has been faulted-in on the host. 1634fa576c58SThomas Huth * 1635fa576c58SThomas Huth * Return: Zero on success, negative error code otherwise. 1636fa576c58SThomas Huth */ 1637fa576c58SThomas Huth long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable) 163824eb3a82SDominik Dingel { 1639527e30b4SMartin Schwidefsky return gmap_fault(vcpu->arch.gmap, gpa, 1640527e30b4SMartin Schwidefsky writable ? 
FAULT_FLAG_WRITE : 0); 164124eb3a82SDominik Dingel } 164224eb3a82SDominik Dingel 16433c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token, 16443c038e6bSDominik Dingel unsigned long token) 16453c038e6bSDominik Dingel { 16463c038e6bSDominik Dingel struct kvm_s390_interrupt inti; 1647383d0b05SJens Freimann struct kvm_s390_irq irq; 16483c038e6bSDominik Dingel 16493c038e6bSDominik Dingel if (start_token) { 1650383d0b05SJens Freimann irq.u.ext.ext_params2 = token; 1651383d0b05SJens Freimann irq.type = KVM_S390_INT_PFAULT_INIT; 1652383d0b05SJens Freimann WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq)); 16533c038e6bSDominik Dingel } else { 16543c038e6bSDominik Dingel inti.type = KVM_S390_INT_PFAULT_DONE; 1655383d0b05SJens Freimann inti.parm64 = token; 16563c038e6bSDominik Dingel WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); 16573c038e6bSDominik Dingel } 16583c038e6bSDominik Dingel } 16593c038e6bSDominik Dingel 16603c038e6bSDominik Dingel void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, 16613c038e6bSDominik Dingel struct kvm_async_pf *work) 16623c038e6bSDominik Dingel { 16633c038e6bSDominik Dingel trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token); 16643c038e6bSDominik Dingel __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token); 16653c038e6bSDominik Dingel } 16663c038e6bSDominik Dingel 16673c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, 16683c038e6bSDominik Dingel struct kvm_async_pf *work) 16693c038e6bSDominik Dingel { 16703c038e6bSDominik Dingel trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token); 16713c038e6bSDominik Dingel __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token); 16723c038e6bSDominik Dingel } 16733c038e6bSDominik Dingel 16743c038e6bSDominik Dingel void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, 16753c038e6bSDominik Dingel struct kvm_async_pf *work) 16763c038e6bSDominik Dingel { 16773c038e6bSDominik Dingel /* s390 will always inject the page directly */ 16783c038e6bSDominik Dingel } 16793c038e6bSDominik Dingel 16803c038e6bSDominik Dingel bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) 16813c038e6bSDominik Dingel { 16823c038e6bSDominik Dingel /* 16833c038e6bSDominik Dingel * s390 will always inject the page directly, 16843c038e6bSDominik Dingel * but we still want check_async_completion to cleanup 16853c038e6bSDominik Dingel */ 16863c038e6bSDominik Dingel return true; 16873c038e6bSDominik Dingel } 16883c038e6bSDominik Dingel 16893c038e6bSDominik Dingel static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu) 16903c038e6bSDominik Dingel { 16913c038e6bSDominik Dingel hva_t hva; 16923c038e6bSDominik Dingel struct kvm_arch_async_pf arch; 16933c038e6bSDominik Dingel int rc; 16943c038e6bSDominik Dingel 16953c038e6bSDominik Dingel if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) 16963c038e6bSDominik Dingel return 0; 16973c038e6bSDominik Dingel if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) != 16983c038e6bSDominik Dingel vcpu->arch.pfault_compare) 16993c038e6bSDominik Dingel return 0; 17003c038e6bSDominik Dingel if (psw_extint_disabled(vcpu)) 17013c038e6bSDominik Dingel return 0; 17029a022067SDavid Hildenbrand if (kvm_s390_vcpu_has_irq(vcpu, 0)) 17033c038e6bSDominik Dingel return 0; 17043c038e6bSDominik Dingel if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul)) 17053c038e6bSDominik Dingel return 0; 17063c038e6bSDominik Dingel if (!vcpu->arch.gmap->pfault_enabled) 17073c038e6bSDominik Dingel return 0; 
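/*
 * Illustrative summary, not in the original source: the early returns
 * above only let an async pfault be set up when the guest has opted in,
 * i.e. roughly when
 *
 *	vcpu->arch.pfault_token != KVM_S390_PFAULT_TOKEN_INVALID &&
 *	(gpsw.mask & vcpu->arch.pfault_select) == vcpu->arch.pfault_compare &&
 *	!psw_extint_disabled(vcpu) &&
 *	!kvm_s390_vcpu_has_irq(vcpu, 0) &&
 *	(gcr[0] & 0x200ul) &&
 *	vcpu->arch.gmap->pfault_enabled
 *
 * Only then is the 8-byte completion token read from guest memory and
 * kvm_setup_async_pf() called below.
 */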
17083c038e6bSDominik Dingel 170981480cc1SHeiko Carstens hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr)); 171081480cc1SHeiko Carstens hva += current->thread.gmap_addr & ~PAGE_MASK; 171181480cc1SHeiko Carstens if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8)) 17123c038e6bSDominik Dingel return 0; 17133c038e6bSDominik Dingel 17143c038e6bSDominik Dingel rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch); 17153c038e6bSDominik Dingel return rc; 17163c038e6bSDominik Dingel } 17173c038e6bSDominik Dingel 17183fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu) 1719b0c632dbSHeiko Carstens { 17203fb4c40fSThomas Huth int rc, cpuflags; 1721e168bf8dSCarsten Otte 17223c038e6bSDominik Dingel /* 17233c038e6bSDominik Dingel * On s390 notifications for arriving pages will be delivered directly 17243c038e6bSDominik Dingel * to the guest but the house keeping for completed pfaults is 17253c038e6bSDominik Dingel * handled outside the worker. 17263c038e6bSDominik Dingel */ 17273c038e6bSDominik Dingel kvm_check_async_pf_completion(vcpu); 17283c038e6bSDominik Dingel 17295a32c1afSChristian Borntraeger memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16); 1730b0c632dbSHeiko Carstens 1731b0c632dbSHeiko Carstens if (need_resched()) 1732b0c632dbSHeiko Carstens schedule(); 1733b0c632dbSHeiko Carstens 1734d3a73acbSMartin Schwidefsky if (test_cpu_flag(CIF_MCCK_PENDING)) 173571cde587SChristian Borntraeger s390_handle_mcck(); 173671cde587SChristian Borntraeger 173779395031SJens Freimann if (!kvm_is_ucontrol(vcpu->kvm)) { 173879395031SJens Freimann rc = kvm_s390_deliver_pending_interrupts(vcpu); 173979395031SJens Freimann if (rc) 174079395031SJens Freimann return rc; 174179395031SJens Freimann } 17420ff31867SCarsten Otte 17432c70fe44SChristian Borntraeger rc = kvm_s390_handle_requests(vcpu); 17442c70fe44SChristian Borntraeger if (rc) 17452c70fe44SChristian Borntraeger return rc; 17462c70fe44SChristian Borntraeger 174727291e21SDavid Hildenbrand if (guestdbg_enabled(vcpu)) { 174827291e21SDavid Hildenbrand kvm_s390_backup_guest_per_regs(vcpu); 174927291e21SDavid Hildenbrand kvm_s390_patch_guest_per_regs(vcpu); 175027291e21SDavid Hildenbrand } 175127291e21SDavid Hildenbrand 1752b0c632dbSHeiko Carstens vcpu->arch.sie_block->icptcode = 0; 17533fb4c40fSThomas Huth cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); 17543fb4c40fSThomas Huth VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags); 17553fb4c40fSThomas Huth trace_kvm_s390_sie_enter(vcpu, cpuflags); 17562b29a9fdSDominik Dingel 17573fb4c40fSThomas Huth return 0; 17583fb4c40fSThomas Huth } 17593fb4c40fSThomas Huth 1760492d8642SThomas Huth static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu) 1761492d8642SThomas Huth { 1762492d8642SThomas Huth psw_t *psw = &vcpu->arch.sie_block->gpsw; 1763492d8642SThomas Huth u8 opcode; 1764492d8642SThomas Huth int rc; 1765492d8642SThomas Huth 1766492d8642SThomas Huth VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); 1767492d8642SThomas Huth trace_kvm_s390_sie_fault(vcpu); 1768492d8642SThomas Huth 1769492d8642SThomas Huth /* 1770492d8642SThomas Huth * We want to inject an addressing exception, which is defined as a 1771492d8642SThomas Huth * suppressing or terminating exception. However, since we came here 1772492d8642SThomas Huth * by a DAT access exception, the PSW still points to the faulting 1773492d8642SThomas Huth * instruction since DAT exceptions are nullifying. 
So we've got 1774492d8642SThomas Huth * to look up the current opcode to get the length of the instruction 1775492d8642SThomas Huth * to be able to forward the PSW. 1776492d8642SThomas Huth */ 1777492d8642SThomas Huth rc = read_guest(vcpu, psw->addr, &opcode, 1); 1778492d8642SThomas Huth if (rc) 1779492d8642SThomas Huth return kvm_s390_inject_prog_cond(vcpu, rc); 1780492d8642SThomas Huth psw->addr = __rewind_psw(*psw, -insn_length(opcode)); 1781492d8642SThomas Huth 1782492d8642SThomas Huth return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 1783492d8642SThomas Huth } 1784492d8642SThomas Huth 17853fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) 17863fb4c40fSThomas Huth { 178724eb3a82SDominik Dingel int rc = -1; 17882b29a9fdSDominik Dingel 17892b29a9fdSDominik Dingel VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", 17902b29a9fdSDominik Dingel vcpu->arch.sie_block->icptcode); 17912b29a9fdSDominik Dingel trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); 17922b29a9fdSDominik Dingel 179327291e21SDavid Hildenbrand if (guestdbg_enabled(vcpu)) 179427291e21SDavid Hildenbrand kvm_s390_restore_guest_per_regs(vcpu); 179527291e21SDavid Hildenbrand 17963fb4c40fSThomas Huth if (exit_reason >= 0) { 17977c470539SMartin Schwidefsky rc = 0; 1798210b1607SThomas Huth } else if (kvm_is_ucontrol(vcpu->kvm)) { 1799210b1607SThomas Huth vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL; 1800210b1607SThomas Huth vcpu->run->s390_ucontrol.trans_exc_code = 1801210b1607SThomas Huth current->thread.gmap_addr; 1802210b1607SThomas Huth vcpu->run->s390_ucontrol.pgm_code = 0x10; 1803210b1607SThomas Huth rc = -EREMOTE; 180424eb3a82SDominik Dingel 180524eb3a82SDominik Dingel } else if (current->thread.gmap_pfault) { 18063c038e6bSDominik Dingel trace_kvm_s390_major_guest_pfault(vcpu); 180724eb3a82SDominik Dingel current->thread.gmap_pfault = 0; 1808fa576c58SThomas Huth if (kvm_arch_setup_async_pf(vcpu)) { 180924eb3a82SDominik Dingel rc = 0; 1810fa576c58SThomas Huth } else { 1811fa576c58SThomas Huth gpa_t gpa = current->thread.gmap_addr; 1812fa576c58SThomas Huth rc = kvm_arch_fault_in_page(vcpu, gpa, 1); 1813fa576c58SThomas Huth } 181424eb3a82SDominik Dingel } 181524eb3a82SDominik Dingel 1816492d8642SThomas Huth if (rc == -1) 1817492d8642SThomas Huth rc = vcpu_post_run_fault_in_sie(vcpu); 1818b0c632dbSHeiko Carstens 18195a32c1afSChristian Borntraeger memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); 18203fb4c40fSThomas Huth 1821a76ccff6SThomas Huth if (rc == 0) { 1822a76ccff6SThomas Huth if (kvm_is_ucontrol(vcpu->kvm)) 18232955c83fSChristian Borntraeger /* Don't exit for host interrupts. */ 18242955c83fSChristian Borntraeger rc = vcpu->arch.sie_block->icptcode ? 
-EOPNOTSUPP : 0; 1825a76ccff6SThomas Huth else 1826a76ccff6SThomas Huth rc = kvm_handle_sie_intercept(vcpu); 1827a76ccff6SThomas Huth } 1828a76ccff6SThomas Huth 18293fb4c40fSThomas Huth return rc; 18303fb4c40fSThomas Huth } 18313fb4c40fSThomas Huth 18323fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu) 18333fb4c40fSThomas Huth { 18343fb4c40fSThomas Huth int rc, exit_reason; 18353fb4c40fSThomas Huth 1836800c1065SThomas Huth /* 1837800c1065SThomas Huth * We try to hold kvm->srcu during most of vcpu_run (except when run- 1838800c1065SThomas Huth * ning the guest), so that memslots (and other stuff) are protected 1839800c1065SThomas Huth */ 1840800c1065SThomas Huth vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 1841800c1065SThomas Huth 1842a76ccff6SThomas Huth do { 18433fb4c40fSThomas Huth rc = vcpu_pre_run(vcpu); 18443fb4c40fSThomas Huth if (rc) 1845a76ccff6SThomas Huth break; 18463fb4c40fSThomas Huth 1847800c1065SThomas Huth srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 18483fb4c40fSThomas Huth /* 1849a76ccff6SThomas Huth * As PF_VCPU will be used in fault handler, between 1850a76ccff6SThomas Huth * guest_enter and guest_exit should be no uaccess. 18513fb4c40fSThomas Huth */ 18523fb4c40fSThomas Huth preempt_disable(); 18533fb4c40fSThomas Huth kvm_guest_enter(); 18543fb4c40fSThomas Huth preempt_enable(); 1855a76ccff6SThomas Huth exit_reason = sie64a(vcpu->arch.sie_block, 1856a76ccff6SThomas Huth vcpu->run->s.regs.gprs); 18573fb4c40fSThomas Huth kvm_guest_exit(); 1858800c1065SThomas Huth vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 18593fb4c40fSThomas Huth 18603fb4c40fSThomas Huth rc = vcpu_post_run(vcpu, exit_reason); 186127291e21SDavid Hildenbrand } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc); 18623fb4c40fSThomas Huth 1863800c1065SThomas Huth srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 1864e168bf8dSCarsten Otte return rc; 1865b0c632dbSHeiko Carstens } 1866b0c632dbSHeiko Carstens 1867b028ee3eSDavid Hildenbrand static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1868b028ee3eSDavid Hildenbrand { 1869b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; 1870b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; 1871b028ee3eSDavid Hildenbrand if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) 1872b028ee3eSDavid Hildenbrand kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix); 1873b028ee3eSDavid Hildenbrand if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) { 1874b028ee3eSDavid Hildenbrand memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128); 1875d3d692c8SDavid Hildenbrand /* some control register changes require a tlb flush */ 1876d3d692c8SDavid Hildenbrand kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 1877b028ee3eSDavid Hildenbrand } 1878b028ee3eSDavid Hildenbrand if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) { 1879b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm; 1880b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc; 1881b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr; 1882b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->pp = kvm_run->s.regs.pp; 1883b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea; 1884b028ee3eSDavid Hildenbrand } 1885b028ee3eSDavid Hildenbrand if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) { 1886b028ee3eSDavid Hildenbrand vcpu->arch.pfault_token = kvm_run->s.regs.pft; 1887b028ee3eSDavid Hildenbrand vcpu->arch.pfault_select = 
kvm_run->s.regs.pfs; 1888b028ee3eSDavid Hildenbrand vcpu->arch.pfault_compare = kvm_run->s.regs.pfc; 18899fbd8082SDavid Hildenbrand if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) 18909fbd8082SDavid Hildenbrand kvm_clear_async_pf_completion_queue(vcpu); 1891b028ee3eSDavid Hildenbrand } 1892b028ee3eSDavid Hildenbrand kvm_run->kvm_dirty_regs = 0; 1893b028ee3eSDavid Hildenbrand } 1894b028ee3eSDavid Hildenbrand 1895b028ee3eSDavid Hildenbrand static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1896b028ee3eSDavid Hildenbrand { 1897b028ee3eSDavid Hildenbrand kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; 1898b028ee3eSDavid Hildenbrand kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; 1899b028ee3eSDavid Hildenbrand kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu); 1900b028ee3eSDavid Hildenbrand memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); 1901b028ee3eSDavid Hildenbrand kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm; 1902b028ee3eSDavid Hildenbrand kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc; 1903b028ee3eSDavid Hildenbrand kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr; 1904b028ee3eSDavid Hildenbrand kvm_run->s.regs.pp = vcpu->arch.sie_block->pp; 1905b028ee3eSDavid Hildenbrand kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea; 1906b028ee3eSDavid Hildenbrand kvm_run->s.regs.pft = vcpu->arch.pfault_token; 1907b028ee3eSDavid Hildenbrand kvm_run->s.regs.pfs = vcpu->arch.pfault_select; 1908b028ee3eSDavid Hildenbrand kvm_run->s.regs.pfc = vcpu->arch.pfault_compare; 1909b028ee3eSDavid Hildenbrand } 1910b028ee3eSDavid Hildenbrand 1911b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1912b0c632dbSHeiko Carstens { 19138f2abe6aSChristian Borntraeger int rc; 1914b0c632dbSHeiko Carstens sigset_t sigsaved; 1915b0c632dbSHeiko Carstens 191627291e21SDavid Hildenbrand if (guestdbg_exit_pending(vcpu)) { 191727291e21SDavid Hildenbrand kvm_s390_prepare_debug_exit(vcpu); 191827291e21SDavid Hildenbrand return 0; 191927291e21SDavid Hildenbrand } 192027291e21SDavid Hildenbrand 1921b0c632dbSHeiko Carstens if (vcpu->sigset_active) 1922b0c632dbSHeiko Carstens sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); 1923b0c632dbSHeiko Carstens 19246352e4d2SDavid Hildenbrand if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { 19256852d7b6SDavid Hildenbrand kvm_s390_vcpu_start(vcpu); 19266352e4d2SDavid Hildenbrand } else if (is_vcpu_stopped(vcpu)) { 19276352e4d2SDavid Hildenbrand pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n", 19286352e4d2SDavid Hildenbrand vcpu->vcpu_id); 19296352e4d2SDavid Hildenbrand return -EINVAL; 19306352e4d2SDavid Hildenbrand } 1931b0c632dbSHeiko Carstens 1932b028ee3eSDavid Hildenbrand sync_regs(vcpu, kvm_run); 1933d7b0b5ebSCarsten Otte 1934dab4079dSHeiko Carstens might_fault(); 1935e168bf8dSCarsten Otte rc = __vcpu_run(vcpu); 19369ace903dSChristian Ehrhardt 1937b1d16c49SChristian Ehrhardt if (signal_pending(current) && !rc) { 1938b1d16c49SChristian Ehrhardt kvm_run->exit_reason = KVM_EXIT_INTR; 19398f2abe6aSChristian Borntraeger rc = -EINTR; 1940b1d16c49SChristian Ehrhardt } 19418f2abe6aSChristian Borntraeger 194227291e21SDavid Hildenbrand if (guestdbg_exit_pending(vcpu) && !rc) { 194327291e21SDavid Hildenbrand kvm_s390_prepare_debug_exit(vcpu); 194427291e21SDavid Hildenbrand rc = 0; 194527291e21SDavid Hildenbrand } 194627291e21SDavid Hildenbrand 1947b8e660b8SHeiko Carstens if (rc == -EOPNOTSUPP) { 19488f2abe6aSChristian Borntraeger /* intercept cannot be handled in-kernel, 
prepare kvm-run */ 19498f2abe6aSChristian Borntraeger kvm_run->exit_reason = KVM_EXIT_S390_SIEIC; 19508f2abe6aSChristian Borntraeger kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; 19518f2abe6aSChristian Borntraeger kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; 19528f2abe6aSChristian Borntraeger kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; 19538f2abe6aSChristian Borntraeger rc = 0; 19548f2abe6aSChristian Borntraeger } 19558f2abe6aSChristian Borntraeger 19568f2abe6aSChristian Borntraeger if (rc == -EREMOTE) { 19578f2abe6aSChristian Borntraeger /* intercept was handled, but userspace support is needed 19588f2abe6aSChristian Borntraeger * kvm_run has been prepared by the handler */ 19598f2abe6aSChristian Borntraeger rc = 0; 19608f2abe6aSChristian Borntraeger } 19618f2abe6aSChristian Borntraeger 1962b028ee3eSDavid Hildenbrand store_regs(vcpu, kvm_run); 1963d7b0b5ebSCarsten Otte 1964b0c632dbSHeiko Carstens if (vcpu->sigset_active) 1965b0c632dbSHeiko Carstens sigprocmask(SIG_SETMASK, &sigsaved, NULL); 1966b0c632dbSHeiko Carstens 1967b0c632dbSHeiko Carstens vcpu->stat.exit_userspace++; 19687e8e6ab4SHeiko Carstens return rc; 1969b0c632dbSHeiko Carstens } 1970b0c632dbSHeiko Carstens 1971b0c632dbSHeiko Carstens /* 1972b0c632dbSHeiko Carstens * store status at address 1973b0c632dbSHeiko Carstens * we use have two special cases: 1974b0c632dbSHeiko Carstens * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit 1975b0c632dbSHeiko Carstens * KVM_S390_STORE_STATUS_PREFIXED: -> prefix 1976b0c632dbSHeiko Carstens */ 1977d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa) 1978b0c632dbSHeiko Carstens { 1979092670cdSCarsten Otte unsigned char archmode = 1; 1980fda902cbSMichael Mueller unsigned int px; 1981178bd789SThomas Huth u64 clkcomp; 1982d0bce605SHeiko Carstens int rc; 1983b0c632dbSHeiko Carstens 1984d0bce605SHeiko Carstens if (gpa == KVM_S390_STORE_STATUS_NOADDR) { 1985d0bce605SHeiko Carstens if (write_guest_abs(vcpu, 163, &archmode, 1)) 1986b0c632dbSHeiko Carstens return -EFAULT; 1987d0bce605SHeiko Carstens gpa = SAVE_AREA_BASE; 1988d0bce605SHeiko Carstens } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) { 1989d0bce605SHeiko Carstens if (write_guest_real(vcpu, 163, &archmode, 1)) 1990b0c632dbSHeiko Carstens return -EFAULT; 1991d0bce605SHeiko Carstens gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE); 1992d0bce605SHeiko Carstens } 1993d0bce605SHeiko Carstens rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs), 1994d0bce605SHeiko Carstens vcpu->arch.guest_fpregs.fprs, 128); 1995d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs), 1996d0bce605SHeiko Carstens vcpu->run->s.regs.gprs, 128); 1997d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw), 1998d0bce605SHeiko Carstens &vcpu->arch.sie_block->gpsw, 16); 1999fda902cbSMichael Mueller px = kvm_s390_get_prefix(vcpu); 2000d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg), 2001fda902cbSMichael Mueller &px, 4); 2002d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, 2003d0bce605SHeiko Carstens gpa + offsetof(struct save_area, fp_ctrl_reg), 2004d0bce605SHeiko Carstens &vcpu->arch.guest_fpregs.fpc, 4); 2005d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg), 2006d0bce605SHeiko Carstens &vcpu->arch.sie_block->todpr, 4); 2007d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, 
timer), 2008d0bce605SHeiko Carstens &vcpu->arch.sie_block->cputm, 8); 2009178bd789SThomas Huth clkcomp = vcpu->arch.sie_block->ckc >> 8; 2010d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp), 2011d0bce605SHeiko Carstens &clkcomp, 8); 2012d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs), 2013d0bce605SHeiko Carstens &vcpu->run->s.regs.acrs, 64); 2014d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs), 2015d0bce605SHeiko Carstens &vcpu->arch.sie_block->gcr, 128); 2016d0bce605SHeiko Carstens return rc ? -EFAULT : 0; 2017b0c632dbSHeiko Carstens } 2018b0c632dbSHeiko Carstens 2019e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) 2020e879892cSThomas Huth { 2021e879892cSThomas Huth /* 2022e879892cSThomas Huth * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy 2023e879892cSThomas Huth * copying in vcpu load/put. Lets update our copies before we save 2024e879892cSThomas Huth * it into the save area 2025e879892cSThomas Huth */ 2026e879892cSThomas Huth save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); 2027e879892cSThomas Huth save_fp_regs(vcpu->arch.guest_fpregs.fprs); 2028e879892cSThomas Huth save_access_regs(vcpu->run->s.regs.acrs); 2029e879892cSThomas Huth 2030e879892cSThomas Huth return kvm_s390_store_status_unloaded(vcpu, addr); 2031e879892cSThomas Huth } 2032e879892cSThomas Huth 20338ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) 20348ad35755SDavid Hildenbrand { 20358ad35755SDavid Hildenbrand kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu); 20368ad35755SDavid Hildenbrand kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu); 20378ad35755SDavid Hildenbrand exit_sie_sync(vcpu); 20388ad35755SDavid Hildenbrand } 20398ad35755SDavid Hildenbrand 20408ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm) 20418ad35755SDavid Hildenbrand { 20428ad35755SDavid Hildenbrand unsigned int i; 20438ad35755SDavid Hildenbrand struct kvm_vcpu *vcpu; 20448ad35755SDavid Hildenbrand 20458ad35755SDavid Hildenbrand kvm_for_each_vcpu(i, vcpu, kvm) { 20468ad35755SDavid Hildenbrand __disable_ibs_on_vcpu(vcpu); 20478ad35755SDavid Hildenbrand } 20488ad35755SDavid Hildenbrand } 20498ad35755SDavid Hildenbrand 20508ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu) 20518ad35755SDavid Hildenbrand { 20528ad35755SDavid Hildenbrand kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu); 20538ad35755SDavid Hildenbrand kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu); 20548ad35755SDavid Hildenbrand exit_sie_sync(vcpu); 20558ad35755SDavid Hildenbrand } 20568ad35755SDavid Hildenbrand 20576852d7b6SDavid Hildenbrand void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) 20586852d7b6SDavid Hildenbrand { 20598ad35755SDavid Hildenbrand int i, online_vcpus, started_vcpus = 0; 20608ad35755SDavid Hildenbrand 20618ad35755SDavid Hildenbrand if (!is_vcpu_stopped(vcpu)) 20628ad35755SDavid Hildenbrand return; 20638ad35755SDavid Hildenbrand 20646852d7b6SDavid Hildenbrand trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1); 20658ad35755SDavid Hildenbrand /* Only one cpu at a time may enter/leave the STOPPED state. 
*/ 2066433b9ee4SDavid Hildenbrand spin_lock(&vcpu->kvm->arch.start_stop_lock); 20678ad35755SDavid Hildenbrand online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); 20688ad35755SDavid Hildenbrand 20698ad35755SDavid Hildenbrand for (i = 0; i < online_vcpus; i++) { 20708ad35755SDavid Hildenbrand if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) 20718ad35755SDavid Hildenbrand started_vcpus++; 20728ad35755SDavid Hildenbrand } 20738ad35755SDavid Hildenbrand 20748ad35755SDavid Hildenbrand if (started_vcpus == 0) { 20758ad35755SDavid Hildenbrand /* we're the only active VCPU -> speed it up */ 20768ad35755SDavid Hildenbrand __enable_ibs_on_vcpu(vcpu); 20778ad35755SDavid Hildenbrand } else if (started_vcpus == 1) { 20788ad35755SDavid Hildenbrand /* 20798ad35755SDavid Hildenbrand * As we are starting a second VCPU, we have to disable 20808ad35755SDavid Hildenbrand * the IBS facility on all VCPUs to remove potentially 20818ad35755SDavid Hildenbrand * oustanding ENABLE requests. 20828ad35755SDavid Hildenbrand */ 20838ad35755SDavid Hildenbrand __disable_ibs_on_all_vcpus(vcpu->kvm); 20848ad35755SDavid Hildenbrand } 20858ad35755SDavid Hildenbrand 20866852d7b6SDavid Hildenbrand atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); 20878ad35755SDavid Hildenbrand /* 20888ad35755SDavid Hildenbrand * Another VCPU might have used IBS while we were offline. 20898ad35755SDavid Hildenbrand * Let's play safe and flush the VCPU at startup. 20908ad35755SDavid Hildenbrand */ 2091d3d692c8SDavid Hildenbrand kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 2092433b9ee4SDavid Hildenbrand spin_unlock(&vcpu->kvm->arch.start_stop_lock); 20938ad35755SDavid Hildenbrand return; 20946852d7b6SDavid Hildenbrand } 20956852d7b6SDavid Hildenbrand 20966852d7b6SDavid Hildenbrand void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) 20976852d7b6SDavid Hildenbrand { 20988ad35755SDavid Hildenbrand int i, online_vcpus, started_vcpus = 0; 20998ad35755SDavid Hildenbrand struct kvm_vcpu *started_vcpu = NULL; 21008ad35755SDavid Hildenbrand 21018ad35755SDavid Hildenbrand if (is_vcpu_stopped(vcpu)) 21028ad35755SDavid Hildenbrand return; 21038ad35755SDavid Hildenbrand 21046852d7b6SDavid Hildenbrand trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0); 21058ad35755SDavid Hildenbrand /* Only one cpu at a time may enter/leave the STOPPED state. */ 2106433b9ee4SDavid Hildenbrand spin_lock(&vcpu->kvm->arch.start_stop_lock); 21078ad35755SDavid Hildenbrand online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); 21088ad35755SDavid Hildenbrand 210932f5ff63SDavid Hildenbrand /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */ 21106cddd432SDavid Hildenbrand kvm_s390_clear_stop_irq(vcpu); 211132f5ff63SDavid Hildenbrand 21126cddd432SDavid Hildenbrand atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); 21138ad35755SDavid Hildenbrand __disable_ibs_on_vcpu(vcpu); 21148ad35755SDavid Hildenbrand 21158ad35755SDavid Hildenbrand for (i = 0; i < online_vcpus; i++) { 21168ad35755SDavid Hildenbrand if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) { 21178ad35755SDavid Hildenbrand started_vcpus++; 21188ad35755SDavid Hildenbrand started_vcpu = vcpu->kvm->vcpus[i]; 21198ad35755SDavid Hildenbrand } 21208ad35755SDavid Hildenbrand } 21218ad35755SDavid Hildenbrand 21228ad35755SDavid Hildenbrand if (started_vcpus == 1) { 21238ad35755SDavid Hildenbrand /* 21248ad35755SDavid Hildenbrand * As we only have one VCPU left, we want to enable the 21258ad35755SDavid Hildenbrand * IBS facility for that VCPU to speed it up. 
21268ad35755SDavid Hildenbrand */ 21278ad35755SDavid Hildenbrand __enable_ibs_on_vcpu(started_vcpu); 21288ad35755SDavid Hildenbrand } 21298ad35755SDavid Hildenbrand 2130433b9ee4SDavid Hildenbrand spin_unlock(&vcpu->kvm->arch.start_stop_lock); 21318ad35755SDavid Hildenbrand return; 21326852d7b6SDavid Hildenbrand } 21336852d7b6SDavid Hildenbrand 2134d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, 2135d6712df9SCornelia Huck struct kvm_enable_cap *cap) 2136d6712df9SCornelia Huck { 2137d6712df9SCornelia Huck int r; 2138d6712df9SCornelia Huck 2139d6712df9SCornelia Huck if (cap->flags) 2140d6712df9SCornelia Huck return -EINVAL; 2141d6712df9SCornelia Huck 2142d6712df9SCornelia Huck switch (cap->cap) { 2143fa6b7fe9SCornelia Huck case KVM_CAP_S390_CSS_SUPPORT: 2144fa6b7fe9SCornelia Huck if (!vcpu->kvm->arch.css_support) { 2145fa6b7fe9SCornelia Huck vcpu->kvm->arch.css_support = 1; 2146fa6b7fe9SCornelia Huck trace_kvm_s390_enable_css(vcpu->kvm); 2147fa6b7fe9SCornelia Huck } 2148fa6b7fe9SCornelia Huck r = 0; 2149fa6b7fe9SCornelia Huck break; 2150d6712df9SCornelia Huck default: 2151d6712df9SCornelia Huck r = -EINVAL; 2152d6712df9SCornelia Huck break; 2153d6712df9SCornelia Huck } 2154d6712df9SCornelia Huck return r; 2155d6712df9SCornelia Huck } 2156d6712df9SCornelia Huck 2157b0c632dbSHeiko Carstens long kvm_arch_vcpu_ioctl(struct file *filp, 2158b0c632dbSHeiko Carstens unsigned int ioctl, unsigned long arg) 2159b0c632dbSHeiko Carstens { 2160b0c632dbSHeiko Carstens struct kvm_vcpu *vcpu = filp->private_data; 2161b0c632dbSHeiko Carstens void __user *argp = (void __user *)arg; 2162800c1065SThomas Huth int idx; 2163bc923cc9SAvi Kivity long r; 2164b0c632dbSHeiko Carstens 216593736624SAvi Kivity switch (ioctl) { 216693736624SAvi Kivity case KVM_S390_INTERRUPT: { 2167ba5c1e9bSCarsten Otte struct kvm_s390_interrupt s390int; 2168383d0b05SJens Freimann struct kvm_s390_irq s390irq; 2169ba5c1e9bSCarsten Otte 217093736624SAvi Kivity r = -EFAULT; 2171ba5c1e9bSCarsten Otte if (copy_from_user(&s390int, argp, sizeof(s390int))) 217293736624SAvi Kivity break; 2173383d0b05SJens Freimann if (s390int_to_s390irq(&s390int, &s390irq)) 2174383d0b05SJens Freimann return -EINVAL; 2175383d0b05SJens Freimann r = kvm_s390_inject_vcpu(vcpu, &s390irq); 217693736624SAvi Kivity break; 2177ba5c1e9bSCarsten Otte } 2178b0c632dbSHeiko Carstens case KVM_S390_STORE_STATUS: 2179800c1065SThomas Huth idx = srcu_read_lock(&vcpu->kvm->srcu); 2180bc923cc9SAvi Kivity r = kvm_s390_vcpu_store_status(vcpu, arg); 2181800c1065SThomas Huth srcu_read_unlock(&vcpu->kvm->srcu, idx); 2182bc923cc9SAvi Kivity break; 2183b0c632dbSHeiko Carstens case KVM_S390_SET_INITIAL_PSW: { 2184b0c632dbSHeiko Carstens psw_t psw; 2185b0c632dbSHeiko Carstens 2186bc923cc9SAvi Kivity r = -EFAULT; 2187b0c632dbSHeiko Carstens if (copy_from_user(&psw, argp, sizeof(psw))) 2188bc923cc9SAvi Kivity break; 2189bc923cc9SAvi Kivity r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw); 2190bc923cc9SAvi Kivity break; 2191b0c632dbSHeiko Carstens } 2192b0c632dbSHeiko Carstens case KVM_S390_INITIAL_RESET: 2193bc923cc9SAvi Kivity r = kvm_arch_vcpu_ioctl_initial_reset(vcpu); 2194bc923cc9SAvi Kivity break; 219514eebd91SCarsten Otte case KVM_SET_ONE_REG: 219614eebd91SCarsten Otte case KVM_GET_ONE_REG: { 219714eebd91SCarsten Otte struct kvm_one_reg reg; 219814eebd91SCarsten Otte r = -EFAULT; 219914eebd91SCarsten Otte if (copy_from_user(®, argp, sizeof(reg))) 220014eebd91SCarsten Otte break; 220114eebd91SCarsten Otte if (ioctl == KVM_SET_ONE_REG) 220214eebd91SCarsten 
Otte r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, ®); 220314eebd91SCarsten Otte else 220414eebd91SCarsten Otte r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, ®); 220514eebd91SCarsten Otte break; 220614eebd91SCarsten Otte } 220727e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL 220827e0393fSCarsten Otte case KVM_S390_UCAS_MAP: { 220927e0393fSCarsten Otte struct kvm_s390_ucas_mapping ucasmap; 221027e0393fSCarsten Otte 221127e0393fSCarsten Otte if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) { 221227e0393fSCarsten Otte r = -EFAULT; 221327e0393fSCarsten Otte break; 221427e0393fSCarsten Otte } 221527e0393fSCarsten Otte 221627e0393fSCarsten Otte if (!kvm_is_ucontrol(vcpu->kvm)) { 221727e0393fSCarsten Otte r = -EINVAL; 221827e0393fSCarsten Otte break; 221927e0393fSCarsten Otte } 222027e0393fSCarsten Otte 222127e0393fSCarsten Otte r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr, 222227e0393fSCarsten Otte ucasmap.vcpu_addr, ucasmap.length); 222327e0393fSCarsten Otte break; 222427e0393fSCarsten Otte } 222527e0393fSCarsten Otte case KVM_S390_UCAS_UNMAP: { 222627e0393fSCarsten Otte struct kvm_s390_ucas_mapping ucasmap; 222727e0393fSCarsten Otte 222827e0393fSCarsten Otte if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) { 222927e0393fSCarsten Otte r = -EFAULT; 223027e0393fSCarsten Otte break; 223127e0393fSCarsten Otte } 223227e0393fSCarsten Otte 223327e0393fSCarsten Otte if (!kvm_is_ucontrol(vcpu->kvm)) { 223427e0393fSCarsten Otte r = -EINVAL; 223527e0393fSCarsten Otte break; 223627e0393fSCarsten Otte } 223727e0393fSCarsten Otte 223827e0393fSCarsten Otte r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr, 223927e0393fSCarsten Otte ucasmap.length); 224027e0393fSCarsten Otte break; 224127e0393fSCarsten Otte } 224227e0393fSCarsten Otte #endif 2243ccc7910fSCarsten Otte case KVM_S390_VCPU_FAULT: { 2244527e30b4SMartin Schwidefsky r = gmap_fault(vcpu->arch.gmap, arg, 0); 2245ccc7910fSCarsten Otte break; 2246ccc7910fSCarsten Otte } 2247d6712df9SCornelia Huck case KVM_ENABLE_CAP: 2248d6712df9SCornelia Huck { 2249d6712df9SCornelia Huck struct kvm_enable_cap cap; 2250d6712df9SCornelia Huck r = -EFAULT; 2251d6712df9SCornelia Huck if (copy_from_user(&cap, argp, sizeof(cap))) 2252d6712df9SCornelia Huck break; 2253d6712df9SCornelia Huck r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); 2254d6712df9SCornelia Huck break; 2255d6712df9SCornelia Huck } 2256b0c632dbSHeiko Carstens default: 22573e6afcf1SCarsten Otte r = -ENOTTY; 2258b0c632dbSHeiko Carstens } 2259bc923cc9SAvi Kivity return r; 2260b0c632dbSHeiko Carstens } 2261b0c632dbSHeiko Carstens 22625b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) 22635b1c1493SCarsten Otte { 22645b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL 22655b1c1493SCarsten Otte if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET) 22665b1c1493SCarsten Otte && (kvm_is_ucontrol(vcpu->kvm))) { 22675b1c1493SCarsten Otte vmf->page = virt_to_page(vcpu->arch.sie_block); 22685b1c1493SCarsten Otte get_page(vmf->page); 22695b1c1493SCarsten Otte return 0; 22705b1c1493SCarsten Otte } 22715b1c1493SCarsten Otte #endif 22725b1c1493SCarsten Otte return VM_FAULT_SIGBUS; 22735b1c1493SCarsten Otte } 22745b1c1493SCarsten Otte 22755587027cSAneesh Kumar K.V int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, 22765587027cSAneesh Kumar K.V unsigned long npages) 2277db3fe4ebSTakuya Yoshikawa { 2278db3fe4ebSTakuya Yoshikawa return 0; 2279db3fe4ebSTakuya Yoshikawa } 2280db3fe4ebSTakuya Yoshikawa 2281b0c632dbSHeiko Carstens /* Section: memory related */ 
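/*
 * Illustrative sketch, not part of the original file: the memslot sanity
 * checks in kvm_arch_prepare_memory_region() below require both the
 * userspace address and the size of a slot to be aligned to the 1 MB
 * segment size (low 20 bits clear).  Expressed as a stand-alone predicate,
 * with a made-up helper name:
 */
static inline bool kvm_s390_memslot_is_segment_aligned(unsigned long uaddr,
							unsigned long size)
{
	/* 0xfffff covers the offset within a 1 MB segment */
	return !(uaddr & 0xffffful) && !(size & 0xffffful);
}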
2282f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm,
2283f7784b8eSMarcelo Tosatti struct kvm_memory_slot *memslot,
22847b6195a9STakuya Yoshikawa struct kvm_userspace_memory_region *mem,
22857b6195a9STakuya Yoshikawa enum kvm_mr_change change)
2286b0c632dbSHeiko Carstens {
2287dd2887e7SNick Wang /* A few sanity checks. Memory slots have to start and end on a
2288dd2887e7SNick Wang segment boundary (1MB). The memory in userland may be fragmented
2289dd2887e7SNick Wang into various different vmas. It is okay to mmap() and munmap()
2290dd2887e7SNick Wang stuff in this slot at any time after this call. */
2291b0c632dbSHeiko Carstens
2292598841caSCarsten Otte if (mem->userspace_addr & 0xffffful)
2293b0c632dbSHeiko Carstens return -EINVAL;
2294b0c632dbSHeiko Carstens
2295598841caSCarsten Otte if (mem->memory_size & 0xffffful)
2296b0c632dbSHeiko Carstens return -EINVAL;
2297b0c632dbSHeiko Carstens
2298f7784b8eSMarcelo Tosatti return 0;
2299f7784b8eSMarcelo Tosatti }
2300f7784b8eSMarcelo Tosatti
2301f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
2302f7784b8eSMarcelo Tosatti struct kvm_userspace_memory_region *mem,
23038482644aSTakuya Yoshikawa const struct kvm_memory_slot *old,
23048482644aSTakuya Yoshikawa enum kvm_mr_change change)
2305f7784b8eSMarcelo Tosatti {
2306f7850c92SCarsten Otte int rc;
2307f7784b8eSMarcelo Tosatti
23082cef4debSChristian Borntraeger /* If the basics of the memslot do not change, we do not want
23092cef4debSChristian Borntraeger * to update the gmap. Every update causes several unnecessary
23102cef4debSChristian Borntraeger * segment translation exceptions. This is usually handled just
23112cef4debSChristian Borntraeger * fine by the normal fault handler + gmap, but it will also
23122cef4debSChristian Borntraeger * cause faults on the prefix page of running guest CPUs.
23132cef4debSChristian Borntraeger */
23142cef4debSChristian Borntraeger if (old->userspace_addr == mem->userspace_addr &&
23152cef4debSChristian Borntraeger old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
23162cef4debSChristian Borntraeger old->npages * PAGE_SIZE == mem->memory_size)
23172cef4debSChristian Borntraeger return;
2318598841caSCarsten Otte
2319598841caSCarsten Otte rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2320598841caSCarsten Otte mem->guest_phys_addr, mem->memory_size);
2321598841caSCarsten Otte if (rc)
2322f7850c92SCarsten Otte printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
2323598841caSCarsten Otte return;
2324b0c632dbSHeiko Carstens }
2325b0c632dbSHeiko Carstens
2326b0c632dbSHeiko Carstens static int __init kvm_s390_init(void)
2327b0c632dbSHeiko Carstens {
23289d8d5786SMichael Mueller return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
2329b0c632dbSHeiko Carstens }
2330b0c632dbSHeiko Carstens
2331b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void)
2332b0c632dbSHeiko Carstens {
2333b0c632dbSHeiko Carstens kvm_exit();
2334b0c632dbSHeiko Carstens }
2335b0c632dbSHeiko Carstens
2336b0c632dbSHeiko Carstens module_init(kvm_s390_init);
2337b0c632dbSHeiko Carstens module_exit(kvm_s390_exit);
2338566af940SCornelia Huck
2339566af940SCornelia Huck /*
2340566af940SCornelia Huck * Enable autoloading of the kvm module.
2341566af940SCornelia Huck * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2342566af940SCornelia Huck * since x86 takes a different approach.
2343566af940SCornelia Huck */
2344566af940SCornelia Huck #include <linux/miscdevice.h>
2345566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR);
2346566af940SCornelia Huck MODULE_ALIAS("devname:kvm");
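/*
 * Illustrative userspace sketch (not part of kvm-s390.c): how a
 * user-controlled ("ucontrol") VMM could exercise the vcpu ioctls and the
 * mmap fault handler defined above.  The helper name and the 1MB length
 * are example choices, error handling is omitted, and the VM is assumed
 * to have been created with the KVM_VM_S390_UCONTROL type, since the
 * handlers above reject non-ucontrol guests with -EINVAL.
 */
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

static void *map_ucontrol_segment(int vcpu_fd, void *host_mem, __u64 guest_addr)
{
	long page_size = sysconf(_SC_PAGESIZE);
	struct kvm_s390_ucas_mapping map = {
		.user_addr = (__u64)(unsigned long)host_mem,
		.vcpu_addr = guest_addr,
		.length    = 1UL << 20,	/* one 1MB segment */
	};

	/* Handled by gmap_map_segment() in the vcpu ioctl switch above. */
	ioctl(vcpu_fd, KVM_S390_UCAS_MAP, &map);

	/*
	 * kvm_arch_vcpu_fault() hands out the SIE control block when the
	 * vcpu fd is mmap()ed at KVM_S390_SIE_PAGE_OFFSET.
	 */
	return mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    vcpu_fd, KVM_S390_SIE_PAGE_OFFSET * page_size);
}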