/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
	0xff82fffbf4fc2000UL,
	0x005c000000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

static struct gmap_notifier gmap_notifier;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_USER_SIGP:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (new_limit > kvm->arch.gmap->asce_end)
			return -E2BIG;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
					sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *cur_vcpu;
	unsigned int vcpu_idx;
	u64 host_tod, gtod;
	int r;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	kvm->arch.epoch = gtod - host_tod;
	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		exit_sie(cur_vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
					sizeof(gtod_high)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 host_tod, gtod;
	int r;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	gtod = host_tod + kvm->arch.epoch;
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
		       sizeof(struct cpuid));
		kvm->arch.model.ibc = proc->ibc;
		memcpy(kvm->arch.model.fac->list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp_get_ibc();
	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(2) && test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
	get_cpu_id(cpu_id);
	cpu_id->version = 0xff;
}

static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));

	return 0;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	/*
	 * The architectural maximum amount of facilities is 16 kbit. To store
	 * this amount, 2 kbyte of memory is required. Thus we need a full
	 * page to hold the guest facility list (arch.model.fac->list) and the
	 * facility mask (arch.model.fac->mask). Its address size has to be
	 * 31 bits and word aligned.
	 */
	kvm->arch.model.fac =
		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.model.fac)
		goto out_nofac;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac->mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
	kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_crypto;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_nogmap:
	kfree(kvm->arch.crypto.crycb);
out_crypto:
	free_page((unsigned long)kvm->arch.model.fac);
out_nofac:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)kvm->arch.model.fac);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	if (sclp_has_sigpif())
		vcpu->arch.sie_block->eca |= 0x10000000U;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
				      ICTL_TPROT;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id;
	vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc;
	mutex_unlock(&vcpu->kvm->lock);

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}
	vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->list;

	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;
129314eebd91SCarsten Otte switch (reg->id) { 129429b7c71bSCarsten Otte case KVM_REG_S390_TODPR: 129529b7c71bSCarsten Otte r = put_user(vcpu->arch.sie_block->todpr, 129629b7c71bSCarsten Otte (u32 __user *)reg->addr); 129729b7c71bSCarsten Otte break; 129829b7c71bSCarsten Otte case KVM_REG_S390_EPOCHDIFF: 129929b7c71bSCarsten Otte r = put_user(vcpu->arch.sie_block->epoch, 130029b7c71bSCarsten Otte (u64 __user *)reg->addr); 130129b7c71bSCarsten Otte break; 130246a6dd1cSJason J. herne case KVM_REG_S390_CPU_TIMER: 130346a6dd1cSJason J. herne r = put_user(vcpu->arch.sie_block->cputm, 130446a6dd1cSJason J. herne (u64 __user *)reg->addr); 130546a6dd1cSJason J. herne break; 130646a6dd1cSJason J. herne case KVM_REG_S390_CLOCK_COMP: 130746a6dd1cSJason J. herne r = put_user(vcpu->arch.sie_block->ckc, 130846a6dd1cSJason J. herne (u64 __user *)reg->addr); 130946a6dd1cSJason J. herne break; 1310536336c2SDominik Dingel case KVM_REG_S390_PFTOKEN: 1311536336c2SDominik Dingel r = put_user(vcpu->arch.pfault_token, 1312536336c2SDominik Dingel (u64 __user *)reg->addr); 1313536336c2SDominik Dingel break; 1314536336c2SDominik Dingel case KVM_REG_S390_PFCOMPARE: 1315536336c2SDominik Dingel r = put_user(vcpu->arch.pfault_compare, 1316536336c2SDominik Dingel (u64 __user *)reg->addr); 1317536336c2SDominik Dingel break; 1318536336c2SDominik Dingel case KVM_REG_S390_PFSELECT: 1319536336c2SDominik Dingel r = put_user(vcpu->arch.pfault_select, 1320536336c2SDominik Dingel (u64 __user *)reg->addr); 1321536336c2SDominik Dingel break; 1322672550fbSChristian Borntraeger case KVM_REG_S390_PP: 1323672550fbSChristian Borntraeger r = put_user(vcpu->arch.sie_block->pp, 1324672550fbSChristian Borntraeger (u64 __user *)reg->addr); 1325672550fbSChristian Borntraeger break; 1326afa45ff5SChristian Borntraeger case KVM_REG_S390_GBEA: 1327afa45ff5SChristian Borntraeger r = put_user(vcpu->arch.sie_block->gbea, 1328afa45ff5SChristian Borntraeger (u64 __user *)reg->addr); 1329afa45ff5SChristian Borntraeger break; 133014eebd91SCarsten Otte default: 133114eebd91SCarsten Otte break; 133214eebd91SCarsten Otte } 133314eebd91SCarsten Otte 133414eebd91SCarsten Otte return r; 133514eebd91SCarsten Otte } 133614eebd91SCarsten Otte 133714eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, 133814eebd91SCarsten Otte struct kvm_one_reg *reg) 133914eebd91SCarsten Otte { 134014eebd91SCarsten Otte int r = -EINVAL; 134114eebd91SCarsten Otte 134214eebd91SCarsten Otte switch (reg->id) { 134329b7c71bSCarsten Otte case KVM_REG_S390_TODPR: 134429b7c71bSCarsten Otte r = get_user(vcpu->arch.sie_block->todpr, 134529b7c71bSCarsten Otte (u32 __user *)reg->addr); 134629b7c71bSCarsten Otte break; 134729b7c71bSCarsten Otte case KVM_REG_S390_EPOCHDIFF: 134829b7c71bSCarsten Otte r = get_user(vcpu->arch.sie_block->epoch, 134929b7c71bSCarsten Otte (u64 __user *)reg->addr); 135029b7c71bSCarsten Otte break; 135146a6dd1cSJason J. herne case KVM_REG_S390_CPU_TIMER: 135246a6dd1cSJason J. herne r = get_user(vcpu->arch.sie_block->cputm, 135346a6dd1cSJason J. herne (u64 __user *)reg->addr); 135446a6dd1cSJason J. herne break; 135546a6dd1cSJason J. herne case KVM_REG_S390_CLOCK_COMP: 135646a6dd1cSJason J. herne r = get_user(vcpu->arch.sie_block->ckc, 135746a6dd1cSJason J. herne (u64 __user *)reg->addr); 135846a6dd1cSJason J. 
herne break; 1359536336c2SDominik Dingel case KVM_REG_S390_PFTOKEN: 1360536336c2SDominik Dingel r = get_user(vcpu->arch.pfault_token, 1361536336c2SDominik Dingel (u64 __user *)reg->addr); 13629fbd8082SDavid Hildenbrand if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) 13639fbd8082SDavid Hildenbrand kvm_clear_async_pf_completion_queue(vcpu); 1364536336c2SDominik Dingel break; 1365536336c2SDominik Dingel case KVM_REG_S390_PFCOMPARE: 1366536336c2SDominik Dingel r = get_user(vcpu->arch.pfault_compare, 1367536336c2SDominik Dingel (u64 __user *)reg->addr); 1368536336c2SDominik Dingel break; 1369536336c2SDominik Dingel case KVM_REG_S390_PFSELECT: 1370536336c2SDominik Dingel r = get_user(vcpu->arch.pfault_select, 1371536336c2SDominik Dingel (u64 __user *)reg->addr); 1372536336c2SDominik Dingel break; 1373672550fbSChristian Borntraeger case KVM_REG_S390_PP: 1374672550fbSChristian Borntraeger r = get_user(vcpu->arch.sie_block->pp, 1375672550fbSChristian Borntraeger (u64 __user *)reg->addr); 1376672550fbSChristian Borntraeger break; 1377afa45ff5SChristian Borntraeger case KVM_REG_S390_GBEA: 1378afa45ff5SChristian Borntraeger r = get_user(vcpu->arch.sie_block->gbea, 1379afa45ff5SChristian Borntraeger (u64 __user *)reg->addr); 1380afa45ff5SChristian Borntraeger break; 138114eebd91SCarsten Otte default: 138214eebd91SCarsten Otte break; 138314eebd91SCarsten Otte } 138414eebd91SCarsten Otte 138514eebd91SCarsten Otte return r; 138614eebd91SCarsten Otte } 1387b6d33834SChristoffer Dall 1388b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu) 1389b0c632dbSHeiko Carstens { 1390b0c632dbSHeiko Carstens kvm_s390_vcpu_initial_reset(vcpu); 1391b0c632dbSHeiko Carstens return 0; 1392b0c632dbSHeiko Carstens } 1393b0c632dbSHeiko Carstens 1394b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 1395b0c632dbSHeiko Carstens { 13965a32c1afSChristian Borntraeger memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs)); 1397b0c632dbSHeiko Carstens return 0; 1398b0c632dbSHeiko Carstens } 1399b0c632dbSHeiko Carstens 1400b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 1401b0c632dbSHeiko Carstens { 14025a32c1afSChristian Borntraeger memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs)); 1403b0c632dbSHeiko Carstens return 0; 1404b0c632dbSHeiko Carstens } 1405b0c632dbSHeiko Carstens 1406b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 1407b0c632dbSHeiko Carstens struct kvm_sregs *sregs) 1408b0c632dbSHeiko Carstens { 140959674c1aSChristian Borntraeger memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs)); 1410b0c632dbSHeiko Carstens memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); 141159674c1aSChristian Borntraeger restore_access_regs(vcpu->run->s.regs.acrs); 1412b0c632dbSHeiko Carstens return 0; 1413b0c632dbSHeiko Carstens } 1414b0c632dbSHeiko Carstens 1415b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, 1416b0c632dbSHeiko Carstens struct kvm_sregs *sregs) 1417b0c632dbSHeiko Carstens { 141859674c1aSChristian Borntraeger memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs)); 1419b0c632dbSHeiko Carstens memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs)); 1420b0c632dbSHeiko Carstens return 0; 1421b0c632dbSHeiko Carstens } 1422b0c632dbSHeiko Carstens 1423b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct
kvm_fpu *fpu) 1424b0c632dbSHeiko Carstens { 14254725c860SMartin Schwidefsky if (test_fp_ctl(fpu->fpc)) 14264725c860SMartin Schwidefsky return -EINVAL; 1427b0c632dbSHeiko Carstens memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs)); 14284725c860SMartin Schwidefsky vcpu->arch.guest_fpregs.fpc = fpu->fpc; 14294725c860SMartin Schwidefsky restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc); 14304725c860SMartin Schwidefsky restore_fp_regs(vcpu->arch.guest_fpregs.fprs); 1431b0c632dbSHeiko Carstens return 0; 1432b0c632dbSHeiko Carstens } 1433b0c632dbSHeiko Carstens 1434b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 1435b0c632dbSHeiko Carstens { 1436b0c632dbSHeiko Carstens memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs)); 1437b0c632dbSHeiko Carstens fpu->fpc = vcpu->arch.guest_fpregs.fpc; 1438b0c632dbSHeiko Carstens return 0; 1439b0c632dbSHeiko Carstens } 1440b0c632dbSHeiko Carstens 1441b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw) 1442b0c632dbSHeiko Carstens { 1443b0c632dbSHeiko Carstens int rc = 0; 1444b0c632dbSHeiko Carstens 14457a42fdc2SDavid Hildenbrand if (!is_vcpu_stopped(vcpu)) 1446b0c632dbSHeiko Carstens rc = -EBUSY; 1447d7b0b5ebSCarsten Otte else { 1448d7b0b5ebSCarsten Otte vcpu->run->psw_mask = psw.mask; 1449d7b0b5ebSCarsten Otte vcpu->run->psw_addr = psw.addr; 1450d7b0b5ebSCarsten Otte } 1451b0c632dbSHeiko Carstens return rc; 1452b0c632dbSHeiko Carstens } 1453b0c632dbSHeiko Carstens 1454b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, 1455b0c632dbSHeiko Carstens struct kvm_translation *tr) 1456b0c632dbSHeiko Carstens { 1457b0c632dbSHeiko Carstens return -EINVAL; /* not implemented yet */ 1458b0c632dbSHeiko Carstens } 1459b0c632dbSHeiko Carstens 146027291e21SDavid Hildenbrand #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \ 146127291e21SDavid Hildenbrand KVM_GUESTDBG_USE_HW_BP | \ 146227291e21SDavid Hildenbrand KVM_GUESTDBG_ENABLE) 146327291e21SDavid Hildenbrand 1464d0bfb940SJan Kiszka int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 1465d0bfb940SJan Kiszka struct kvm_guest_debug *dbg) 1466b0c632dbSHeiko Carstens { 146727291e21SDavid Hildenbrand int rc = 0; 146827291e21SDavid Hildenbrand 146927291e21SDavid Hildenbrand vcpu->guest_debug = 0; 147027291e21SDavid Hildenbrand kvm_s390_clear_bp_data(vcpu); 147127291e21SDavid Hildenbrand 14722de3bfc2SDavid Hildenbrand if (dbg->control & ~VALID_GUESTDBG_FLAGS) 147327291e21SDavid Hildenbrand return -EINVAL; 147427291e21SDavid Hildenbrand 147527291e21SDavid Hildenbrand if (dbg->control & KVM_GUESTDBG_ENABLE) { 147627291e21SDavid Hildenbrand vcpu->guest_debug = dbg->control; 147727291e21SDavid Hildenbrand /* enforce guest PER */ 147827291e21SDavid Hildenbrand atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); 147927291e21SDavid Hildenbrand 148027291e21SDavid Hildenbrand if (dbg->control & KVM_GUESTDBG_USE_HW_BP) 148127291e21SDavid Hildenbrand rc = kvm_s390_import_bp_data(vcpu, dbg); 148227291e21SDavid Hildenbrand } else { 148327291e21SDavid Hildenbrand atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); 148427291e21SDavid Hildenbrand vcpu->arch.guestdbg.last_bp = 0; 148527291e21SDavid Hildenbrand } 148627291e21SDavid Hildenbrand 148727291e21SDavid Hildenbrand if (rc) { 148827291e21SDavid Hildenbrand vcpu->guest_debug = 0; 148927291e21SDavid Hildenbrand kvm_s390_clear_bp_data(vcpu); 149027291e21SDavid Hildenbrand 
atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); 149127291e21SDavid Hildenbrand } 149227291e21SDavid Hildenbrand 149327291e21SDavid Hildenbrand return rc; 1494b0c632dbSHeiko Carstens } 1495b0c632dbSHeiko Carstens 149662d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 149762d9f0dbSMarcelo Tosatti struct kvm_mp_state *mp_state) 149862d9f0dbSMarcelo Tosatti { 14996352e4d2SDavid Hildenbrand /* CHECK_STOP and LOAD are not supported yet */ 15006352e4d2SDavid Hildenbrand return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED : 15016352e4d2SDavid Hildenbrand KVM_MP_STATE_OPERATING; 150262d9f0dbSMarcelo Tosatti } 150362d9f0dbSMarcelo Tosatti 150462d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, 150562d9f0dbSMarcelo Tosatti struct kvm_mp_state *mp_state) 150662d9f0dbSMarcelo Tosatti { 15076352e4d2SDavid Hildenbrand int rc = 0; 15086352e4d2SDavid Hildenbrand 15096352e4d2SDavid Hildenbrand /* user space knows about this interface - let it control the state */ 15106352e4d2SDavid Hildenbrand vcpu->kvm->arch.user_cpu_state_ctrl = 1; 15116352e4d2SDavid Hildenbrand 15126352e4d2SDavid Hildenbrand switch (mp_state->mp_state) { 15136352e4d2SDavid Hildenbrand case KVM_MP_STATE_STOPPED: 15146352e4d2SDavid Hildenbrand kvm_s390_vcpu_stop(vcpu); 15156352e4d2SDavid Hildenbrand break; 15166352e4d2SDavid Hildenbrand case KVM_MP_STATE_OPERATING: 15176352e4d2SDavid Hildenbrand kvm_s390_vcpu_start(vcpu); 15186352e4d2SDavid Hildenbrand break; 15196352e4d2SDavid Hildenbrand case KVM_MP_STATE_LOAD: 15206352e4d2SDavid Hildenbrand case KVM_MP_STATE_CHECK_STOP: 15216352e4d2SDavid Hildenbrand /* fall through - CHECK_STOP and LOAD are not supported yet */ 15226352e4d2SDavid Hildenbrand default: 15236352e4d2SDavid Hildenbrand rc = -ENXIO; 15246352e4d2SDavid Hildenbrand } 15256352e4d2SDavid Hildenbrand 15266352e4d2SDavid Hildenbrand return rc; 152762d9f0dbSMarcelo Tosatti } 152862d9f0dbSMarcelo Tosatti 1529b31605c1SDominik Dingel bool kvm_s390_cmma_enabled(struct kvm *kvm) 1530b31605c1SDominik Dingel { 1531b31605c1SDominik Dingel if (!MACHINE_IS_LPAR) 1532b31605c1SDominik Dingel return false; 1533b31605c1SDominik Dingel /* only enable for z10 and later */ 1534b31605c1SDominik Dingel if (!MACHINE_HAS_EDAT1) 1535b31605c1SDominik Dingel return false; 1536b31605c1SDominik Dingel if (!kvm->arch.use_cmma) 1537b31605c1SDominik Dingel return false; 1538b31605c1SDominik Dingel return true; 1539b31605c1SDominik Dingel } 1540b31605c1SDominik Dingel 15418ad35755SDavid Hildenbrand static bool ibs_enabled(struct kvm_vcpu *vcpu) 15428ad35755SDavid Hildenbrand { 15438ad35755SDavid Hildenbrand return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS; 15448ad35755SDavid Hildenbrand } 15458ad35755SDavid Hildenbrand 15462c70fe44SChristian Borntraeger static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) 15472c70fe44SChristian Borntraeger { 15488ad35755SDavid Hildenbrand retry: 15498ad35755SDavid Hildenbrand s390_vcpu_unblock(vcpu); 15502c70fe44SChristian Borntraeger /* 15512c70fe44SChristian Borntraeger * We use MMU_RELOAD just to re-arm the ipte notifier for the 15522c70fe44SChristian Borntraeger * guest prefix page. gmap_ipte_notify will wait on the ptl lock. 15532c70fe44SChristian Borntraeger * This ensures that the ipte instruction for this request has 15542c70fe44SChristian Borntraeger * already finished. We might race against a second unmapper that 15552c70fe44SChristian Borntraeger * wants to set the blocking bit. Lets just retry the request loop. 
15562c70fe44SChristian Borntraeger */ 15578ad35755SDavid Hildenbrand if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) { 15582c70fe44SChristian Borntraeger int rc; 15592c70fe44SChristian Borntraeger rc = gmap_ipte_notify(vcpu->arch.gmap, 1560fda902cbSMichael Mueller kvm_s390_get_prefix(vcpu), 15612c70fe44SChristian Borntraeger PAGE_SIZE * 2); 15622c70fe44SChristian Borntraeger if (rc) 15632c70fe44SChristian Borntraeger return rc; 15648ad35755SDavid Hildenbrand goto retry; 15652c70fe44SChristian Borntraeger } 15668ad35755SDavid Hildenbrand 1567d3d692c8SDavid Hildenbrand if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { 1568d3d692c8SDavid Hildenbrand vcpu->arch.sie_block->ihcpu = 0xffff; 1569d3d692c8SDavid Hildenbrand goto retry; 1570d3d692c8SDavid Hildenbrand } 1571d3d692c8SDavid Hildenbrand 15728ad35755SDavid Hildenbrand if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) { 15738ad35755SDavid Hildenbrand if (!ibs_enabled(vcpu)) { 15748ad35755SDavid Hildenbrand trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1); 15758ad35755SDavid Hildenbrand atomic_set_mask(CPUSTAT_IBS, 15768ad35755SDavid Hildenbrand &vcpu->arch.sie_block->cpuflags); 15778ad35755SDavid Hildenbrand } 15788ad35755SDavid Hildenbrand goto retry; 15798ad35755SDavid Hildenbrand } 15808ad35755SDavid Hildenbrand 15818ad35755SDavid Hildenbrand if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) { 15828ad35755SDavid Hildenbrand if (ibs_enabled(vcpu)) { 15838ad35755SDavid Hildenbrand trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0); 15848ad35755SDavid Hildenbrand atomic_clear_mask(CPUSTAT_IBS, 15858ad35755SDavid Hildenbrand &vcpu->arch.sie_block->cpuflags); 15868ad35755SDavid Hildenbrand } 15878ad35755SDavid Hildenbrand goto retry; 15888ad35755SDavid Hildenbrand } 15898ad35755SDavid Hildenbrand 15900759d068SDavid Hildenbrand /* nothing to do, just clear the request */ 15910759d068SDavid Hildenbrand clear_bit(KVM_REQ_UNHALT, &vcpu->requests); 15920759d068SDavid Hildenbrand 15932c70fe44SChristian Borntraeger return 0; 15942c70fe44SChristian Borntraeger } 15952c70fe44SChristian Borntraeger 1596fa576c58SThomas Huth /** 1597fa576c58SThomas Huth * kvm_arch_fault_in_page - fault-in guest page if necessary 1598fa576c58SThomas Huth * @vcpu: The corresponding virtual cpu 1599fa576c58SThomas Huth * @gpa: Guest physical address 1600fa576c58SThomas Huth * @writable: Whether the page should be writable or not 1601fa576c58SThomas Huth * 1602fa576c58SThomas Huth * Make sure that a guest page has been faulted-in on the host. 1603fa576c58SThomas Huth * 1604fa576c58SThomas Huth * Return: Zero on success, negative error code otherwise. 1605fa576c58SThomas Huth */ 1606fa576c58SThomas Huth long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable) 160724eb3a82SDominik Dingel { 1608527e30b4SMartin Schwidefsky return gmap_fault(vcpu->arch.gmap, gpa, 1609527e30b4SMartin Schwidefsky writable ? 
FAULT_FLAG_WRITE : 0); 161024eb3a82SDominik Dingel } 161124eb3a82SDominik Dingel 16123c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token, 16133c038e6bSDominik Dingel unsigned long token) 16143c038e6bSDominik Dingel { 16153c038e6bSDominik Dingel struct kvm_s390_interrupt inti; 1616383d0b05SJens Freimann struct kvm_s390_irq irq; 16173c038e6bSDominik Dingel 16183c038e6bSDominik Dingel if (start_token) { 1619383d0b05SJens Freimann irq.u.ext.ext_params2 = token; 1620383d0b05SJens Freimann irq.type = KVM_S390_INT_PFAULT_INIT; 1621383d0b05SJens Freimann WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq)); 16223c038e6bSDominik Dingel } else { 16233c038e6bSDominik Dingel inti.type = KVM_S390_INT_PFAULT_DONE; 1624383d0b05SJens Freimann inti.parm64 = token; 16253c038e6bSDominik Dingel WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); 16263c038e6bSDominik Dingel } 16273c038e6bSDominik Dingel } 16283c038e6bSDominik Dingel 16293c038e6bSDominik Dingel void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, 16303c038e6bSDominik Dingel struct kvm_async_pf *work) 16313c038e6bSDominik Dingel { 16323c038e6bSDominik Dingel trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token); 16333c038e6bSDominik Dingel __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token); 16343c038e6bSDominik Dingel } 16353c038e6bSDominik Dingel 16363c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, 16373c038e6bSDominik Dingel struct kvm_async_pf *work) 16383c038e6bSDominik Dingel { 16393c038e6bSDominik Dingel trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token); 16403c038e6bSDominik Dingel __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token); 16413c038e6bSDominik Dingel } 16423c038e6bSDominik Dingel 16433c038e6bSDominik Dingel void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, 16443c038e6bSDominik Dingel struct kvm_async_pf *work) 16453c038e6bSDominik Dingel { 16463c038e6bSDominik Dingel /* s390 will always inject the page directly */ 16473c038e6bSDominik Dingel } 16483c038e6bSDominik Dingel 16493c038e6bSDominik Dingel bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) 16503c038e6bSDominik Dingel { 16513c038e6bSDominik Dingel /* 16523c038e6bSDominik Dingel * s390 will always inject the page directly, 16533c038e6bSDominik Dingel * but we still want check_async_completion to cleanup 16543c038e6bSDominik Dingel */ 16553c038e6bSDominik Dingel return true; 16563c038e6bSDominik Dingel } 16573c038e6bSDominik Dingel 16583c038e6bSDominik Dingel static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu) 16593c038e6bSDominik Dingel { 16603c038e6bSDominik Dingel hva_t hva; 16613c038e6bSDominik Dingel struct kvm_arch_async_pf arch; 16623c038e6bSDominik Dingel int rc; 16633c038e6bSDominik Dingel 16643c038e6bSDominik Dingel if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) 16653c038e6bSDominik Dingel return 0; 16663c038e6bSDominik Dingel if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) != 16673c038e6bSDominik Dingel vcpu->arch.pfault_compare) 16683c038e6bSDominik Dingel return 0; 16693c038e6bSDominik Dingel if (psw_extint_disabled(vcpu)) 16703c038e6bSDominik Dingel return 0; 16719a022067SDavid Hildenbrand if (kvm_s390_vcpu_has_irq(vcpu, 0)) 16723c038e6bSDominik Dingel return 0; 16733c038e6bSDominik Dingel if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul)) 16743c038e6bSDominik Dingel return 0; 16753c038e6bSDominik Dingel if (!vcpu->arch.gmap->pfault_enabled) 16763c038e6bSDominik Dingel return 0; 
16773c038e6bSDominik Dingel 167881480cc1SHeiko Carstens hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr)); 167981480cc1SHeiko Carstens hva += current->thread.gmap_addr & ~PAGE_MASK; 168081480cc1SHeiko Carstens if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8)) 16813c038e6bSDominik Dingel return 0; 16823c038e6bSDominik Dingel 16833c038e6bSDominik Dingel rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch); 16843c038e6bSDominik Dingel return rc; 16853c038e6bSDominik Dingel } 16863c038e6bSDominik Dingel 16873fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu) 1688b0c632dbSHeiko Carstens { 16893fb4c40fSThomas Huth int rc, cpuflags; 1690e168bf8dSCarsten Otte 16913c038e6bSDominik Dingel /* 16923c038e6bSDominik Dingel * On s390 notifications for arriving pages will be delivered directly 16933c038e6bSDominik Dingel * to the guest but the house keeping for completed pfaults is 16943c038e6bSDominik Dingel * handled outside the worker. 16953c038e6bSDominik Dingel */ 16963c038e6bSDominik Dingel kvm_check_async_pf_completion(vcpu); 16973c038e6bSDominik Dingel 16985a32c1afSChristian Borntraeger memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16); 1699b0c632dbSHeiko Carstens 1700b0c632dbSHeiko Carstens if (need_resched()) 1701b0c632dbSHeiko Carstens schedule(); 1702b0c632dbSHeiko Carstens 1703d3a73acbSMartin Schwidefsky if (test_cpu_flag(CIF_MCCK_PENDING)) 170471cde587SChristian Borntraeger s390_handle_mcck(); 170571cde587SChristian Borntraeger 170679395031SJens Freimann if (!kvm_is_ucontrol(vcpu->kvm)) { 170779395031SJens Freimann rc = kvm_s390_deliver_pending_interrupts(vcpu); 170879395031SJens Freimann if (rc) 170979395031SJens Freimann return rc; 171079395031SJens Freimann } 17110ff31867SCarsten Otte 17122c70fe44SChristian Borntraeger rc = kvm_s390_handle_requests(vcpu); 17132c70fe44SChristian Borntraeger if (rc) 17142c70fe44SChristian Borntraeger return rc; 17152c70fe44SChristian Borntraeger 171627291e21SDavid Hildenbrand if (guestdbg_enabled(vcpu)) { 171727291e21SDavid Hildenbrand kvm_s390_backup_guest_per_regs(vcpu); 171827291e21SDavid Hildenbrand kvm_s390_patch_guest_per_regs(vcpu); 171927291e21SDavid Hildenbrand } 172027291e21SDavid Hildenbrand 1721b0c632dbSHeiko Carstens vcpu->arch.sie_block->icptcode = 0; 17223fb4c40fSThomas Huth cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); 17233fb4c40fSThomas Huth VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags); 17243fb4c40fSThomas Huth trace_kvm_s390_sie_enter(vcpu, cpuflags); 17252b29a9fdSDominik Dingel 17263fb4c40fSThomas Huth return 0; 17273fb4c40fSThomas Huth } 17283fb4c40fSThomas Huth 17293fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) 17303fb4c40fSThomas Huth { 173124eb3a82SDominik Dingel int rc = -1; 17322b29a9fdSDominik Dingel 17332b29a9fdSDominik Dingel VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", 17342b29a9fdSDominik Dingel vcpu->arch.sie_block->icptcode); 17352b29a9fdSDominik Dingel trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); 17362b29a9fdSDominik Dingel 173727291e21SDavid Hildenbrand if (guestdbg_enabled(vcpu)) 173827291e21SDavid Hildenbrand kvm_s390_restore_guest_per_regs(vcpu); 173927291e21SDavid Hildenbrand 17403fb4c40fSThomas Huth if (exit_reason >= 0) { 17417c470539SMartin Schwidefsky rc = 0; 1742210b1607SThomas Huth } else if (kvm_is_ucontrol(vcpu->kvm)) { 1743210b1607SThomas Huth vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL; 1744210b1607SThomas Huth 
vcpu->run->s390_ucontrol.trans_exc_code = 1745210b1607SThomas Huth current->thread.gmap_addr; 1746210b1607SThomas Huth vcpu->run->s390_ucontrol.pgm_code = 0x10; 1747210b1607SThomas Huth rc = -EREMOTE; 174824eb3a82SDominik Dingel 174924eb3a82SDominik Dingel } else if (current->thread.gmap_pfault) { 17503c038e6bSDominik Dingel trace_kvm_s390_major_guest_pfault(vcpu); 175124eb3a82SDominik Dingel current->thread.gmap_pfault = 0; 1752fa576c58SThomas Huth if (kvm_arch_setup_async_pf(vcpu)) { 175324eb3a82SDominik Dingel rc = 0; 1754fa576c58SThomas Huth } else { 1755fa576c58SThomas Huth gpa_t gpa = current->thread.gmap_addr; 1756fa576c58SThomas Huth rc = kvm_arch_fault_in_page(vcpu, gpa, 1); 1757fa576c58SThomas Huth } 175824eb3a82SDominik Dingel } 175924eb3a82SDominik Dingel 176024eb3a82SDominik Dingel if (rc == -1) { 1761699bde3bSChristian Borntraeger VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); 1762699bde3bSChristian Borntraeger trace_kvm_s390_sie_fault(vcpu); 1763699bde3bSChristian Borntraeger rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 17641f0d0f09SCarsten Otte } 1765b0c632dbSHeiko Carstens 17665a32c1afSChristian Borntraeger memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); 17673fb4c40fSThomas Huth 1768a76ccff6SThomas Huth if (rc == 0) { 1769a76ccff6SThomas Huth if (kvm_is_ucontrol(vcpu->kvm)) 17702955c83fSChristian Borntraeger /* Don't exit for host interrupts. */ 17712955c83fSChristian Borntraeger rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0; 1772a76ccff6SThomas Huth else 1773a76ccff6SThomas Huth rc = kvm_handle_sie_intercept(vcpu); 1774a76ccff6SThomas Huth } 1775a76ccff6SThomas Huth 17763fb4c40fSThomas Huth return rc; 17773fb4c40fSThomas Huth } 17783fb4c40fSThomas Huth 17793fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu) 17803fb4c40fSThomas Huth { 17813fb4c40fSThomas Huth int rc, exit_reason; 17823fb4c40fSThomas Huth 1783800c1065SThomas Huth /* 1784800c1065SThomas Huth * We try to hold kvm->srcu during most of vcpu_run (except when run- 1785800c1065SThomas Huth * ning the guest), so that memslots (and other stuff) are protected 1786800c1065SThomas Huth */ 1787800c1065SThomas Huth vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 1788800c1065SThomas Huth 1789a76ccff6SThomas Huth do { 17903fb4c40fSThomas Huth rc = vcpu_pre_run(vcpu); 17913fb4c40fSThomas Huth if (rc) 1792a76ccff6SThomas Huth break; 17933fb4c40fSThomas Huth 1794800c1065SThomas Huth srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 17953fb4c40fSThomas Huth /* 1796a76ccff6SThomas Huth * As PF_VCPU will be used in fault handler, between 1797a76ccff6SThomas Huth * guest_enter and guest_exit should be no uaccess. 
17983fb4c40fSThomas Huth */ 17993fb4c40fSThomas Huth preempt_disable(); 18003fb4c40fSThomas Huth kvm_guest_enter(); 18013fb4c40fSThomas Huth preempt_enable(); 1802a76ccff6SThomas Huth exit_reason = sie64a(vcpu->arch.sie_block, 1803a76ccff6SThomas Huth vcpu->run->s.regs.gprs); 18043fb4c40fSThomas Huth kvm_guest_exit(); 1805800c1065SThomas Huth vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 18063fb4c40fSThomas Huth 18073fb4c40fSThomas Huth rc = vcpu_post_run(vcpu, exit_reason); 180827291e21SDavid Hildenbrand } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc); 18093fb4c40fSThomas Huth 1810800c1065SThomas Huth srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 1811e168bf8dSCarsten Otte return rc; 1812b0c632dbSHeiko Carstens } 1813b0c632dbSHeiko Carstens 1814b028ee3eSDavid Hildenbrand static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1815b028ee3eSDavid Hildenbrand { 1816b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; 1817b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; 1818b028ee3eSDavid Hildenbrand if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) 1819b028ee3eSDavid Hildenbrand kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix); 1820b028ee3eSDavid Hildenbrand if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) { 1821b028ee3eSDavid Hildenbrand memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128); 1822d3d692c8SDavid Hildenbrand /* some control register changes require a tlb flush */ 1823d3d692c8SDavid Hildenbrand kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 1824b028ee3eSDavid Hildenbrand } 1825b028ee3eSDavid Hildenbrand if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) { 1826b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm; 1827b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc; 1828b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr; 1829b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->pp = kvm_run->s.regs.pp; 1830b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea; 1831b028ee3eSDavid Hildenbrand } 1832b028ee3eSDavid Hildenbrand if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) { 1833b028ee3eSDavid Hildenbrand vcpu->arch.pfault_token = kvm_run->s.regs.pft; 1834b028ee3eSDavid Hildenbrand vcpu->arch.pfault_select = kvm_run->s.regs.pfs; 1835b028ee3eSDavid Hildenbrand vcpu->arch.pfault_compare = kvm_run->s.regs.pfc; 18369fbd8082SDavid Hildenbrand if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) 18379fbd8082SDavid Hildenbrand kvm_clear_async_pf_completion_queue(vcpu); 1838b028ee3eSDavid Hildenbrand } 1839b028ee3eSDavid Hildenbrand kvm_run->kvm_dirty_regs = 0; 1840b028ee3eSDavid Hildenbrand } 1841b028ee3eSDavid Hildenbrand 1842b028ee3eSDavid Hildenbrand static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1843b028ee3eSDavid Hildenbrand { 1844b028ee3eSDavid Hildenbrand kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; 1845b028ee3eSDavid Hildenbrand kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; 1846b028ee3eSDavid Hildenbrand kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu); 1847b028ee3eSDavid Hildenbrand memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); 1848b028ee3eSDavid Hildenbrand kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm; 1849b028ee3eSDavid Hildenbrand kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc; 1850b028ee3eSDavid Hildenbrand kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr; 1851b028ee3eSDavid Hildenbrand kvm_run->s.regs.pp 
= vcpu->arch.sie_block->pp; 1852b028ee3eSDavid Hildenbrand kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea; 1853b028ee3eSDavid Hildenbrand kvm_run->s.regs.pft = vcpu->arch.pfault_token; 1854b028ee3eSDavid Hildenbrand kvm_run->s.regs.pfs = vcpu->arch.pfault_select; 1855b028ee3eSDavid Hildenbrand kvm_run->s.regs.pfc = vcpu->arch.pfault_compare; 1856b028ee3eSDavid Hildenbrand } 1857b028ee3eSDavid Hildenbrand 1858b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1859b0c632dbSHeiko Carstens { 18608f2abe6aSChristian Borntraeger int rc; 1861b0c632dbSHeiko Carstens sigset_t sigsaved; 1862b0c632dbSHeiko Carstens 186327291e21SDavid Hildenbrand if (guestdbg_exit_pending(vcpu)) { 186427291e21SDavid Hildenbrand kvm_s390_prepare_debug_exit(vcpu); 186527291e21SDavid Hildenbrand return 0; 186627291e21SDavid Hildenbrand } 186727291e21SDavid Hildenbrand 1868b0c632dbSHeiko Carstens if (vcpu->sigset_active) 1869b0c632dbSHeiko Carstens sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); 1870b0c632dbSHeiko Carstens 18716352e4d2SDavid Hildenbrand if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { 18726852d7b6SDavid Hildenbrand kvm_s390_vcpu_start(vcpu); 18736352e4d2SDavid Hildenbrand } else if (is_vcpu_stopped(vcpu)) { 18746352e4d2SDavid Hildenbrand pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n", 18756352e4d2SDavid Hildenbrand vcpu->vcpu_id); 18766352e4d2SDavid Hildenbrand return -EINVAL; 18776352e4d2SDavid Hildenbrand } 1878b0c632dbSHeiko Carstens 1879b028ee3eSDavid Hildenbrand sync_regs(vcpu, kvm_run); 1880d7b0b5ebSCarsten Otte 1881dab4079dSHeiko Carstens might_fault(); 1882e168bf8dSCarsten Otte rc = __vcpu_run(vcpu); 18839ace903dSChristian Ehrhardt 1884b1d16c49SChristian Ehrhardt if (signal_pending(current) && !rc) { 1885b1d16c49SChristian Ehrhardt kvm_run->exit_reason = KVM_EXIT_INTR; 18868f2abe6aSChristian Borntraeger rc = -EINTR; 1887b1d16c49SChristian Ehrhardt } 18888f2abe6aSChristian Borntraeger 188927291e21SDavid Hildenbrand if (guestdbg_exit_pending(vcpu) && !rc) { 189027291e21SDavid Hildenbrand kvm_s390_prepare_debug_exit(vcpu); 189127291e21SDavid Hildenbrand rc = 0; 189227291e21SDavid Hildenbrand } 189327291e21SDavid Hildenbrand 1894b8e660b8SHeiko Carstens if (rc == -EOPNOTSUPP) { 18958f2abe6aSChristian Borntraeger /* intercept cannot be handled in-kernel, prepare kvm-run */ 18968f2abe6aSChristian Borntraeger kvm_run->exit_reason = KVM_EXIT_S390_SIEIC; 18978f2abe6aSChristian Borntraeger kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; 18988f2abe6aSChristian Borntraeger kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; 18998f2abe6aSChristian Borntraeger kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; 19008f2abe6aSChristian Borntraeger rc = 0; 19018f2abe6aSChristian Borntraeger } 19028f2abe6aSChristian Borntraeger 19038f2abe6aSChristian Borntraeger if (rc == -EREMOTE) { 19048f2abe6aSChristian Borntraeger /* intercept was handled, but userspace support is needed 19058f2abe6aSChristian Borntraeger * kvm_run has been prepared by the handler */ 19068f2abe6aSChristian Borntraeger rc = 0; 19078f2abe6aSChristian Borntraeger } 19088f2abe6aSChristian Borntraeger 1909b028ee3eSDavid Hildenbrand store_regs(vcpu, kvm_run); 1910d7b0b5ebSCarsten Otte 1911b0c632dbSHeiko Carstens if (vcpu->sigset_active) 1912b0c632dbSHeiko Carstens sigprocmask(SIG_SETMASK, &sigsaved, NULL); 1913b0c632dbSHeiko Carstens 1914b0c632dbSHeiko Carstens vcpu->stat.exit_userspace++; 19157e8e6ab4SHeiko Carstens return rc; 1916b0c632dbSHeiko Carstens } 
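/*
 * Illustration only: a minimal sketch, under stated assumptions, of how a
 * userspace loop typically drives the KVM_RUN path implemented by
 * kvm_arch_vcpu_ioctl_run() above.  The helper names run_vcpu_once() and
 * handle_sieic() are hypothetical; KVM_RUN, KVM_EXIT_INTR (signal pending,
 * the ioctl fails with EINTR) and KVM_EXIT_S390_SIEIC (intercept handed to
 * userspace) are the interfaces the function prepares in struct kvm_run.
 *
 *	static int run_vcpu_once(int vcpu_fd, struct kvm_run *run)
 *	{
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
 *			return errno == EINTR ? 0 : -errno;
 *		switch (run->exit_reason) {
 *		case KVM_EXIT_S390_SIEIC:
 *			return handle_sieic(run);
 *		case KVM_EXIT_INTR:
 *			return 0;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */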
1917b0c632dbSHeiko Carstens 1918b0c632dbSHeiko Carstens /* 1919b0c632dbSHeiko Carstens * store status at address 1920b0c632dbSHeiko Carstens * we have two special cases: 1921b0c632dbSHeiko Carstens * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit 1922b0c632dbSHeiko Carstens * KVM_S390_STORE_STATUS_PREFIXED: -> prefix 1923b0c632dbSHeiko Carstens */ 1924d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa) 1925b0c632dbSHeiko Carstens { 1926092670cdSCarsten Otte unsigned char archmode = 1; 1927fda902cbSMichael Mueller unsigned int px; 1928178bd789SThomas Huth u64 clkcomp; 1929d0bce605SHeiko Carstens int rc; 1930b0c632dbSHeiko Carstens 1931d0bce605SHeiko Carstens if (gpa == KVM_S390_STORE_STATUS_NOADDR) { 1932d0bce605SHeiko Carstens if (write_guest_abs(vcpu, 163, &archmode, 1)) 1933b0c632dbSHeiko Carstens return -EFAULT; 1934d0bce605SHeiko Carstens gpa = SAVE_AREA_BASE; 1935d0bce605SHeiko Carstens } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) { 1936d0bce605SHeiko Carstens if (write_guest_real(vcpu, 163, &archmode, 1)) 1937b0c632dbSHeiko Carstens return -EFAULT; 1938d0bce605SHeiko Carstens gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE); 1939d0bce605SHeiko Carstens } 1940d0bce605SHeiko Carstens rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs), 1941d0bce605SHeiko Carstens vcpu->arch.guest_fpregs.fprs, 128); 1942d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs), 1943d0bce605SHeiko Carstens vcpu->run->s.regs.gprs, 128); 1944d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw), 1945d0bce605SHeiko Carstens &vcpu->arch.sie_block->gpsw, 16); 1946fda902cbSMichael Mueller px = kvm_s390_get_prefix(vcpu); 1947d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg), 1948fda902cbSMichael Mueller &px, 4); 1949d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, 1950d0bce605SHeiko Carstens gpa + offsetof(struct save_area, fp_ctrl_reg), 1951d0bce605SHeiko Carstens &vcpu->arch.guest_fpregs.fpc, 4); 1952d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg), 1953d0bce605SHeiko Carstens &vcpu->arch.sie_block->todpr, 4); 1954d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer), 1955d0bce605SHeiko Carstens &vcpu->arch.sie_block->cputm, 8); 1956178bd789SThomas Huth clkcomp = vcpu->arch.sie_block->ckc >> 8; 1957d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp), 1958d0bce605SHeiko Carstens &clkcomp, 8); 1959d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs), 1960d0bce605SHeiko Carstens &vcpu->run->s.regs.acrs, 64); 1961d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs), 1962d0bce605SHeiko Carstens &vcpu->arch.sie_block->gcr, 128); 1963d0bce605SHeiko Carstens return rc ? -EFAULT : 0; 1964b0c632dbSHeiko Carstens } 1965b0c632dbSHeiko Carstens 1966e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) 1967e879892cSThomas Huth { 1968e879892cSThomas Huth /* 1969e879892cSThomas Huth * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy 1970e879892cSThomas Huth * copying in vcpu load/put.
Let's update our copies before we save 1971e879892cSThomas Huth * it into the save area 1972e879892cSThomas Huth */ 1973e879892cSThomas Huth save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); 1974e879892cSThomas Huth save_fp_regs(vcpu->arch.guest_fpregs.fprs); 1975e879892cSThomas Huth save_access_regs(vcpu->run->s.regs.acrs); 1976e879892cSThomas Huth 1977e879892cSThomas Huth return kvm_s390_store_status_unloaded(vcpu, addr); 1978e879892cSThomas Huth } 1979e879892cSThomas Huth 19808ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) 19818ad35755SDavid Hildenbrand { 19828ad35755SDavid Hildenbrand kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu); 19838ad35755SDavid Hildenbrand kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu); 19848ad35755SDavid Hildenbrand exit_sie_sync(vcpu); 19858ad35755SDavid Hildenbrand } 19868ad35755SDavid Hildenbrand 19878ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm) 19888ad35755SDavid Hildenbrand { 19898ad35755SDavid Hildenbrand unsigned int i; 19908ad35755SDavid Hildenbrand struct kvm_vcpu *vcpu; 19918ad35755SDavid Hildenbrand 19928ad35755SDavid Hildenbrand kvm_for_each_vcpu(i, vcpu, kvm) { 19938ad35755SDavid Hildenbrand __disable_ibs_on_vcpu(vcpu); 19948ad35755SDavid Hildenbrand } 19958ad35755SDavid Hildenbrand } 19968ad35755SDavid Hildenbrand 19978ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu) 19988ad35755SDavid Hildenbrand { 19998ad35755SDavid Hildenbrand kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu); 20008ad35755SDavid Hildenbrand kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu); 20018ad35755SDavid Hildenbrand exit_sie_sync(vcpu); 20028ad35755SDavid Hildenbrand } 20038ad35755SDavid Hildenbrand 20046852d7b6SDavid Hildenbrand void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) 20056852d7b6SDavid Hildenbrand { 20068ad35755SDavid Hildenbrand int i, online_vcpus, started_vcpus = 0; 20078ad35755SDavid Hildenbrand 20088ad35755SDavid Hildenbrand if (!is_vcpu_stopped(vcpu)) 20098ad35755SDavid Hildenbrand return; 20108ad35755SDavid Hildenbrand 20116852d7b6SDavid Hildenbrand trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1); 20128ad35755SDavid Hildenbrand /* Only one cpu at a time may enter/leave the STOPPED state. */ 2013433b9ee4SDavid Hildenbrand spin_lock(&vcpu->kvm->arch.start_stop_lock); 20148ad35755SDavid Hildenbrand online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); 20158ad35755SDavid Hildenbrand 20168ad35755SDavid Hildenbrand for (i = 0; i < online_vcpus; i++) { 20178ad35755SDavid Hildenbrand if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) 20188ad35755SDavid Hildenbrand started_vcpus++; 20198ad35755SDavid Hildenbrand } 20208ad35755SDavid Hildenbrand 20218ad35755SDavid Hildenbrand if (started_vcpus == 0) { 20228ad35755SDavid Hildenbrand /* we're the only active VCPU -> speed it up */ 20238ad35755SDavid Hildenbrand __enable_ibs_on_vcpu(vcpu); 20248ad35755SDavid Hildenbrand } else if (started_vcpus == 1) { 20258ad35755SDavid Hildenbrand /* 20268ad35755SDavid Hildenbrand * As we are starting a second VCPU, we have to disable 20278ad35755SDavid Hildenbrand * the IBS facility on all VCPUs to remove potentially 20288ad35755SDavid Hildenbrand * outstanding ENABLE requests.
20298ad35755SDavid Hildenbrand */ 20308ad35755SDavid Hildenbrand __disable_ibs_on_all_vcpus(vcpu->kvm); 20318ad35755SDavid Hildenbrand } 20328ad35755SDavid Hildenbrand 20336852d7b6SDavid Hildenbrand atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); 20348ad35755SDavid Hildenbrand /* 20358ad35755SDavid Hildenbrand * Another VCPU might have used IBS while we were offline. 20368ad35755SDavid Hildenbrand * Let's play safe and flush the VCPU at startup. 20378ad35755SDavid Hildenbrand */ 2038d3d692c8SDavid Hildenbrand kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 2039433b9ee4SDavid Hildenbrand spin_unlock(&vcpu->kvm->arch.start_stop_lock); 20408ad35755SDavid Hildenbrand return; 20416852d7b6SDavid Hildenbrand } 20426852d7b6SDavid Hildenbrand 20436852d7b6SDavid Hildenbrand void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) 20446852d7b6SDavid Hildenbrand { 20458ad35755SDavid Hildenbrand int i, online_vcpus, started_vcpus = 0; 20468ad35755SDavid Hildenbrand struct kvm_vcpu *started_vcpu = NULL; 20478ad35755SDavid Hildenbrand 20488ad35755SDavid Hildenbrand if (is_vcpu_stopped(vcpu)) 20498ad35755SDavid Hildenbrand return; 20508ad35755SDavid Hildenbrand 20516852d7b6SDavid Hildenbrand trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0); 20528ad35755SDavid Hildenbrand /* Only one cpu at a time may enter/leave the STOPPED state. */ 2053433b9ee4SDavid Hildenbrand spin_lock(&vcpu->kvm->arch.start_stop_lock); 20548ad35755SDavid Hildenbrand online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); 20558ad35755SDavid Hildenbrand 205632f5ff63SDavid Hildenbrand /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */ 20576cddd432SDavid Hildenbrand kvm_s390_clear_stop_irq(vcpu); 205832f5ff63SDavid Hildenbrand 20596cddd432SDavid Hildenbrand atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); 20608ad35755SDavid Hildenbrand __disable_ibs_on_vcpu(vcpu); 20618ad35755SDavid Hildenbrand 20628ad35755SDavid Hildenbrand for (i = 0; i < online_vcpus; i++) { 20638ad35755SDavid Hildenbrand if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) { 20648ad35755SDavid Hildenbrand started_vcpus++; 20658ad35755SDavid Hildenbrand started_vcpu = vcpu->kvm->vcpus[i]; 20668ad35755SDavid Hildenbrand } 20678ad35755SDavid Hildenbrand } 20688ad35755SDavid Hildenbrand 20698ad35755SDavid Hildenbrand if (started_vcpus == 1) { 20708ad35755SDavid Hildenbrand /* 20718ad35755SDavid Hildenbrand * As we only have one VCPU left, we want to enable the 20728ad35755SDavid Hildenbrand * IBS facility for that VCPU to speed it up. 
20738ad35755SDavid Hildenbrand */ 20748ad35755SDavid Hildenbrand __enable_ibs_on_vcpu(started_vcpu); 20758ad35755SDavid Hildenbrand } 20768ad35755SDavid Hildenbrand 2077433b9ee4SDavid Hildenbrand spin_unlock(&vcpu->kvm->arch.start_stop_lock); 20788ad35755SDavid Hildenbrand return; 20796852d7b6SDavid Hildenbrand } 20806852d7b6SDavid Hildenbrand 2081d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, 2082d6712df9SCornelia Huck struct kvm_enable_cap *cap) 2083d6712df9SCornelia Huck { 2084d6712df9SCornelia Huck int r; 2085d6712df9SCornelia Huck 2086d6712df9SCornelia Huck if (cap->flags) 2087d6712df9SCornelia Huck return -EINVAL; 2088d6712df9SCornelia Huck 2089d6712df9SCornelia Huck switch (cap->cap) { 2090fa6b7fe9SCornelia Huck case KVM_CAP_S390_CSS_SUPPORT: 2091fa6b7fe9SCornelia Huck if (!vcpu->kvm->arch.css_support) { 2092fa6b7fe9SCornelia Huck vcpu->kvm->arch.css_support = 1; 2093fa6b7fe9SCornelia Huck trace_kvm_s390_enable_css(vcpu->kvm); 2094fa6b7fe9SCornelia Huck } 2095fa6b7fe9SCornelia Huck r = 0; 2096fa6b7fe9SCornelia Huck break; 2097d6712df9SCornelia Huck default: 2098d6712df9SCornelia Huck r = -EINVAL; 2099d6712df9SCornelia Huck break; 2100d6712df9SCornelia Huck } 2101d6712df9SCornelia Huck return r; 2102d6712df9SCornelia Huck } 2103d6712df9SCornelia Huck 2104b0c632dbSHeiko Carstens long kvm_arch_vcpu_ioctl(struct file *filp, 2105b0c632dbSHeiko Carstens unsigned int ioctl, unsigned long arg) 2106b0c632dbSHeiko Carstens { 2107b0c632dbSHeiko Carstens struct kvm_vcpu *vcpu = filp->private_data; 2108b0c632dbSHeiko Carstens void __user *argp = (void __user *)arg; 2109800c1065SThomas Huth int idx; 2110bc923cc9SAvi Kivity long r; 2111b0c632dbSHeiko Carstens 211293736624SAvi Kivity switch (ioctl) { 211393736624SAvi Kivity case KVM_S390_INTERRUPT: { 2114ba5c1e9bSCarsten Otte struct kvm_s390_interrupt s390int; 2115383d0b05SJens Freimann struct kvm_s390_irq s390irq; 2116ba5c1e9bSCarsten Otte 211793736624SAvi Kivity r = -EFAULT; 2118ba5c1e9bSCarsten Otte if (copy_from_user(&s390int, argp, sizeof(s390int))) 211993736624SAvi Kivity break; 2120383d0b05SJens Freimann if (s390int_to_s390irq(&s390int, &s390irq)) 2121383d0b05SJens Freimann return -EINVAL; 2122383d0b05SJens Freimann r = kvm_s390_inject_vcpu(vcpu, &s390irq); 212393736624SAvi Kivity break; 2124ba5c1e9bSCarsten Otte } 2125b0c632dbSHeiko Carstens case KVM_S390_STORE_STATUS: 2126800c1065SThomas Huth idx = srcu_read_lock(&vcpu->kvm->srcu); 2127bc923cc9SAvi Kivity r = kvm_s390_vcpu_store_status(vcpu, arg); 2128800c1065SThomas Huth srcu_read_unlock(&vcpu->kvm->srcu, idx); 2129bc923cc9SAvi Kivity break; 2130b0c632dbSHeiko Carstens case KVM_S390_SET_INITIAL_PSW: { 2131b0c632dbSHeiko Carstens psw_t psw; 2132b0c632dbSHeiko Carstens 2133bc923cc9SAvi Kivity r = -EFAULT; 2134b0c632dbSHeiko Carstens if (copy_from_user(&psw, argp, sizeof(psw))) 2135bc923cc9SAvi Kivity break; 2136bc923cc9SAvi Kivity r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw); 2137bc923cc9SAvi Kivity break; 2138b0c632dbSHeiko Carstens } 2139b0c632dbSHeiko Carstens case KVM_S390_INITIAL_RESET: 2140bc923cc9SAvi Kivity r = kvm_arch_vcpu_ioctl_initial_reset(vcpu); 2141bc923cc9SAvi Kivity break; 214214eebd91SCarsten Otte case KVM_SET_ONE_REG: 214314eebd91SCarsten Otte case KVM_GET_ONE_REG: { 214414eebd91SCarsten Otte struct kvm_one_reg reg; 214514eebd91SCarsten Otte r = -EFAULT; 214614eebd91SCarsten Otte if (copy_from_user(&reg, argp, sizeof(reg))) 214714eebd91SCarsten Otte break; 214814eebd91SCarsten Otte if (ioctl == KVM_SET_ONE_REG) 214914eebd91SCarsten
Otte r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg); 215014eebd91SCarsten Otte else 215114eebd91SCarsten Otte r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg); 215214eebd91SCarsten Otte break; 215314eebd91SCarsten Otte } 215427e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL 215527e0393fSCarsten Otte case KVM_S390_UCAS_MAP: { 215627e0393fSCarsten Otte struct kvm_s390_ucas_mapping ucasmap; 215727e0393fSCarsten Otte 215827e0393fSCarsten Otte if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) { 215927e0393fSCarsten Otte r = -EFAULT; 216027e0393fSCarsten Otte break; 216127e0393fSCarsten Otte } 216227e0393fSCarsten Otte 216327e0393fSCarsten Otte if (!kvm_is_ucontrol(vcpu->kvm)) { 216427e0393fSCarsten Otte r = -EINVAL; 216527e0393fSCarsten Otte break; 216627e0393fSCarsten Otte } 216727e0393fSCarsten Otte 216827e0393fSCarsten Otte r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr, 216927e0393fSCarsten Otte ucasmap.vcpu_addr, ucasmap.length); 217027e0393fSCarsten Otte break; 217127e0393fSCarsten Otte } 217227e0393fSCarsten Otte case KVM_S390_UCAS_UNMAP: { 217327e0393fSCarsten Otte struct kvm_s390_ucas_mapping ucasmap; 217427e0393fSCarsten Otte 217527e0393fSCarsten Otte if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) { 217627e0393fSCarsten Otte r = -EFAULT; 217727e0393fSCarsten Otte break; 217827e0393fSCarsten Otte } 217927e0393fSCarsten Otte 218027e0393fSCarsten Otte if (!kvm_is_ucontrol(vcpu->kvm)) { 218127e0393fSCarsten Otte r = -EINVAL; 218227e0393fSCarsten Otte break; 218327e0393fSCarsten Otte } 218427e0393fSCarsten Otte 218527e0393fSCarsten Otte r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr, 218627e0393fSCarsten Otte ucasmap.length); 218727e0393fSCarsten Otte break; 218827e0393fSCarsten Otte } 218927e0393fSCarsten Otte #endif 2190ccc7910fSCarsten Otte case KVM_S390_VCPU_FAULT: { 2191527e30b4SMartin Schwidefsky r = gmap_fault(vcpu->arch.gmap, arg, 0); 2192ccc7910fSCarsten Otte break; 2193ccc7910fSCarsten Otte } 2194d6712df9SCornelia Huck case KVM_ENABLE_CAP: 2195d6712df9SCornelia Huck { 2196d6712df9SCornelia Huck struct kvm_enable_cap cap; 2197d6712df9SCornelia Huck r = -EFAULT; 2198d6712df9SCornelia Huck if (copy_from_user(&cap, argp, sizeof(cap))) 2199d6712df9SCornelia Huck break; 2200d6712df9SCornelia Huck r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); 2201d6712df9SCornelia Huck break; 2202d6712df9SCornelia Huck } 2203b0c632dbSHeiko Carstens default: 22043e6afcf1SCarsten Otte r = -ENOTTY; 2205b0c632dbSHeiko Carstens } 2206bc923cc9SAvi Kivity return r; 2207b0c632dbSHeiko Carstens } 2208b0c632dbSHeiko Carstens 22095b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) 22105b1c1493SCarsten Otte { 22115b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL 22125b1c1493SCarsten Otte if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET) 22135b1c1493SCarsten Otte && (kvm_is_ucontrol(vcpu->kvm))) { 22145b1c1493SCarsten Otte vmf->page = virt_to_page(vcpu->arch.sie_block); 22155b1c1493SCarsten Otte get_page(vmf->page); 22165b1c1493SCarsten Otte return 0; 22175b1c1493SCarsten Otte } 22185b1c1493SCarsten Otte #endif 22195b1c1493SCarsten Otte return VM_FAULT_SIGBUS; 22205b1c1493SCarsten Otte } 22215b1c1493SCarsten Otte 22225587027cSAneesh Kumar K.V int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, 22235587027cSAneesh Kumar K.V unsigned long npages) 2224db3fe4ebSTakuya Yoshikawa { 2225db3fe4ebSTakuya Yoshikawa return 0; 2226db3fe4ebSTakuya Yoshikawa } 2227db3fe4ebSTakuya Yoshikawa 2228b0c632dbSHeiko Carstens /* Section: memory related */
2228b0c632dbSHeiko Carstens /* Section: memory related */
2229f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm,
2230f7784b8eSMarcelo Tosatti 				   struct kvm_memory_slot *memslot,
22317b6195a9STakuya Yoshikawa 				   struct kvm_userspace_memory_region *mem,
22327b6195a9STakuya Yoshikawa 				   enum kvm_mr_change change)
2233b0c632dbSHeiko Carstens {
2234dd2887e7SNick Wang 	/* A few sanity checks. Memory slots have to start and end on a
2235dd2887e7SNick Wang 	   segment boundary (1MB). The userland memory may be fragmented
2236dd2887e7SNick Wang 	   across several different vmas, and it is fine to mmap() and
2237dd2887e7SNick Wang 	   munmap() within this slot at any time after this call. */
2238b0c632dbSHeiko Carstens 
2239598841caSCarsten Otte 	if (mem->userspace_addr & 0xffffful)
2240b0c632dbSHeiko Carstens 		return -EINVAL;
2241b0c632dbSHeiko Carstens 
2242598841caSCarsten Otte 	if (mem->memory_size & 0xffffful)
2243b0c632dbSHeiko Carstens 		return -EINVAL;
2244b0c632dbSHeiko Carstens 
2245f7784b8eSMarcelo Tosatti 	return 0;
2246f7784b8eSMarcelo Tosatti }
2247f7784b8eSMarcelo Tosatti 
2248f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm,
2249f7784b8eSMarcelo Tosatti 				struct kvm_userspace_memory_region *mem,
22508482644aSTakuya Yoshikawa 				const struct kvm_memory_slot *old,
22518482644aSTakuya Yoshikawa 				enum kvm_mr_change change)
2252f7784b8eSMarcelo Tosatti {
2253f7850c92SCarsten Otte 	int rc;
2254f7784b8eSMarcelo Tosatti 
22552cef4debSChristian Borntraeger 	/* If the basics of the memslot do not change, we do not want
22562cef4debSChristian Borntraeger 	 * to update the gmap. Every update causes several unnecessary
22572cef4debSChristian Borntraeger 	 * segment translation exceptions. This is usually handled just
22582cef4debSChristian Borntraeger 	 * fine by the normal fault handler + gmap, but it will also
22592cef4debSChristian Borntraeger 	 * cause faults on the prefix page of running guest CPUs.
22602cef4debSChristian Borntraeger 	 */
22612cef4debSChristian Borntraeger 	if (old->userspace_addr == mem->userspace_addr &&
22622cef4debSChristian Borntraeger 	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
22632cef4debSChristian Borntraeger 	    old->npages * PAGE_SIZE == mem->memory_size)
22642cef4debSChristian Borntraeger 		return;
2265598841caSCarsten Otte 
2266598841caSCarsten Otte 	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2267598841caSCarsten Otte 		mem->guest_phys_addr, mem->memory_size);
2268598841caSCarsten Otte 	if (rc)
2269f7850c92SCarsten Otte 		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
2270598841caSCarsten Otte 	return;
2271b0c632dbSHeiko Carstens }
2272b0c632dbSHeiko Carstens 
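/*
 * A hypothetical userspace sketch of registering a memory slot that satisfies
 * the 1MB segment alignment checks in kvm_arch_prepare_memory_region() above.
 * The vm_fd, slot number and guest address are made up; only
 * KVM_SET_USER_MEMORY_REGION and struct kvm_userspace_memory_region come from
 * the documented KVM API. The VMM over-allocates so it can hand the kernel a
 * segment-aligned userspace address.
 */
#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

#define SEG_SIZE (1UL << 20)	/* 1MB segment granularity on s390 */

static int add_memslot(int vm_fd, uint32_t slot, uint64_t guest_phys, size_t size)
{
	/* Over-allocate by one segment so the start can be rounded up. */
	size_t span = size + SEG_SIZE;
	void *raw = mmap(NULL, span, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (raw == MAP_FAILED)
		return -1;

	uint64_t aligned = ((uintptr_t)raw + SEG_SIZE - 1) & ~(SEG_SIZE - 1);

	struct kvm_userspace_memory_region mem = {
		.slot            = slot,
		.guest_phys_addr = guest_phys,	/* kept 1MB aligned as well, as gmap_map_segment() expects */
		.memory_size     = size,	/* multiple of 1MB */
		.userspace_addr  = aligned,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
}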
2273b0c632dbSHeiko Carstens static int __init kvm_s390_init(void)
2274b0c632dbSHeiko Carstens {
22759d8d5786SMichael Mueller 	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
2276b0c632dbSHeiko Carstens }
2277b0c632dbSHeiko Carstens 
2278b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void)
2279b0c632dbSHeiko Carstens {
2280b0c632dbSHeiko Carstens 	kvm_exit();
2281b0c632dbSHeiko Carstens }
2282b0c632dbSHeiko Carstens 
2283b0c632dbSHeiko Carstens module_init(kvm_s390_init);
2284b0c632dbSHeiko Carstens module_exit(kvm_s390_exit);
2285566af940SCornelia Huck 
2286566af940SCornelia Huck /*
2287566af940SCornelia Huck  * Enable autoloading of the kvm module.
2288566af940SCornelia Huck  * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2289566af940SCornelia Huck  * since x86 takes a different approach.
2290566af940SCornelia Huck  */
2291566af940SCornelia Huck #include <linux/miscdevice.h>
2292566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR);
2293566af940SCornelia Huck MODULE_ALIAS("devname:kvm");
2294
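/*
 * A hypothetical userspace sketch showing what the aliases above enable: the
 * /dev/kvm node can exist before the module is loaded, so a plain open()
 * pulls the module in on demand. Only the device path and the
 * KVM_GET_API_VERSION/KVM_CREATE_VM ioctls are taken from the documented KVM
 * API; the helper name and error handling are made up for illustration.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int open_kvm_and_create_vm(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
	if (kvm_fd < 0)
		return -1;

	/* Sanity check: the stable API version is expected to match. */
	if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
		return -1;

	/* Type 0 selects the default (non-ucontrol) machine type. */
	return ioctl(kvm_fd, KVM_CREATE_VM, 0);
}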