1b0c632dbSHeiko Carstens /* 2a53c8fabSHeiko Carstens * hosting zSeries kernel virtual machines 3b0c632dbSHeiko Carstens * 4628eb9b8SChristian Ehrhardt * Copyright IBM Corp. 2008, 2009 5b0c632dbSHeiko Carstens * 6b0c632dbSHeiko Carstens * This program is free software; you can redistribute it and/or modify 7b0c632dbSHeiko Carstens * it under the terms of the GNU General Public License (version 2 only) 8b0c632dbSHeiko Carstens * as published by the Free Software Foundation. 9b0c632dbSHeiko Carstens * 10b0c632dbSHeiko Carstens * Author(s): Carsten Otte <cotte@de.ibm.com> 11b0c632dbSHeiko Carstens * Christian Borntraeger <borntraeger@de.ibm.com> 12b0c632dbSHeiko Carstens * Heiko Carstens <heiko.carstens@de.ibm.com> 13628eb9b8SChristian Ehrhardt * Christian Ehrhardt <ehrhardt@de.ibm.com> 1415f36ebdSJason J. Herne * Jason J. Herne <jjherne@us.ibm.com> 15b0c632dbSHeiko Carstens */ 16b0c632dbSHeiko Carstens 17b0c632dbSHeiko Carstens #include <linux/compiler.h> 18b0c632dbSHeiko Carstens #include <linux/err.h> 19b0c632dbSHeiko Carstens #include <linux/fs.h> 20ca872302SChristian Borntraeger #include <linux/hrtimer.h> 21b0c632dbSHeiko Carstens #include <linux/init.h> 22b0c632dbSHeiko Carstens #include <linux/kvm.h> 23b0c632dbSHeiko Carstens #include <linux/kvm_host.h> 24b0c632dbSHeiko Carstens #include <linux/module.h> 25a374e892STony Krowiak #include <linux/random.h> 26b0c632dbSHeiko Carstens #include <linux/slab.h> 27ba5c1e9bSCarsten Otte #include <linux/timer.h> 28cbb870c8SHeiko Carstens #include <asm/asm-offsets.h> 29b0c632dbSHeiko Carstens #include <asm/lowcore.h> 30b0c632dbSHeiko Carstens #include <asm/pgtable.h> 31f5daba1dSHeiko Carstens #include <asm/nmi.h> 32a0616cdeSDavid Howells #include <asm/switch_to.h> 331526bf9cSChristian Borntraeger #include <asm/sclp.h> 348f2abe6aSChristian Borntraeger #include "kvm-s390.h" 35b0c632dbSHeiko Carstens #include "gaccess.h" 36b0c632dbSHeiko Carstens 375786fffaSCornelia Huck #define CREATE_TRACE_POINTS 385786fffaSCornelia Huck #include "trace.h" 39ade38c31SCornelia Huck #include "trace-s390.h" 405786fffaSCornelia Huck 41b0c632dbSHeiko Carstens #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU 42b0c632dbSHeiko Carstens 43b0c632dbSHeiko Carstens struct kvm_stats_debugfs_item debugfs_entries[] = { 44b0c632dbSHeiko Carstens { "userspace_handled", VCPU_STAT(exit_userspace) }, 450eaeafa1SChristian Borntraeger { "exit_null", VCPU_STAT(exit_null) }, 468f2abe6aSChristian Borntraeger { "exit_validity", VCPU_STAT(exit_validity) }, 478f2abe6aSChristian Borntraeger { "exit_stop_request", VCPU_STAT(exit_stop_request) }, 488f2abe6aSChristian Borntraeger { "exit_external_request", VCPU_STAT(exit_external_request) }, 498f2abe6aSChristian Borntraeger { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) }, 50ba5c1e9bSCarsten Otte { "exit_instruction", VCPU_STAT(exit_instruction) }, 51ba5c1e9bSCarsten Otte { "exit_program_interruption", VCPU_STAT(exit_program_interruption) }, 52ba5c1e9bSCarsten Otte { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) }, 53f7819512SPaolo Bonzini { "halt_successful_poll", VCPU_STAT(halt_successful_poll) }, 54ce2e4f0bSDavid Hildenbrand { "halt_wakeup", VCPU_STAT(halt_wakeup) }, 55f5e10b09SChristian Borntraeger { "instruction_lctlg", VCPU_STAT(instruction_lctlg) }, 56ba5c1e9bSCarsten Otte { "instruction_lctl", VCPU_STAT(instruction_lctl) }, 57aba07508SDavid Hildenbrand { "instruction_stctl", VCPU_STAT(instruction_stctl) }, 58aba07508SDavid Hildenbrand { "instruction_stctg", VCPU_STAT(instruction_stctg) 
}, 59ba5c1e9bSCarsten Otte { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) }, 607697e71fSChristian Ehrhardt { "deliver_external_call", VCPU_STAT(deliver_external_call) }, 61ba5c1e9bSCarsten Otte { "deliver_service_signal", VCPU_STAT(deliver_service_signal) }, 62ba5c1e9bSCarsten Otte { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) }, 63ba5c1e9bSCarsten Otte { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) }, 64ba5c1e9bSCarsten Otte { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) }, 65ba5c1e9bSCarsten Otte { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) }, 66ba5c1e9bSCarsten Otte { "deliver_program_interruption", VCPU_STAT(deliver_program_int) }, 67ba5c1e9bSCarsten Otte { "exit_wait_state", VCPU_STAT(exit_wait_state) }, 6869d0d3a3SChristian Borntraeger { "instruction_pfmf", VCPU_STAT(instruction_pfmf) }, 69453423dcSChristian Borntraeger { "instruction_stidp", VCPU_STAT(instruction_stidp) }, 70453423dcSChristian Borntraeger { "instruction_spx", VCPU_STAT(instruction_spx) }, 71453423dcSChristian Borntraeger { "instruction_stpx", VCPU_STAT(instruction_stpx) }, 72453423dcSChristian Borntraeger { "instruction_stap", VCPU_STAT(instruction_stap) }, 73453423dcSChristian Borntraeger { "instruction_storage_key", VCPU_STAT(instruction_storage_key) }, 748a242234SHeiko Carstens { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) }, 75453423dcSChristian Borntraeger { "instruction_stsch", VCPU_STAT(instruction_stsch) }, 76453423dcSChristian Borntraeger { "instruction_chsc", VCPU_STAT(instruction_chsc) }, 77b31288faSKonstantin Weitz { "instruction_essa", VCPU_STAT(instruction_essa) }, 78453423dcSChristian Borntraeger { "instruction_stsi", VCPU_STAT(instruction_stsi) }, 79453423dcSChristian Borntraeger { "instruction_stfl", VCPU_STAT(instruction_stfl) }, 80bb25b9baSChristian Borntraeger { "instruction_tprot", VCPU_STAT(instruction_tprot) }, 815288fbf0SChristian Borntraeger { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) }, 82bd59d3a4SCornelia Huck { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) }, 837697e71fSChristian Ehrhardt { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) }, 845288fbf0SChristian Borntraeger { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) }, 8542cb0c9fSDavid Hildenbrand { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) }, 8642cb0c9fSDavid Hildenbrand { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) }, 875288fbf0SChristian Borntraeger { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) }, 8842cb0c9fSDavid Hildenbrand { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) }, 8942cb0c9fSDavid Hildenbrand { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) }, 905288fbf0SChristian Borntraeger { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) }, 915288fbf0SChristian Borntraeger { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) }, 925288fbf0SChristian Borntraeger { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) }, 9342cb0c9fSDavid Hildenbrand { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) }, 9442cb0c9fSDavid Hildenbrand { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) }, 9542cb0c9fSDavid Hildenbrand { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) }, 96388186bcSChristian Borntraeger { 
"diagnose_10", VCPU_STAT(diagnose_10) }, 97e28acfeaSChristian Borntraeger { "diagnose_44", VCPU_STAT(diagnose_44) }, 9841628d33SKonstantin Weitz { "diagnose_9c", VCPU_STAT(diagnose_9c) }, 99b0c632dbSHeiko Carstens { NULL } 100b0c632dbSHeiko Carstens }; 101b0c632dbSHeiko Carstens 102*9d8d5786SMichael Mueller /* upper facilities limit for kvm */ 103*9d8d5786SMichael Mueller unsigned long kvm_s390_fac_list_mask[] = { 104*9d8d5786SMichael Mueller 0xff82fffbf4fc2000UL, 105*9d8d5786SMichael Mueller 0x005c000000000000UL, 106*9d8d5786SMichael Mueller }; 107b0c632dbSHeiko Carstens 108*9d8d5786SMichael Mueller unsigned long kvm_s390_fac_list_mask_size(void) 10978c4b59fSMichael Mueller { 110*9d8d5786SMichael Mueller BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64); 111*9d8d5786SMichael Mueller return ARRAY_SIZE(kvm_s390_fac_list_mask); 11278c4b59fSMichael Mueller } 11378c4b59fSMichael Mueller 114*9d8d5786SMichael Mueller static struct gmap_notifier gmap_notifier; 115*9d8d5786SMichael Mueller 116b0c632dbSHeiko Carstens /* Section: not file related */ 11713a34e06SRadim Krčmář int kvm_arch_hardware_enable(void) 118b0c632dbSHeiko Carstens { 119b0c632dbSHeiko Carstens /* every s390 is virtualization enabled ;-) */ 12010474ae8SAlexander Graf return 0; 121b0c632dbSHeiko Carstens } 122b0c632dbSHeiko Carstens 1232c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address); 1242c70fe44SChristian Borntraeger 125b0c632dbSHeiko Carstens int kvm_arch_hardware_setup(void) 126b0c632dbSHeiko Carstens { 1272c70fe44SChristian Borntraeger gmap_notifier.notifier_call = kvm_gmap_notifier; 1282c70fe44SChristian Borntraeger gmap_register_ipte_notifier(&gmap_notifier); 129b0c632dbSHeiko Carstens return 0; 130b0c632dbSHeiko Carstens } 131b0c632dbSHeiko Carstens 132b0c632dbSHeiko Carstens void kvm_arch_hardware_unsetup(void) 133b0c632dbSHeiko Carstens { 1342c70fe44SChristian Borntraeger gmap_unregister_ipte_notifier(&gmap_notifier); 135b0c632dbSHeiko Carstens } 136b0c632dbSHeiko Carstens 137b0c632dbSHeiko Carstens int kvm_arch_init(void *opaque) 138b0c632dbSHeiko Carstens { 13984877d93SCornelia Huck /* Register floating interrupt controller interface. 
*/ 14084877d93SCornelia Huck return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC); 141b0c632dbSHeiko Carstens } 142b0c632dbSHeiko Carstens 143b0c632dbSHeiko Carstens /* Section: device related */ 144b0c632dbSHeiko Carstens long kvm_arch_dev_ioctl(struct file *filp, 145b0c632dbSHeiko Carstens unsigned int ioctl, unsigned long arg) 146b0c632dbSHeiko Carstens { 147b0c632dbSHeiko Carstens if (ioctl == KVM_S390_ENABLE_SIE) 148b0c632dbSHeiko Carstens return s390_enable_sie(); 149b0c632dbSHeiko Carstens return -EINVAL; 150b0c632dbSHeiko Carstens } 151b0c632dbSHeiko Carstens 152784aa3d7SAlexander Graf int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) 153b0c632dbSHeiko Carstens { 154d7b0b5ebSCarsten Otte int r; 155d7b0b5ebSCarsten Otte 1562bd0ac4eSCarsten Otte switch (ext) { 157d7b0b5ebSCarsten Otte case KVM_CAP_S390_PSW: 158b6cf8788SChristian Borntraeger case KVM_CAP_S390_GMAP: 15952e16b18SChristian Borntraeger case KVM_CAP_SYNC_MMU: 1601efd0f59SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL 1611efd0f59SCarsten Otte case KVM_CAP_S390_UCONTROL: 1621efd0f59SCarsten Otte #endif 1633c038e6bSDominik Dingel case KVM_CAP_ASYNC_PF: 16460b413c9SChristian Borntraeger case KVM_CAP_SYNC_REGS: 16514eebd91SCarsten Otte case KVM_CAP_ONE_REG: 166d6712df9SCornelia Huck case KVM_CAP_ENABLE_CAP: 167fa6b7fe9SCornelia Huck case KVM_CAP_S390_CSS_SUPPORT: 168ebc32262SCornelia Huck case KVM_CAP_IRQFD: 16910ccaa1eSCornelia Huck case KVM_CAP_IOEVENTFD: 170c05c4186SJens Freimann case KVM_CAP_DEVICE_CTRL: 171d938dc55SCornelia Huck case KVM_CAP_ENABLE_CAP_VM: 17278599d90SCornelia Huck case KVM_CAP_S390_IRQCHIP: 173f2061656SDominik Dingel case KVM_CAP_VM_ATTRIBUTES: 1746352e4d2SDavid Hildenbrand case KVM_CAP_MP_STATE: 1752444b352SDavid Hildenbrand case KVM_CAP_S390_USER_SIGP: 176d7b0b5ebSCarsten Otte r = 1; 177d7b0b5ebSCarsten Otte break; 178e726b1bdSChristian Borntraeger case KVM_CAP_NR_VCPUS: 179e726b1bdSChristian Borntraeger case KVM_CAP_MAX_VCPUS: 180e726b1bdSChristian Borntraeger r = KVM_MAX_VCPUS; 181e726b1bdSChristian Borntraeger break; 182e1e2e605SNick Wang case KVM_CAP_NR_MEMSLOTS: 183e1e2e605SNick Wang r = KVM_USER_MEM_SLOTS; 184e1e2e605SNick Wang break; 1851526bf9cSChristian Borntraeger case KVM_CAP_S390_COW: 186abf09bedSMartin Schwidefsky r = MACHINE_HAS_ESOP; 1871526bf9cSChristian Borntraeger break; 1882bd0ac4eSCarsten Otte default: 189d7b0b5ebSCarsten Otte r = 0; 190b0c632dbSHeiko Carstens } 191d7b0b5ebSCarsten Otte return r; 1922bd0ac4eSCarsten Otte } 193b0c632dbSHeiko Carstens 19415f36ebdSJason J. Herne static void kvm_s390_sync_dirty_log(struct kvm *kvm, 19515f36ebdSJason J. Herne struct kvm_memory_slot *memslot) 19615f36ebdSJason J. Herne { 19715f36ebdSJason J. Herne gfn_t cur_gfn, last_gfn; 19815f36ebdSJason J. Herne unsigned long address; 19915f36ebdSJason J. Herne struct gmap *gmap = kvm->arch.gmap; 20015f36ebdSJason J. Herne 20115f36ebdSJason J. Herne down_read(&gmap->mm->mmap_sem); 20215f36ebdSJason J. Herne /* Loop over all guest pages */ 20315f36ebdSJason J. Herne last_gfn = memslot->base_gfn + memslot->npages; 20415f36ebdSJason J. Herne for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) { 20515f36ebdSJason J. Herne address = gfn_to_hva_memslot(memslot, cur_gfn); 20615f36ebdSJason J. Herne 20715f36ebdSJason J. Herne if (gmap_test_and_clear_dirty(address, gmap)) 20815f36ebdSJason J. Herne mark_page_dirty(kvm, cur_gfn); 20915f36ebdSJason J. Herne } 21015f36ebdSJason J. Herne up_read(&gmap->mm->mmap_sem); 21115f36ebdSJason J. Herne } 21215f36ebdSJason J. 
Herne 213b0c632dbSHeiko Carstens /* Section: vm related */ 214b0c632dbSHeiko Carstens /* 215b0c632dbSHeiko Carstens * Get (and clear) the dirty memory log for a memory slot. 216b0c632dbSHeiko Carstens */ 217b0c632dbSHeiko Carstens int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, 218b0c632dbSHeiko Carstens struct kvm_dirty_log *log) 219b0c632dbSHeiko Carstens { 22015f36ebdSJason J. Herne int r; 22115f36ebdSJason J. Herne unsigned long n; 22215f36ebdSJason J. Herne struct kvm_memory_slot *memslot; 22315f36ebdSJason J. Herne int is_dirty = 0; 22415f36ebdSJason J. Herne 22515f36ebdSJason J. Herne mutex_lock(&kvm->slots_lock); 22615f36ebdSJason J. Herne 22715f36ebdSJason J. Herne r = -EINVAL; 22815f36ebdSJason J. Herne if (log->slot >= KVM_USER_MEM_SLOTS) 22915f36ebdSJason J. Herne goto out; 23015f36ebdSJason J. Herne 23115f36ebdSJason J. Herne memslot = id_to_memslot(kvm->memslots, log->slot); 23215f36ebdSJason J. Herne r = -ENOENT; 23315f36ebdSJason J. Herne if (!memslot->dirty_bitmap) 23415f36ebdSJason J. Herne goto out; 23515f36ebdSJason J. Herne 23615f36ebdSJason J. Herne kvm_s390_sync_dirty_log(kvm, memslot); 23715f36ebdSJason J. Herne r = kvm_get_dirty_log(kvm, log, &is_dirty); 23815f36ebdSJason J. Herne if (r) 23915f36ebdSJason J. Herne goto out; 24015f36ebdSJason J. Herne 24115f36ebdSJason J. Herne /* Clear the dirty log */ 24215f36ebdSJason J. Herne if (is_dirty) { 24315f36ebdSJason J. Herne n = kvm_dirty_bitmap_bytes(memslot); 24415f36ebdSJason J. Herne memset(memslot->dirty_bitmap, 0, n); 24515f36ebdSJason J. Herne } 24615f36ebdSJason J. Herne r = 0; 24715f36ebdSJason J. Herne out: 24815f36ebdSJason J. Herne mutex_unlock(&kvm->slots_lock); 24915f36ebdSJason J. Herne return r; 250b0c632dbSHeiko Carstens } 251b0c632dbSHeiko Carstens 252d938dc55SCornelia Huck static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) 253d938dc55SCornelia Huck { 254d938dc55SCornelia Huck int r; 255d938dc55SCornelia Huck 256d938dc55SCornelia Huck if (cap->flags) 257d938dc55SCornelia Huck return -EINVAL; 258d938dc55SCornelia Huck 259d938dc55SCornelia Huck switch (cap->cap) { 26084223598SCornelia Huck case KVM_CAP_S390_IRQCHIP: 26184223598SCornelia Huck kvm->arch.use_irqchip = 1; 26284223598SCornelia Huck r = 0; 26384223598SCornelia Huck break; 2642444b352SDavid Hildenbrand case KVM_CAP_S390_USER_SIGP: 2652444b352SDavid Hildenbrand kvm->arch.user_sigp = 1; 2662444b352SDavid Hildenbrand r = 0; 2672444b352SDavid Hildenbrand break; 268d938dc55SCornelia Huck default: 269d938dc55SCornelia Huck r = -EINVAL; 270d938dc55SCornelia Huck break; 271d938dc55SCornelia Huck } 272d938dc55SCornelia Huck return r; 273d938dc55SCornelia Huck } 274d938dc55SCornelia Huck 2758c0a7ce6SDominik Dingel static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) 2768c0a7ce6SDominik Dingel { 2778c0a7ce6SDominik Dingel int ret; 2788c0a7ce6SDominik Dingel 2798c0a7ce6SDominik Dingel switch (attr->attr) { 2808c0a7ce6SDominik Dingel case KVM_S390_VM_MEM_LIMIT_SIZE: 2818c0a7ce6SDominik Dingel ret = 0; 2828c0a7ce6SDominik Dingel if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr)) 2838c0a7ce6SDominik Dingel ret = -EFAULT; 2848c0a7ce6SDominik Dingel break; 2858c0a7ce6SDominik Dingel default: 2868c0a7ce6SDominik Dingel ret = -ENXIO; 2878c0a7ce6SDominik Dingel break; 2888c0a7ce6SDominik Dingel } 2898c0a7ce6SDominik Dingel return ret; 2908c0a7ce6SDominik Dingel } 2918c0a7ce6SDominik Dingel 2928c0a7ce6SDominik Dingel static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr 
*attr) 2934f718eabSDominik Dingel { 2944f718eabSDominik Dingel int ret; 2954f718eabSDominik Dingel unsigned int idx; 2964f718eabSDominik Dingel switch (attr->attr) { 2974f718eabSDominik Dingel case KVM_S390_VM_MEM_ENABLE_CMMA: 2984f718eabSDominik Dingel ret = -EBUSY; 2994f718eabSDominik Dingel mutex_lock(&kvm->lock); 3004f718eabSDominik Dingel if (atomic_read(&kvm->online_vcpus) == 0) { 3014f718eabSDominik Dingel kvm->arch.use_cmma = 1; 3024f718eabSDominik Dingel ret = 0; 3034f718eabSDominik Dingel } 3044f718eabSDominik Dingel mutex_unlock(&kvm->lock); 3054f718eabSDominik Dingel break; 3064f718eabSDominik Dingel case KVM_S390_VM_MEM_CLR_CMMA: 3074f718eabSDominik Dingel mutex_lock(&kvm->lock); 3084f718eabSDominik Dingel idx = srcu_read_lock(&kvm->srcu); 309a13cff31SDominik Dingel s390_reset_cmma(kvm->arch.gmap->mm); 3104f718eabSDominik Dingel srcu_read_unlock(&kvm->srcu, idx); 3114f718eabSDominik Dingel mutex_unlock(&kvm->lock); 3124f718eabSDominik Dingel ret = 0; 3134f718eabSDominik Dingel break; 3148c0a7ce6SDominik Dingel case KVM_S390_VM_MEM_LIMIT_SIZE: { 3158c0a7ce6SDominik Dingel unsigned long new_limit; 3168c0a7ce6SDominik Dingel 3178c0a7ce6SDominik Dingel if (kvm_is_ucontrol(kvm)) 3188c0a7ce6SDominik Dingel return -EINVAL; 3198c0a7ce6SDominik Dingel 3208c0a7ce6SDominik Dingel if (get_user(new_limit, (u64 __user *)attr->addr)) 3218c0a7ce6SDominik Dingel return -EFAULT; 3228c0a7ce6SDominik Dingel 3238c0a7ce6SDominik Dingel if (new_limit > kvm->arch.gmap->asce_end) 3248c0a7ce6SDominik Dingel return -E2BIG; 3258c0a7ce6SDominik Dingel 3268c0a7ce6SDominik Dingel ret = -EBUSY; 3278c0a7ce6SDominik Dingel mutex_lock(&kvm->lock); 3288c0a7ce6SDominik Dingel if (atomic_read(&kvm->online_vcpus) == 0) { 3298c0a7ce6SDominik Dingel /* gmap_alloc will round the limit up */ 3308c0a7ce6SDominik Dingel struct gmap *new = gmap_alloc(current->mm, new_limit); 3318c0a7ce6SDominik Dingel 3328c0a7ce6SDominik Dingel if (!new) { 3338c0a7ce6SDominik Dingel ret = -ENOMEM; 3348c0a7ce6SDominik Dingel } else { 3358c0a7ce6SDominik Dingel gmap_free(kvm->arch.gmap); 3368c0a7ce6SDominik Dingel new->private = kvm; 3378c0a7ce6SDominik Dingel kvm->arch.gmap = new; 3388c0a7ce6SDominik Dingel ret = 0; 3398c0a7ce6SDominik Dingel } 3408c0a7ce6SDominik Dingel } 3418c0a7ce6SDominik Dingel mutex_unlock(&kvm->lock); 3428c0a7ce6SDominik Dingel break; 3438c0a7ce6SDominik Dingel } 3444f718eabSDominik Dingel default: 3454f718eabSDominik Dingel ret = -ENXIO; 3464f718eabSDominik Dingel break; 3474f718eabSDominik Dingel } 3484f718eabSDominik Dingel return ret; 3494f718eabSDominik Dingel } 3504f718eabSDominik Dingel 351a374e892STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu); 352a374e892STony Krowiak 353a374e892STony Krowiak static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) 354a374e892STony Krowiak { 355a374e892STony Krowiak struct kvm_vcpu *vcpu; 356a374e892STony Krowiak int i; 357a374e892STony Krowiak 358*9d8d5786SMichael Mueller if (!test_kvm_facility(kvm, 76)) 359a374e892STony Krowiak return -EINVAL; 360a374e892STony Krowiak 361a374e892STony Krowiak mutex_lock(&kvm->lock); 362a374e892STony Krowiak switch (attr->attr) { 363a374e892STony Krowiak case KVM_S390_VM_CRYPTO_ENABLE_AES_KW: 364a374e892STony Krowiak get_random_bytes( 365a374e892STony Krowiak kvm->arch.crypto.crycb->aes_wrapping_key_mask, 366a374e892STony Krowiak sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); 367a374e892STony Krowiak kvm->arch.crypto.aes_kw = 1; 368a374e892STony Krowiak break; 369a374e892STony 
Krowiak case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW: 370a374e892STony Krowiak get_random_bytes( 371a374e892STony Krowiak kvm->arch.crypto.crycb->dea_wrapping_key_mask, 372a374e892STony Krowiak sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); 373a374e892STony Krowiak kvm->arch.crypto.dea_kw = 1; 374a374e892STony Krowiak break; 375a374e892STony Krowiak case KVM_S390_VM_CRYPTO_DISABLE_AES_KW: 376a374e892STony Krowiak kvm->arch.crypto.aes_kw = 0; 377a374e892STony Krowiak memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, 378a374e892STony Krowiak sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); 379a374e892STony Krowiak break; 380a374e892STony Krowiak case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW: 381a374e892STony Krowiak kvm->arch.crypto.dea_kw = 0; 382a374e892STony Krowiak memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, 383a374e892STony Krowiak sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); 384a374e892STony Krowiak break; 385a374e892STony Krowiak default: 386a374e892STony Krowiak mutex_unlock(&kvm->lock); 387a374e892STony Krowiak return -ENXIO; 388a374e892STony Krowiak } 389a374e892STony Krowiak 390a374e892STony Krowiak kvm_for_each_vcpu(i, vcpu, kvm) { 391a374e892STony Krowiak kvm_s390_vcpu_crypto_setup(vcpu); 392a374e892STony Krowiak exit_sie(vcpu); 393a374e892STony Krowiak } 394a374e892STony Krowiak mutex_unlock(&kvm->lock); 395a374e892STony Krowiak return 0; 396a374e892STony Krowiak } 397a374e892STony Krowiak 39872f25020SJason J. Herne static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) 39972f25020SJason J. Herne { 40072f25020SJason J. Herne u8 gtod_high; 40172f25020SJason J. Herne 40272f25020SJason J. Herne if (copy_from_user(&gtod_high, (void __user *)attr->addr, 40372f25020SJason J. Herne sizeof(gtod_high))) 40472f25020SJason J. Herne return -EFAULT; 40572f25020SJason J. Herne 40672f25020SJason J. Herne if (gtod_high != 0) 40772f25020SJason J. Herne return -EINVAL; 40872f25020SJason J. Herne 40972f25020SJason J. Herne return 0; 41072f25020SJason J. Herne } 41172f25020SJason J. Herne 41272f25020SJason J. Herne static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) 41372f25020SJason J. Herne { 41472f25020SJason J. Herne struct kvm_vcpu *cur_vcpu; 41572f25020SJason J. Herne unsigned int vcpu_idx; 41672f25020SJason J. Herne u64 host_tod, gtod; 41772f25020SJason J. Herne int r; 41872f25020SJason J. Herne 41972f25020SJason J. Herne if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod))) 42072f25020SJason J. Herne return -EFAULT; 42172f25020SJason J. Herne 42272f25020SJason J. Herne r = store_tod_clock(&host_tod); 42372f25020SJason J. Herne if (r) 42472f25020SJason J. Herne return r; 42572f25020SJason J. Herne 42672f25020SJason J. Herne mutex_lock(&kvm->lock); 42772f25020SJason J. Herne kvm->arch.epoch = gtod - host_tod; 42872f25020SJason J. Herne kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) { 42972f25020SJason J. Herne cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch; 43072f25020SJason J. Herne exit_sie(cur_vcpu); 43172f25020SJason J. Herne } 43272f25020SJason J. Herne mutex_unlock(&kvm->lock); 43372f25020SJason J. Herne return 0; 43472f25020SJason J. Herne } 43572f25020SJason J. Herne 43672f25020SJason J. Herne static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr) 43772f25020SJason J. Herne { 43872f25020SJason J. Herne int ret; 43972f25020SJason J. Herne 44072f25020SJason J. Herne if (attr->flags) 44172f25020SJason J. Herne return -EINVAL; 44272f25020SJason J. Herne 44372f25020SJason J.
Herne switch (attr->attr) { 44472f25020SJason J. Herne case KVM_S390_VM_TOD_HIGH: 44572f25020SJason J. Herne ret = kvm_s390_set_tod_high(kvm, attr); 44672f25020SJason J. Herne break; 44772f25020SJason J. Herne case KVM_S390_VM_TOD_LOW: 44872f25020SJason J. Herne ret = kvm_s390_set_tod_low(kvm, attr); 44972f25020SJason J. Herne break; 45072f25020SJason J. Herne default: 45172f25020SJason J. Herne ret = -ENXIO; 45272f25020SJason J. Herne break; 45372f25020SJason J. Herne } 45472f25020SJason J. Herne return ret; 45572f25020SJason J. Herne } 45672f25020SJason J. Herne 45772f25020SJason J. Herne static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) 45872f25020SJason J. Herne { 45972f25020SJason J. Herne u8 gtod_high = 0; 46072f25020SJason J. Herne 46172f25020SJason J. Herne if (copy_to_user((void __user *)attr->addr, &gtod_high, 46272f25020SJason J. Herne sizeof(gtod_high))) 46372f25020SJason J. Herne return -EFAULT; 46472f25020SJason J. Herne 46572f25020SJason J. Herne return 0; 46672f25020SJason J. Herne } 46772f25020SJason J. Herne 46872f25020SJason J. Herne static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) 46972f25020SJason J. Herne { 47072f25020SJason J. Herne u64 host_tod, gtod; 47172f25020SJason J. Herne int r; 47272f25020SJason J. Herne 47372f25020SJason J. Herne r = store_tod_clock(&host_tod); 47472f25020SJason J. Herne if (r) 47572f25020SJason J. Herne return r; 47672f25020SJason J. Herne 47772f25020SJason J. Herne gtod = host_tod + kvm->arch.epoch; 47872f25020SJason J. Herne if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod))) 47972f25020SJason J. Herne return -EFAULT; 48072f25020SJason J. Herne 48172f25020SJason J. Herne return 0; 48272f25020SJason J. Herne } 48372f25020SJason J. Herne 48472f25020SJason J. Herne static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr) 48572f25020SJason J. Herne { 48672f25020SJason J. Herne int ret; 48772f25020SJason J. Herne 48872f25020SJason J. Herne if (attr->flags) 48972f25020SJason J. Herne return -EINVAL; 49072f25020SJason J. Herne 49172f25020SJason J. Herne switch (attr->attr) { 49272f25020SJason J. Herne case KVM_S390_VM_TOD_HIGH: 49372f25020SJason J. Herne ret = kvm_s390_get_tod_high(kvm, attr); 49472f25020SJason J. Herne break; 49572f25020SJason J. Herne case KVM_S390_VM_TOD_LOW: 49672f25020SJason J. Herne ret = kvm_s390_get_tod_low(kvm, attr); 49772f25020SJason J. Herne break; 49872f25020SJason J. Herne default: 49972f25020SJason J. Herne ret = -ENXIO; 50072f25020SJason J. Herne break; 50172f25020SJason J. Herne } 50272f25020SJason J. Herne return ret; 50372f25020SJason J. Herne } 50472f25020SJason J. Herne 505f2061656SDominik Dingel static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr) 506f2061656SDominik Dingel { 507f2061656SDominik Dingel int ret; 508f2061656SDominik Dingel 509f2061656SDominik Dingel switch (attr->group) { 5104f718eabSDominik Dingel case KVM_S390_VM_MEM_CTRL: 5118c0a7ce6SDominik Dingel ret = kvm_s390_set_mem_control(kvm, attr); 5124f718eabSDominik Dingel break; 51372f25020SJason J. Herne case KVM_S390_VM_TOD: 51472f25020SJason J. Herne ret = kvm_s390_set_tod(kvm, attr); 51572f25020SJason J.
Herne break; 516a374e892STony Krowiak case KVM_S390_VM_CRYPTO: 517a374e892STony Krowiak ret = kvm_s390_vm_set_crypto(kvm, attr); 518a374e892STony Krowiak break; 519f2061656SDominik Dingel default: 520f2061656SDominik Dingel ret = -ENXIO; 521f2061656SDominik Dingel break; 522f2061656SDominik Dingel } 523f2061656SDominik Dingel 524f2061656SDominik Dingel return ret; 525f2061656SDominik Dingel } 526f2061656SDominik Dingel 527f2061656SDominik Dingel static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr) 528f2061656SDominik Dingel { 5298c0a7ce6SDominik Dingel int ret; 5308c0a7ce6SDominik Dingel 5318c0a7ce6SDominik Dingel switch (attr->group) { 5328c0a7ce6SDominik Dingel case KVM_S390_VM_MEM_CTRL: 5338c0a7ce6SDominik Dingel ret = kvm_s390_get_mem_control(kvm, attr); 5348c0a7ce6SDominik Dingel break; 53572f25020SJason J. Herne case KVM_S390_VM_TOD: 53672f25020SJason J. Herne ret = kvm_s390_get_tod(kvm, attr); 53772f25020SJason J. Herne break; 5388c0a7ce6SDominik Dingel default: 5398c0a7ce6SDominik Dingel ret = -ENXIO; 5408c0a7ce6SDominik Dingel break; 5418c0a7ce6SDominik Dingel } 5428c0a7ce6SDominik Dingel 5438c0a7ce6SDominik Dingel return ret; 544f2061656SDominik Dingel } 545f2061656SDominik Dingel 546f2061656SDominik Dingel static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) 547f2061656SDominik Dingel { 548f2061656SDominik Dingel int ret; 549f2061656SDominik Dingel 550f2061656SDominik Dingel switch (attr->group) { 5514f718eabSDominik Dingel case KVM_S390_VM_MEM_CTRL: 5524f718eabSDominik Dingel switch (attr->attr) { 5534f718eabSDominik Dingel case KVM_S390_VM_MEM_ENABLE_CMMA: 5544f718eabSDominik Dingel case KVM_S390_VM_MEM_CLR_CMMA: 5558c0a7ce6SDominik Dingel case KVM_S390_VM_MEM_LIMIT_SIZE: 5564f718eabSDominik Dingel ret = 0; 5574f718eabSDominik Dingel break; 5584f718eabSDominik Dingel default: 5594f718eabSDominik Dingel ret = -ENXIO; 5604f718eabSDominik Dingel break; 5614f718eabSDominik Dingel } 5624f718eabSDominik Dingel break; 56372f25020SJason J. Herne case KVM_S390_VM_TOD: 56472f25020SJason J. Herne switch (attr->attr) { 56572f25020SJason J. Herne case KVM_S390_VM_TOD_LOW: 56672f25020SJason J. Herne case KVM_S390_VM_TOD_HIGH: 56772f25020SJason J. Herne ret = 0; 56872f25020SJason J. Herne break; 56972f25020SJason J. Herne default: 57072f25020SJason J. Herne ret = -ENXIO; 57172f25020SJason J. Herne break; 57272f25020SJason J. Herne } 57372f25020SJason J. 
Herne break; 574a374e892STony Krowiak case KVM_S390_VM_CRYPTO: 575a374e892STony Krowiak switch (attr->attr) { 576a374e892STony Krowiak case KVM_S390_VM_CRYPTO_ENABLE_AES_KW: 577a374e892STony Krowiak case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW: 578a374e892STony Krowiak case KVM_S390_VM_CRYPTO_DISABLE_AES_KW: 579a374e892STony Krowiak case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW: 580a374e892STony Krowiak ret = 0; 581a374e892STony Krowiak break; 582a374e892STony Krowiak default: 583a374e892STony Krowiak ret = -ENXIO; 584a374e892STony Krowiak break; 585a374e892STony Krowiak } 586a374e892STony Krowiak break; 587f2061656SDominik Dingel default: 588f2061656SDominik Dingel ret = -ENXIO; 589f2061656SDominik Dingel break; 590f2061656SDominik Dingel } 591f2061656SDominik Dingel 592f2061656SDominik Dingel return ret; 593f2061656SDominik Dingel } 594f2061656SDominik Dingel 595b0c632dbSHeiko Carstens long kvm_arch_vm_ioctl(struct file *filp, 596b0c632dbSHeiko Carstens unsigned int ioctl, unsigned long arg) 597b0c632dbSHeiko Carstens { 598b0c632dbSHeiko Carstens struct kvm *kvm = filp->private_data; 599b0c632dbSHeiko Carstens void __user *argp = (void __user *)arg; 600f2061656SDominik Dingel struct kvm_device_attr attr; 601b0c632dbSHeiko Carstens int r; 602b0c632dbSHeiko Carstens 603b0c632dbSHeiko Carstens switch (ioctl) { 604ba5c1e9bSCarsten Otte case KVM_S390_INTERRUPT: { 605ba5c1e9bSCarsten Otte struct kvm_s390_interrupt s390int; 606ba5c1e9bSCarsten Otte 607ba5c1e9bSCarsten Otte r = -EFAULT; 608ba5c1e9bSCarsten Otte if (copy_from_user(&s390int, argp, sizeof(s390int))) 609ba5c1e9bSCarsten Otte break; 610ba5c1e9bSCarsten Otte r = kvm_s390_inject_vm(kvm, &s390int); 611ba5c1e9bSCarsten Otte break; 612ba5c1e9bSCarsten Otte } 613d938dc55SCornelia Huck case KVM_ENABLE_CAP: { 614d938dc55SCornelia Huck struct kvm_enable_cap cap; 615d938dc55SCornelia Huck r = -EFAULT; 616d938dc55SCornelia Huck if (copy_from_user(&cap, argp, sizeof(cap))) 617d938dc55SCornelia Huck break; 618d938dc55SCornelia Huck r = kvm_vm_ioctl_enable_cap(kvm, &cap); 619d938dc55SCornelia Huck break; 620d938dc55SCornelia Huck } 62184223598SCornelia Huck case KVM_CREATE_IRQCHIP: { 62284223598SCornelia Huck struct kvm_irq_routing_entry routing; 62384223598SCornelia Huck 62484223598SCornelia Huck r = -EINVAL; 62584223598SCornelia Huck if (kvm->arch.use_irqchip) { 62684223598SCornelia Huck /* Set up dummy routing. 
*/ 62784223598SCornelia Huck memset(&routing, 0, sizeof(routing)); 62884223598SCornelia Huck kvm_set_irq_routing(kvm, &routing, 0, 0); 62984223598SCornelia Huck r = 0; 63084223598SCornelia Huck } 63184223598SCornelia Huck break; 63284223598SCornelia Huck } 633f2061656SDominik Dingel case KVM_SET_DEVICE_ATTR: { 634f2061656SDominik Dingel r = -EFAULT; 635f2061656SDominik Dingel if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 636f2061656SDominik Dingel break; 637f2061656SDominik Dingel r = kvm_s390_vm_set_attr(kvm, &attr); 638f2061656SDominik Dingel break; 639f2061656SDominik Dingel } 640f2061656SDominik Dingel case KVM_GET_DEVICE_ATTR: { 641f2061656SDominik Dingel r = -EFAULT; 642f2061656SDominik Dingel if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 643f2061656SDominik Dingel break; 644f2061656SDominik Dingel r = kvm_s390_vm_get_attr(kvm, &attr); 645f2061656SDominik Dingel break; 646f2061656SDominik Dingel } 647f2061656SDominik Dingel case KVM_HAS_DEVICE_ATTR: { 648f2061656SDominik Dingel r = -EFAULT; 649f2061656SDominik Dingel if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 650f2061656SDominik Dingel break; 651f2061656SDominik Dingel r = kvm_s390_vm_has_attr(kvm, &attr); 652f2061656SDominik Dingel break; 653f2061656SDominik Dingel } 654b0c632dbSHeiko Carstens default: 655367e1319SAvi Kivity r = -ENOTTY; 656b0c632dbSHeiko Carstens } 657b0c632dbSHeiko Carstens 658b0c632dbSHeiko Carstens return r; 659b0c632dbSHeiko Carstens } 660b0c632dbSHeiko Carstens 66145c9b47cSTony Krowiak static int kvm_s390_query_ap_config(u8 *config) 66245c9b47cSTony Krowiak { 66345c9b47cSTony Krowiak u32 fcn_code = 0x04000000UL; 66445c9b47cSTony Krowiak u32 cc; 66545c9b47cSTony Krowiak 66645c9b47cSTony Krowiak asm volatile( 66745c9b47cSTony Krowiak "lgr 0,%1\n" 66845c9b47cSTony Krowiak "lgr 2,%2\n" 66945c9b47cSTony Krowiak ".long 0xb2af0000\n" /* PQAP(QCI) */ 67045c9b47cSTony Krowiak "ipm %0\n" 67145c9b47cSTony Krowiak "srl %0,28\n" 67245c9b47cSTony Krowiak : "=r" (cc) 67345c9b47cSTony Krowiak : "r" (fcn_code), "r" (config) 67445c9b47cSTony Krowiak : "cc", "0", "2", "memory" 67545c9b47cSTony Krowiak ); 67645c9b47cSTony Krowiak 67745c9b47cSTony Krowiak return cc; 67845c9b47cSTony Krowiak } 67945c9b47cSTony Krowiak 68045c9b47cSTony Krowiak static int kvm_s390_apxa_installed(void) 68145c9b47cSTony Krowiak { 68245c9b47cSTony Krowiak u8 config[128]; 68345c9b47cSTony Krowiak int cc; 68445c9b47cSTony Krowiak 68545c9b47cSTony Krowiak if (test_facility(2) && test_facility(12)) { 68645c9b47cSTony Krowiak cc = kvm_s390_query_ap_config(config); 68745c9b47cSTony Krowiak 68845c9b47cSTony Krowiak if (cc) 68945c9b47cSTony Krowiak pr_err("PQAP(QCI) failed with cc=%d", cc); 69045c9b47cSTony Krowiak else 69145c9b47cSTony Krowiak return config[0] & 0x40; 69245c9b47cSTony Krowiak } 69345c9b47cSTony Krowiak 69445c9b47cSTony Krowiak return 0; 69545c9b47cSTony Krowiak } 69645c9b47cSTony Krowiak 69745c9b47cSTony Krowiak static void kvm_s390_set_crycb_format(struct kvm *kvm) 69845c9b47cSTony Krowiak { 69945c9b47cSTony Krowiak kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb; 70045c9b47cSTony Krowiak 70145c9b47cSTony Krowiak if (kvm_s390_apxa_installed()) 70245c9b47cSTony Krowiak kvm->arch.crypto.crycbd |= CRYCB_FORMAT2; 70345c9b47cSTony Krowiak else 70445c9b47cSTony Krowiak kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; 70545c9b47cSTony Krowiak } 70645c9b47cSTony Krowiak 707*9d8d5786SMichael Mueller static void kvm_s390_get_cpu_id(struct cpuid *cpu_id) 708*9d8d5786SMichael Mueller { 
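/* Use the host CPU id as the guest's default model, but overwrite the version code with 0xff below: a version code of 0xff is what STIDP conventionally reports to a CPU running as a guest under a hypervisor, so the host's real version code is presumably not meant to be passed through. */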
709*9d8d5786SMichael Mueller get_cpu_id(cpu_id); 710*9d8d5786SMichael Mueller cpu_id->version = 0xff; 711*9d8d5786SMichael Mueller } 712*9d8d5786SMichael Mueller 7135102ee87STony Krowiak static int kvm_s390_crypto_init(struct kvm *kvm) 7145102ee87STony Krowiak { 715*9d8d5786SMichael Mueller if (!test_kvm_facility(kvm, 76)) 7165102ee87STony Krowiak return 0; 7175102ee87STony Krowiak 7185102ee87STony Krowiak kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb), 7195102ee87STony Krowiak GFP_KERNEL | GFP_DMA); 7205102ee87STony Krowiak if (!kvm->arch.crypto.crycb) 7215102ee87STony Krowiak return -ENOMEM; 7225102ee87STony Krowiak 72345c9b47cSTony Krowiak kvm_s390_set_crycb_format(kvm); 7245102ee87STony Krowiak 725a374e892STony Krowiak /* Disable AES/DEA protected key functions by default */ 726a374e892STony Krowiak kvm->arch.crypto.aes_kw = 0; 727a374e892STony Krowiak kvm->arch.crypto.dea_kw = 0; 728a374e892STony Krowiak 7295102ee87STony Krowiak return 0; 7305102ee87STony Krowiak } 7315102ee87STony Krowiak 732e08b9637SCarsten Otte int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) 733b0c632dbSHeiko Carstens { 734*9d8d5786SMichael Mueller int i, rc; 735b0c632dbSHeiko Carstens char debug_name[16]; 736f6c137ffSChristian Borntraeger static unsigned long sca_offset; 737b0c632dbSHeiko Carstens 738e08b9637SCarsten Otte rc = -EINVAL; 739e08b9637SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL 740e08b9637SCarsten Otte if (type & ~KVM_VM_S390_UCONTROL) 741e08b9637SCarsten Otte goto out_err; 742e08b9637SCarsten Otte if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN))) 743e08b9637SCarsten Otte goto out_err; 744e08b9637SCarsten Otte #else 745e08b9637SCarsten Otte if (type) 746e08b9637SCarsten Otte goto out_err; 747e08b9637SCarsten Otte #endif 748e08b9637SCarsten Otte 749b0c632dbSHeiko Carstens rc = s390_enable_sie(); 750b0c632dbSHeiko Carstens if (rc) 751d89f5effSJan Kiszka goto out_err; 752b0c632dbSHeiko Carstens 753b290411aSCarsten Otte rc = -ENOMEM; 754b290411aSCarsten Otte 755b0c632dbSHeiko Carstens kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL); 756b0c632dbSHeiko Carstens if (!kvm->arch.sca) 757d89f5effSJan Kiszka goto out_err; 758f6c137ffSChristian Borntraeger spin_lock(&kvm_lock); 759f6c137ffSChristian Borntraeger sca_offset = (sca_offset + 16) & 0x7f0; 760f6c137ffSChristian Borntraeger kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset); 761f6c137ffSChristian Borntraeger spin_unlock(&kvm_lock); 762b0c632dbSHeiko Carstens 763b0c632dbSHeiko Carstens sprintf(debug_name, "kvm-%u", current->pid); 764b0c632dbSHeiko Carstens 765b0c632dbSHeiko Carstens kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long)); 766b0c632dbSHeiko Carstens if (!kvm->arch.dbf) 767b0c632dbSHeiko Carstens goto out_nodbf; 768b0c632dbSHeiko Carstens 769*9d8d5786SMichael Mueller /* 770*9d8d5786SMichael Mueller * The architectural maximum amount of facilities is 16 kbit. To store 771*9d8d5786SMichael Mueller * this amount, 2 kbyte of memory is required. Thus we need a full 772*9d8d5786SMichael Mueller * page to hold the active copy (arch.model.fac->sie) and the current 773*9d8d5786SMichael Mueller * facilities set (arch.model.fac->kvm). Its address size has to be 774*9d8d5786SMichael Mueller * 31 bits and word aligned. 
775*9d8d5786SMichael Mueller */ 776*9d8d5786SMichael Mueller kvm->arch.model.fac = 777*9d8d5786SMichael Mueller (struct s390_model_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 778*9d8d5786SMichael Mueller if (!kvm->arch.model.fac) 779*9d8d5786SMichael Mueller goto out_nofac; 780*9d8d5786SMichael Mueller 781*9d8d5786SMichael Mueller memcpy(kvm->arch.model.fac->kvm, S390_lowcore.stfle_fac_list, 782*9d8d5786SMichael Mueller S390_ARCH_FAC_LIST_SIZE_U64); 783*9d8d5786SMichael Mueller 784*9d8d5786SMichael Mueller /* 785*9d8d5786SMichael Mueller * Apply the kvm facility mask to limit the kvm supported/tolerated 786*9d8d5786SMichael Mueller * facility list. 787*9d8d5786SMichael Mueller */ 788*9d8d5786SMichael Mueller for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) { 789*9d8d5786SMichael Mueller if (i < kvm_s390_fac_list_mask_size()) 790*9d8d5786SMichael Mueller kvm->arch.model.fac->kvm[i] &= kvm_s390_fac_list_mask[i]; 791*9d8d5786SMichael Mueller else 792*9d8d5786SMichael Mueller kvm->arch.model.fac->kvm[i] = 0UL; 793*9d8d5786SMichael Mueller } 794*9d8d5786SMichael Mueller 795*9d8d5786SMichael Mueller kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id); 796*9d8d5786SMichael Mueller 7975102ee87STony Krowiak if (kvm_s390_crypto_init(kvm) < 0) 7985102ee87STony Krowiak goto out_crypto; 7995102ee87STony Krowiak 800ba5c1e9bSCarsten Otte spin_lock_init(&kvm->arch.float_int.lock); 801ba5c1e9bSCarsten Otte INIT_LIST_HEAD(&kvm->arch.float_int.list); 8028a242234SHeiko Carstens init_waitqueue_head(&kvm->arch.ipte_wq); 803a6b7e459SThomas Huth mutex_init(&kvm->arch.ipte_mutex); 804ba5c1e9bSCarsten Otte 805b0c632dbSHeiko Carstens debug_register_view(kvm->arch.dbf, &debug_sprintf_view); 806b0c632dbSHeiko Carstens VM_EVENT(kvm, 3, "%s", "vm created"); 807b0c632dbSHeiko Carstens 808e08b9637SCarsten Otte if (type & KVM_VM_S390_UCONTROL) { 809e08b9637SCarsten Otte kvm->arch.gmap = NULL; 810e08b9637SCarsten Otte } else { 8110349985aSChristian Borntraeger kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1); 812598841caSCarsten Otte if (!kvm->arch.gmap) 813598841caSCarsten Otte goto out_nogmap; 8142c70fe44SChristian Borntraeger kvm->arch.gmap->private = kvm; 81524eb3a82SDominik Dingel kvm->arch.gmap->pfault_enabled = 0; 816e08b9637SCarsten Otte } 817fa6b7fe9SCornelia Huck 818fa6b7fe9SCornelia Huck kvm->arch.css_support = 0; 81984223598SCornelia Huck kvm->arch.use_irqchip = 0; 82072f25020SJason J. 
Herne kvm->arch.epoch = 0; 821fa6b7fe9SCornelia Huck 8228ad35755SDavid Hildenbrand spin_lock_init(&kvm->arch.start_stop_lock); 8238ad35755SDavid Hildenbrand 824d89f5effSJan Kiszka return 0; 825598841caSCarsten Otte out_nogmap: 8265102ee87STony Krowiak kfree(kvm->arch.crypto.crycb); 8275102ee87STony Krowiak out_crypto: 828*9d8d5786SMichael Mueller free_page((unsigned long)kvm->arch.model.fac); 829*9d8d5786SMichael Mueller out_nofac: 830598841caSCarsten Otte debug_unregister(kvm->arch.dbf); 831b0c632dbSHeiko Carstens out_nodbf: 832b0c632dbSHeiko Carstens free_page((unsigned long)(kvm->arch.sca)); 833d89f5effSJan Kiszka out_err: 834d89f5effSJan Kiszka return rc; 835b0c632dbSHeiko Carstens } 836b0c632dbSHeiko Carstens 837d329c035SChristian Borntraeger void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) 838d329c035SChristian Borntraeger { 839d329c035SChristian Borntraeger VCPU_EVENT(vcpu, 3, "%s", "free cpu"); 840ade38c31SCornelia Huck trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id); 84167335e63SChristian Borntraeger kvm_s390_clear_local_irqs(vcpu); 8423c038e6bSDominik Dingel kvm_clear_async_pf_completion_queue(vcpu); 84358f9460bSCarsten Otte if (!kvm_is_ucontrol(vcpu->kvm)) { 84458f9460bSCarsten Otte clear_bit(63 - vcpu->vcpu_id, 84558f9460bSCarsten Otte (unsigned long *) &vcpu->kvm->arch.sca->mcn); 846abf4a71eSCarsten Otte if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda == 847abf4a71eSCarsten Otte (__u64) vcpu->arch.sie_block) 848abf4a71eSCarsten Otte vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0; 84958f9460bSCarsten Otte } 850abf4a71eSCarsten Otte smp_mb(); 85127e0393fSCarsten Otte 85227e0393fSCarsten Otte if (kvm_is_ucontrol(vcpu->kvm)) 85327e0393fSCarsten Otte gmap_free(vcpu->arch.gmap); 85427e0393fSCarsten Otte 855b31605c1SDominik Dingel if (kvm_s390_cmma_enabled(vcpu->kvm)) 856b31605c1SDominik Dingel kvm_s390_vcpu_unsetup_cmma(vcpu); 857d329c035SChristian Borntraeger free_page((unsigned long)(vcpu->arch.sie_block)); 858b31288faSKonstantin Weitz 8596692cef3SChristian Borntraeger kvm_vcpu_uninit(vcpu); 860b110feafSMichael Mueller kmem_cache_free(kvm_vcpu_cache, vcpu); 861d329c035SChristian Borntraeger } 862d329c035SChristian Borntraeger 863d329c035SChristian Borntraeger static void kvm_free_vcpus(struct kvm *kvm) 864d329c035SChristian Borntraeger { 865d329c035SChristian Borntraeger unsigned int i; 866988a2caeSGleb Natapov struct kvm_vcpu *vcpu; 867d329c035SChristian Borntraeger 868988a2caeSGleb Natapov kvm_for_each_vcpu(i, vcpu, kvm) 869988a2caeSGleb Natapov kvm_arch_vcpu_destroy(vcpu); 870988a2caeSGleb Natapov 871988a2caeSGleb Natapov mutex_lock(&kvm->lock); 872988a2caeSGleb Natapov for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) 873d329c035SChristian Borntraeger kvm->vcpus[i] = NULL; 874988a2caeSGleb Natapov 875988a2caeSGleb Natapov atomic_set(&kvm->online_vcpus, 0); 876988a2caeSGleb Natapov mutex_unlock(&kvm->lock); 877d329c035SChristian Borntraeger } 878d329c035SChristian Borntraeger 879b0c632dbSHeiko Carstens void kvm_arch_destroy_vm(struct kvm *kvm) 880b0c632dbSHeiko Carstens { 881d329c035SChristian Borntraeger kvm_free_vcpus(kvm); 882*9d8d5786SMichael Mueller free_page((unsigned long)kvm->arch.model.fac); 883b0c632dbSHeiko Carstens free_page((unsigned long)(kvm->arch.sca)); 884d329c035SChristian Borntraeger debug_unregister(kvm->arch.dbf); 8855102ee87STony Krowiak kfree(kvm->arch.crypto.crycb); 88627e0393fSCarsten Otte if (!kvm_is_ucontrol(kvm)) 887598841caSCarsten Otte gmap_free(kvm->arch.gmap); 888841b91c5SCornelia Huck kvm_s390_destroy_adapters(kvm); 88967335e63SChristian 
Borntraeger kvm_s390_clear_float_irqs(kvm); 890b0c632dbSHeiko Carstens } 891b0c632dbSHeiko Carstens 892b0c632dbSHeiko Carstens /* Section: vcpu related */ 893dafd032aSDominik Dingel static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu) 894b0c632dbSHeiko Carstens { 895c6c956b8SMartin Schwidefsky vcpu->arch.gmap = gmap_alloc(current->mm, -1UL); 89627e0393fSCarsten Otte if (!vcpu->arch.gmap) 89727e0393fSCarsten Otte return -ENOMEM; 8982c70fe44SChristian Borntraeger vcpu->arch.gmap->private = vcpu->kvm; 899dafd032aSDominik Dingel 90027e0393fSCarsten Otte return 0; 90127e0393fSCarsten Otte } 90227e0393fSCarsten Otte 903dafd032aSDominik Dingel int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) 904dafd032aSDominik Dingel { 905dafd032aSDominik Dingel vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; 906dafd032aSDominik Dingel kvm_clear_async_pf_completion_queue(vcpu); 90759674c1aSChristian Borntraeger vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX | 90859674c1aSChristian Borntraeger KVM_SYNC_GPRS | 9099eed0735SChristian Borntraeger KVM_SYNC_ACRS | 910b028ee3eSDavid Hildenbrand KVM_SYNC_CRS | 911b028ee3eSDavid Hildenbrand KVM_SYNC_ARCH0 | 912b028ee3eSDavid Hildenbrand KVM_SYNC_PFAULT; 913dafd032aSDominik Dingel 914dafd032aSDominik Dingel if (kvm_is_ucontrol(vcpu->kvm)) 915dafd032aSDominik Dingel return __kvm_ucontrol_vcpu_init(vcpu); 916dafd032aSDominik Dingel 917b0c632dbSHeiko Carstens return 0; 918b0c632dbSHeiko Carstens } 919b0c632dbSHeiko Carstens 920b0c632dbSHeiko Carstens void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 921b0c632dbSHeiko Carstens { 9224725c860SMartin Schwidefsky save_fp_ctl(&vcpu->arch.host_fpregs.fpc); 9234725c860SMartin Schwidefsky save_fp_regs(vcpu->arch.host_fpregs.fprs); 924b0c632dbSHeiko Carstens save_access_regs(vcpu->arch.host_acrs); 9254725c860SMartin Schwidefsky restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc); 9264725c860SMartin Schwidefsky restore_fp_regs(vcpu->arch.guest_fpregs.fprs); 92759674c1aSChristian Borntraeger restore_access_regs(vcpu->run->s.regs.acrs); 928480e5926SChristian Borntraeger gmap_enable(vcpu->arch.gmap); 9299e6dabefSCornelia Huck atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); 930b0c632dbSHeiko Carstens } 931b0c632dbSHeiko Carstens 932b0c632dbSHeiko Carstens void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 933b0c632dbSHeiko Carstens { 9349e6dabefSCornelia Huck atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); 935480e5926SChristian Borntraeger gmap_disable(vcpu->arch.gmap); 9364725c860SMartin Schwidefsky save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); 9374725c860SMartin Schwidefsky save_fp_regs(vcpu->arch.guest_fpregs.fprs); 93859674c1aSChristian Borntraeger save_access_regs(vcpu->run->s.regs.acrs); 9394725c860SMartin Schwidefsky restore_fp_ctl(&vcpu->arch.host_fpregs.fpc); 9404725c860SMartin Schwidefsky restore_fp_regs(vcpu->arch.host_fpregs.fprs); 941b0c632dbSHeiko Carstens restore_access_regs(vcpu->arch.host_acrs); 942b0c632dbSHeiko Carstens } 943b0c632dbSHeiko Carstens 944b0c632dbSHeiko Carstens static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) 945b0c632dbSHeiko Carstens { 946b0c632dbSHeiko Carstens /* this equals initial cpu reset in pop, but we don't switch to ESA */ 947b0c632dbSHeiko Carstens vcpu->arch.sie_block->gpsw.mask = 0UL; 948b0c632dbSHeiko Carstens vcpu->arch.sie_block->gpsw.addr = 0UL; 9498d26cf7bSChristian Borntraeger kvm_s390_set_prefix(vcpu, 0); 950b0c632dbSHeiko Carstens vcpu->arch.sie_block->cputm = 0UL; 951b0c632dbSHeiko Carstens vcpu->arch.sie_block->ckc = 0UL; 
952b0c632dbSHeiko Carstens vcpu->arch.sie_block->todpr = 0; 953b0c632dbSHeiko Carstens memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64)); 954b0c632dbSHeiko Carstens vcpu->arch.sie_block->gcr[0] = 0xE0UL; 955b0c632dbSHeiko Carstens vcpu->arch.sie_block->gcr[14] = 0xC2000000UL; 956b0c632dbSHeiko Carstens vcpu->arch.guest_fpregs.fpc = 0; 957b0c632dbSHeiko Carstens asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc)); 958b0c632dbSHeiko Carstens vcpu->arch.sie_block->gbea = 1; 959672550fbSChristian Borntraeger vcpu->arch.sie_block->pp = 0; 9603c038e6bSDominik Dingel vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; 9613c038e6bSDominik Dingel kvm_clear_async_pf_completion_queue(vcpu); 9626352e4d2SDavid Hildenbrand if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) 9636852d7b6SDavid Hildenbrand kvm_s390_vcpu_stop(vcpu); 9642ed10cc1SJens Freimann kvm_s390_clear_local_irqs(vcpu); 965b0c632dbSHeiko Carstens } 966b0c632dbSHeiko Carstens 96731928aa5SDominik Dingel void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) 96842897d86SMarcelo Tosatti { 96972f25020SJason J. Herne mutex_lock(&vcpu->kvm->lock); 97072f25020SJason J. Herne vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; 97172f25020SJason J. Herne mutex_unlock(&vcpu->kvm->lock); 972dafd032aSDominik Dingel if (!kvm_is_ucontrol(vcpu->kvm)) 973dafd032aSDominik Dingel vcpu->arch.gmap = vcpu->kvm->arch.gmap; 97442897d86SMarcelo Tosatti } 97542897d86SMarcelo Tosatti 9765102ee87STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu) 9775102ee87STony Krowiak { 978*9d8d5786SMichael Mueller if (!test_kvm_facility(vcpu->kvm, 76)) 9795102ee87STony Krowiak return; 9805102ee87STony Krowiak 981a374e892STony Krowiak vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA); 982a374e892STony Krowiak 983a374e892STony Krowiak if (vcpu->kvm->arch.crypto.aes_kw) 984a374e892STony Krowiak vcpu->arch.sie_block->ecb3 |= ECB3_AES; 985a374e892STony Krowiak if (vcpu->kvm->arch.crypto.dea_kw) 986a374e892STony Krowiak vcpu->arch.sie_block->ecb3 |= ECB3_DEA; 987a374e892STony Krowiak 9885102ee87STony Krowiak vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; 9895102ee87STony Krowiak } 9905102ee87STony Krowiak 991b31605c1SDominik Dingel void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu) 992b31605c1SDominik Dingel { 993b31605c1SDominik Dingel free_page(vcpu->arch.sie_block->cbrlo); 994b31605c1SDominik Dingel vcpu->arch.sie_block->cbrlo = 0; 995b31605c1SDominik Dingel } 996b31605c1SDominik Dingel 997b31605c1SDominik Dingel int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu) 998b31605c1SDominik Dingel { 999b31605c1SDominik Dingel vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL); 1000b31605c1SDominik Dingel if (!vcpu->arch.sie_block->cbrlo) 1001b31605c1SDominik Dingel return -ENOMEM; 1002b31605c1SDominik Dingel 1003b31605c1SDominik Dingel vcpu->arch.sie_block->ecb2 |= 0x80; 1004b31605c1SDominik Dingel vcpu->arch.sie_block->ecb2 &= ~0x08; 1005b31605c1SDominik Dingel return 0; 1006b31605c1SDominik Dingel } 1007b31605c1SDominik Dingel 1008b0c632dbSHeiko Carstens int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) 1009b0c632dbSHeiko Carstens { 1010b31605c1SDominik Dingel int rc = 0; 1011b31288faSKonstantin Weitz 10129e6dabefSCornelia Huck atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | 10139e6dabefSCornelia Huck CPUSTAT_SM | 101469d0d3a3SChristian Borntraeger CPUSTAT_STOPPED | 101569d0d3a3SChristian Borntraeger CPUSTAT_GED); 1016fc34531dSChristian Borntraeger vcpu->arch.sie_block->ecb = 6; 1017*9d8d5786SMichael Mueller 
if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73)) 10187feb6bb8SMichael Mueller vcpu->arch.sie_block->ecb |= 0x10; 10197feb6bb8SMichael Mueller 102069d0d3a3SChristian Borntraeger vcpu->arch.sie_block->ecb2 = 8; 1021ea5f4969SDavid Hildenbrand vcpu->arch.sie_block->eca = 0xC1002000U; 1022217a4406SHeiko Carstens if (sclp_has_siif()) 1023217a4406SHeiko Carstens vcpu->arch.sie_block->eca |= 1; 1024ea5f4969SDavid Hildenbrand if (sclp_has_sigpif()) 1025ea5f4969SDavid Hildenbrand vcpu->arch.sie_block->eca |= 0x10000000U; 10265a5e6536SMatthew Rosato vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE | 10275a5e6536SMatthew Rosato ICTL_TPROT; 10285a5e6536SMatthew Rosato 1029b31605c1SDominik Dingel if (kvm_s390_cmma_enabled(vcpu->kvm)) { 1030b31605c1SDominik Dingel rc = kvm_s390_vcpu_setup_cmma(vcpu); 1031b31605c1SDominik Dingel if (rc) 1032b31605c1SDominik Dingel return rc; 1033b31288faSKonstantin Weitz } 10340ac96cafSDavid Hildenbrand hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 1035ca872302SChristian Borntraeger vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; 1036*9d8d5786SMichael Mueller 1037*9d8d5786SMichael Mueller vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id; 1038*9d8d5786SMichael Mueller memcpy(vcpu->kvm->arch.model.fac->sie, vcpu->kvm->arch.model.fac->kvm, 1039*9d8d5786SMichael Mueller S390_ARCH_FAC_LIST_SIZE_BYTE); 10405102ee87STony Krowiak 10415102ee87STony Krowiak kvm_s390_vcpu_crypto_setup(vcpu); 10425102ee87STony Krowiak 1043b31605c1SDominik Dingel return rc; 1044b0c632dbSHeiko Carstens } 1045b0c632dbSHeiko Carstens 1046b0c632dbSHeiko Carstens struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, 1047b0c632dbSHeiko Carstens unsigned int id) 1048b0c632dbSHeiko Carstens { 10494d47555aSCarsten Otte struct kvm_vcpu *vcpu; 10507feb6bb8SMichael Mueller struct sie_page *sie_page; 10514d47555aSCarsten Otte int rc = -EINVAL; 1052b0c632dbSHeiko Carstens 10534d47555aSCarsten Otte if (id >= KVM_MAX_VCPUS) 10544d47555aSCarsten Otte goto out; 10554d47555aSCarsten Otte 10564d47555aSCarsten Otte rc = -ENOMEM; 10574d47555aSCarsten Otte 1058b110feafSMichael Mueller vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); 1059b0c632dbSHeiko Carstens if (!vcpu) 10604d47555aSCarsten Otte goto out; 1061b0c632dbSHeiko Carstens 10627feb6bb8SMichael Mueller sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL); 10637feb6bb8SMichael Mueller if (!sie_page) 1064b0c632dbSHeiko Carstens goto out_free_cpu; 1065b0c632dbSHeiko Carstens 10667feb6bb8SMichael Mueller vcpu->arch.sie_block = &sie_page->sie_block; 10677feb6bb8SMichael Mueller vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb; 10687feb6bb8SMichael Mueller 1069b0c632dbSHeiko Carstens vcpu->arch.sie_block->icpua = id; 107058f9460bSCarsten Otte if (!kvm_is_ucontrol(kvm)) { 107158f9460bSCarsten Otte if (!kvm->arch.sca) { 107258f9460bSCarsten Otte WARN_ON_ONCE(1); 107358f9460bSCarsten Otte goto out_free_cpu; 107458f9460bSCarsten Otte } 1075abf4a71eSCarsten Otte if (!kvm->arch.sca->cpu[id].sda) 107658f9460bSCarsten Otte kvm->arch.sca->cpu[id].sda = 107758f9460bSCarsten Otte (__u64) vcpu->arch.sie_block; 107858f9460bSCarsten Otte vcpu->arch.sie_block->scaoh = 107958f9460bSCarsten Otte (__u32)(((__u64)kvm->arch.sca) >> 32); 1080b0c632dbSHeiko Carstens vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; 1081fc34531dSChristian Borntraeger set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); 108258f9460bSCarsten Otte } 1083*9d8d5786SMichael Mueller vcpu->arch.sie_block->fac 
= (int) (long) kvm->arch.model.fac->sie; 1084b0c632dbSHeiko Carstens 1085ba5c1e9bSCarsten Otte spin_lock_init(&vcpu->arch.local_int.lock); 1086ba5c1e9bSCarsten Otte vcpu->arch.local_int.float_int = &kvm->arch.float_int; 1087d0321a24SChristian Borntraeger vcpu->arch.local_int.wq = &vcpu->wq; 10885288fbf0SChristian Borntraeger vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags; 1089ba5c1e9bSCarsten Otte 1090b0c632dbSHeiko Carstens rc = kvm_vcpu_init(vcpu, kvm, id); 1091b0c632dbSHeiko Carstens if (rc) 10927b06bf2fSWei Yongjun goto out_free_sie_block; 1093b0c632dbSHeiko Carstens VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu, 1094b0c632dbSHeiko Carstens vcpu->arch.sie_block); 1095ade38c31SCornelia Huck trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block); 1096b0c632dbSHeiko Carstens 1097b0c632dbSHeiko Carstens return vcpu; 10987b06bf2fSWei Yongjun out_free_sie_block: 10997b06bf2fSWei Yongjun free_page((unsigned long)(vcpu->arch.sie_block)); 1100b0c632dbSHeiko Carstens out_free_cpu: 1101b110feafSMichael Mueller kmem_cache_free(kvm_vcpu_cache, vcpu); 11024d47555aSCarsten Otte out: 1103b0c632dbSHeiko Carstens return ERR_PTR(rc); 1104b0c632dbSHeiko Carstens } 1105b0c632dbSHeiko Carstens 1106b0c632dbSHeiko Carstens int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) 1107b0c632dbSHeiko Carstens { 11089a022067SDavid Hildenbrand return kvm_s390_vcpu_has_irq(vcpu, 0); 1109b0c632dbSHeiko Carstens } 1110b0c632dbSHeiko Carstens 111149b99e1eSChristian Borntraeger void s390_vcpu_block(struct kvm_vcpu *vcpu) 111249b99e1eSChristian Borntraeger { 111349b99e1eSChristian Borntraeger atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); 111449b99e1eSChristian Borntraeger } 111549b99e1eSChristian Borntraeger 111649b99e1eSChristian Borntraeger void s390_vcpu_unblock(struct kvm_vcpu *vcpu) 111749b99e1eSChristian Borntraeger { 111849b99e1eSChristian Borntraeger atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); 111949b99e1eSChristian Borntraeger } 112049b99e1eSChristian Borntraeger 112149b99e1eSChristian Borntraeger /* 112249b99e1eSChristian Borntraeger * Kick a guest cpu out of SIE and wait until SIE is not running. 112349b99e1eSChristian Borntraeger * If the CPU is not running (e.g. waiting as idle) the function will 112449b99e1eSChristian Borntraeger * return immediately. 
*/ 112549b99e1eSChristian Borntraeger void exit_sie(struct kvm_vcpu *vcpu) 112649b99e1eSChristian Borntraeger { 112749b99e1eSChristian Borntraeger atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags); 112849b99e1eSChristian Borntraeger while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) 112949b99e1eSChristian Borntraeger cpu_relax(); 113049b99e1eSChristian Borntraeger } 113149b99e1eSChristian Borntraeger 113249b99e1eSChristian Borntraeger /* Kick a guest cpu out of SIE and prevent SIE-reentry */ 113349b99e1eSChristian Borntraeger void exit_sie_sync(struct kvm_vcpu *vcpu) 113449b99e1eSChristian Borntraeger { 113549b99e1eSChristian Borntraeger s390_vcpu_block(vcpu); 113649b99e1eSChristian Borntraeger exit_sie(vcpu); 113749b99e1eSChristian Borntraeger } 113849b99e1eSChristian Borntraeger 11392c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address) 11402c70fe44SChristian Borntraeger { 11412c70fe44SChristian Borntraeger int i; 11422c70fe44SChristian Borntraeger struct kvm *kvm = gmap->private; 11432c70fe44SChristian Borntraeger struct kvm_vcpu *vcpu; 11442c70fe44SChristian Borntraeger 11452c70fe44SChristian Borntraeger kvm_for_each_vcpu(i, vcpu, kvm) { 11462c70fe44SChristian Borntraeger /* match against both prefix pages */ 1147fda902cbSMichael Mueller if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) { 11482c70fe44SChristian Borntraeger VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address); 11492c70fe44SChristian Borntraeger kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); 11502c70fe44SChristian Borntraeger exit_sie_sync(vcpu); 11512c70fe44SChristian Borntraeger } 11522c70fe44SChristian Borntraeger } 11532c70fe44SChristian Borntraeger } 11542c70fe44SChristian Borntraeger 1155b6d33834SChristoffer Dall int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) 1156b6d33834SChristoffer Dall { 1157b6d33834SChristoffer Dall /* kvm common code refers to this, but never calls it */ 1158b6d33834SChristoffer Dall BUG(); 1159b6d33834SChristoffer Dall return 0; 1160b6d33834SChristoffer Dall } 1161b6d33834SChristoffer Dall 116214eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, 116314eebd91SCarsten Otte struct kvm_one_reg *reg) 116414eebd91SCarsten Otte { 116514eebd91SCarsten Otte int r = -EINVAL; 116614eebd91SCarsten Otte 116714eebd91SCarsten Otte switch (reg->id) { 116829b7c71bSCarsten Otte case KVM_REG_S390_TODPR: 116929b7c71bSCarsten Otte r = put_user(vcpu->arch.sie_block->todpr, 117029b7c71bSCarsten Otte (u32 __user *)reg->addr); 117129b7c71bSCarsten Otte break; 117229b7c71bSCarsten Otte case KVM_REG_S390_EPOCHDIFF: 117329b7c71bSCarsten Otte r = put_user(vcpu->arch.sie_block->epoch, 117429b7c71bSCarsten Otte (u64 __user *)reg->addr); 117529b7c71bSCarsten Otte break; 117646a6dd1cSJason J. herne case KVM_REG_S390_CPU_TIMER: 117746a6dd1cSJason J. herne r = put_user(vcpu->arch.sie_block->cputm, 117846a6dd1cSJason J. herne (u64 __user *)reg->addr); 117946a6dd1cSJason J. herne break; 118046a6dd1cSJason J. herne case KVM_REG_S390_CLOCK_COMP: 118146a6dd1cSJason J. herne r = put_user(vcpu->arch.sie_block->ckc, 118246a6dd1cSJason J. herne (u64 __user *)reg->addr); 118346a6dd1cSJason J. 
herne break; 1184536336c2SDominik Dingel case KVM_REG_S390_PFTOKEN: 1185536336c2SDominik Dingel r = put_user(vcpu->arch.pfault_token, 1186536336c2SDominik Dingel (u64 __user *)reg->addr); 1187536336c2SDominik Dingel break; 1188536336c2SDominik Dingel case KVM_REG_S390_PFCOMPARE: 1189536336c2SDominik Dingel r = put_user(vcpu->arch.pfault_compare, 1190536336c2SDominik Dingel (u64 __user *)reg->addr); 1191536336c2SDominik Dingel break; 1192536336c2SDominik Dingel case KVM_REG_S390_PFSELECT: 1193536336c2SDominik Dingel r = put_user(vcpu->arch.pfault_select, 1194536336c2SDominik Dingel (u64 __user *)reg->addr); 1195536336c2SDominik Dingel break; 1196672550fbSChristian Borntraeger case KVM_REG_S390_PP: 1197672550fbSChristian Borntraeger r = put_user(vcpu->arch.sie_block->pp, 1198672550fbSChristian Borntraeger (u64 __user *)reg->addr); 1199672550fbSChristian Borntraeger break; 1200afa45ff5SChristian Borntraeger case KVM_REG_S390_GBEA: 1201afa45ff5SChristian Borntraeger r = put_user(vcpu->arch.sie_block->gbea, 1202afa45ff5SChristian Borntraeger (u64 __user *)reg->addr); 1203afa45ff5SChristian Borntraeger break; 120414eebd91SCarsten Otte default: 120514eebd91SCarsten Otte break; 120614eebd91SCarsten Otte } 120714eebd91SCarsten Otte 120814eebd91SCarsten Otte return r; 120914eebd91SCarsten Otte } 121014eebd91SCarsten Otte 121114eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, 121214eebd91SCarsten Otte struct kvm_one_reg *reg) 121314eebd91SCarsten Otte { 121414eebd91SCarsten Otte int r = -EINVAL; 121514eebd91SCarsten Otte 121614eebd91SCarsten Otte switch (reg->id) { 121729b7c71bSCarsten Otte case KVM_REG_S390_TODPR: 121829b7c71bSCarsten Otte r = get_user(vcpu->arch.sie_block->todpr, 121929b7c71bSCarsten Otte (u32 __user *)reg->addr); 122029b7c71bSCarsten Otte break; 122129b7c71bSCarsten Otte case KVM_REG_S390_EPOCHDIFF: 122229b7c71bSCarsten Otte r = get_user(vcpu->arch.sie_block->epoch, 122329b7c71bSCarsten Otte (u64 __user *)reg->addr); 122429b7c71bSCarsten Otte break; 122546a6dd1cSJason J. herne case KVM_REG_S390_CPU_TIMER: 122646a6dd1cSJason J. herne r = get_user(vcpu->arch.sie_block->cputm, 122746a6dd1cSJason J. herne (u64 __user *)reg->addr); 122846a6dd1cSJason J. herne break; 122946a6dd1cSJason J. herne case KVM_REG_S390_CLOCK_COMP: 123046a6dd1cSJason J. herne r = get_user(vcpu->arch.sie_block->ckc, 123146a6dd1cSJason J. herne (u64 __user *)reg->addr); 123246a6dd1cSJason J. 
herne break; 1233536336c2SDominik Dingel case KVM_REG_S390_PFTOKEN: 1234536336c2SDominik Dingel r = get_user(vcpu->arch.pfault_token, 1235536336c2SDominik Dingel (u64 __user *)reg->addr); 12369fbd8082SDavid Hildenbrand if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) 12379fbd8082SDavid Hildenbrand kvm_clear_async_pf_completion_queue(vcpu); 1238536336c2SDominik Dingel break; 1239536336c2SDominik Dingel case KVM_REG_S390_PFCOMPARE: 1240536336c2SDominik Dingel r = get_user(vcpu->arch.pfault_compare, 1241536336c2SDominik Dingel (u64 __user *)reg->addr); 1242536336c2SDominik Dingel break; 1243536336c2SDominik Dingel case KVM_REG_S390_PFSELECT: 1244536336c2SDominik Dingel r = get_user(vcpu->arch.pfault_select, 1245536336c2SDominik Dingel (u64 __user *)reg->addr); 1246536336c2SDominik Dingel break; 1247672550fbSChristian Borntraeger case KVM_REG_S390_PP: 1248672550fbSChristian Borntraeger r = get_user(vcpu->arch.sie_block->pp, 1249672550fbSChristian Borntraeger (u64 __user *)reg->addr); 1250672550fbSChristian Borntraeger break; 1251afa45ff5SChristian Borntraeger case KVM_REG_S390_GBEA: 1252afa45ff5SChristian Borntraeger r = get_user(vcpu->arch.sie_block->gbea, 1253afa45ff5SChristian Borntraeger (u64 __user *)reg->addr); 1254afa45ff5SChristian Borntraeger break; 125514eebd91SCarsten Otte default: 125614eebd91SCarsten Otte break; 125714eebd91SCarsten Otte } 125814eebd91SCarsten Otte 125914eebd91SCarsten Otte return r; 126014eebd91SCarsten Otte } 1261b6d33834SChristoffer Dall 1262b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu) 1263b0c632dbSHeiko Carstens { 1264b0c632dbSHeiko Carstens kvm_s390_vcpu_initial_reset(vcpu); 1265b0c632dbSHeiko Carstens return 0; 1266b0c632dbSHeiko Carstens } 1267b0c632dbSHeiko Carstens 1268b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 1269b0c632dbSHeiko Carstens { 12705a32c1afSChristian Borntraeger memcpy(&vcpu->run->s.regs.gprs, ®s->gprs, sizeof(regs->gprs)); 1271b0c632dbSHeiko Carstens return 0; 1272b0c632dbSHeiko Carstens } 1273b0c632dbSHeiko Carstens 1274b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 1275b0c632dbSHeiko Carstens { 12765a32c1afSChristian Borntraeger memcpy(®s->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs)); 1277b0c632dbSHeiko Carstens return 0; 1278b0c632dbSHeiko Carstens } 1279b0c632dbSHeiko Carstens 1280b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 1281b0c632dbSHeiko Carstens struct kvm_sregs *sregs) 1282b0c632dbSHeiko Carstens { 128359674c1aSChristian Borntraeger memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs)); 1284b0c632dbSHeiko Carstens memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); 128559674c1aSChristian Borntraeger restore_access_regs(vcpu->run->s.regs.acrs); 1286b0c632dbSHeiko Carstens return 0; 1287b0c632dbSHeiko Carstens } 1288b0c632dbSHeiko Carstens 1289b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, 1290b0c632dbSHeiko Carstens struct kvm_sregs *sregs) 1291b0c632dbSHeiko Carstens { 129259674c1aSChristian Borntraeger memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs)); 1293b0c632dbSHeiko Carstens memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs)); 1294b0c632dbSHeiko Carstens return 0; 1295b0c632dbSHeiko Carstens } 1296b0c632dbSHeiko Carstens 1297b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct 
kvm_fpu *fpu) 1298b0c632dbSHeiko Carstens { 12994725c860SMartin Schwidefsky if (test_fp_ctl(fpu->fpc)) 13004725c860SMartin Schwidefsky return -EINVAL; 1301b0c632dbSHeiko Carstens memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs)); 13024725c860SMartin Schwidefsky vcpu->arch.guest_fpregs.fpc = fpu->fpc; 13034725c860SMartin Schwidefsky restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc); 13044725c860SMartin Schwidefsky restore_fp_regs(vcpu->arch.guest_fpregs.fprs); 1305b0c632dbSHeiko Carstens return 0; 1306b0c632dbSHeiko Carstens } 1307b0c632dbSHeiko Carstens 1308b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 1309b0c632dbSHeiko Carstens { 1310b0c632dbSHeiko Carstens memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs)); 1311b0c632dbSHeiko Carstens fpu->fpc = vcpu->arch.guest_fpregs.fpc; 1312b0c632dbSHeiko Carstens return 0; 1313b0c632dbSHeiko Carstens } 1314b0c632dbSHeiko Carstens 1315b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw) 1316b0c632dbSHeiko Carstens { 1317b0c632dbSHeiko Carstens int rc = 0; 1318b0c632dbSHeiko Carstens 13197a42fdc2SDavid Hildenbrand if (!is_vcpu_stopped(vcpu)) 1320b0c632dbSHeiko Carstens rc = -EBUSY; 1321d7b0b5ebSCarsten Otte else { 1322d7b0b5ebSCarsten Otte vcpu->run->psw_mask = psw.mask; 1323d7b0b5ebSCarsten Otte vcpu->run->psw_addr = psw.addr; 1324d7b0b5ebSCarsten Otte } 1325b0c632dbSHeiko Carstens return rc; 1326b0c632dbSHeiko Carstens } 1327b0c632dbSHeiko Carstens 1328b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, 1329b0c632dbSHeiko Carstens struct kvm_translation *tr) 1330b0c632dbSHeiko Carstens { 1331b0c632dbSHeiko Carstens return -EINVAL; /* not implemented yet */ 1332b0c632dbSHeiko Carstens } 1333b0c632dbSHeiko Carstens 133427291e21SDavid Hildenbrand #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \ 133527291e21SDavid Hildenbrand KVM_GUESTDBG_USE_HW_BP | \ 133627291e21SDavid Hildenbrand KVM_GUESTDBG_ENABLE) 133727291e21SDavid Hildenbrand 1338d0bfb940SJan Kiszka int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 1339d0bfb940SJan Kiszka struct kvm_guest_debug *dbg) 1340b0c632dbSHeiko Carstens { 134127291e21SDavid Hildenbrand int rc = 0; 134227291e21SDavid Hildenbrand 134327291e21SDavid Hildenbrand vcpu->guest_debug = 0; 134427291e21SDavid Hildenbrand kvm_s390_clear_bp_data(vcpu); 134527291e21SDavid Hildenbrand 13462de3bfc2SDavid Hildenbrand if (dbg->control & ~VALID_GUESTDBG_FLAGS) 134727291e21SDavid Hildenbrand return -EINVAL; 134827291e21SDavid Hildenbrand 134927291e21SDavid Hildenbrand if (dbg->control & KVM_GUESTDBG_ENABLE) { 135027291e21SDavid Hildenbrand vcpu->guest_debug = dbg->control; 135127291e21SDavid Hildenbrand /* enforce guest PER */ 135227291e21SDavid Hildenbrand atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); 135327291e21SDavid Hildenbrand 135427291e21SDavid Hildenbrand if (dbg->control & KVM_GUESTDBG_USE_HW_BP) 135527291e21SDavid Hildenbrand rc = kvm_s390_import_bp_data(vcpu, dbg); 135627291e21SDavid Hildenbrand } else { 135727291e21SDavid Hildenbrand atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); 135827291e21SDavid Hildenbrand vcpu->arch.guestdbg.last_bp = 0; 135927291e21SDavid Hildenbrand } 136027291e21SDavid Hildenbrand 136127291e21SDavid Hildenbrand if (rc) { 136227291e21SDavid Hildenbrand vcpu->guest_debug = 0; 136327291e21SDavid Hildenbrand kvm_s390_clear_bp_data(vcpu); 136427291e21SDavid Hildenbrand 
atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); 136527291e21SDavid Hildenbrand } 136627291e21SDavid Hildenbrand 136727291e21SDavid Hildenbrand return rc; 1368b0c632dbSHeiko Carstens } 1369b0c632dbSHeiko Carstens 137062d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 137162d9f0dbSMarcelo Tosatti struct kvm_mp_state *mp_state) 137262d9f0dbSMarcelo Tosatti { 13736352e4d2SDavid Hildenbrand /* CHECK_STOP and LOAD are not supported yet */ 13746352e4d2SDavid Hildenbrand return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED : 13756352e4d2SDavid Hildenbrand KVM_MP_STATE_OPERATING; 137662d9f0dbSMarcelo Tosatti } 137762d9f0dbSMarcelo Tosatti 137862d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, 137962d9f0dbSMarcelo Tosatti struct kvm_mp_state *mp_state) 138062d9f0dbSMarcelo Tosatti { 13816352e4d2SDavid Hildenbrand int rc = 0; 13826352e4d2SDavid Hildenbrand 13836352e4d2SDavid Hildenbrand /* user space knows about this interface - let it control the state */ 13846352e4d2SDavid Hildenbrand vcpu->kvm->arch.user_cpu_state_ctrl = 1; 13856352e4d2SDavid Hildenbrand 13866352e4d2SDavid Hildenbrand switch (mp_state->mp_state) { 13876352e4d2SDavid Hildenbrand case KVM_MP_STATE_STOPPED: 13886352e4d2SDavid Hildenbrand kvm_s390_vcpu_stop(vcpu); 13896352e4d2SDavid Hildenbrand break; 13906352e4d2SDavid Hildenbrand case KVM_MP_STATE_OPERATING: 13916352e4d2SDavid Hildenbrand kvm_s390_vcpu_start(vcpu); 13926352e4d2SDavid Hildenbrand break; 13936352e4d2SDavid Hildenbrand case KVM_MP_STATE_LOAD: 13946352e4d2SDavid Hildenbrand case KVM_MP_STATE_CHECK_STOP: 13956352e4d2SDavid Hildenbrand /* fall through - CHECK_STOP and LOAD are not supported yet */ 13966352e4d2SDavid Hildenbrand default: 13976352e4d2SDavid Hildenbrand rc = -ENXIO; 13986352e4d2SDavid Hildenbrand } 13996352e4d2SDavid Hildenbrand 14006352e4d2SDavid Hildenbrand return rc; 140162d9f0dbSMarcelo Tosatti } 140262d9f0dbSMarcelo Tosatti 1403b31605c1SDominik Dingel bool kvm_s390_cmma_enabled(struct kvm *kvm) 1404b31605c1SDominik Dingel { 1405b31605c1SDominik Dingel if (!MACHINE_IS_LPAR) 1406b31605c1SDominik Dingel return false; 1407b31605c1SDominik Dingel /* only enable for z10 and later */ 1408b31605c1SDominik Dingel if (!MACHINE_HAS_EDAT1) 1409b31605c1SDominik Dingel return false; 1410b31605c1SDominik Dingel if (!kvm->arch.use_cmma) 1411b31605c1SDominik Dingel return false; 1412b31605c1SDominik Dingel return true; 1413b31605c1SDominik Dingel } 1414b31605c1SDominik Dingel 14158ad35755SDavid Hildenbrand static bool ibs_enabled(struct kvm_vcpu *vcpu) 14168ad35755SDavid Hildenbrand { 14178ad35755SDavid Hildenbrand return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS; 14188ad35755SDavid Hildenbrand } 14198ad35755SDavid Hildenbrand 14202c70fe44SChristian Borntraeger static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) 14212c70fe44SChristian Borntraeger { 14228ad35755SDavid Hildenbrand retry: 14238ad35755SDavid Hildenbrand s390_vcpu_unblock(vcpu); 14242c70fe44SChristian Borntraeger /* 14252c70fe44SChristian Borntraeger * We use MMU_RELOAD just to re-arm the ipte notifier for the 14262c70fe44SChristian Borntraeger * guest prefix page. gmap_ipte_notify will wait on the ptl lock. 14272c70fe44SChristian Borntraeger * This ensures that the ipte instruction for this request has 14282c70fe44SChristian Borntraeger * already finished. We might race against a second unmapper that 14292c70fe44SChristian Borntraeger * wants to set the blocking bit. Lets just retry the request loop. 
14302c70fe44SChristian Borntraeger */ 14318ad35755SDavid Hildenbrand if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) { 14322c70fe44SChristian Borntraeger int rc; 14332c70fe44SChristian Borntraeger rc = gmap_ipte_notify(vcpu->arch.gmap, 1434fda902cbSMichael Mueller kvm_s390_get_prefix(vcpu), 14352c70fe44SChristian Borntraeger PAGE_SIZE * 2); 14362c70fe44SChristian Borntraeger if (rc) 14372c70fe44SChristian Borntraeger return rc; 14388ad35755SDavid Hildenbrand goto retry; 14392c70fe44SChristian Borntraeger } 14408ad35755SDavid Hildenbrand 1441d3d692c8SDavid Hildenbrand if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { 1442d3d692c8SDavid Hildenbrand vcpu->arch.sie_block->ihcpu = 0xffff; 1443d3d692c8SDavid Hildenbrand goto retry; 1444d3d692c8SDavid Hildenbrand } 1445d3d692c8SDavid Hildenbrand 14468ad35755SDavid Hildenbrand if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) { 14478ad35755SDavid Hildenbrand if (!ibs_enabled(vcpu)) { 14488ad35755SDavid Hildenbrand trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1); 14498ad35755SDavid Hildenbrand atomic_set_mask(CPUSTAT_IBS, 14508ad35755SDavid Hildenbrand &vcpu->arch.sie_block->cpuflags); 14518ad35755SDavid Hildenbrand } 14528ad35755SDavid Hildenbrand goto retry; 14538ad35755SDavid Hildenbrand } 14548ad35755SDavid Hildenbrand 14558ad35755SDavid Hildenbrand if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) { 14568ad35755SDavid Hildenbrand if (ibs_enabled(vcpu)) { 14578ad35755SDavid Hildenbrand trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0); 14588ad35755SDavid Hildenbrand atomic_clear_mask(CPUSTAT_IBS, 14598ad35755SDavid Hildenbrand &vcpu->arch.sie_block->cpuflags); 14608ad35755SDavid Hildenbrand } 14618ad35755SDavid Hildenbrand goto retry; 14628ad35755SDavid Hildenbrand } 14638ad35755SDavid Hildenbrand 14640759d068SDavid Hildenbrand /* nothing to do, just clear the request */ 14650759d068SDavid Hildenbrand clear_bit(KVM_REQ_UNHALT, &vcpu->requests); 14660759d068SDavid Hildenbrand 14672c70fe44SChristian Borntraeger return 0; 14682c70fe44SChristian Borntraeger } 14692c70fe44SChristian Borntraeger 1470fa576c58SThomas Huth /** 1471fa576c58SThomas Huth * kvm_arch_fault_in_page - fault-in guest page if necessary 1472fa576c58SThomas Huth * @vcpu: The corresponding virtual cpu 1473fa576c58SThomas Huth * @gpa: Guest physical address 1474fa576c58SThomas Huth * @writable: Whether the page should be writable or not 1475fa576c58SThomas Huth * 1476fa576c58SThomas Huth * Make sure that a guest page has been faulted-in on the host. 1477fa576c58SThomas Huth * 1478fa576c58SThomas Huth * Return: Zero on success, negative error code otherwise. 1479fa576c58SThomas Huth */ 1480fa576c58SThomas Huth long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable) 148124eb3a82SDominik Dingel { 1482527e30b4SMartin Schwidefsky return gmap_fault(vcpu->arch.gmap, gpa, 1483527e30b4SMartin Schwidefsky writable ? 
FAULT_FLAG_WRITE : 0); 148424eb3a82SDominik Dingel } 148524eb3a82SDominik Dingel 14863c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token, 14873c038e6bSDominik Dingel unsigned long token) 14883c038e6bSDominik Dingel { 14893c038e6bSDominik Dingel struct kvm_s390_interrupt inti; 1490383d0b05SJens Freimann struct kvm_s390_irq irq; 14913c038e6bSDominik Dingel 14923c038e6bSDominik Dingel if (start_token) { 1493383d0b05SJens Freimann irq.u.ext.ext_params2 = token; 1494383d0b05SJens Freimann irq.type = KVM_S390_INT_PFAULT_INIT; 1495383d0b05SJens Freimann WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq)); 14963c038e6bSDominik Dingel } else { 14973c038e6bSDominik Dingel inti.type = KVM_S390_INT_PFAULT_DONE; 1498383d0b05SJens Freimann inti.parm64 = token; 14993c038e6bSDominik Dingel WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); 15003c038e6bSDominik Dingel } 15013c038e6bSDominik Dingel } 15023c038e6bSDominik Dingel 15033c038e6bSDominik Dingel void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, 15043c038e6bSDominik Dingel struct kvm_async_pf *work) 15053c038e6bSDominik Dingel { 15063c038e6bSDominik Dingel trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token); 15073c038e6bSDominik Dingel __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token); 15083c038e6bSDominik Dingel } 15093c038e6bSDominik Dingel 15103c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, 15113c038e6bSDominik Dingel struct kvm_async_pf *work) 15123c038e6bSDominik Dingel { 15133c038e6bSDominik Dingel trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token); 15143c038e6bSDominik Dingel __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token); 15153c038e6bSDominik Dingel } 15163c038e6bSDominik Dingel 15173c038e6bSDominik Dingel void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, 15183c038e6bSDominik Dingel struct kvm_async_pf *work) 15193c038e6bSDominik Dingel { 15203c038e6bSDominik Dingel /* s390 will always inject the page directly */ 15213c038e6bSDominik Dingel } 15223c038e6bSDominik Dingel 15233c038e6bSDominik Dingel bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) 15243c038e6bSDominik Dingel { 15253c038e6bSDominik Dingel /* 15263c038e6bSDominik Dingel * s390 will always inject the page directly, 15273c038e6bSDominik Dingel * but we still want check_async_completion to cleanup 15283c038e6bSDominik Dingel */ 15293c038e6bSDominik Dingel return true; 15303c038e6bSDominik Dingel } 15313c038e6bSDominik Dingel 15323c038e6bSDominik Dingel static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu) 15333c038e6bSDominik Dingel { 15343c038e6bSDominik Dingel hva_t hva; 15353c038e6bSDominik Dingel struct kvm_arch_async_pf arch; 15363c038e6bSDominik Dingel int rc; 15373c038e6bSDominik Dingel 15383c038e6bSDominik Dingel if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) 15393c038e6bSDominik Dingel return 0; 15403c038e6bSDominik Dingel if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) != 15413c038e6bSDominik Dingel vcpu->arch.pfault_compare) 15423c038e6bSDominik Dingel return 0; 15433c038e6bSDominik Dingel if (psw_extint_disabled(vcpu)) 15443c038e6bSDominik Dingel return 0; 15459a022067SDavid Hildenbrand if (kvm_s390_vcpu_has_irq(vcpu, 0)) 15463c038e6bSDominik Dingel return 0; 15473c038e6bSDominik Dingel if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul)) 15483c038e6bSDominik Dingel return 0; 15493c038e6bSDominik Dingel if (!vcpu->arch.gmap->pfault_enabled) 15503c038e6bSDominik Dingel return 0; 
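/*
 * Resolve the host virtual address that backs the faulting guest address
 * and fetch the 8-byte pfault token from guest memory; if the token cannot
 * be read, skip async handling and let the fault be resolved synchronously.
 */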
15513c038e6bSDominik Dingel 155281480cc1SHeiko Carstens hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr)); 155381480cc1SHeiko Carstens hva += current->thread.gmap_addr & ~PAGE_MASK; 155481480cc1SHeiko Carstens if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8)) 15553c038e6bSDominik Dingel return 0; 15563c038e6bSDominik Dingel 15573c038e6bSDominik Dingel rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch); 15583c038e6bSDominik Dingel return rc; 15593c038e6bSDominik Dingel } 15603c038e6bSDominik Dingel 15613fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu) 1562b0c632dbSHeiko Carstens { 15633fb4c40fSThomas Huth int rc, cpuflags; 1564e168bf8dSCarsten Otte 15653c038e6bSDominik Dingel /* 15663c038e6bSDominik Dingel * On s390 notifications for arriving pages will be delivered directly 15673c038e6bSDominik Dingel * to the guest but the house keeping for completed pfaults is 15683c038e6bSDominik Dingel * handled outside the worker. 15693c038e6bSDominik Dingel */ 15703c038e6bSDominik Dingel kvm_check_async_pf_completion(vcpu); 15713c038e6bSDominik Dingel 15725a32c1afSChristian Borntraeger memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16); 1573b0c632dbSHeiko Carstens 1574b0c632dbSHeiko Carstens if (need_resched()) 1575b0c632dbSHeiko Carstens schedule(); 1576b0c632dbSHeiko Carstens 1577d3a73acbSMartin Schwidefsky if (test_cpu_flag(CIF_MCCK_PENDING)) 157871cde587SChristian Borntraeger s390_handle_mcck(); 157971cde587SChristian Borntraeger 158079395031SJens Freimann if (!kvm_is_ucontrol(vcpu->kvm)) { 158179395031SJens Freimann rc = kvm_s390_deliver_pending_interrupts(vcpu); 158279395031SJens Freimann if (rc) 158379395031SJens Freimann return rc; 158479395031SJens Freimann } 15850ff31867SCarsten Otte 15862c70fe44SChristian Borntraeger rc = kvm_s390_handle_requests(vcpu); 15872c70fe44SChristian Borntraeger if (rc) 15882c70fe44SChristian Borntraeger return rc; 15892c70fe44SChristian Borntraeger 159027291e21SDavid Hildenbrand if (guestdbg_enabled(vcpu)) { 159127291e21SDavid Hildenbrand kvm_s390_backup_guest_per_regs(vcpu); 159227291e21SDavid Hildenbrand kvm_s390_patch_guest_per_regs(vcpu); 159327291e21SDavid Hildenbrand } 159427291e21SDavid Hildenbrand 1595b0c632dbSHeiko Carstens vcpu->arch.sie_block->icptcode = 0; 15963fb4c40fSThomas Huth cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); 15973fb4c40fSThomas Huth VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags); 15983fb4c40fSThomas Huth trace_kvm_s390_sie_enter(vcpu, cpuflags); 15992b29a9fdSDominik Dingel 16003fb4c40fSThomas Huth return 0; 16013fb4c40fSThomas Huth } 16023fb4c40fSThomas Huth 16033fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) 16043fb4c40fSThomas Huth { 160524eb3a82SDominik Dingel int rc = -1; 16062b29a9fdSDominik Dingel 16072b29a9fdSDominik Dingel VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", 16082b29a9fdSDominik Dingel vcpu->arch.sie_block->icptcode); 16092b29a9fdSDominik Dingel trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); 16102b29a9fdSDominik Dingel 161127291e21SDavid Hildenbrand if (guestdbg_enabled(vcpu)) 161227291e21SDavid Hildenbrand kvm_s390_restore_guest_per_regs(vcpu); 161327291e21SDavid Hildenbrand 16143fb4c40fSThomas Huth if (exit_reason >= 0) { 16157c470539SMartin Schwidefsky rc = 0; 1616210b1607SThomas Huth } else if (kvm_is_ucontrol(vcpu->kvm)) { 1617210b1607SThomas Huth vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL; 1618210b1607SThomas Huth 
vcpu->run->s390_ucontrol.trans_exc_code = 1619210b1607SThomas Huth current->thread.gmap_addr; 1620210b1607SThomas Huth vcpu->run->s390_ucontrol.pgm_code = 0x10; 1621210b1607SThomas Huth rc = -EREMOTE; 162224eb3a82SDominik Dingel 162324eb3a82SDominik Dingel } else if (current->thread.gmap_pfault) { 16243c038e6bSDominik Dingel trace_kvm_s390_major_guest_pfault(vcpu); 162524eb3a82SDominik Dingel current->thread.gmap_pfault = 0; 1626fa576c58SThomas Huth if (kvm_arch_setup_async_pf(vcpu)) { 162724eb3a82SDominik Dingel rc = 0; 1628fa576c58SThomas Huth } else { 1629fa576c58SThomas Huth gpa_t gpa = current->thread.gmap_addr; 1630fa576c58SThomas Huth rc = kvm_arch_fault_in_page(vcpu, gpa, 1); 1631fa576c58SThomas Huth } 163224eb3a82SDominik Dingel } 163324eb3a82SDominik Dingel 163424eb3a82SDominik Dingel if (rc == -1) { 1635699bde3bSChristian Borntraeger VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); 1636699bde3bSChristian Borntraeger trace_kvm_s390_sie_fault(vcpu); 1637699bde3bSChristian Borntraeger rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 16381f0d0f09SCarsten Otte } 1639b0c632dbSHeiko Carstens 16405a32c1afSChristian Borntraeger memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); 16413fb4c40fSThomas Huth 1642a76ccff6SThomas Huth if (rc == 0) { 1643a76ccff6SThomas Huth if (kvm_is_ucontrol(vcpu->kvm)) 16442955c83fSChristian Borntraeger /* Don't exit for host interrupts. */ 16452955c83fSChristian Borntraeger rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0; 1646a76ccff6SThomas Huth else 1647a76ccff6SThomas Huth rc = kvm_handle_sie_intercept(vcpu); 1648a76ccff6SThomas Huth } 1649a76ccff6SThomas Huth 16503fb4c40fSThomas Huth return rc; 16513fb4c40fSThomas Huth } 16523fb4c40fSThomas Huth 16533fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu) 16543fb4c40fSThomas Huth { 16553fb4c40fSThomas Huth int rc, exit_reason; 16563fb4c40fSThomas Huth 1657800c1065SThomas Huth /* 1658800c1065SThomas Huth * We try to hold kvm->srcu during most of vcpu_run (except when run- 1659800c1065SThomas Huth * ning the guest), so that memslots (and other stuff) are protected 1660800c1065SThomas Huth */ 1661800c1065SThomas Huth vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 1662800c1065SThomas Huth 1663a76ccff6SThomas Huth do { 16643fb4c40fSThomas Huth rc = vcpu_pre_run(vcpu); 16653fb4c40fSThomas Huth if (rc) 1666a76ccff6SThomas Huth break; 16673fb4c40fSThomas Huth 1668800c1065SThomas Huth srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 16693fb4c40fSThomas Huth /* 1670a76ccff6SThomas Huth * As PF_VCPU will be used in fault handler, between 1671a76ccff6SThomas Huth * guest_enter and guest_exit should be no uaccess. 
16723fb4c40fSThomas Huth */ 16733fb4c40fSThomas Huth preempt_disable(); 16743fb4c40fSThomas Huth kvm_guest_enter(); 16753fb4c40fSThomas Huth preempt_enable(); 1676a76ccff6SThomas Huth exit_reason = sie64a(vcpu->arch.sie_block, 1677a76ccff6SThomas Huth vcpu->run->s.regs.gprs); 16783fb4c40fSThomas Huth kvm_guest_exit(); 1679800c1065SThomas Huth vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 16803fb4c40fSThomas Huth 16813fb4c40fSThomas Huth rc = vcpu_post_run(vcpu, exit_reason); 168227291e21SDavid Hildenbrand } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc); 16833fb4c40fSThomas Huth 1684800c1065SThomas Huth srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 1685e168bf8dSCarsten Otte return rc; 1686b0c632dbSHeiko Carstens } 1687b0c632dbSHeiko Carstens 1688b028ee3eSDavid Hildenbrand static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1689b028ee3eSDavid Hildenbrand { 1690b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; 1691b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; 1692b028ee3eSDavid Hildenbrand if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) 1693b028ee3eSDavid Hildenbrand kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix); 1694b028ee3eSDavid Hildenbrand if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) { 1695b028ee3eSDavid Hildenbrand memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128); 1696d3d692c8SDavid Hildenbrand /* some control register changes require a tlb flush */ 1697d3d692c8SDavid Hildenbrand kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 1698b028ee3eSDavid Hildenbrand } 1699b028ee3eSDavid Hildenbrand if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) { 1700b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm; 1701b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc; 1702b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr; 1703b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->pp = kvm_run->s.regs.pp; 1704b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea; 1705b028ee3eSDavid Hildenbrand } 1706b028ee3eSDavid Hildenbrand if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) { 1707b028ee3eSDavid Hildenbrand vcpu->arch.pfault_token = kvm_run->s.regs.pft; 1708b028ee3eSDavid Hildenbrand vcpu->arch.pfault_select = kvm_run->s.regs.pfs; 1709b028ee3eSDavid Hildenbrand vcpu->arch.pfault_compare = kvm_run->s.regs.pfc; 17109fbd8082SDavid Hildenbrand if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) 17119fbd8082SDavid Hildenbrand kvm_clear_async_pf_completion_queue(vcpu); 1712b028ee3eSDavid Hildenbrand } 1713b028ee3eSDavid Hildenbrand kvm_run->kvm_dirty_regs = 0; 1714b028ee3eSDavid Hildenbrand } 1715b028ee3eSDavid Hildenbrand 1716b028ee3eSDavid Hildenbrand static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1717b028ee3eSDavid Hildenbrand { 1718b028ee3eSDavid Hildenbrand kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; 1719b028ee3eSDavid Hildenbrand kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; 1720b028ee3eSDavid Hildenbrand kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu); 1721b028ee3eSDavid Hildenbrand memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); 1722b028ee3eSDavid Hildenbrand kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm; 1723b028ee3eSDavid Hildenbrand kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc; 1724b028ee3eSDavid Hildenbrand kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr; 1725b028ee3eSDavid Hildenbrand kvm_run->s.regs.pp 
= vcpu->arch.sie_block->pp; 1726b028ee3eSDavid Hildenbrand kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea; 1727b028ee3eSDavid Hildenbrand kvm_run->s.regs.pft = vcpu->arch.pfault_token; 1728b028ee3eSDavid Hildenbrand kvm_run->s.regs.pfs = vcpu->arch.pfault_select; 1729b028ee3eSDavid Hildenbrand kvm_run->s.regs.pfc = vcpu->arch.pfault_compare; 1730b028ee3eSDavid Hildenbrand } 1731b028ee3eSDavid Hildenbrand 1732b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1733b0c632dbSHeiko Carstens { 17348f2abe6aSChristian Borntraeger int rc; 1735b0c632dbSHeiko Carstens sigset_t sigsaved; 1736b0c632dbSHeiko Carstens 173727291e21SDavid Hildenbrand if (guestdbg_exit_pending(vcpu)) { 173827291e21SDavid Hildenbrand kvm_s390_prepare_debug_exit(vcpu); 173927291e21SDavid Hildenbrand return 0; 174027291e21SDavid Hildenbrand } 174127291e21SDavid Hildenbrand 1742b0c632dbSHeiko Carstens if (vcpu->sigset_active) 1743b0c632dbSHeiko Carstens sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); 1744b0c632dbSHeiko Carstens 17456352e4d2SDavid Hildenbrand if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { 17466852d7b6SDavid Hildenbrand kvm_s390_vcpu_start(vcpu); 17476352e4d2SDavid Hildenbrand } else if (is_vcpu_stopped(vcpu)) { 17486352e4d2SDavid Hildenbrand pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n", 17496352e4d2SDavid Hildenbrand vcpu->vcpu_id); 17506352e4d2SDavid Hildenbrand return -EINVAL; 17516352e4d2SDavid Hildenbrand } 1752b0c632dbSHeiko Carstens 1753b028ee3eSDavid Hildenbrand sync_regs(vcpu, kvm_run); 1754d7b0b5ebSCarsten Otte 1755dab4079dSHeiko Carstens might_fault(); 1756e168bf8dSCarsten Otte rc = __vcpu_run(vcpu); 17579ace903dSChristian Ehrhardt 1758b1d16c49SChristian Ehrhardt if (signal_pending(current) && !rc) { 1759b1d16c49SChristian Ehrhardt kvm_run->exit_reason = KVM_EXIT_INTR; 17608f2abe6aSChristian Borntraeger rc = -EINTR; 1761b1d16c49SChristian Ehrhardt } 17628f2abe6aSChristian Borntraeger 176327291e21SDavid Hildenbrand if (guestdbg_exit_pending(vcpu) && !rc) { 176427291e21SDavid Hildenbrand kvm_s390_prepare_debug_exit(vcpu); 176527291e21SDavid Hildenbrand rc = 0; 176627291e21SDavid Hildenbrand } 176727291e21SDavid Hildenbrand 1768b8e660b8SHeiko Carstens if (rc == -EOPNOTSUPP) { 17698f2abe6aSChristian Borntraeger /* intercept cannot be handled in-kernel, prepare kvm-run */ 17708f2abe6aSChristian Borntraeger kvm_run->exit_reason = KVM_EXIT_S390_SIEIC; 17718f2abe6aSChristian Borntraeger kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; 17728f2abe6aSChristian Borntraeger kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; 17738f2abe6aSChristian Borntraeger kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; 17748f2abe6aSChristian Borntraeger rc = 0; 17758f2abe6aSChristian Borntraeger } 17768f2abe6aSChristian Borntraeger 17778f2abe6aSChristian Borntraeger if (rc == -EREMOTE) { 17788f2abe6aSChristian Borntraeger /* intercept was handled, but userspace support is needed 17798f2abe6aSChristian Borntraeger * kvm_run has been prepared by the handler */ 17808f2abe6aSChristian Borntraeger rc = 0; 17818f2abe6aSChristian Borntraeger } 17828f2abe6aSChristian Borntraeger 1783b028ee3eSDavid Hildenbrand store_regs(vcpu, kvm_run); 1784d7b0b5ebSCarsten Otte 1785b0c632dbSHeiko Carstens if (vcpu->sigset_active) 1786b0c632dbSHeiko Carstens sigprocmask(SIG_SETMASK, &sigsaved, NULL); 1787b0c632dbSHeiko Carstens 1788b0c632dbSHeiko Carstens vcpu->stat.exit_userspace++; 17897e8e6ab4SHeiko Carstens return rc; 1790b0c632dbSHeiko Carstens } 
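/*
 * Illustrative sketch (not kernel code): a minimal userspace run loop as a
 * VMM might drive the ioctl above, shown only to document how the
 * KVM_EXIT_S390_SIEIC exit prepared in kvm_arch_vcpu_ioctl_run() is
 * consumed.  "vcpu_fd", "mmap_size" (from KVM_GET_VCPU_MMAP_SIZE) and
 * handle_sieic() are hypothetical names.
 *
 *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0 && errno != EINTR)
 *			break;
 *		if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *			handle_sieic(run->s390_sieic.icptcode,
 *				     run->s390_sieic.ipa,
 *				     run->s390_sieic.ipb);
 *	}
 */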
1791b0c632dbSHeiko Carstens 1792b0c632dbSHeiko Carstens /* 1793b0c632dbSHeiko Carstens * store status at address 1794b0c632dbSHeiko Carstens * we have two special cases: 1795b0c632dbSHeiko Carstens * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit 1796b0c632dbSHeiko Carstens * KVM_S390_STORE_STATUS_PREFIXED: -> prefix 1797b0c632dbSHeiko Carstens */ 1798d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa) 1799b0c632dbSHeiko Carstens { 1800092670cdSCarsten Otte unsigned char archmode = 1; 1801fda902cbSMichael Mueller unsigned int px; 1802178bd789SThomas Huth u64 clkcomp; 1803d0bce605SHeiko Carstens int rc; 1804b0c632dbSHeiko Carstens 1805d0bce605SHeiko Carstens if (gpa == KVM_S390_STORE_STATUS_NOADDR) { 1806d0bce605SHeiko Carstens if (write_guest_abs(vcpu, 163, &archmode, 1)) 1807b0c632dbSHeiko Carstens return -EFAULT; 1808d0bce605SHeiko Carstens gpa = SAVE_AREA_BASE; 1809d0bce605SHeiko Carstens } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) { 1810d0bce605SHeiko Carstens if (write_guest_real(vcpu, 163, &archmode, 1)) 1811b0c632dbSHeiko Carstens return -EFAULT; 1812d0bce605SHeiko Carstens gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE); 1813d0bce605SHeiko Carstens } 1814d0bce605SHeiko Carstens rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs), 1815d0bce605SHeiko Carstens vcpu->arch.guest_fpregs.fprs, 128); 1816d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs), 1817d0bce605SHeiko Carstens vcpu->run->s.regs.gprs, 128); 1818d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw), 1819d0bce605SHeiko Carstens &vcpu->arch.sie_block->gpsw, 16); 1820fda902cbSMichael Mueller px = kvm_s390_get_prefix(vcpu); 1821d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg), 1822fda902cbSMichael Mueller &px, 4); 1823d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, 1824d0bce605SHeiko Carstens gpa + offsetof(struct save_area, fp_ctrl_reg), 1825d0bce605SHeiko Carstens &vcpu->arch.guest_fpregs.fpc, 4); 1826d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg), 1827d0bce605SHeiko Carstens &vcpu->arch.sie_block->todpr, 4); 1828d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer), 1829d0bce605SHeiko Carstens &vcpu->arch.sie_block->cputm, 8); 1830178bd789SThomas Huth clkcomp = vcpu->arch.sie_block->ckc >> 8; 1831d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp), 1832d0bce605SHeiko Carstens &clkcomp, 8); 1833d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs), 1834d0bce605SHeiko Carstens &vcpu->run->s.regs.acrs, 64); 1835d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs), 1836d0bce605SHeiko Carstens &vcpu->arch.sie_block->gcr, 128); 1837d0bce605SHeiko Carstens return rc ? -EFAULT : 0; 1838b0c632dbSHeiko Carstens } 1839b0c632dbSHeiko Carstens 1840e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) 1841e879892cSThomas Huth { 1842e879892cSThomas Huth /* 1843e879892cSThomas Huth * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy 1844e879892cSThomas Huth * copying in vcpu load/put.
Let's update our copies before we save 1845e879892cSThomas Huth * it into the save area 1846e879892cSThomas Huth */ 1847e879892cSThomas Huth save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); 1848e879892cSThomas Huth save_fp_regs(vcpu->arch.guest_fpregs.fprs); 1849e879892cSThomas Huth save_access_regs(vcpu->run->s.regs.acrs); 1850e879892cSThomas Huth 1851e879892cSThomas Huth return kvm_s390_store_status_unloaded(vcpu, addr); 1852e879892cSThomas Huth } 1853e879892cSThomas Huth 18548ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) 18558ad35755SDavid Hildenbrand { 18568ad35755SDavid Hildenbrand kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu); 18578ad35755SDavid Hildenbrand kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu); 18588ad35755SDavid Hildenbrand exit_sie_sync(vcpu); 18598ad35755SDavid Hildenbrand } 18608ad35755SDavid Hildenbrand 18618ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm) 18628ad35755SDavid Hildenbrand { 18638ad35755SDavid Hildenbrand unsigned int i; 18648ad35755SDavid Hildenbrand struct kvm_vcpu *vcpu; 18658ad35755SDavid Hildenbrand 18668ad35755SDavid Hildenbrand kvm_for_each_vcpu(i, vcpu, kvm) { 18678ad35755SDavid Hildenbrand __disable_ibs_on_vcpu(vcpu); 18688ad35755SDavid Hildenbrand } 18698ad35755SDavid Hildenbrand } 18708ad35755SDavid Hildenbrand 18718ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu) 18728ad35755SDavid Hildenbrand { 18738ad35755SDavid Hildenbrand kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu); 18748ad35755SDavid Hildenbrand kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu); 18758ad35755SDavid Hildenbrand exit_sie_sync(vcpu); 18768ad35755SDavid Hildenbrand } 18778ad35755SDavid Hildenbrand 18786852d7b6SDavid Hildenbrand void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) 18796852d7b6SDavid Hildenbrand { 18808ad35755SDavid Hildenbrand int i, online_vcpus, started_vcpus = 0; 18818ad35755SDavid Hildenbrand 18828ad35755SDavid Hildenbrand if (!is_vcpu_stopped(vcpu)) 18838ad35755SDavid Hildenbrand return; 18848ad35755SDavid Hildenbrand 18856852d7b6SDavid Hildenbrand trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1); 18868ad35755SDavid Hildenbrand /* Only one cpu at a time may enter/leave the STOPPED state. */ 1887433b9ee4SDavid Hildenbrand spin_lock(&vcpu->kvm->arch.start_stop_lock); 18888ad35755SDavid Hildenbrand online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); 18898ad35755SDavid Hildenbrand 18908ad35755SDavid Hildenbrand for (i = 0; i < online_vcpus; i++) { 18918ad35755SDavid Hildenbrand if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) 18928ad35755SDavid Hildenbrand started_vcpus++; 18938ad35755SDavid Hildenbrand } 18948ad35755SDavid Hildenbrand 18958ad35755SDavid Hildenbrand if (started_vcpus == 0) { 18968ad35755SDavid Hildenbrand /* we're the only active VCPU -> speed it up */ 18978ad35755SDavid Hildenbrand __enable_ibs_on_vcpu(vcpu); 18988ad35755SDavid Hildenbrand } else if (started_vcpus == 1) { 18998ad35755SDavid Hildenbrand /* 19008ad35755SDavid Hildenbrand * As we are starting a second VCPU, we have to disable 19018ad35755SDavid Hildenbrand * the IBS facility on all VCPUs to remove potentially 19028ad35755SDavid Hildenbrand * outstanding ENABLE requests.
19038ad35755SDavid Hildenbrand */ 19048ad35755SDavid Hildenbrand __disable_ibs_on_all_vcpus(vcpu->kvm); 19058ad35755SDavid Hildenbrand } 19068ad35755SDavid Hildenbrand 19076852d7b6SDavid Hildenbrand atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); 19088ad35755SDavid Hildenbrand /* 19098ad35755SDavid Hildenbrand * Another VCPU might have used IBS while we were offline. 19108ad35755SDavid Hildenbrand * Let's play safe and flush the VCPU at startup. 19118ad35755SDavid Hildenbrand */ 1912d3d692c8SDavid Hildenbrand kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 1913433b9ee4SDavid Hildenbrand spin_unlock(&vcpu->kvm->arch.start_stop_lock); 19148ad35755SDavid Hildenbrand return; 19156852d7b6SDavid Hildenbrand } 19166852d7b6SDavid Hildenbrand 19176852d7b6SDavid Hildenbrand void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) 19186852d7b6SDavid Hildenbrand { 19198ad35755SDavid Hildenbrand int i, online_vcpus, started_vcpus = 0; 19208ad35755SDavid Hildenbrand struct kvm_vcpu *started_vcpu = NULL; 19218ad35755SDavid Hildenbrand 19228ad35755SDavid Hildenbrand if (is_vcpu_stopped(vcpu)) 19238ad35755SDavid Hildenbrand return; 19248ad35755SDavid Hildenbrand 19256852d7b6SDavid Hildenbrand trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0); 19268ad35755SDavid Hildenbrand /* Only one cpu at a time may enter/leave the STOPPED state. */ 1927433b9ee4SDavid Hildenbrand spin_lock(&vcpu->kvm->arch.start_stop_lock); 19288ad35755SDavid Hildenbrand online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); 19298ad35755SDavid Hildenbrand 193032f5ff63SDavid Hildenbrand /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */ 19316cddd432SDavid Hildenbrand kvm_s390_clear_stop_irq(vcpu); 193232f5ff63SDavid Hildenbrand 19336cddd432SDavid Hildenbrand atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); 19348ad35755SDavid Hildenbrand __disable_ibs_on_vcpu(vcpu); 19358ad35755SDavid Hildenbrand 19368ad35755SDavid Hildenbrand for (i = 0; i < online_vcpus; i++) { 19378ad35755SDavid Hildenbrand if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) { 19388ad35755SDavid Hildenbrand started_vcpus++; 19398ad35755SDavid Hildenbrand started_vcpu = vcpu->kvm->vcpus[i]; 19408ad35755SDavid Hildenbrand } 19418ad35755SDavid Hildenbrand } 19428ad35755SDavid Hildenbrand 19438ad35755SDavid Hildenbrand if (started_vcpus == 1) { 19448ad35755SDavid Hildenbrand /* 19458ad35755SDavid Hildenbrand * As we only have one VCPU left, we want to enable the 19468ad35755SDavid Hildenbrand * IBS facility for that VCPU to speed it up. 
19478ad35755SDavid Hildenbrand */ 19488ad35755SDavid Hildenbrand __enable_ibs_on_vcpu(started_vcpu); 19498ad35755SDavid Hildenbrand } 19508ad35755SDavid Hildenbrand 1951433b9ee4SDavid Hildenbrand spin_unlock(&vcpu->kvm->arch.start_stop_lock); 19528ad35755SDavid Hildenbrand return; 19536852d7b6SDavid Hildenbrand } 19546852d7b6SDavid Hildenbrand 1955d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, 1956d6712df9SCornelia Huck struct kvm_enable_cap *cap) 1957d6712df9SCornelia Huck { 1958d6712df9SCornelia Huck int r; 1959d6712df9SCornelia Huck 1960d6712df9SCornelia Huck if (cap->flags) 1961d6712df9SCornelia Huck return -EINVAL; 1962d6712df9SCornelia Huck 1963d6712df9SCornelia Huck switch (cap->cap) { 1964fa6b7fe9SCornelia Huck case KVM_CAP_S390_CSS_SUPPORT: 1965fa6b7fe9SCornelia Huck if (!vcpu->kvm->arch.css_support) { 1966fa6b7fe9SCornelia Huck vcpu->kvm->arch.css_support = 1; 1967fa6b7fe9SCornelia Huck trace_kvm_s390_enable_css(vcpu->kvm); 1968fa6b7fe9SCornelia Huck } 1969fa6b7fe9SCornelia Huck r = 0; 1970fa6b7fe9SCornelia Huck break; 1971d6712df9SCornelia Huck default: 1972d6712df9SCornelia Huck r = -EINVAL; 1973d6712df9SCornelia Huck break; 1974d6712df9SCornelia Huck } 1975d6712df9SCornelia Huck return r; 1976d6712df9SCornelia Huck } 1977d6712df9SCornelia Huck 1978b0c632dbSHeiko Carstens long kvm_arch_vcpu_ioctl(struct file *filp, 1979b0c632dbSHeiko Carstens unsigned int ioctl, unsigned long arg) 1980b0c632dbSHeiko Carstens { 1981b0c632dbSHeiko Carstens struct kvm_vcpu *vcpu = filp->private_data; 1982b0c632dbSHeiko Carstens void __user *argp = (void __user *)arg; 1983800c1065SThomas Huth int idx; 1984bc923cc9SAvi Kivity long r; 1985b0c632dbSHeiko Carstens 198693736624SAvi Kivity switch (ioctl) { 198793736624SAvi Kivity case KVM_S390_INTERRUPT: { 1988ba5c1e9bSCarsten Otte struct kvm_s390_interrupt s390int; 1989383d0b05SJens Freimann struct kvm_s390_irq s390irq; 1990ba5c1e9bSCarsten Otte 199193736624SAvi Kivity r = -EFAULT; 1992ba5c1e9bSCarsten Otte if (copy_from_user(&s390int, argp, sizeof(s390int))) 199393736624SAvi Kivity break; 1994383d0b05SJens Freimann if (s390int_to_s390irq(&s390int, &s390irq)) 1995383d0b05SJens Freimann return -EINVAL; 1996383d0b05SJens Freimann r = kvm_s390_inject_vcpu(vcpu, &s390irq); 199793736624SAvi Kivity break; 1998ba5c1e9bSCarsten Otte } 1999b0c632dbSHeiko Carstens case KVM_S390_STORE_STATUS: 2000800c1065SThomas Huth idx = srcu_read_lock(&vcpu->kvm->srcu); 2001bc923cc9SAvi Kivity r = kvm_s390_vcpu_store_status(vcpu, arg); 2002800c1065SThomas Huth srcu_read_unlock(&vcpu->kvm->srcu, idx); 2003bc923cc9SAvi Kivity break; 2004b0c632dbSHeiko Carstens case KVM_S390_SET_INITIAL_PSW: { 2005b0c632dbSHeiko Carstens psw_t psw; 2006b0c632dbSHeiko Carstens 2007bc923cc9SAvi Kivity r = -EFAULT; 2008b0c632dbSHeiko Carstens if (copy_from_user(&psw, argp, sizeof(psw))) 2009bc923cc9SAvi Kivity break; 2010bc923cc9SAvi Kivity r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw); 2011bc923cc9SAvi Kivity break; 2012b0c632dbSHeiko Carstens } 2013b0c632dbSHeiko Carstens case KVM_S390_INITIAL_RESET: 2014bc923cc9SAvi Kivity r = kvm_arch_vcpu_ioctl_initial_reset(vcpu); 2015bc923cc9SAvi Kivity break; 201614eebd91SCarsten Otte case KVM_SET_ONE_REG: 201714eebd91SCarsten Otte case KVM_GET_ONE_REG: { 201814eebd91SCarsten Otte struct kvm_one_reg reg; 201914eebd91SCarsten Otte r = -EFAULT; 202014eebd91SCarsten Otte if (copy_from_user(®, argp, sizeof(reg))) 202114eebd91SCarsten Otte break; 202214eebd91SCarsten Otte if (ioctl == KVM_SET_ONE_REG) 202314eebd91SCarsten 
Otte r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, ®); 202414eebd91SCarsten Otte else 202514eebd91SCarsten Otte r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, ®); 202614eebd91SCarsten Otte break; 202714eebd91SCarsten Otte } 202827e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL 202927e0393fSCarsten Otte case KVM_S390_UCAS_MAP: { 203027e0393fSCarsten Otte struct kvm_s390_ucas_mapping ucasmap; 203127e0393fSCarsten Otte 203227e0393fSCarsten Otte if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) { 203327e0393fSCarsten Otte r = -EFAULT; 203427e0393fSCarsten Otte break; 203527e0393fSCarsten Otte } 203627e0393fSCarsten Otte 203727e0393fSCarsten Otte if (!kvm_is_ucontrol(vcpu->kvm)) { 203827e0393fSCarsten Otte r = -EINVAL; 203927e0393fSCarsten Otte break; 204027e0393fSCarsten Otte } 204127e0393fSCarsten Otte 204227e0393fSCarsten Otte r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr, 204327e0393fSCarsten Otte ucasmap.vcpu_addr, ucasmap.length); 204427e0393fSCarsten Otte break; 204527e0393fSCarsten Otte } 204627e0393fSCarsten Otte case KVM_S390_UCAS_UNMAP: { 204727e0393fSCarsten Otte struct kvm_s390_ucas_mapping ucasmap; 204827e0393fSCarsten Otte 204927e0393fSCarsten Otte if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) { 205027e0393fSCarsten Otte r = -EFAULT; 205127e0393fSCarsten Otte break; 205227e0393fSCarsten Otte } 205327e0393fSCarsten Otte 205427e0393fSCarsten Otte if (!kvm_is_ucontrol(vcpu->kvm)) { 205527e0393fSCarsten Otte r = -EINVAL; 205627e0393fSCarsten Otte break; 205727e0393fSCarsten Otte } 205827e0393fSCarsten Otte 205927e0393fSCarsten Otte r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr, 206027e0393fSCarsten Otte ucasmap.length); 206127e0393fSCarsten Otte break; 206227e0393fSCarsten Otte } 206327e0393fSCarsten Otte #endif 2064ccc7910fSCarsten Otte case KVM_S390_VCPU_FAULT: { 2065527e30b4SMartin Schwidefsky r = gmap_fault(vcpu->arch.gmap, arg, 0); 2066ccc7910fSCarsten Otte break; 2067ccc7910fSCarsten Otte } 2068d6712df9SCornelia Huck case KVM_ENABLE_CAP: 2069d6712df9SCornelia Huck { 2070d6712df9SCornelia Huck struct kvm_enable_cap cap; 2071d6712df9SCornelia Huck r = -EFAULT; 2072d6712df9SCornelia Huck if (copy_from_user(&cap, argp, sizeof(cap))) 2073d6712df9SCornelia Huck break; 2074d6712df9SCornelia Huck r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); 2075d6712df9SCornelia Huck break; 2076d6712df9SCornelia Huck } 2077b0c632dbSHeiko Carstens default: 20783e6afcf1SCarsten Otte r = -ENOTTY; 2079b0c632dbSHeiko Carstens } 2080bc923cc9SAvi Kivity return r; 2081b0c632dbSHeiko Carstens } 2082b0c632dbSHeiko Carstens 20835b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) 20845b1c1493SCarsten Otte { 20855b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL 20865b1c1493SCarsten Otte if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET) 20875b1c1493SCarsten Otte && (kvm_is_ucontrol(vcpu->kvm))) { 20885b1c1493SCarsten Otte vmf->page = virt_to_page(vcpu->arch.sie_block); 20895b1c1493SCarsten Otte get_page(vmf->page); 20905b1c1493SCarsten Otte return 0; 20915b1c1493SCarsten Otte } 20925b1c1493SCarsten Otte #endif 20935b1c1493SCarsten Otte return VM_FAULT_SIGBUS; 20945b1c1493SCarsten Otte } 20955b1c1493SCarsten Otte 20965587027cSAneesh Kumar K.V int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, 20975587027cSAneesh Kumar K.V unsigned long npages) 2098db3fe4ebSTakuya Yoshikawa { 2099db3fe4ebSTakuya Yoshikawa return 0; 2100db3fe4ebSTakuya Yoshikawa } 2101db3fe4ebSTakuya Yoshikawa 2102b0c632dbSHeiko Carstens /* Section: memory related */ 
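/*
 * Illustrative sketch (not kernel code): kvm_arch_prepare_memory_region()
 * below rejects memslots whose userspace address or size is not a multiple
 * of 1MB, so a userspace caller would register memory roughly like this
 * ("vm_fd" and "backing" are hypothetical, with "backing" 1MB aligned):
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size = 256UL << 20,
 *		.userspace_addr = (unsigned long) backing,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */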
2103f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm, 2104f7784b8eSMarcelo Tosatti struct kvm_memory_slot *memslot, 21057b6195a9STakuya Yoshikawa struct kvm_userspace_memory_region *mem, 21067b6195a9STakuya Yoshikawa enum kvm_mr_change change) 2107b0c632dbSHeiko Carstens { 2108dd2887e7SNick Wang /* A few sanity checks. We can have memory slots which have to be 2109dd2887e7SNick Wang located/ended at a segment boundary (1MB). The memory in userland is 2110dd2887e7SNick Wang ok to be fragmented into various different vmas. It is okay to mmap() 2111dd2887e7SNick Wang and munmap() stuff in this slot after doing this call at any time */ 2112b0c632dbSHeiko Carstens 2113598841caSCarsten Otte if (mem->userspace_addr & 0xffffful) 2114b0c632dbSHeiko Carstens return -EINVAL; 2115b0c632dbSHeiko Carstens 2116598841caSCarsten Otte if (mem->memory_size & 0xffffful) 2117b0c632dbSHeiko Carstens return -EINVAL; 2118b0c632dbSHeiko Carstens 2119f7784b8eSMarcelo Tosatti return 0; 2120f7784b8eSMarcelo Tosatti } 2121f7784b8eSMarcelo Tosatti 2122f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm, 2123f7784b8eSMarcelo Tosatti struct kvm_userspace_memory_region *mem, 21248482644aSTakuya Yoshikawa const struct kvm_memory_slot *old, 21258482644aSTakuya Yoshikawa enum kvm_mr_change change) 2126f7784b8eSMarcelo Tosatti { 2127f7850c92SCarsten Otte int rc; 2128f7784b8eSMarcelo Tosatti 21292cef4debSChristian Borntraeger /* If the basics of the memslot do not change, we do not want 21302cef4debSChristian Borntraeger * to update the gmap. Every update causes several unnecessary 21312cef4debSChristian Borntraeger * segment translation exceptions. This is usually handled just 21322cef4debSChristian Borntraeger * fine by the normal fault handler + gmap, but it will also 21332cef4debSChristian Borntraeger * cause faults on the prefix page of running guest CPUs. 21342cef4debSChristian Borntraeger */ 21352cef4debSChristian Borntraeger if (old->userspace_addr == mem->userspace_addr && 21362cef4debSChristian Borntraeger old->base_gfn * PAGE_SIZE == mem->guest_phys_addr && 21372cef4debSChristian Borntraeger old->npages * PAGE_SIZE == mem->memory_size) 21382cef4debSChristian Borntraeger return; 2139598841caSCarsten Otte 2140598841caSCarsten Otte rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, 2141598841caSCarsten Otte mem->guest_phys_addr, mem->memory_size); 2142598841caSCarsten Otte if (rc) 2143f7850c92SCarsten Otte printk(KERN_WARNING "kvm-s390: failed to commit memory region\n"); 2144598841caSCarsten Otte return; 2145b0c632dbSHeiko Carstens } 2146b0c632dbSHeiko Carstens 2147b0c632dbSHeiko Carstens static int __init kvm_s390_init(void) 2148b0c632dbSHeiko Carstens { 2149*9d8d5786SMichael Mueller return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); 2150b0c632dbSHeiko Carstens } 2151b0c632dbSHeiko Carstens 2152b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void) 2153b0c632dbSHeiko Carstens { 2154b0c632dbSHeiko Carstens kvm_exit(); 2155b0c632dbSHeiko Carstens } 2156b0c632dbSHeiko Carstens 2157b0c632dbSHeiko Carstens module_init(kvm_s390_init); 2158b0c632dbSHeiko Carstens module_exit(kvm_s390_exit); 2159566af940SCornelia Huck 2160566af940SCornelia Huck /* 2161566af940SCornelia Huck * Enable autoloading of the kvm module. 2162566af940SCornelia Huck * Note that we add the module alias here instead of virt/kvm/kvm_main.c 2163566af940SCornelia Huck * since x86 takes a different approach. 
2164566af940SCornelia Huck */ 2165566af940SCornelia Huck #include <linux/miscdevice.h> 2166566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR); 2167566af940SCornelia Huck MODULE_ALIAS("devname:kvm"); 2168