1b0c632dbSHeiko Carstens /* 2a53c8fabSHeiko Carstens * hosting zSeries kernel virtual machines 3b0c632dbSHeiko Carstens * 4628eb9b8SChristian Ehrhardt * Copyright IBM Corp. 2008, 2009 5b0c632dbSHeiko Carstens * 6b0c632dbSHeiko Carstens * This program is free software; you can redistribute it and/or modify 7b0c632dbSHeiko Carstens * it under the terms of the GNU General Public License (version 2 only) 8b0c632dbSHeiko Carstens * as published by the Free Software Foundation. 9b0c632dbSHeiko Carstens * 10b0c632dbSHeiko Carstens * Author(s): Carsten Otte <cotte@de.ibm.com> 11b0c632dbSHeiko Carstens * Christian Borntraeger <borntraeger@de.ibm.com> 12b0c632dbSHeiko Carstens * Heiko Carstens <heiko.carstens@de.ibm.com> 13628eb9b8SChristian Ehrhardt * Christian Ehrhardt <ehrhardt@de.ibm.com> 1415f36ebdSJason J. Herne * Jason J. Herne <jjherne@us.ibm.com> 15b0c632dbSHeiko Carstens */ 16b0c632dbSHeiko Carstens 17b0c632dbSHeiko Carstens #include <linux/compiler.h> 18b0c632dbSHeiko Carstens #include <linux/err.h> 19b0c632dbSHeiko Carstens #include <linux/fs.h> 20ca872302SChristian Borntraeger #include <linux/hrtimer.h> 21b0c632dbSHeiko Carstens #include <linux/init.h> 22b0c632dbSHeiko Carstens #include <linux/kvm.h> 23b0c632dbSHeiko Carstens #include <linux/kvm_host.h> 24b0c632dbSHeiko Carstens #include <linux/module.h> 25a374e892STony Krowiak #include <linux/random.h> 26b0c632dbSHeiko Carstens #include <linux/slab.h> 27ba5c1e9bSCarsten Otte #include <linux/timer.h> 28cbb870c8SHeiko Carstens #include <asm/asm-offsets.h> 29b0c632dbSHeiko Carstens #include <asm/lowcore.h> 30b0c632dbSHeiko Carstens #include <asm/pgtable.h> 31f5daba1dSHeiko Carstens #include <asm/nmi.h> 32a0616cdeSDavid Howells #include <asm/switch_to.h> 331526bf9cSChristian Borntraeger #include <asm/sclp.h> 348f2abe6aSChristian Borntraeger #include "kvm-s390.h" 35b0c632dbSHeiko Carstens #include "gaccess.h" 36b0c632dbSHeiko Carstens 375786fffaSCornelia Huck #define CREATE_TRACE_POINTS 385786fffaSCornelia Huck #include "trace.h" 39ade38c31SCornelia Huck #include "trace-s390.h" 405786fffaSCornelia Huck 41b0c632dbSHeiko Carstens #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU 42b0c632dbSHeiko Carstens 43b0c632dbSHeiko Carstens struct kvm_stats_debugfs_item debugfs_entries[] = { 44b0c632dbSHeiko Carstens { "userspace_handled", VCPU_STAT(exit_userspace) }, 450eaeafa1SChristian Borntraeger { "exit_null", VCPU_STAT(exit_null) }, 468f2abe6aSChristian Borntraeger { "exit_validity", VCPU_STAT(exit_validity) }, 478f2abe6aSChristian Borntraeger { "exit_stop_request", VCPU_STAT(exit_stop_request) }, 488f2abe6aSChristian Borntraeger { "exit_external_request", VCPU_STAT(exit_external_request) }, 498f2abe6aSChristian Borntraeger { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) }, 50ba5c1e9bSCarsten Otte { "exit_instruction", VCPU_STAT(exit_instruction) }, 51ba5c1e9bSCarsten Otte { "exit_program_interruption", VCPU_STAT(exit_program_interruption) }, 52ba5c1e9bSCarsten Otte { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) }, 53f7819512SPaolo Bonzini { "halt_successful_poll", VCPU_STAT(halt_successful_poll) }, 54ce2e4f0bSDavid Hildenbrand { "halt_wakeup", VCPU_STAT(halt_wakeup) }, 55f5e10b09SChristian Borntraeger { "instruction_lctlg", VCPU_STAT(instruction_lctlg) }, 56ba5c1e9bSCarsten Otte { "instruction_lctl", VCPU_STAT(instruction_lctl) }, 57aba07508SDavid Hildenbrand { "instruction_stctl", VCPU_STAT(instruction_stctl) }, 58aba07508SDavid Hildenbrand { "instruction_stctg", VCPU_STAT(instruction_stctg) }, 59ba5c1e9bSCarsten Otte { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) }, 607697e71fSChristian Ehrhardt { "deliver_external_call", VCPU_STAT(deliver_external_call) }, 61ba5c1e9bSCarsten Otte { "deliver_service_signal", VCPU_STAT(deliver_service_signal) }, 62ba5c1e9bSCarsten Otte { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) }, 63ba5c1e9bSCarsten Otte { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) }, 64ba5c1e9bSCarsten Otte { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) }, 65ba5c1e9bSCarsten Otte { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) }, 66ba5c1e9bSCarsten Otte { "deliver_program_interruption", VCPU_STAT(deliver_program_int) }, 67ba5c1e9bSCarsten Otte { "exit_wait_state", VCPU_STAT(exit_wait_state) }, 6869d0d3a3SChristian Borntraeger { "instruction_pfmf", VCPU_STAT(instruction_pfmf) }, 69453423dcSChristian Borntraeger { "instruction_stidp", VCPU_STAT(instruction_stidp) }, 70453423dcSChristian Borntraeger { "instruction_spx", VCPU_STAT(instruction_spx) }, 71453423dcSChristian Borntraeger { "instruction_stpx", VCPU_STAT(instruction_stpx) }, 72453423dcSChristian Borntraeger { "instruction_stap", VCPU_STAT(instruction_stap) }, 73453423dcSChristian Borntraeger { "instruction_storage_key", VCPU_STAT(instruction_storage_key) }, 748a242234SHeiko Carstens { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) }, 75453423dcSChristian Borntraeger { "instruction_stsch", VCPU_STAT(instruction_stsch) }, 76453423dcSChristian Borntraeger { "instruction_chsc", VCPU_STAT(instruction_chsc) }, 77b31288faSKonstantin Weitz { "instruction_essa", VCPU_STAT(instruction_essa) }, 78453423dcSChristian Borntraeger { "instruction_stsi", VCPU_STAT(instruction_stsi) }, 79453423dcSChristian Borntraeger { "instruction_stfl", VCPU_STAT(instruction_stfl) }, 80bb25b9baSChristian Borntraeger { "instruction_tprot", VCPU_STAT(instruction_tprot) }, 815288fbf0SChristian Borntraeger { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) }, 82bd59d3a4SCornelia Huck { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) }, 837697e71fSChristian Ehrhardt { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) }, 845288fbf0SChristian Borntraeger { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) }, 8542cb0c9fSDavid Hildenbrand { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) }, 8642cb0c9fSDavid Hildenbrand { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) }, 875288fbf0SChristian Borntraeger { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) }, 8842cb0c9fSDavid Hildenbrand { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) }, 8942cb0c9fSDavid Hildenbrand { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) }, 90cd7b4b61SEric Farman { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) }, 915288fbf0SChristian Borntraeger { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) }, 925288fbf0SChristian Borntraeger { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) }, 935288fbf0SChristian Borntraeger { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) }, 9442cb0c9fSDavid Hildenbrand { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) }, 9542cb0c9fSDavid Hildenbrand { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) }, 9642cb0c9fSDavid Hildenbrand { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) }, 97388186bcSChristian Borntraeger { "diagnose_10", VCPU_STAT(diagnose_10) }, 98e28acfeaSChristian Borntraeger { "diagnose_44", VCPU_STAT(diagnose_44) }, 9941628d33SKonstantin Weitz { "diagnose_9c", VCPU_STAT(diagnose_9c) }, 100b0c632dbSHeiko Carstens { NULL } 101b0c632dbSHeiko Carstens }; 102b0c632dbSHeiko Carstens 1039d8d5786SMichael Mueller /* upper facilities limit for kvm */ 1049d8d5786SMichael Mueller unsigned long kvm_s390_fac_list_mask[] = { 1059d8d5786SMichael Mueller 0xff82fffbf4fc2000UL, 1069d8d5786SMichael Mueller 0x005c000000000000UL, 10713211ea7SEric Farman 0x4000000000000000UL, 1089d8d5786SMichael Mueller }; 109b0c632dbSHeiko Carstens 1109d8d5786SMichael Mueller unsigned long kvm_s390_fac_list_mask_size(void) 11178c4b59fSMichael Mueller { 1129d8d5786SMichael Mueller BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64); 1139d8d5786SMichael Mueller return ARRAY_SIZE(kvm_s390_fac_list_mask); 11478c4b59fSMichael Mueller } 11578c4b59fSMichael Mueller 1169d8d5786SMichael Mueller static struct gmap_notifier gmap_notifier; 1179d8d5786SMichael Mueller 118b0c632dbSHeiko Carstens /* Section: not file related */ 11913a34e06SRadim Krčmář int kvm_arch_hardware_enable(void) 120b0c632dbSHeiko Carstens { 121b0c632dbSHeiko Carstens /* every s390 is virtualization enabled ;-) */ 12210474ae8SAlexander Graf return 0; 123b0c632dbSHeiko Carstens } 124b0c632dbSHeiko Carstens 1252c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address); 1262c70fe44SChristian Borntraeger 127b0c632dbSHeiko Carstens int kvm_arch_hardware_setup(void) 128b0c632dbSHeiko Carstens { 1292c70fe44SChristian Borntraeger gmap_notifier.notifier_call = kvm_gmap_notifier; 1302c70fe44SChristian Borntraeger gmap_register_ipte_notifier(&gmap_notifier); 131b0c632dbSHeiko Carstens return 0; 132b0c632dbSHeiko Carstens } 133b0c632dbSHeiko Carstens 134b0c632dbSHeiko Carstens void kvm_arch_hardware_unsetup(void) 135b0c632dbSHeiko Carstens { 1362c70fe44SChristian Borntraeger gmap_unregister_ipte_notifier(&gmap_notifier); 137b0c632dbSHeiko Carstens } 138b0c632dbSHeiko Carstens 139b0c632dbSHeiko Carstens int kvm_arch_init(void *opaque) 140b0c632dbSHeiko Carstens { 14184877d93SCornelia Huck /* Register floating interrupt controller interface. */ 14284877d93SCornelia Huck return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC); 143b0c632dbSHeiko Carstens } 144b0c632dbSHeiko Carstens 145b0c632dbSHeiko Carstens /* Section: device related */ 146b0c632dbSHeiko Carstens long kvm_arch_dev_ioctl(struct file *filp, 147b0c632dbSHeiko Carstens unsigned int ioctl, unsigned long arg) 148b0c632dbSHeiko Carstens { 149b0c632dbSHeiko Carstens if (ioctl == KVM_S390_ENABLE_SIE) 150b0c632dbSHeiko Carstens return s390_enable_sie(); 151b0c632dbSHeiko Carstens return -EINVAL; 152b0c632dbSHeiko Carstens } 153b0c632dbSHeiko Carstens 154784aa3d7SAlexander Graf int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) 155b0c632dbSHeiko Carstens { 156d7b0b5ebSCarsten Otte int r; 157d7b0b5ebSCarsten Otte 1582bd0ac4eSCarsten Otte switch (ext) { 159d7b0b5ebSCarsten Otte case KVM_CAP_S390_PSW: 160b6cf8788SChristian Borntraeger case KVM_CAP_S390_GMAP: 16152e16b18SChristian Borntraeger case KVM_CAP_SYNC_MMU: 1621efd0f59SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL 1631efd0f59SCarsten Otte case KVM_CAP_S390_UCONTROL: 1641efd0f59SCarsten Otte #endif 1653c038e6bSDominik Dingel case KVM_CAP_ASYNC_PF: 16660b413c9SChristian Borntraeger case KVM_CAP_SYNC_REGS: 16714eebd91SCarsten Otte case KVM_CAP_ONE_REG: 168d6712df9SCornelia Huck case KVM_CAP_ENABLE_CAP: 169fa6b7fe9SCornelia Huck case KVM_CAP_S390_CSS_SUPPORT: 170ebc32262SCornelia Huck case KVM_CAP_IRQFD: 17110ccaa1eSCornelia Huck case KVM_CAP_IOEVENTFD: 172c05c4186SJens Freimann case KVM_CAP_DEVICE_CTRL: 173d938dc55SCornelia Huck case KVM_CAP_ENABLE_CAP_VM: 17478599d90SCornelia Huck case KVM_CAP_S390_IRQCHIP: 175f2061656SDominik Dingel case KVM_CAP_VM_ATTRIBUTES: 1766352e4d2SDavid Hildenbrand case KVM_CAP_MP_STATE: 1772444b352SDavid Hildenbrand case KVM_CAP_S390_USER_SIGP: 178d7b0b5ebSCarsten Otte r = 1; 179d7b0b5ebSCarsten Otte break; 180e726b1bdSChristian Borntraeger case KVM_CAP_NR_VCPUS: 181e726b1bdSChristian Borntraeger case KVM_CAP_MAX_VCPUS: 182e726b1bdSChristian Borntraeger r = KVM_MAX_VCPUS; 183e726b1bdSChristian Borntraeger break; 184e1e2e605SNick Wang case KVM_CAP_NR_MEMSLOTS: 185e1e2e605SNick Wang r = KVM_USER_MEM_SLOTS; 186e1e2e605SNick Wang break; 1871526bf9cSChristian Borntraeger case KVM_CAP_S390_COW: 188abf09bedSMartin Schwidefsky r = MACHINE_HAS_ESOP; 1891526bf9cSChristian Borntraeger break; 19068c55750SEric Farman case KVM_CAP_S390_VECTOR_REGISTERS: 19168c55750SEric Farman r = MACHINE_HAS_VX; 19268c55750SEric Farman break; 1932bd0ac4eSCarsten Otte default: 194d7b0b5ebSCarsten Otte r = 0; 195b0c632dbSHeiko Carstens } 196d7b0b5ebSCarsten Otte return r; 1972bd0ac4eSCarsten Otte } 198b0c632dbSHeiko Carstens 19915f36ebdSJason J. Herne static void kvm_s390_sync_dirty_log(struct kvm *kvm, 20015f36ebdSJason J. Herne struct kvm_memory_slot *memslot) 20115f36ebdSJason J. Herne { 20215f36ebdSJason J. Herne gfn_t cur_gfn, last_gfn; 20315f36ebdSJason J. Herne unsigned long address; 20415f36ebdSJason J. Herne struct gmap *gmap = kvm->arch.gmap; 20515f36ebdSJason J. Herne 20615f36ebdSJason J. Herne down_read(&gmap->mm->mmap_sem); 20715f36ebdSJason J. Herne /* Loop over all guest pages */ 20815f36ebdSJason J. Herne last_gfn = memslot->base_gfn + memslot->npages; 20915f36ebdSJason J. Herne for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) { 21015f36ebdSJason J. Herne address = gfn_to_hva_memslot(memslot, cur_gfn); 21115f36ebdSJason J. Herne 21215f36ebdSJason J. Herne if (gmap_test_and_clear_dirty(address, gmap)) 21315f36ebdSJason J. Herne mark_page_dirty(kvm, cur_gfn); 21415f36ebdSJason J. Herne } 21515f36ebdSJason J. Herne up_read(&gmap->mm->mmap_sem); 21615f36ebdSJason J. Herne } 21715f36ebdSJason J. Herne 218b0c632dbSHeiko Carstens /* Section: vm related */ 219b0c632dbSHeiko Carstens /* 220b0c632dbSHeiko Carstens * Get (and clear) the dirty memory log for a memory slot. 221b0c632dbSHeiko Carstens */ 222b0c632dbSHeiko Carstens int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, 223b0c632dbSHeiko Carstens struct kvm_dirty_log *log) 224b0c632dbSHeiko Carstens { 22515f36ebdSJason J. Herne int r; 22615f36ebdSJason J. Herne unsigned long n; 22715f36ebdSJason J. Herne struct kvm_memory_slot *memslot; 22815f36ebdSJason J. Herne int is_dirty = 0; 22915f36ebdSJason J. Herne 23015f36ebdSJason J. Herne mutex_lock(&kvm->slots_lock); 23115f36ebdSJason J. Herne 23215f36ebdSJason J. Herne r = -EINVAL; 23315f36ebdSJason J. Herne if (log->slot >= KVM_USER_MEM_SLOTS) 23415f36ebdSJason J. Herne goto out; 23515f36ebdSJason J. Herne 23615f36ebdSJason J. Herne memslot = id_to_memslot(kvm->memslots, log->slot); 23715f36ebdSJason J. Herne r = -ENOENT; 23815f36ebdSJason J. Herne if (!memslot->dirty_bitmap) 23915f36ebdSJason J. Herne goto out; 24015f36ebdSJason J. Herne 24115f36ebdSJason J. Herne kvm_s390_sync_dirty_log(kvm, memslot); 24215f36ebdSJason J. Herne r = kvm_get_dirty_log(kvm, log, &is_dirty); 24315f36ebdSJason J. Herne if (r) 24415f36ebdSJason J. Herne goto out; 24515f36ebdSJason J. Herne 24615f36ebdSJason J. Herne /* Clear the dirty log */ 24715f36ebdSJason J. Herne if (is_dirty) { 24815f36ebdSJason J. Herne n = kvm_dirty_bitmap_bytes(memslot); 24915f36ebdSJason J. Herne memset(memslot->dirty_bitmap, 0, n); 25015f36ebdSJason J. Herne } 25115f36ebdSJason J. Herne r = 0; 25215f36ebdSJason J. Herne out: 25315f36ebdSJason J. Herne mutex_unlock(&kvm->slots_lock); 25415f36ebdSJason J. Herne return r; 255b0c632dbSHeiko Carstens } 256b0c632dbSHeiko Carstens 257d938dc55SCornelia Huck static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) 258d938dc55SCornelia Huck { 259d938dc55SCornelia Huck int r; 260d938dc55SCornelia Huck 261d938dc55SCornelia Huck if (cap->flags) 262d938dc55SCornelia Huck return -EINVAL; 263d938dc55SCornelia Huck 264d938dc55SCornelia Huck switch (cap->cap) { 26584223598SCornelia Huck case KVM_CAP_S390_IRQCHIP: 26684223598SCornelia Huck kvm->arch.use_irqchip = 1; 26784223598SCornelia Huck r = 0; 26884223598SCornelia Huck break; 2692444b352SDavid Hildenbrand case KVM_CAP_S390_USER_SIGP: 2702444b352SDavid Hildenbrand kvm->arch.user_sigp = 1; 2712444b352SDavid Hildenbrand r = 0; 2722444b352SDavid Hildenbrand break; 27368c55750SEric Farman case KVM_CAP_S390_VECTOR_REGISTERS: 27468c55750SEric Farman kvm->arch.use_vectors = MACHINE_HAS_VX; 27568c55750SEric Farman r = MACHINE_HAS_VX ? 0 : -EINVAL; 27668c55750SEric Farman break; 277d938dc55SCornelia Huck default: 278d938dc55SCornelia Huck r = -EINVAL; 279d938dc55SCornelia Huck break; 280d938dc55SCornelia Huck } 281d938dc55SCornelia Huck return r; 282d938dc55SCornelia Huck } 283d938dc55SCornelia Huck 2848c0a7ce6SDominik Dingel static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) 2858c0a7ce6SDominik Dingel { 2868c0a7ce6SDominik Dingel int ret; 2878c0a7ce6SDominik Dingel 2888c0a7ce6SDominik Dingel switch (attr->attr) { 2898c0a7ce6SDominik Dingel case KVM_S390_VM_MEM_LIMIT_SIZE: 2908c0a7ce6SDominik Dingel ret = 0; 2918c0a7ce6SDominik Dingel if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr)) 2928c0a7ce6SDominik Dingel ret = -EFAULT; 2938c0a7ce6SDominik Dingel break; 2948c0a7ce6SDominik Dingel default: 2958c0a7ce6SDominik Dingel ret = -ENXIO; 2968c0a7ce6SDominik Dingel break; 2978c0a7ce6SDominik Dingel } 2988c0a7ce6SDominik Dingel return ret; 2998c0a7ce6SDominik Dingel } 3008c0a7ce6SDominik Dingel 3018c0a7ce6SDominik Dingel static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) 3024f718eabSDominik Dingel { 3034f718eabSDominik Dingel int ret; 3044f718eabSDominik Dingel unsigned int idx; 3054f718eabSDominik Dingel switch (attr->attr) { 3064f718eabSDominik Dingel case KVM_S390_VM_MEM_ENABLE_CMMA: 3074f718eabSDominik Dingel ret = -EBUSY; 3084f718eabSDominik Dingel mutex_lock(&kvm->lock); 3094f718eabSDominik Dingel if (atomic_read(&kvm->online_vcpus) == 0) { 3104f718eabSDominik Dingel kvm->arch.use_cmma = 1; 3114f718eabSDominik Dingel ret = 0; 3124f718eabSDominik Dingel } 3134f718eabSDominik Dingel mutex_unlock(&kvm->lock); 3144f718eabSDominik Dingel break; 3154f718eabSDominik Dingel case KVM_S390_VM_MEM_CLR_CMMA: 3164f718eabSDominik Dingel mutex_lock(&kvm->lock); 3174f718eabSDominik Dingel idx = srcu_read_lock(&kvm->srcu); 318a13cff31SDominik Dingel s390_reset_cmma(kvm->arch.gmap->mm); 3194f718eabSDominik Dingel srcu_read_unlock(&kvm->srcu, idx); 3204f718eabSDominik Dingel mutex_unlock(&kvm->lock); 3214f718eabSDominik Dingel ret = 0; 3224f718eabSDominik Dingel break; 3238c0a7ce6SDominik Dingel case KVM_S390_VM_MEM_LIMIT_SIZE: { 3248c0a7ce6SDominik Dingel unsigned long new_limit; 3258c0a7ce6SDominik Dingel 3268c0a7ce6SDominik Dingel if (kvm_is_ucontrol(kvm)) 3278c0a7ce6SDominik Dingel return -EINVAL; 3288c0a7ce6SDominik Dingel 3298c0a7ce6SDominik Dingel if (get_user(new_limit, (u64 __user *)attr->addr)) 3308c0a7ce6SDominik Dingel return -EFAULT; 3318c0a7ce6SDominik Dingel 3328c0a7ce6SDominik Dingel if (new_limit > kvm->arch.gmap->asce_end) 3338c0a7ce6SDominik Dingel return -E2BIG; 3348c0a7ce6SDominik Dingel 3358c0a7ce6SDominik Dingel ret = -EBUSY; 3368c0a7ce6SDominik Dingel mutex_lock(&kvm->lock); 3378c0a7ce6SDominik Dingel if (atomic_read(&kvm->online_vcpus) == 0) { 3388c0a7ce6SDominik Dingel /* gmap_alloc will round the limit up */ 3398c0a7ce6SDominik Dingel struct gmap *new = gmap_alloc(current->mm, new_limit); 3408c0a7ce6SDominik Dingel 3418c0a7ce6SDominik Dingel if (!new) { 3428c0a7ce6SDominik Dingel ret = -ENOMEM; 3438c0a7ce6SDominik Dingel } else { 3448c0a7ce6SDominik Dingel gmap_free(kvm->arch.gmap); 3458c0a7ce6SDominik Dingel new->private = kvm; 3468c0a7ce6SDominik Dingel kvm->arch.gmap = new; 3478c0a7ce6SDominik Dingel ret = 0; 3488c0a7ce6SDominik Dingel } 3498c0a7ce6SDominik Dingel } 3508c0a7ce6SDominik Dingel mutex_unlock(&kvm->lock); 3518c0a7ce6SDominik Dingel break; 3528c0a7ce6SDominik Dingel } 3534f718eabSDominik Dingel default: 3544f718eabSDominik Dingel ret = -ENXIO; 3554f718eabSDominik Dingel break; 3564f718eabSDominik Dingel } 3574f718eabSDominik Dingel return ret; 3584f718eabSDominik Dingel } 3594f718eabSDominik Dingel 360a374e892STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu); 361a374e892STony Krowiak 362a374e892STony Krowiak static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) 363a374e892STony Krowiak { 364a374e892STony Krowiak struct kvm_vcpu *vcpu; 365a374e892STony Krowiak int i; 366a374e892STony Krowiak 3679d8d5786SMichael Mueller if (!test_kvm_facility(kvm, 76)) 368a374e892STony Krowiak return -EINVAL; 369a374e892STony Krowiak 370a374e892STony Krowiak mutex_lock(&kvm->lock); 371a374e892STony Krowiak switch (attr->attr) { 372a374e892STony Krowiak case KVM_S390_VM_CRYPTO_ENABLE_AES_KW: 373a374e892STony Krowiak get_random_bytes( 374a374e892STony Krowiak kvm->arch.crypto.crycb->aes_wrapping_key_mask, 375a374e892STony Krowiak sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); 376a374e892STony Krowiak kvm->arch.crypto.aes_kw = 1; 377a374e892STony Krowiak break; 378a374e892STony Krowiak case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW: 379a374e892STony Krowiak get_random_bytes( 380a374e892STony Krowiak kvm->arch.crypto.crycb->dea_wrapping_key_mask, 381a374e892STony Krowiak sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); 382a374e892STony Krowiak kvm->arch.crypto.dea_kw = 1; 383a374e892STony Krowiak break; 384a374e892STony Krowiak case KVM_S390_VM_CRYPTO_DISABLE_AES_KW: 385a374e892STony Krowiak kvm->arch.crypto.aes_kw = 0; 386a374e892STony Krowiak memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, 387a374e892STony Krowiak sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); 388a374e892STony Krowiak break; 389a374e892STony Krowiak case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW: 390a374e892STony Krowiak kvm->arch.crypto.dea_kw = 0; 391a374e892STony Krowiak memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, 392a374e892STony Krowiak sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); 393a374e892STony Krowiak break; 394a374e892STony Krowiak default: 395a374e892STony Krowiak mutex_unlock(&kvm->lock); 396a374e892STony Krowiak return -ENXIO; 397a374e892STony Krowiak } 398a374e892STony Krowiak 399a374e892STony Krowiak kvm_for_each_vcpu(i, vcpu, kvm) { 400a374e892STony Krowiak kvm_s390_vcpu_crypto_setup(vcpu); 401a374e892STony Krowiak exit_sie(vcpu); 402a374e892STony Krowiak } 403a374e892STony Krowiak mutex_unlock(&kvm->lock); 404a374e892STony Krowiak return 0; 405a374e892STony Krowiak } 406a374e892STony Krowiak 40772f25020SJason J. Herne static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) 40872f25020SJason J. Herne { 40972f25020SJason J. Herne u8 gtod_high; 41072f25020SJason J. Herne 41172f25020SJason J. Herne if (copy_from_user(>od_high, (void __user *)attr->addr, 41272f25020SJason J. Herne sizeof(gtod_high))) 41372f25020SJason J. Herne return -EFAULT; 41472f25020SJason J. Herne 41572f25020SJason J. Herne if (gtod_high != 0) 41672f25020SJason J. Herne return -EINVAL; 41772f25020SJason J. Herne 41872f25020SJason J. Herne return 0; 41972f25020SJason J. Herne } 42072f25020SJason J. Herne 42172f25020SJason J. Herne static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) 42272f25020SJason J. Herne { 42372f25020SJason J. Herne struct kvm_vcpu *cur_vcpu; 42472f25020SJason J. Herne unsigned int vcpu_idx; 42572f25020SJason J. Herne u64 host_tod, gtod; 42672f25020SJason J. Herne int r; 42772f25020SJason J. Herne 42872f25020SJason J. Herne if (copy_from_user(>od, (void __user *)attr->addr, sizeof(gtod))) 42972f25020SJason J. Herne return -EFAULT; 43072f25020SJason J. Herne 43172f25020SJason J. Herne r = store_tod_clock(&host_tod); 43272f25020SJason J. Herne if (r) 43372f25020SJason J. Herne return r; 43472f25020SJason J. Herne 43572f25020SJason J. Herne mutex_lock(&kvm->lock); 43672f25020SJason J. Herne kvm->arch.epoch = gtod - host_tod; 43772f25020SJason J. Herne kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) { 43872f25020SJason J. Herne cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch; 43972f25020SJason J. Herne exit_sie(cur_vcpu); 44072f25020SJason J. Herne } 44172f25020SJason J. Herne mutex_unlock(&kvm->lock); 44272f25020SJason J. Herne return 0; 44372f25020SJason J. Herne } 44472f25020SJason J. Herne 44572f25020SJason J. Herne static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr) 44672f25020SJason J. Herne { 44772f25020SJason J. Herne int ret; 44872f25020SJason J. Herne 44972f25020SJason J. Herne if (attr->flags) 45072f25020SJason J. Herne return -EINVAL; 45172f25020SJason J. Herne 45272f25020SJason J. Herne switch (attr->attr) { 45372f25020SJason J. Herne case KVM_S390_VM_TOD_HIGH: 45472f25020SJason J. Herne ret = kvm_s390_set_tod_high(kvm, attr); 45572f25020SJason J. Herne break; 45672f25020SJason J. Herne case KVM_S390_VM_TOD_LOW: 45772f25020SJason J. Herne ret = kvm_s390_set_tod_low(kvm, attr); 45872f25020SJason J. Herne break; 45972f25020SJason J. Herne default: 46072f25020SJason J. Herne ret = -ENXIO; 46172f25020SJason J. Herne break; 46272f25020SJason J. Herne } 46372f25020SJason J. Herne return ret; 46472f25020SJason J. Herne } 46572f25020SJason J. Herne 46672f25020SJason J. Herne static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) 46772f25020SJason J. Herne { 46872f25020SJason J. Herne u8 gtod_high = 0; 46972f25020SJason J. Herne 47072f25020SJason J. Herne if (copy_to_user((void __user *)attr->addr, >od_high, 47172f25020SJason J. Herne sizeof(gtod_high))) 47272f25020SJason J. Herne return -EFAULT; 47372f25020SJason J. Herne 47472f25020SJason J. Herne return 0; 47572f25020SJason J. Herne } 47672f25020SJason J. Herne 47772f25020SJason J. Herne static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) 47872f25020SJason J. Herne { 47972f25020SJason J. Herne u64 host_tod, gtod; 48072f25020SJason J. Herne int r; 48172f25020SJason J. Herne 48272f25020SJason J. Herne r = store_tod_clock(&host_tod); 48372f25020SJason J. Herne if (r) 48472f25020SJason J. Herne return r; 48572f25020SJason J. Herne 48672f25020SJason J. Herne gtod = host_tod + kvm->arch.epoch; 48772f25020SJason J. Herne if (copy_to_user((void __user *)attr->addr, >od, sizeof(gtod))) 48872f25020SJason J. Herne return -EFAULT; 48972f25020SJason J. Herne 49072f25020SJason J. Herne return 0; 49172f25020SJason J. Herne } 49272f25020SJason J. Herne 49372f25020SJason J. Herne static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr) 49472f25020SJason J. Herne { 49572f25020SJason J. Herne int ret; 49672f25020SJason J. Herne 49772f25020SJason J. Herne if (attr->flags) 49872f25020SJason J. Herne return -EINVAL; 49972f25020SJason J. Herne 50072f25020SJason J. Herne switch (attr->attr) { 50172f25020SJason J. Herne case KVM_S390_VM_TOD_HIGH: 50272f25020SJason J. Herne ret = kvm_s390_get_tod_high(kvm, attr); 50372f25020SJason J. Herne break; 50472f25020SJason J. Herne case KVM_S390_VM_TOD_LOW: 50572f25020SJason J. Herne ret = kvm_s390_get_tod_low(kvm, attr); 50672f25020SJason J. Herne break; 50772f25020SJason J. Herne default: 50872f25020SJason J. Herne ret = -ENXIO; 50972f25020SJason J. Herne break; 51072f25020SJason J. Herne } 51172f25020SJason J. Herne return ret; 51272f25020SJason J. Herne } 51372f25020SJason J. Herne 514658b6edaSMichael Mueller static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr) 515658b6edaSMichael Mueller { 516658b6edaSMichael Mueller struct kvm_s390_vm_cpu_processor *proc; 517658b6edaSMichael Mueller int ret = 0; 518658b6edaSMichael Mueller 519658b6edaSMichael Mueller mutex_lock(&kvm->lock); 520658b6edaSMichael Mueller if (atomic_read(&kvm->online_vcpus)) { 521658b6edaSMichael Mueller ret = -EBUSY; 522658b6edaSMichael Mueller goto out; 523658b6edaSMichael Mueller } 524658b6edaSMichael Mueller proc = kzalloc(sizeof(*proc), GFP_KERNEL); 525658b6edaSMichael Mueller if (!proc) { 526658b6edaSMichael Mueller ret = -ENOMEM; 527658b6edaSMichael Mueller goto out; 528658b6edaSMichael Mueller } 529658b6edaSMichael Mueller if (!copy_from_user(proc, (void __user *)attr->addr, 530658b6edaSMichael Mueller sizeof(*proc))) { 531658b6edaSMichael Mueller memcpy(&kvm->arch.model.cpu_id, &proc->cpuid, 532658b6edaSMichael Mueller sizeof(struct cpuid)); 533658b6edaSMichael Mueller kvm->arch.model.ibc = proc->ibc; 534981467c9SMichael Mueller memcpy(kvm->arch.model.fac->list, proc->fac_list, 535658b6edaSMichael Mueller S390_ARCH_FAC_LIST_SIZE_BYTE); 536658b6edaSMichael Mueller } else 537658b6edaSMichael Mueller ret = -EFAULT; 538658b6edaSMichael Mueller kfree(proc); 539658b6edaSMichael Mueller out: 540658b6edaSMichael Mueller mutex_unlock(&kvm->lock); 541658b6edaSMichael Mueller return ret; 542658b6edaSMichael Mueller } 543658b6edaSMichael Mueller 544658b6edaSMichael Mueller static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr) 545658b6edaSMichael Mueller { 546658b6edaSMichael Mueller int ret = -ENXIO; 547658b6edaSMichael Mueller 548658b6edaSMichael Mueller switch (attr->attr) { 549658b6edaSMichael Mueller case KVM_S390_VM_CPU_PROCESSOR: 550658b6edaSMichael Mueller ret = kvm_s390_set_processor(kvm, attr); 551658b6edaSMichael Mueller break; 552658b6edaSMichael Mueller } 553658b6edaSMichael Mueller return ret; 554658b6edaSMichael Mueller } 555658b6edaSMichael Mueller 556658b6edaSMichael Mueller static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr) 557658b6edaSMichael Mueller { 558658b6edaSMichael Mueller struct kvm_s390_vm_cpu_processor *proc; 559658b6edaSMichael Mueller int ret = 0; 560658b6edaSMichael Mueller 561658b6edaSMichael Mueller proc = kzalloc(sizeof(*proc), GFP_KERNEL); 562658b6edaSMichael Mueller if (!proc) { 563658b6edaSMichael Mueller ret = -ENOMEM; 564658b6edaSMichael Mueller goto out; 565658b6edaSMichael Mueller } 566658b6edaSMichael Mueller memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid)); 567658b6edaSMichael Mueller proc->ibc = kvm->arch.model.ibc; 568981467c9SMichael Mueller memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE); 569658b6edaSMichael Mueller if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc))) 570658b6edaSMichael Mueller ret = -EFAULT; 571658b6edaSMichael Mueller kfree(proc); 572658b6edaSMichael Mueller out: 573658b6edaSMichael Mueller return ret; 574658b6edaSMichael Mueller } 575658b6edaSMichael Mueller 576658b6edaSMichael Mueller static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr) 577658b6edaSMichael Mueller { 578658b6edaSMichael Mueller struct kvm_s390_vm_cpu_machine *mach; 579658b6edaSMichael Mueller int ret = 0; 580658b6edaSMichael Mueller 581658b6edaSMichael Mueller mach = kzalloc(sizeof(*mach), GFP_KERNEL); 582658b6edaSMichael Mueller if (!mach) { 583658b6edaSMichael Mueller ret = -ENOMEM; 584658b6edaSMichael Mueller goto out; 585658b6edaSMichael Mueller } 586658b6edaSMichael Mueller get_cpu_id((struct cpuid *) &mach->cpuid); 587658b6edaSMichael Mueller mach->ibc = sclp_get_ibc(); 588981467c9SMichael Mueller memcpy(&mach->fac_mask, kvm->arch.model.fac->mask, 589981467c9SMichael Mueller S390_ARCH_FAC_LIST_SIZE_BYTE); 590658b6edaSMichael Mueller memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list, 59194422ee8SMichael Mueller S390_ARCH_FAC_LIST_SIZE_BYTE); 592658b6edaSMichael Mueller if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach))) 593658b6edaSMichael Mueller ret = -EFAULT; 594658b6edaSMichael Mueller kfree(mach); 595658b6edaSMichael Mueller out: 596658b6edaSMichael Mueller return ret; 597658b6edaSMichael Mueller } 598658b6edaSMichael Mueller 599658b6edaSMichael Mueller static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr) 600658b6edaSMichael Mueller { 601658b6edaSMichael Mueller int ret = -ENXIO; 602658b6edaSMichael Mueller 603658b6edaSMichael Mueller switch (attr->attr) { 604658b6edaSMichael Mueller case KVM_S390_VM_CPU_PROCESSOR: 605658b6edaSMichael Mueller ret = kvm_s390_get_processor(kvm, attr); 606658b6edaSMichael Mueller break; 607658b6edaSMichael Mueller case KVM_S390_VM_CPU_MACHINE: 608658b6edaSMichael Mueller ret = kvm_s390_get_machine(kvm, attr); 609658b6edaSMichael Mueller break; 610658b6edaSMichael Mueller } 611658b6edaSMichael Mueller return ret; 612658b6edaSMichael Mueller } 613658b6edaSMichael Mueller 614f2061656SDominik Dingel static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr) 615f2061656SDominik Dingel { 616f2061656SDominik Dingel int ret; 617f2061656SDominik Dingel 618f2061656SDominik Dingel switch (attr->group) { 6194f718eabSDominik Dingel case KVM_S390_VM_MEM_CTRL: 6208c0a7ce6SDominik Dingel ret = kvm_s390_set_mem_control(kvm, attr); 6214f718eabSDominik Dingel break; 62272f25020SJason J. Herne case KVM_S390_VM_TOD: 62372f25020SJason J. Herne ret = kvm_s390_set_tod(kvm, attr); 62472f25020SJason J. Herne break; 625658b6edaSMichael Mueller case KVM_S390_VM_CPU_MODEL: 626658b6edaSMichael Mueller ret = kvm_s390_set_cpu_model(kvm, attr); 627658b6edaSMichael Mueller break; 628a374e892STony Krowiak case KVM_S390_VM_CRYPTO: 629a374e892STony Krowiak ret = kvm_s390_vm_set_crypto(kvm, attr); 630a374e892STony Krowiak break; 631f2061656SDominik Dingel default: 632f2061656SDominik Dingel ret = -ENXIO; 633f2061656SDominik Dingel break; 634f2061656SDominik Dingel } 635f2061656SDominik Dingel 636f2061656SDominik Dingel return ret; 637f2061656SDominik Dingel } 638f2061656SDominik Dingel 639f2061656SDominik Dingel static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr) 640f2061656SDominik Dingel { 6418c0a7ce6SDominik Dingel int ret; 6428c0a7ce6SDominik Dingel 6438c0a7ce6SDominik Dingel switch (attr->group) { 6448c0a7ce6SDominik Dingel case KVM_S390_VM_MEM_CTRL: 6458c0a7ce6SDominik Dingel ret = kvm_s390_get_mem_control(kvm, attr); 6468c0a7ce6SDominik Dingel break; 64772f25020SJason J. Herne case KVM_S390_VM_TOD: 64872f25020SJason J. Herne ret = kvm_s390_get_tod(kvm, attr); 64972f25020SJason J. Herne break; 650658b6edaSMichael Mueller case KVM_S390_VM_CPU_MODEL: 651658b6edaSMichael Mueller ret = kvm_s390_get_cpu_model(kvm, attr); 652658b6edaSMichael Mueller break; 6538c0a7ce6SDominik Dingel default: 6548c0a7ce6SDominik Dingel ret = -ENXIO; 6558c0a7ce6SDominik Dingel break; 6568c0a7ce6SDominik Dingel } 6578c0a7ce6SDominik Dingel 6588c0a7ce6SDominik Dingel return ret; 659f2061656SDominik Dingel } 660f2061656SDominik Dingel 661f2061656SDominik Dingel static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) 662f2061656SDominik Dingel { 663f2061656SDominik Dingel int ret; 664f2061656SDominik Dingel 665f2061656SDominik Dingel switch (attr->group) { 6664f718eabSDominik Dingel case KVM_S390_VM_MEM_CTRL: 6674f718eabSDominik Dingel switch (attr->attr) { 6684f718eabSDominik Dingel case KVM_S390_VM_MEM_ENABLE_CMMA: 6694f718eabSDominik Dingel case KVM_S390_VM_MEM_CLR_CMMA: 6708c0a7ce6SDominik Dingel case KVM_S390_VM_MEM_LIMIT_SIZE: 6714f718eabSDominik Dingel ret = 0; 6724f718eabSDominik Dingel break; 6734f718eabSDominik Dingel default: 6744f718eabSDominik Dingel ret = -ENXIO; 6754f718eabSDominik Dingel break; 6764f718eabSDominik Dingel } 6774f718eabSDominik Dingel break; 67872f25020SJason J. Herne case KVM_S390_VM_TOD: 67972f25020SJason J. Herne switch (attr->attr) { 68072f25020SJason J. Herne case KVM_S390_VM_TOD_LOW: 68172f25020SJason J. Herne case KVM_S390_VM_TOD_HIGH: 68272f25020SJason J. Herne ret = 0; 68372f25020SJason J. Herne break; 68472f25020SJason J. Herne default: 68572f25020SJason J. Herne ret = -ENXIO; 68672f25020SJason J. Herne break; 68772f25020SJason J. Herne } 68872f25020SJason J. Herne break; 689658b6edaSMichael Mueller case KVM_S390_VM_CPU_MODEL: 690658b6edaSMichael Mueller switch (attr->attr) { 691658b6edaSMichael Mueller case KVM_S390_VM_CPU_PROCESSOR: 692658b6edaSMichael Mueller case KVM_S390_VM_CPU_MACHINE: 693658b6edaSMichael Mueller ret = 0; 694658b6edaSMichael Mueller break; 695658b6edaSMichael Mueller default: 696658b6edaSMichael Mueller ret = -ENXIO; 697658b6edaSMichael Mueller break; 698658b6edaSMichael Mueller } 699658b6edaSMichael Mueller break; 700a374e892STony Krowiak case KVM_S390_VM_CRYPTO: 701a374e892STony Krowiak switch (attr->attr) { 702a374e892STony Krowiak case KVM_S390_VM_CRYPTO_ENABLE_AES_KW: 703a374e892STony Krowiak case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW: 704a374e892STony Krowiak case KVM_S390_VM_CRYPTO_DISABLE_AES_KW: 705a374e892STony Krowiak case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW: 706a374e892STony Krowiak ret = 0; 707a374e892STony Krowiak break; 708a374e892STony Krowiak default: 709a374e892STony Krowiak ret = -ENXIO; 710a374e892STony Krowiak break; 711a374e892STony Krowiak } 712a374e892STony Krowiak break; 713f2061656SDominik Dingel default: 714f2061656SDominik Dingel ret = -ENXIO; 715f2061656SDominik Dingel break; 716f2061656SDominik Dingel } 717f2061656SDominik Dingel 718f2061656SDominik Dingel return ret; 719f2061656SDominik Dingel } 720f2061656SDominik Dingel 721b0c632dbSHeiko Carstens long kvm_arch_vm_ioctl(struct file *filp, 722b0c632dbSHeiko Carstens unsigned int ioctl, unsigned long arg) 723b0c632dbSHeiko Carstens { 724b0c632dbSHeiko Carstens struct kvm *kvm = filp->private_data; 725b0c632dbSHeiko Carstens void __user *argp = (void __user *)arg; 726f2061656SDominik Dingel struct kvm_device_attr attr; 727b0c632dbSHeiko Carstens int r; 728b0c632dbSHeiko Carstens 729b0c632dbSHeiko Carstens switch (ioctl) { 730ba5c1e9bSCarsten Otte case KVM_S390_INTERRUPT: { 731ba5c1e9bSCarsten Otte struct kvm_s390_interrupt s390int; 732ba5c1e9bSCarsten Otte 733ba5c1e9bSCarsten Otte r = -EFAULT; 734ba5c1e9bSCarsten Otte if (copy_from_user(&s390int, argp, sizeof(s390int))) 735ba5c1e9bSCarsten Otte break; 736ba5c1e9bSCarsten Otte r = kvm_s390_inject_vm(kvm, &s390int); 737ba5c1e9bSCarsten Otte break; 738ba5c1e9bSCarsten Otte } 739d938dc55SCornelia Huck case KVM_ENABLE_CAP: { 740d938dc55SCornelia Huck struct kvm_enable_cap cap; 741d938dc55SCornelia Huck r = -EFAULT; 742d938dc55SCornelia Huck if (copy_from_user(&cap, argp, sizeof(cap))) 743d938dc55SCornelia Huck break; 744d938dc55SCornelia Huck r = kvm_vm_ioctl_enable_cap(kvm, &cap); 745d938dc55SCornelia Huck break; 746d938dc55SCornelia Huck } 74784223598SCornelia Huck case KVM_CREATE_IRQCHIP: { 74884223598SCornelia Huck struct kvm_irq_routing_entry routing; 74984223598SCornelia Huck 75084223598SCornelia Huck r = -EINVAL; 75184223598SCornelia Huck if (kvm->arch.use_irqchip) { 75284223598SCornelia Huck /* Set up dummy routing. */ 75384223598SCornelia Huck memset(&routing, 0, sizeof(routing)); 75484223598SCornelia Huck kvm_set_irq_routing(kvm, &routing, 0, 0); 75584223598SCornelia Huck r = 0; 75684223598SCornelia Huck } 75784223598SCornelia Huck break; 75884223598SCornelia Huck } 759f2061656SDominik Dingel case KVM_SET_DEVICE_ATTR: { 760f2061656SDominik Dingel r = -EFAULT; 761f2061656SDominik Dingel if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 762f2061656SDominik Dingel break; 763f2061656SDominik Dingel r = kvm_s390_vm_set_attr(kvm, &attr); 764f2061656SDominik Dingel break; 765f2061656SDominik Dingel } 766f2061656SDominik Dingel case KVM_GET_DEVICE_ATTR: { 767f2061656SDominik Dingel r = -EFAULT; 768f2061656SDominik Dingel if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 769f2061656SDominik Dingel break; 770f2061656SDominik Dingel r = kvm_s390_vm_get_attr(kvm, &attr); 771f2061656SDominik Dingel break; 772f2061656SDominik Dingel } 773f2061656SDominik Dingel case KVM_HAS_DEVICE_ATTR: { 774f2061656SDominik Dingel r = -EFAULT; 775f2061656SDominik Dingel if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 776f2061656SDominik Dingel break; 777f2061656SDominik Dingel r = kvm_s390_vm_has_attr(kvm, &attr); 778f2061656SDominik Dingel break; 779f2061656SDominik Dingel } 780b0c632dbSHeiko Carstens default: 781367e1319SAvi Kivity r = -ENOTTY; 782b0c632dbSHeiko Carstens } 783b0c632dbSHeiko Carstens 784b0c632dbSHeiko Carstens return r; 785b0c632dbSHeiko Carstens } 786b0c632dbSHeiko Carstens 78745c9b47cSTony Krowiak static int kvm_s390_query_ap_config(u8 *config) 78845c9b47cSTony Krowiak { 78945c9b47cSTony Krowiak u32 fcn_code = 0x04000000UL; 79086044c8cSChristian Borntraeger u32 cc = 0; 79145c9b47cSTony Krowiak 79286044c8cSChristian Borntraeger memset(config, 0, 128); 79345c9b47cSTony Krowiak asm volatile( 79445c9b47cSTony Krowiak "lgr 0,%1\n" 79545c9b47cSTony Krowiak "lgr 2,%2\n" 79645c9b47cSTony Krowiak ".long 0xb2af0000\n" /* PQAP(QCI) */ 79786044c8cSChristian Borntraeger "0: ipm %0\n" 79845c9b47cSTony Krowiak "srl %0,28\n" 79986044c8cSChristian Borntraeger "1:\n" 80086044c8cSChristian Borntraeger EX_TABLE(0b, 1b) 80186044c8cSChristian Borntraeger : "+r" (cc) 80245c9b47cSTony Krowiak : "r" (fcn_code), "r" (config) 80345c9b47cSTony Krowiak : "cc", "0", "2", "memory" 80445c9b47cSTony Krowiak ); 80545c9b47cSTony Krowiak 80645c9b47cSTony Krowiak return cc; 80745c9b47cSTony Krowiak } 80845c9b47cSTony Krowiak 80945c9b47cSTony Krowiak static int kvm_s390_apxa_installed(void) 81045c9b47cSTony Krowiak { 81145c9b47cSTony Krowiak u8 config[128]; 81245c9b47cSTony Krowiak int cc; 81345c9b47cSTony Krowiak 81445c9b47cSTony Krowiak if (test_facility(2) && test_facility(12)) { 81545c9b47cSTony Krowiak cc = kvm_s390_query_ap_config(config); 81645c9b47cSTony Krowiak 81745c9b47cSTony Krowiak if (cc) 81845c9b47cSTony Krowiak pr_err("PQAP(QCI) failed with cc=%d", cc); 81945c9b47cSTony Krowiak else 82045c9b47cSTony Krowiak return config[0] & 0x40; 82145c9b47cSTony Krowiak } 82245c9b47cSTony Krowiak 82345c9b47cSTony Krowiak return 0; 82445c9b47cSTony Krowiak } 82545c9b47cSTony Krowiak 82645c9b47cSTony Krowiak static void kvm_s390_set_crycb_format(struct kvm *kvm) 82745c9b47cSTony Krowiak { 82845c9b47cSTony Krowiak kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb; 82945c9b47cSTony Krowiak 83045c9b47cSTony Krowiak if (kvm_s390_apxa_installed()) 83145c9b47cSTony Krowiak kvm->arch.crypto.crycbd |= CRYCB_FORMAT2; 83245c9b47cSTony Krowiak else 83345c9b47cSTony Krowiak kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; 83445c9b47cSTony Krowiak } 83545c9b47cSTony Krowiak 8369d8d5786SMichael Mueller static void kvm_s390_get_cpu_id(struct cpuid *cpu_id) 8379d8d5786SMichael Mueller { 8389d8d5786SMichael Mueller get_cpu_id(cpu_id); 8399d8d5786SMichael Mueller cpu_id->version = 0xff; 8409d8d5786SMichael Mueller } 8419d8d5786SMichael Mueller 8425102ee87STony Krowiak static int kvm_s390_crypto_init(struct kvm *kvm) 8435102ee87STony Krowiak { 8449d8d5786SMichael Mueller if (!test_kvm_facility(kvm, 76)) 8455102ee87STony Krowiak return 0; 8465102ee87STony Krowiak 8475102ee87STony Krowiak kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb), 8485102ee87STony Krowiak GFP_KERNEL | GFP_DMA); 8495102ee87STony Krowiak if (!kvm->arch.crypto.crycb) 8505102ee87STony Krowiak return -ENOMEM; 8515102ee87STony Krowiak 85245c9b47cSTony Krowiak kvm_s390_set_crycb_format(kvm); 8535102ee87STony Krowiak 854ed6f76b4STony Krowiak /* Enable AES/DEA protected key functions by default */ 855ed6f76b4STony Krowiak kvm->arch.crypto.aes_kw = 1; 856ed6f76b4STony Krowiak kvm->arch.crypto.dea_kw = 1; 857ed6f76b4STony Krowiak get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 858ed6f76b4STony Krowiak sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); 859ed6f76b4STony Krowiak get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 860ed6f76b4STony Krowiak sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); 861a374e892STony Krowiak 8625102ee87STony Krowiak return 0; 8635102ee87STony Krowiak } 8645102ee87STony Krowiak 865e08b9637SCarsten Otte int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) 866b0c632dbSHeiko Carstens { 8679d8d5786SMichael Mueller int i, rc; 868b0c632dbSHeiko Carstens char debug_name[16]; 869f6c137ffSChristian Borntraeger static unsigned long sca_offset; 870b0c632dbSHeiko Carstens 871e08b9637SCarsten Otte rc = -EINVAL; 872e08b9637SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL 873e08b9637SCarsten Otte if (type & ~KVM_VM_S390_UCONTROL) 874e08b9637SCarsten Otte goto out_err; 875e08b9637SCarsten Otte if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN))) 876e08b9637SCarsten Otte goto out_err; 877e08b9637SCarsten Otte #else 878e08b9637SCarsten Otte if (type) 879e08b9637SCarsten Otte goto out_err; 880e08b9637SCarsten Otte #endif 881e08b9637SCarsten Otte 882b0c632dbSHeiko Carstens rc = s390_enable_sie(); 883b0c632dbSHeiko Carstens if (rc) 884d89f5effSJan Kiszka goto out_err; 885b0c632dbSHeiko Carstens 886b290411aSCarsten Otte rc = -ENOMEM; 887b290411aSCarsten Otte 888b0c632dbSHeiko Carstens kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL); 889b0c632dbSHeiko Carstens if (!kvm->arch.sca) 890d89f5effSJan Kiszka goto out_err; 891f6c137ffSChristian Borntraeger spin_lock(&kvm_lock); 892f6c137ffSChristian Borntraeger sca_offset = (sca_offset + 16) & 0x7f0; 893f6c137ffSChristian Borntraeger kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset); 894f6c137ffSChristian Borntraeger spin_unlock(&kvm_lock); 895b0c632dbSHeiko Carstens 896b0c632dbSHeiko Carstens sprintf(debug_name, "kvm-%u", current->pid); 897b0c632dbSHeiko Carstens 898b0c632dbSHeiko Carstens kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long)); 899b0c632dbSHeiko Carstens if (!kvm->arch.dbf) 90040f5b735SDominik Dingel goto out_err; 901b0c632dbSHeiko Carstens 9029d8d5786SMichael Mueller /* 9039d8d5786SMichael Mueller * The architectural maximum amount of facilities is 16 kbit. To store 9049d8d5786SMichael Mueller * this amount, 2 kbyte of memory is required. Thus we need a full 905981467c9SMichael Mueller * page to hold the guest facility list (arch.model.fac->list) and the 906981467c9SMichael Mueller * facility mask (arch.model.fac->mask). Its address size has to be 9079d8d5786SMichael Mueller * 31 bits and word aligned. 9089d8d5786SMichael Mueller */ 9099d8d5786SMichael Mueller kvm->arch.model.fac = 910981467c9SMichael Mueller (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 9119d8d5786SMichael Mueller if (!kvm->arch.model.fac) 91240f5b735SDominik Dingel goto out_err; 9139d8d5786SMichael Mueller 914fb5bf93fSMichael Mueller /* Populate the facility mask initially. */ 915981467c9SMichael Mueller memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list, 91694422ee8SMichael Mueller S390_ARCH_FAC_LIST_SIZE_BYTE); 9179d8d5786SMichael Mueller for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) { 9189d8d5786SMichael Mueller if (i < kvm_s390_fac_list_mask_size()) 919981467c9SMichael Mueller kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i]; 9209d8d5786SMichael Mueller else 921981467c9SMichael Mueller kvm->arch.model.fac->mask[i] = 0UL; 9229d8d5786SMichael Mueller } 9239d8d5786SMichael Mueller 924981467c9SMichael Mueller /* Populate the facility list initially. */ 925981467c9SMichael Mueller memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask, 926981467c9SMichael Mueller S390_ARCH_FAC_LIST_SIZE_BYTE); 927981467c9SMichael Mueller 9289d8d5786SMichael Mueller kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id); 929658b6edaSMichael Mueller kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff; 9309d8d5786SMichael Mueller 9315102ee87STony Krowiak if (kvm_s390_crypto_init(kvm) < 0) 93240f5b735SDominik Dingel goto out_err; 9335102ee87STony Krowiak 934ba5c1e9bSCarsten Otte spin_lock_init(&kvm->arch.float_int.lock); 935ba5c1e9bSCarsten Otte INIT_LIST_HEAD(&kvm->arch.float_int.list); 9368a242234SHeiko Carstens init_waitqueue_head(&kvm->arch.ipte_wq); 937a6b7e459SThomas Huth mutex_init(&kvm->arch.ipte_mutex); 938ba5c1e9bSCarsten Otte 939b0c632dbSHeiko Carstens debug_register_view(kvm->arch.dbf, &debug_sprintf_view); 940b0c632dbSHeiko Carstens VM_EVENT(kvm, 3, "%s", "vm created"); 941b0c632dbSHeiko Carstens 942e08b9637SCarsten Otte if (type & KVM_VM_S390_UCONTROL) { 943e08b9637SCarsten Otte kvm->arch.gmap = NULL; 944e08b9637SCarsten Otte } else { 9450349985aSChristian Borntraeger kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1); 946598841caSCarsten Otte if (!kvm->arch.gmap) 94740f5b735SDominik Dingel goto out_err; 9482c70fe44SChristian Borntraeger kvm->arch.gmap->private = kvm; 94924eb3a82SDominik Dingel kvm->arch.gmap->pfault_enabled = 0; 950e08b9637SCarsten Otte } 951fa6b7fe9SCornelia Huck 952fa6b7fe9SCornelia Huck kvm->arch.css_support = 0; 95384223598SCornelia Huck kvm->arch.use_irqchip = 0; 95468c55750SEric Farman kvm->arch.use_vectors = 0; 95572f25020SJason J. Herne kvm->arch.epoch = 0; 956fa6b7fe9SCornelia Huck 9578ad35755SDavid Hildenbrand spin_lock_init(&kvm->arch.start_stop_lock); 9588ad35755SDavid Hildenbrand 959d89f5effSJan Kiszka return 0; 960d89f5effSJan Kiszka out_err: 96140f5b735SDominik Dingel kfree(kvm->arch.crypto.crycb); 96240f5b735SDominik Dingel free_page((unsigned long)kvm->arch.model.fac); 96340f5b735SDominik Dingel debug_unregister(kvm->arch.dbf); 96440f5b735SDominik Dingel free_page((unsigned long)(kvm->arch.sca)); 965d89f5effSJan Kiszka return rc; 966b0c632dbSHeiko Carstens } 967b0c632dbSHeiko Carstens 968d329c035SChristian Borntraeger void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) 969d329c035SChristian Borntraeger { 970d329c035SChristian Borntraeger VCPU_EVENT(vcpu, 3, "%s", "free cpu"); 971ade38c31SCornelia Huck trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id); 97267335e63SChristian Borntraeger kvm_s390_clear_local_irqs(vcpu); 9733c038e6bSDominik Dingel kvm_clear_async_pf_completion_queue(vcpu); 97458f9460bSCarsten Otte if (!kvm_is_ucontrol(vcpu->kvm)) { 97558f9460bSCarsten Otte clear_bit(63 - vcpu->vcpu_id, 97658f9460bSCarsten Otte (unsigned long *) &vcpu->kvm->arch.sca->mcn); 977abf4a71eSCarsten Otte if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda == 978abf4a71eSCarsten Otte (__u64) vcpu->arch.sie_block) 979abf4a71eSCarsten Otte vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0; 98058f9460bSCarsten Otte } 981abf4a71eSCarsten Otte smp_mb(); 98227e0393fSCarsten Otte 98327e0393fSCarsten Otte if (kvm_is_ucontrol(vcpu->kvm)) 98427e0393fSCarsten Otte gmap_free(vcpu->arch.gmap); 98527e0393fSCarsten Otte 986b31605c1SDominik Dingel if (kvm_s390_cmma_enabled(vcpu->kvm)) 987b31605c1SDominik Dingel kvm_s390_vcpu_unsetup_cmma(vcpu); 988d329c035SChristian Borntraeger free_page((unsigned long)(vcpu->arch.sie_block)); 989b31288faSKonstantin Weitz 9906692cef3SChristian Borntraeger kvm_vcpu_uninit(vcpu); 991b110feafSMichael Mueller kmem_cache_free(kvm_vcpu_cache, vcpu); 992d329c035SChristian Borntraeger } 993d329c035SChristian Borntraeger 994d329c035SChristian Borntraeger static void kvm_free_vcpus(struct kvm *kvm) 995d329c035SChristian Borntraeger { 996d329c035SChristian Borntraeger unsigned int i; 997988a2caeSGleb Natapov struct kvm_vcpu *vcpu; 998d329c035SChristian Borntraeger 999988a2caeSGleb Natapov kvm_for_each_vcpu(i, vcpu, kvm) 1000988a2caeSGleb Natapov kvm_arch_vcpu_destroy(vcpu); 1001988a2caeSGleb Natapov 1002988a2caeSGleb Natapov mutex_lock(&kvm->lock); 1003988a2caeSGleb Natapov for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) 1004d329c035SChristian Borntraeger kvm->vcpus[i] = NULL; 1005988a2caeSGleb Natapov 1006988a2caeSGleb Natapov atomic_set(&kvm->online_vcpus, 0); 1007988a2caeSGleb Natapov mutex_unlock(&kvm->lock); 1008d329c035SChristian Borntraeger } 1009d329c035SChristian Borntraeger 1010b0c632dbSHeiko Carstens void kvm_arch_destroy_vm(struct kvm *kvm) 1011b0c632dbSHeiko Carstens { 1012d329c035SChristian Borntraeger kvm_free_vcpus(kvm); 10139d8d5786SMichael Mueller free_page((unsigned long)kvm->arch.model.fac); 1014b0c632dbSHeiko Carstens free_page((unsigned long)(kvm->arch.sca)); 1015d329c035SChristian Borntraeger debug_unregister(kvm->arch.dbf); 10165102ee87STony Krowiak kfree(kvm->arch.crypto.crycb); 101727e0393fSCarsten Otte if (!kvm_is_ucontrol(kvm)) 1018598841caSCarsten Otte gmap_free(kvm->arch.gmap); 1019841b91c5SCornelia Huck kvm_s390_destroy_adapters(kvm); 102067335e63SChristian Borntraeger kvm_s390_clear_float_irqs(kvm); 1021b0c632dbSHeiko Carstens } 1022b0c632dbSHeiko Carstens 1023b0c632dbSHeiko Carstens /* Section: vcpu related */ 1024dafd032aSDominik Dingel static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu) 1025b0c632dbSHeiko Carstens { 1026c6c956b8SMartin Schwidefsky vcpu->arch.gmap = gmap_alloc(current->mm, -1UL); 102727e0393fSCarsten Otte if (!vcpu->arch.gmap) 102827e0393fSCarsten Otte return -ENOMEM; 10292c70fe44SChristian Borntraeger vcpu->arch.gmap->private = vcpu->kvm; 1030dafd032aSDominik Dingel 103127e0393fSCarsten Otte return 0; 103227e0393fSCarsten Otte } 103327e0393fSCarsten Otte 1034dafd032aSDominik Dingel int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) 1035dafd032aSDominik Dingel { 1036dafd032aSDominik Dingel vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; 1037dafd032aSDominik Dingel kvm_clear_async_pf_completion_queue(vcpu); 103859674c1aSChristian Borntraeger vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX | 103959674c1aSChristian Borntraeger KVM_SYNC_GPRS | 10409eed0735SChristian Borntraeger KVM_SYNC_ACRS | 1041b028ee3eSDavid Hildenbrand KVM_SYNC_CRS | 1042b028ee3eSDavid Hildenbrand KVM_SYNC_ARCH0 | 1043b028ee3eSDavid Hildenbrand KVM_SYNC_PFAULT; 104468c55750SEric Farman if (test_kvm_facility(vcpu->kvm, 129)) 104568c55750SEric Farman vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS; 1046dafd032aSDominik Dingel 1047dafd032aSDominik Dingel if (kvm_is_ucontrol(vcpu->kvm)) 1048dafd032aSDominik Dingel return __kvm_ucontrol_vcpu_init(vcpu); 1049dafd032aSDominik Dingel 1050b0c632dbSHeiko Carstens return 0; 1051b0c632dbSHeiko Carstens } 1052b0c632dbSHeiko Carstens 1053b0c632dbSHeiko Carstens void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 1054b0c632dbSHeiko Carstens { 10554725c860SMartin Schwidefsky save_fp_ctl(&vcpu->arch.host_fpregs.fpc); 105668c55750SEric Farman if (vcpu->kvm->arch.use_vectors) 105768c55750SEric Farman save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs); 105868c55750SEric Farman else 10594725c860SMartin Schwidefsky save_fp_regs(vcpu->arch.host_fpregs.fprs); 1060b0c632dbSHeiko Carstens save_access_regs(vcpu->arch.host_acrs); 106168c55750SEric Farman if (vcpu->kvm->arch.use_vectors) { 106268c55750SEric Farman restore_fp_ctl(&vcpu->run->s.regs.fpc); 106368c55750SEric Farman restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); 106468c55750SEric Farman } else { 10654725c860SMartin Schwidefsky restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc); 10664725c860SMartin Schwidefsky restore_fp_regs(vcpu->arch.guest_fpregs.fprs); 106768c55750SEric Farman } 106859674c1aSChristian Borntraeger restore_access_regs(vcpu->run->s.regs.acrs); 1069480e5926SChristian Borntraeger gmap_enable(vcpu->arch.gmap); 10709e6dabefSCornelia Huck atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); 1071b0c632dbSHeiko Carstens } 1072b0c632dbSHeiko Carstens 1073b0c632dbSHeiko Carstens void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 1074b0c632dbSHeiko Carstens { 10759e6dabefSCornelia Huck atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); 1076480e5926SChristian Borntraeger gmap_disable(vcpu->arch.gmap); 107768c55750SEric Farman if (vcpu->kvm->arch.use_vectors) { 107868c55750SEric Farman save_fp_ctl(&vcpu->run->s.regs.fpc); 107968c55750SEric Farman save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); 108068c55750SEric Farman } else { 10814725c860SMartin Schwidefsky save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); 10824725c860SMartin Schwidefsky save_fp_regs(vcpu->arch.guest_fpregs.fprs); 108368c55750SEric Farman } 108459674c1aSChristian Borntraeger save_access_regs(vcpu->run->s.regs.acrs); 10854725c860SMartin Schwidefsky restore_fp_ctl(&vcpu->arch.host_fpregs.fpc); 108668c55750SEric Farman if (vcpu->kvm->arch.use_vectors) 108768c55750SEric Farman restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs); 108868c55750SEric Farman else 10894725c860SMartin Schwidefsky restore_fp_regs(vcpu->arch.host_fpregs.fprs); 1090b0c632dbSHeiko Carstens restore_access_regs(vcpu->arch.host_acrs); 1091b0c632dbSHeiko Carstens } 1092b0c632dbSHeiko Carstens 1093b0c632dbSHeiko Carstens static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) 1094b0c632dbSHeiko Carstens { 1095b0c632dbSHeiko Carstens /* this equals initial cpu reset in pop, but we don't switch to ESA */ 1096b0c632dbSHeiko Carstens vcpu->arch.sie_block->gpsw.mask = 0UL; 1097b0c632dbSHeiko Carstens vcpu->arch.sie_block->gpsw.addr = 0UL; 10988d26cf7bSChristian Borntraeger kvm_s390_set_prefix(vcpu, 0); 1099b0c632dbSHeiko Carstens vcpu->arch.sie_block->cputm = 0UL; 1100b0c632dbSHeiko Carstens vcpu->arch.sie_block->ckc = 0UL; 1101b0c632dbSHeiko Carstens vcpu->arch.sie_block->todpr = 0; 1102b0c632dbSHeiko Carstens memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64)); 1103b0c632dbSHeiko Carstens vcpu->arch.sie_block->gcr[0] = 0xE0UL; 1104b0c632dbSHeiko Carstens vcpu->arch.sie_block->gcr[14] = 0xC2000000UL; 1105b0c632dbSHeiko Carstens vcpu->arch.guest_fpregs.fpc = 0; 1106b0c632dbSHeiko Carstens asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc)); 1107b0c632dbSHeiko Carstens vcpu->arch.sie_block->gbea = 1; 1108672550fbSChristian Borntraeger vcpu->arch.sie_block->pp = 0; 11093c038e6bSDominik Dingel vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; 11103c038e6bSDominik Dingel kvm_clear_async_pf_completion_queue(vcpu); 11116352e4d2SDavid Hildenbrand if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) 11126852d7b6SDavid Hildenbrand kvm_s390_vcpu_stop(vcpu); 11132ed10cc1SJens Freimann kvm_s390_clear_local_irqs(vcpu); 1114b0c632dbSHeiko Carstens } 1115b0c632dbSHeiko Carstens 111631928aa5SDominik Dingel void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) 111742897d86SMarcelo Tosatti { 111872f25020SJason J. Herne mutex_lock(&vcpu->kvm->lock); 111972f25020SJason J. Herne vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; 112072f25020SJason J. Herne mutex_unlock(&vcpu->kvm->lock); 1121dafd032aSDominik Dingel if (!kvm_is_ucontrol(vcpu->kvm)) 1122dafd032aSDominik Dingel vcpu->arch.gmap = vcpu->kvm->arch.gmap; 112342897d86SMarcelo Tosatti } 112442897d86SMarcelo Tosatti 11255102ee87STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu) 11265102ee87STony Krowiak { 11279d8d5786SMichael Mueller if (!test_kvm_facility(vcpu->kvm, 76)) 11285102ee87STony Krowiak return; 11295102ee87STony Krowiak 1130a374e892STony Krowiak vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA); 1131a374e892STony Krowiak 1132a374e892STony Krowiak if (vcpu->kvm->arch.crypto.aes_kw) 1133a374e892STony Krowiak vcpu->arch.sie_block->ecb3 |= ECB3_AES; 1134a374e892STony Krowiak if (vcpu->kvm->arch.crypto.dea_kw) 1135a374e892STony Krowiak vcpu->arch.sie_block->ecb3 |= ECB3_DEA; 1136a374e892STony Krowiak 11375102ee87STony Krowiak vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; 11385102ee87STony Krowiak } 11395102ee87STony Krowiak 1140b31605c1SDominik Dingel void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu) 1141b31605c1SDominik Dingel { 1142b31605c1SDominik Dingel free_page(vcpu->arch.sie_block->cbrlo); 1143b31605c1SDominik Dingel vcpu->arch.sie_block->cbrlo = 0; 1144b31605c1SDominik Dingel } 1145b31605c1SDominik Dingel 1146b31605c1SDominik Dingel int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu) 1147b31605c1SDominik Dingel { 1148b31605c1SDominik Dingel vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL); 1149b31605c1SDominik Dingel if (!vcpu->arch.sie_block->cbrlo) 1150b31605c1SDominik Dingel return -ENOMEM; 1151b31605c1SDominik Dingel 1152b31605c1SDominik Dingel vcpu->arch.sie_block->ecb2 |= 0x80; 1153b31605c1SDominik Dingel vcpu->arch.sie_block->ecb2 &= ~0x08; 1154b31605c1SDominik Dingel return 0; 1155b31605c1SDominik Dingel } 1156b31605c1SDominik Dingel 115791520f1aSMichael Mueller static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu) 115891520f1aSMichael Mueller { 115991520f1aSMichael Mueller struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model; 116091520f1aSMichael Mueller 116191520f1aSMichael Mueller vcpu->arch.cpu_id = model->cpu_id; 116291520f1aSMichael Mueller vcpu->arch.sie_block->ibc = model->ibc; 116391520f1aSMichael Mueller vcpu->arch.sie_block->fac = (int) (long) model->fac->list; 116491520f1aSMichael Mueller } 116591520f1aSMichael Mueller 1166b0c632dbSHeiko Carstens int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) 1167b0c632dbSHeiko Carstens { 1168b31605c1SDominik Dingel int rc = 0; 1169b31288faSKonstantin Weitz 11709e6dabefSCornelia Huck atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | 11719e6dabefSCornelia Huck CPUSTAT_SM | 117269d0d3a3SChristian Borntraeger CPUSTAT_STOPPED | 117369d0d3a3SChristian Borntraeger CPUSTAT_GED); 117491520f1aSMichael Mueller kvm_s390_vcpu_setup_model(vcpu); 117591520f1aSMichael Mueller 1176fc34531dSChristian Borntraeger vcpu->arch.sie_block->ecb = 6; 11779d8d5786SMichael Mueller if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73)) 11787feb6bb8SMichael Mueller vcpu->arch.sie_block->ecb |= 0x10; 11797feb6bb8SMichael Mueller 118069d0d3a3SChristian Borntraeger vcpu->arch.sie_block->ecb2 = 8; 1181ea5f4969SDavid Hildenbrand vcpu->arch.sie_block->eca = 0xC1002000U; 1182217a4406SHeiko Carstens if (sclp_has_siif()) 1183217a4406SHeiko Carstens vcpu->arch.sie_block->eca |= 1; 1184ea5f4969SDavid Hildenbrand if (sclp_has_sigpif()) 1185ea5f4969SDavid Hildenbrand vcpu->arch.sie_block->eca |= 0x10000000U; 118613211ea7SEric Farman if (vcpu->kvm->arch.use_vectors) { 118713211ea7SEric Farman vcpu->arch.sie_block->eca |= 0x00020000; 118813211ea7SEric Farman vcpu->arch.sie_block->ecd |= 0x20000000; 118913211ea7SEric Farman } 1190492d8642SThomas Huth vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; 11915a5e6536SMatthew Rosato 1192b31605c1SDominik Dingel if (kvm_s390_cmma_enabled(vcpu->kvm)) { 1193b31605c1SDominik Dingel rc = kvm_s390_vcpu_setup_cmma(vcpu); 1194b31605c1SDominik Dingel if (rc) 1195b31605c1SDominik Dingel return rc; 1196b31288faSKonstantin Weitz } 11970ac96cafSDavid Hildenbrand hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 1198ca872302SChristian Borntraeger vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; 11999d8d5786SMichael Mueller 12005102ee87STony Krowiak kvm_s390_vcpu_crypto_setup(vcpu); 12015102ee87STony Krowiak 1202b31605c1SDominik Dingel return rc; 1203b0c632dbSHeiko Carstens } 1204b0c632dbSHeiko Carstens 1205b0c632dbSHeiko Carstens struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, 1206b0c632dbSHeiko Carstens unsigned int id) 1207b0c632dbSHeiko Carstens { 12084d47555aSCarsten Otte struct kvm_vcpu *vcpu; 12097feb6bb8SMichael Mueller struct sie_page *sie_page; 12104d47555aSCarsten Otte int rc = -EINVAL; 1211b0c632dbSHeiko Carstens 12124d47555aSCarsten Otte if (id >= KVM_MAX_VCPUS) 12134d47555aSCarsten Otte goto out; 12144d47555aSCarsten Otte 12154d47555aSCarsten Otte rc = -ENOMEM; 12164d47555aSCarsten Otte 1217b110feafSMichael Mueller vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); 1218b0c632dbSHeiko Carstens if (!vcpu) 12194d47555aSCarsten Otte goto out; 1220b0c632dbSHeiko Carstens 12217feb6bb8SMichael Mueller sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL); 12227feb6bb8SMichael Mueller if (!sie_page) 1223b0c632dbSHeiko Carstens goto out_free_cpu; 1224b0c632dbSHeiko Carstens 12257feb6bb8SMichael Mueller vcpu->arch.sie_block = &sie_page->sie_block; 12267feb6bb8SMichael Mueller vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb; 122768c55750SEric Farman vcpu->arch.host_vregs = &sie_page->vregs; 12287feb6bb8SMichael Mueller 1229b0c632dbSHeiko Carstens vcpu->arch.sie_block->icpua = id; 123058f9460bSCarsten Otte if (!kvm_is_ucontrol(kvm)) { 123158f9460bSCarsten Otte if (!kvm->arch.sca) { 123258f9460bSCarsten Otte WARN_ON_ONCE(1); 123358f9460bSCarsten Otte goto out_free_cpu; 123458f9460bSCarsten Otte } 1235abf4a71eSCarsten Otte if (!kvm->arch.sca->cpu[id].sda) 123658f9460bSCarsten Otte kvm->arch.sca->cpu[id].sda = 123758f9460bSCarsten Otte (__u64) vcpu->arch.sie_block; 123858f9460bSCarsten Otte vcpu->arch.sie_block->scaoh = 123958f9460bSCarsten Otte (__u32)(((__u64)kvm->arch.sca) >> 32); 1240b0c632dbSHeiko Carstens vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; 1241fc34531dSChristian Borntraeger set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); 124258f9460bSCarsten Otte } 1243b0c632dbSHeiko Carstens 1244ba5c1e9bSCarsten Otte spin_lock_init(&vcpu->arch.local_int.lock); 1245ba5c1e9bSCarsten Otte vcpu->arch.local_int.float_int = &kvm->arch.float_int; 1246d0321a24SChristian Borntraeger vcpu->arch.local_int.wq = &vcpu->wq; 12475288fbf0SChristian Borntraeger vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags; 1248ba5c1e9bSCarsten Otte 1249b0c632dbSHeiko Carstens rc = kvm_vcpu_init(vcpu, kvm, id); 1250b0c632dbSHeiko Carstens if (rc) 12517b06bf2fSWei Yongjun goto out_free_sie_block; 1252b0c632dbSHeiko Carstens VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu, 1253b0c632dbSHeiko Carstens vcpu->arch.sie_block); 1254ade38c31SCornelia Huck trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block); 1255b0c632dbSHeiko Carstens 1256b0c632dbSHeiko Carstens return vcpu; 12577b06bf2fSWei Yongjun out_free_sie_block: 12587b06bf2fSWei Yongjun free_page((unsigned long)(vcpu->arch.sie_block)); 1259b0c632dbSHeiko Carstens out_free_cpu: 1260b110feafSMichael Mueller kmem_cache_free(kvm_vcpu_cache, vcpu); 12614d47555aSCarsten Otte out: 1262b0c632dbSHeiko Carstens return ERR_PTR(rc); 1263b0c632dbSHeiko Carstens } 1264b0c632dbSHeiko Carstens 1265b0c632dbSHeiko Carstens int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) 1266b0c632dbSHeiko Carstens { 12679a022067SDavid Hildenbrand return kvm_s390_vcpu_has_irq(vcpu, 0); 1268b0c632dbSHeiko Carstens } 1269b0c632dbSHeiko Carstens 127049b99e1eSChristian Borntraeger void s390_vcpu_block(struct kvm_vcpu *vcpu) 127149b99e1eSChristian Borntraeger { 127249b99e1eSChristian Borntraeger atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); 127349b99e1eSChristian Borntraeger } 127449b99e1eSChristian Borntraeger 127549b99e1eSChristian Borntraeger void s390_vcpu_unblock(struct kvm_vcpu *vcpu) 127649b99e1eSChristian Borntraeger { 127749b99e1eSChristian Borntraeger atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); 127849b99e1eSChristian Borntraeger } 127949b99e1eSChristian Borntraeger 128049b99e1eSChristian Borntraeger /* 128149b99e1eSChristian Borntraeger * Kick a guest cpu out of SIE and wait until SIE is not running. 128249b99e1eSChristian Borntraeger * If the CPU is not running (e.g. waiting as idle) the function will 128349b99e1eSChristian Borntraeger * return immediately. */ 128449b99e1eSChristian Borntraeger void exit_sie(struct kvm_vcpu *vcpu) 128549b99e1eSChristian Borntraeger { 128649b99e1eSChristian Borntraeger atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags); 128749b99e1eSChristian Borntraeger while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) 128849b99e1eSChristian Borntraeger cpu_relax(); 128949b99e1eSChristian Borntraeger } 129049b99e1eSChristian Borntraeger 129149b99e1eSChristian Borntraeger /* Kick a guest cpu out of SIE and prevent SIE-reentry */ 129249b99e1eSChristian Borntraeger void exit_sie_sync(struct kvm_vcpu *vcpu) 129349b99e1eSChristian Borntraeger { 129449b99e1eSChristian Borntraeger s390_vcpu_block(vcpu); 129549b99e1eSChristian Borntraeger exit_sie(vcpu); 129649b99e1eSChristian Borntraeger } 129749b99e1eSChristian Borntraeger 12982c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address) 12992c70fe44SChristian Borntraeger { 13002c70fe44SChristian Borntraeger int i; 13012c70fe44SChristian Borntraeger struct kvm *kvm = gmap->private; 13022c70fe44SChristian Borntraeger struct kvm_vcpu *vcpu; 13032c70fe44SChristian Borntraeger 13042c70fe44SChristian Borntraeger kvm_for_each_vcpu(i, vcpu, kvm) { 13052c70fe44SChristian Borntraeger /* match against both prefix pages */ 1306fda902cbSMichael Mueller if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) { 13072c70fe44SChristian Borntraeger VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address); 13082c70fe44SChristian Borntraeger kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); 13092c70fe44SChristian Borntraeger exit_sie_sync(vcpu); 13102c70fe44SChristian Borntraeger } 13112c70fe44SChristian Borntraeger } 13122c70fe44SChristian Borntraeger } 13132c70fe44SChristian Borntraeger 1314b6d33834SChristoffer Dall int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) 1315b6d33834SChristoffer Dall { 1316b6d33834SChristoffer Dall /* kvm common code refers to this, but never calls it */ 1317b6d33834SChristoffer Dall BUG(); 1318b6d33834SChristoffer Dall return 0; 1319b6d33834SChristoffer Dall } 1320b6d33834SChristoffer Dall 132114eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, 132214eebd91SCarsten Otte struct kvm_one_reg *reg) 132314eebd91SCarsten Otte { 132414eebd91SCarsten Otte int r = -EINVAL; 132514eebd91SCarsten Otte 132614eebd91SCarsten Otte switch (reg->id) { 132729b7c71bSCarsten Otte case KVM_REG_S390_TODPR: 132829b7c71bSCarsten Otte r = put_user(vcpu->arch.sie_block->todpr, 132929b7c71bSCarsten Otte (u32 __user *)reg->addr); 133029b7c71bSCarsten Otte break; 133129b7c71bSCarsten Otte case KVM_REG_S390_EPOCHDIFF: 133229b7c71bSCarsten Otte r = put_user(vcpu->arch.sie_block->epoch, 133329b7c71bSCarsten Otte (u64 __user *)reg->addr); 133429b7c71bSCarsten Otte break; 133546a6dd1cSJason J. herne case KVM_REG_S390_CPU_TIMER: 133646a6dd1cSJason J. herne r = put_user(vcpu->arch.sie_block->cputm, 133746a6dd1cSJason J. herne (u64 __user *)reg->addr); 133846a6dd1cSJason J. herne break; 133946a6dd1cSJason J. herne case KVM_REG_S390_CLOCK_COMP: 134046a6dd1cSJason J. herne r = put_user(vcpu->arch.sie_block->ckc, 134146a6dd1cSJason J. herne (u64 __user *)reg->addr); 134246a6dd1cSJason J. herne break; 1343536336c2SDominik Dingel case KVM_REG_S390_PFTOKEN: 1344536336c2SDominik Dingel r = put_user(vcpu->arch.pfault_token, 1345536336c2SDominik Dingel (u64 __user *)reg->addr); 1346536336c2SDominik Dingel break; 1347536336c2SDominik Dingel case KVM_REG_S390_PFCOMPARE: 1348536336c2SDominik Dingel r = put_user(vcpu->arch.pfault_compare, 1349536336c2SDominik Dingel (u64 __user *)reg->addr); 1350536336c2SDominik Dingel break; 1351536336c2SDominik Dingel case KVM_REG_S390_PFSELECT: 1352536336c2SDominik Dingel r = put_user(vcpu->arch.pfault_select, 1353536336c2SDominik Dingel (u64 __user *)reg->addr); 1354536336c2SDominik Dingel break; 1355672550fbSChristian Borntraeger case KVM_REG_S390_PP: 1356672550fbSChristian Borntraeger r = put_user(vcpu->arch.sie_block->pp, 1357672550fbSChristian Borntraeger (u64 __user *)reg->addr); 1358672550fbSChristian Borntraeger break; 1359afa45ff5SChristian Borntraeger case KVM_REG_S390_GBEA: 1360afa45ff5SChristian Borntraeger r = put_user(vcpu->arch.sie_block->gbea, 1361afa45ff5SChristian Borntraeger (u64 __user *)reg->addr); 1362afa45ff5SChristian Borntraeger break; 136314eebd91SCarsten Otte default: 136414eebd91SCarsten Otte break; 136514eebd91SCarsten Otte } 136614eebd91SCarsten Otte 136714eebd91SCarsten Otte return r; 136814eebd91SCarsten Otte } 136914eebd91SCarsten Otte 137014eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, 137114eebd91SCarsten Otte struct kvm_one_reg *reg) 137214eebd91SCarsten Otte { 137314eebd91SCarsten Otte int r = -EINVAL; 137414eebd91SCarsten Otte 137514eebd91SCarsten Otte switch (reg->id) { 137629b7c71bSCarsten Otte case KVM_REG_S390_TODPR: 137729b7c71bSCarsten Otte r = get_user(vcpu->arch.sie_block->todpr, 137829b7c71bSCarsten Otte (u32 __user *)reg->addr); 137929b7c71bSCarsten Otte break; 138029b7c71bSCarsten Otte case KVM_REG_S390_EPOCHDIFF: 138129b7c71bSCarsten Otte r = get_user(vcpu->arch.sie_block->epoch, 138229b7c71bSCarsten Otte (u64 __user *)reg->addr); 138329b7c71bSCarsten Otte break; 138446a6dd1cSJason J. herne case KVM_REG_S390_CPU_TIMER: 138546a6dd1cSJason J. herne r = get_user(vcpu->arch.sie_block->cputm, 138646a6dd1cSJason J. herne (u64 __user *)reg->addr); 138746a6dd1cSJason J. herne break; 138846a6dd1cSJason J. herne case KVM_REG_S390_CLOCK_COMP: 138946a6dd1cSJason J. herne r = get_user(vcpu->arch.sie_block->ckc, 139046a6dd1cSJason J. herne (u64 __user *)reg->addr); 139146a6dd1cSJason J. herne break; 1392536336c2SDominik Dingel case KVM_REG_S390_PFTOKEN: 1393536336c2SDominik Dingel r = get_user(vcpu->arch.pfault_token, 1394536336c2SDominik Dingel (u64 __user *)reg->addr); 13959fbd8082SDavid Hildenbrand if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) 13969fbd8082SDavid Hildenbrand kvm_clear_async_pf_completion_queue(vcpu); 1397536336c2SDominik Dingel break; 1398536336c2SDominik Dingel case KVM_REG_S390_PFCOMPARE: 1399536336c2SDominik Dingel r = get_user(vcpu->arch.pfault_compare, 1400536336c2SDominik Dingel (u64 __user *)reg->addr); 1401536336c2SDominik Dingel break; 1402536336c2SDominik Dingel case KVM_REG_S390_PFSELECT: 1403536336c2SDominik Dingel r = get_user(vcpu->arch.pfault_select, 1404536336c2SDominik Dingel (u64 __user *)reg->addr); 1405536336c2SDominik Dingel break; 1406672550fbSChristian Borntraeger case KVM_REG_S390_PP: 1407672550fbSChristian Borntraeger r = get_user(vcpu->arch.sie_block->pp, 1408672550fbSChristian Borntraeger (u64 __user *)reg->addr); 1409672550fbSChristian Borntraeger break; 1410afa45ff5SChristian Borntraeger case KVM_REG_S390_GBEA: 1411afa45ff5SChristian Borntraeger r = get_user(vcpu->arch.sie_block->gbea, 1412afa45ff5SChristian Borntraeger (u64 __user *)reg->addr); 1413afa45ff5SChristian Borntraeger break; 141414eebd91SCarsten Otte default: 141514eebd91SCarsten Otte break; 141614eebd91SCarsten Otte } 141714eebd91SCarsten Otte 141814eebd91SCarsten Otte return r; 141914eebd91SCarsten Otte } 1420b6d33834SChristoffer Dall 1421b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu) 1422b0c632dbSHeiko Carstens { 1423b0c632dbSHeiko Carstens kvm_s390_vcpu_initial_reset(vcpu); 1424b0c632dbSHeiko Carstens return 0; 1425b0c632dbSHeiko Carstens } 1426b0c632dbSHeiko Carstens 1427b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 1428b0c632dbSHeiko Carstens { 14295a32c1afSChristian Borntraeger memcpy(&vcpu->run->s.regs.gprs, ®s->gprs, sizeof(regs->gprs)); 1430b0c632dbSHeiko Carstens return 0; 1431b0c632dbSHeiko Carstens } 1432b0c632dbSHeiko Carstens 1433b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 1434b0c632dbSHeiko Carstens { 14355a32c1afSChristian Borntraeger memcpy(®s->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs)); 1436b0c632dbSHeiko Carstens return 0; 1437b0c632dbSHeiko Carstens } 1438b0c632dbSHeiko Carstens 1439b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 1440b0c632dbSHeiko Carstens struct kvm_sregs *sregs) 1441b0c632dbSHeiko Carstens { 144259674c1aSChristian Borntraeger memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs)); 1443b0c632dbSHeiko Carstens memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); 144459674c1aSChristian Borntraeger restore_access_regs(vcpu->run->s.regs.acrs); 1445b0c632dbSHeiko Carstens return 0; 1446b0c632dbSHeiko Carstens } 1447b0c632dbSHeiko Carstens 1448b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, 1449b0c632dbSHeiko Carstens struct kvm_sregs *sregs) 1450b0c632dbSHeiko Carstens { 145159674c1aSChristian Borntraeger memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs)); 1452b0c632dbSHeiko Carstens memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs)); 1453b0c632dbSHeiko Carstens return 0; 1454b0c632dbSHeiko Carstens } 1455b0c632dbSHeiko Carstens 1456b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 1457b0c632dbSHeiko Carstens { 14584725c860SMartin Schwidefsky if (test_fp_ctl(fpu->fpc)) 14594725c860SMartin Schwidefsky return -EINVAL; 1460b0c632dbSHeiko Carstens memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs)); 14614725c860SMartin Schwidefsky vcpu->arch.guest_fpregs.fpc = fpu->fpc; 14624725c860SMartin Schwidefsky restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc); 14634725c860SMartin Schwidefsky restore_fp_regs(vcpu->arch.guest_fpregs.fprs); 1464b0c632dbSHeiko Carstens return 0; 1465b0c632dbSHeiko Carstens } 1466b0c632dbSHeiko Carstens 1467b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 1468b0c632dbSHeiko Carstens { 1469b0c632dbSHeiko Carstens memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs)); 1470b0c632dbSHeiko Carstens fpu->fpc = vcpu->arch.guest_fpregs.fpc; 1471b0c632dbSHeiko Carstens return 0; 1472b0c632dbSHeiko Carstens } 1473b0c632dbSHeiko Carstens 1474b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw) 1475b0c632dbSHeiko Carstens { 1476b0c632dbSHeiko Carstens int rc = 0; 1477b0c632dbSHeiko Carstens 14787a42fdc2SDavid Hildenbrand if (!is_vcpu_stopped(vcpu)) 1479b0c632dbSHeiko Carstens rc = -EBUSY; 1480d7b0b5ebSCarsten Otte else { 1481d7b0b5ebSCarsten Otte vcpu->run->psw_mask = psw.mask; 1482d7b0b5ebSCarsten Otte vcpu->run->psw_addr = psw.addr; 1483d7b0b5ebSCarsten Otte } 1484b0c632dbSHeiko Carstens return rc; 1485b0c632dbSHeiko Carstens } 1486b0c632dbSHeiko Carstens 1487b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, 1488b0c632dbSHeiko Carstens struct kvm_translation *tr) 1489b0c632dbSHeiko Carstens { 1490b0c632dbSHeiko Carstens return -EINVAL; /* not implemented yet */ 1491b0c632dbSHeiko Carstens } 1492b0c632dbSHeiko Carstens 149327291e21SDavid Hildenbrand #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \ 149427291e21SDavid Hildenbrand KVM_GUESTDBG_USE_HW_BP | \ 149527291e21SDavid Hildenbrand KVM_GUESTDBG_ENABLE) 149627291e21SDavid Hildenbrand 1497d0bfb940SJan Kiszka int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 1498d0bfb940SJan Kiszka struct kvm_guest_debug *dbg) 1499b0c632dbSHeiko Carstens { 150027291e21SDavid Hildenbrand int rc = 0; 150127291e21SDavid Hildenbrand 150227291e21SDavid Hildenbrand vcpu->guest_debug = 0; 150327291e21SDavid Hildenbrand kvm_s390_clear_bp_data(vcpu); 150427291e21SDavid Hildenbrand 15052de3bfc2SDavid Hildenbrand if (dbg->control & ~VALID_GUESTDBG_FLAGS) 150627291e21SDavid Hildenbrand return -EINVAL; 150727291e21SDavid Hildenbrand 150827291e21SDavid Hildenbrand if (dbg->control & KVM_GUESTDBG_ENABLE) { 150927291e21SDavid Hildenbrand vcpu->guest_debug = dbg->control; 151027291e21SDavid Hildenbrand /* enforce guest PER */ 151127291e21SDavid Hildenbrand atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); 151227291e21SDavid Hildenbrand 151327291e21SDavid Hildenbrand if (dbg->control & KVM_GUESTDBG_USE_HW_BP) 151427291e21SDavid Hildenbrand rc = kvm_s390_import_bp_data(vcpu, dbg); 151527291e21SDavid Hildenbrand } else { 151627291e21SDavid Hildenbrand atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); 151727291e21SDavid Hildenbrand vcpu->arch.guestdbg.last_bp = 0; 151827291e21SDavid Hildenbrand } 151927291e21SDavid Hildenbrand 152027291e21SDavid Hildenbrand if (rc) { 152127291e21SDavid Hildenbrand vcpu->guest_debug = 0; 152227291e21SDavid Hildenbrand kvm_s390_clear_bp_data(vcpu); 152327291e21SDavid Hildenbrand atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); 152427291e21SDavid Hildenbrand } 152527291e21SDavid Hildenbrand 152627291e21SDavid Hildenbrand return rc; 1527b0c632dbSHeiko Carstens } 1528b0c632dbSHeiko Carstens 152962d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 153062d9f0dbSMarcelo Tosatti struct kvm_mp_state *mp_state) 153162d9f0dbSMarcelo Tosatti { 15326352e4d2SDavid Hildenbrand /* CHECK_STOP and LOAD are not supported yet */ 15336352e4d2SDavid Hildenbrand return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED : 15346352e4d2SDavid Hildenbrand KVM_MP_STATE_OPERATING; 153562d9f0dbSMarcelo Tosatti } 153662d9f0dbSMarcelo Tosatti 153762d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, 153862d9f0dbSMarcelo Tosatti struct kvm_mp_state *mp_state) 153962d9f0dbSMarcelo Tosatti { 15406352e4d2SDavid Hildenbrand int rc = 0; 15416352e4d2SDavid Hildenbrand 15426352e4d2SDavid Hildenbrand /* user space knows about this interface - let it control the state */ 15436352e4d2SDavid Hildenbrand vcpu->kvm->arch.user_cpu_state_ctrl = 1; 15446352e4d2SDavid Hildenbrand 15456352e4d2SDavid Hildenbrand switch (mp_state->mp_state) { 15466352e4d2SDavid Hildenbrand case KVM_MP_STATE_STOPPED: 15476352e4d2SDavid Hildenbrand kvm_s390_vcpu_stop(vcpu); 15486352e4d2SDavid Hildenbrand break; 15496352e4d2SDavid Hildenbrand case KVM_MP_STATE_OPERATING: 15506352e4d2SDavid Hildenbrand kvm_s390_vcpu_start(vcpu); 15516352e4d2SDavid Hildenbrand break; 15526352e4d2SDavid Hildenbrand case KVM_MP_STATE_LOAD: 15536352e4d2SDavid Hildenbrand case KVM_MP_STATE_CHECK_STOP: 15546352e4d2SDavid Hildenbrand /* fall through - CHECK_STOP and LOAD are not supported yet */ 15556352e4d2SDavid Hildenbrand default: 15566352e4d2SDavid Hildenbrand rc = -ENXIO; 15576352e4d2SDavid Hildenbrand } 15586352e4d2SDavid Hildenbrand 15596352e4d2SDavid Hildenbrand return rc; 156062d9f0dbSMarcelo Tosatti } 156162d9f0dbSMarcelo Tosatti 1562b31605c1SDominik Dingel bool kvm_s390_cmma_enabled(struct kvm *kvm) 1563b31605c1SDominik Dingel { 1564b31605c1SDominik Dingel if (!MACHINE_IS_LPAR) 1565b31605c1SDominik Dingel return false; 1566b31605c1SDominik Dingel /* only enable for z10 and later */ 1567b31605c1SDominik Dingel if (!MACHINE_HAS_EDAT1) 1568b31605c1SDominik Dingel return false; 1569b31605c1SDominik Dingel if (!kvm->arch.use_cmma) 1570b31605c1SDominik Dingel return false; 1571b31605c1SDominik Dingel return true; 1572b31605c1SDominik Dingel } 1573b31605c1SDominik Dingel 15748ad35755SDavid Hildenbrand static bool ibs_enabled(struct kvm_vcpu *vcpu) 15758ad35755SDavid Hildenbrand { 15768ad35755SDavid Hildenbrand return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS; 15778ad35755SDavid Hildenbrand } 15788ad35755SDavid Hildenbrand 15792c70fe44SChristian Borntraeger static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) 15802c70fe44SChristian Borntraeger { 15818ad35755SDavid Hildenbrand retry: 15828ad35755SDavid Hildenbrand s390_vcpu_unblock(vcpu); 15832c70fe44SChristian Borntraeger /* 15842c70fe44SChristian Borntraeger * We use MMU_RELOAD just to re-arm the ipte notifier for the 15852c70fe44SChristian Borntraeger * guest prefix page. gmap_ipte_notify will wait on the ptl lock. 15862c70fe44SChristian Borntraeger * This ensures that the ipte instruction for this request has 15872c70fe44SChristian Borntraeger * already finished. We might race against a second unmapper that 15882c70fe44SChristian Borntraeger * wants to set the blocking bit. Lets just retry the request loop. 15892c70fe44SChristian Borntraeger */ 15908ad35755SDavid Hildenbrand if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) { 15912c70fe44SChristian Borntraeger int rc; 15922c70fe44SChristian Borntraeger rc = gmap_ipte_notify(vcpu->arch.gmap, 1593fda902cbSMichael Mueller kvm_s390_get_prefix(vcpu), 15942c70fe44SChristian Borntraeger PAGE_SIZE * 2); 15952c70fe44SChristian Borntraeger if (rc) 15962c70fe44SChristian Borntraeger return rc; 15978ad35755SDavid Hildenbrand goto retry; 15982c70fe44SChristian Borntraeger } 15998ad35755SDavid Hildenbrand 1600d3d692c8SDavid Hildenbrand if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { 1601d3d692c8SDavid Hildenbrand vcpu->arch.sie_block->ihcpu = 0xffff; 1602d3d692c8SDavid Hildenbrand goto retry; 1603d3d692c8SDavid Hildenbrand } 1604d3d692c8SDavid Hildenbrand 16058ad35755SDavid Hildenbrand if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) { 16068ad35755SDavid Hildenbrand if (!ibs_enabled(vcpu)) { 16078ad35755SDavid Hildenbrand trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1); 16088ad35755SDavid Hildenbrand atomic_set_mask(CPUSTAT_IBS, 16098ad35755SDavid Hildenbrand &vcpu->arch.sie_block->cpuflags); 16108ad35755SDavid Hildenbrand } 16118ad35755SDavid Hildenbrand goto retry; 16128ad35755SDavid Hildenbrand } 16138ad35755SDavid Hildenbrand 16148ad35755SDavid Hildenbrand if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) { 16158ad35755SDavid Hildenbrand if (ibs_enabled(vcpu)) { 16168ad35755SDavid Hildenbrand trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0); 16178ad35755SDavid Hildenbrand atomic_clear_mask(CPUSTAT_IBS, 16188ad35755SDavid Hildenbrand &vcpu->arch.sie_block->cpuflags); 16198ad35755SDavid Hildenbrand } 16208ad35755SDavid Hildenbrand goto retry; 16218ad35755SDavid Hildenbrand } 16228ad35755SDavid Hildenbrand 16230759d068SDavid Hildenbrand /* nothing to do, just clear the request */ 16240759d068SDavid Hildenbrand clear_bit(KVM_REQ_UNHALT, &vcpu->requests); 16250759d068SDavid Hildenbrand 16262c70fe44SChristian Borntraeger return 0; 16272c70fe44SChristian Borntraeger } 16282c70fe44SChristian Borntraeger 1629fa576c58SThomas Huth /** 1630fa576c58SThomas Huth * kvm_arch_fault_in_page - fault-in guest page if necessary 1631fa576c58SThomas Huth * @vcpu: The corresponding virtual cpu 1632fa576c58SThomas Huth * @gpa: Guest physical address 1633fa576c58SThomas Huth * @writable: Whether the page should be writable or not 1634fa576c58SThomas Huth * 1635fa576c58SThomas Huth * Make sure that a guest page has been faulted-in on the host. 1636fa576c58SThomas Huth * 1637fa576c58SThomas Huth * Return: Zero on success, negative error code otherwise. 1638fa576c58SThomas Huth */ 1639fa576c58SThomas Huth long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable) 164024eb3a82SDominik Dingel { 1641527e30b4SMartin Schwidefsky return gmap_fault(vcpu->arch.gmap, gpa, 1642527e30b4SMartin Schwidefsky writable ? FAULT_FLAG_WRITE : 0); 164324eb3a82SDominik Dingel } 164424eb3a82SDominik Dingel 16453c038e6bSDominik Dingel static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token, 16463c038e6bSDominik Dingel unsigned long token) 16473c038e6bSDominik Dingel { 16483c038e6bSDominik Dingel struct kvm_s390_interrupt inti; 1649383d0b05SJens Freimann struct kvm_s390_irq irq; 16503c038e6bSDominik Dingel 16513c038e6bSDominik Dingel if (start_token) { 1652383d0b05SJens Freimann irq.u.ext.ext_params2 = token; 1653383d0b05SJens Freimann irq.type = KVM_S390_INT_PFAULT_INIT; 1654383d0b05SJens Freimann WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq)); 16553c038e6bSDominik Dingel } else { 16563c038e6bSDominik Dingel inti.type = KVM_S390_INT_PFAULT_DONE; 1657383d0b05SJens Freimann inti.parm64 = token; 16583c038e6bSDominik Dingel WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); 16593c038e6bSDominik Dingel } 16603c038e6bSDominik Dingel } 16613c038e6bSDominik Dingel 16623c038e6bSDominik Dingel void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, 16633c038e6bSDominik Dingel struct kvm_async_pf *work) 16643c038e6bSDominik Dingel { 16653c038e6bSDominik Dingel trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token); 16663c038e6bSDominik Dingel __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token); 16673c038e6bSDominik Dingel } 16683c038e6bSDominik Dingel 16693c038e6bSDominik Dingel void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, 16703c038e6bSDominik Dingel struct kvm_async_pf *work) 16713c038e6bSDominik Dingel { 16723c038e6bSDominik Dingel trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token); 16733c038e6bSDominik Dingel __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token); 16743c038e6bSDominik Dingel } 16753c038e6bSDominik Dingel 16763c038e6bSDominik Dingel void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, 16773c038e6bSDominik Dingel struct kvm_async_pf *work) 16783c038e6bSDominik Dingel { 16793c038e6bSDominik Dingel /* s390 will always inject the page directly */ 16803c038e6bSDominik Dingel } 16813c038e6bSDominik Dingel 16823c038e6bSDominik Dingel bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) 16833c038e6bSDominik Dingel { 16843c038e6bSDominik Dingel /* 16853c038e6bSDominik Dingel * s390 will always inject the page directly, 16863c038e6bSDominik Dingel * but we still want check_async_completion to cleanup 16873c038e6bSDominik Dingel */ 16883c038e6bSDominik Dingel return true; 16893c038e6bSDominik Dingel } 16903c038e6bSDominik Dingel 16913c038e6bSDominik Dingel static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu) 16923c038e6bSDominik Dingel { 16933c038e6bSDominik Dingel hva_t hva; 16943c038e6bSDominik Dingel struct kvm_arch_async_pf arch; 16953c038e6bSDominik Dingel int rc; 16963c038e6bSDominik Dingel 16973c038e6bSDominik Dingel if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) 16983c038e6bSDominik Dingel return 0; 16993c038e6bSDominik Dingel if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) != 17003c038e6bSDominik Dingel vcpu->arch.pfault_compare) 17013c038e6bSDominik Dingel return 0; 17023c038e6bSDominik Dingel if (psw_extint_disabled(vcpu)) 17033c038e6bSDominik Dingel return 0; 17049a022067SDavid Hildenbrand if (kvm_s390_vcpu_has_irq(vcpu, 0)) 17053c038e6bSDominik Dingel return 0; 17063c038e6bSDominik Dingel if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul)) 17073c038e6bSDominik Dingel return 0; 17083c038e6bSDominik Dingel if (!vcpu->arch.gmap->pfault_enabled) 17093c038e6bSDominik Dingel return 0; 17103c038e6bSDominik Dingel 171181480cc1SHeiko Carstens hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr)); 171281480cc1SHeiko Carstens hva += current->thread.gmap_addr & ~PAGE_MASK; 171381480cc1SHeiko Carstens if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8)) 17143c038e6bSDominik Dingel return 0; 17153c038e6bSDominik Dingel 17163c038e6bSDominik Dingel rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch); 17173c038e6bSDominik Dingel return rc; 17183c038e6bSDominik Dingel } 17193c038e6bSDominik Dingel 17203fb4c40fSThomas Huth static int vcpu_pre_run(struct kvm_vcpu *vcpu) 1721b0c632dbSHeiko Carstens { 17223fb4c40fSThomas Huth int rc, cpuflags; 1723e168bf8dSCarsten Otte 17243c038e6bSDominik Dingel /* 17253c038e6bSDominik Dingel * On s390 notifications for arriving pages will be delivered directly 17263c038e6bSDominik Dingel * to the guest but the house keeping for completed pfaults is 17273c038e6bSDominik Dingel * handled outside the worker. 17283c038e6bSDominik Dingel */ 17293c038e6bSDominik Dingel kvm_check_async_pf_completion(vcpu); 17303c038e6bSDominik Dingel 17315a32c1afSChristian Borntraeger memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16); 1732b0c632dbSHeiko Carstens 1733b0c632dbSHeiko Carstens if (need_resched()) 1734b0c632dbSHeiko Carstens schedule(); 1735b0c632dbSHeiko Carstens 1736d3a73acbSMartin Schwidefsky if (test_cpu_flag(CIF_MCCK_PENDING)) 173771cde587SChristian Borntraeger s390_handle_mcck(); 173871cde587SChristian Borntraeger 173979395031SJens Freimann if (!kvm_is_ucontrol(vcpu->kvm)) { 174079395031SJens Freimann rc = kvm_s390_deliver_pending_interrupts(vcpu); 174179395031SJens Freimann if (rc) 174279395031SJens Freimann return rc; 174379395031SJens Freimann } 17440ff31867SCarsten Otte 17452c70fe44SChristian Borntraeger rc = kvm_s390_handle_requests(vcpu); 17462c70fe44SChristian Borntraeger if (rc) 17472c70fe44SChristian Borntraeger return rc; 17482c70fe44SChristian Borntraeger 174927291e21SDavid Hildenbrand if (guestdbg_enabled(vcpu)) { 175027291e21SDavid Hildenbrand kvm_s390_backup_guest_per_regs(vcpu); 175127291e21SDavid Hildenbrand kvm_s390_patch_guest_per_regs(vcpu); 175227291e21SDavid Hildenbrand } 175327291e21SDavid Hildenbrand 1754b0c632dbSHeiko Carstens vcpu->arch.sie_block->icptcode = 0; 17553fb4c40fSThomas Huth cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); 17563fb4c40fSThomas Huth VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags); 17573fb4c40fSThomas Huth trace_kvm_s390_sie_enter(vcpu, cpuflags); 17582b29a9fdSDominik Dingel 17593fb4c40fSThomas Huth return 0; 17603fb4c40fSThomas Huth } 17613fb4c40fSThomas Huth 1762492d8642SThomas Huth static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu) 1763492d8642SThomas Huth { 1764492d8642SThomas Huth psw_t *psw = &vcpu->arch.sie_block->gpsw; 1765492d8642SThomas Huth u8 opcode; 1766492d8642SThomas Huth int rc; 1767492d8642SThomas Huth 1768492d8642SThomas Huth VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); 1769492d8642SThomas Huth trace_kvm_s390_sie_fault(vcpu); 1770492d8642SThomas Huth 1771492d8642SThomas Huth /* 1772492d8642SThomas Huth * We want to inject an addressing exception, which is defined as a 1773492d8642SThomas Huth * suppressing or terminating exception. However, since we came here 1774492d8642SThomas Huth * by a DAT access exception, the PSW still points to the faulting 1775492d8642SThomas Huth * instruction since DAT exceptions are nullifying. So we've got 1776492d8642SThomas Huth * to look up the current opcode to get the length of the instruction 1777492d8642SThomas Huth * to be able to forward the PSW. 1778492d8642SThomas Huth */ 1779*8ae04b8fSAlexander Yarygin rc = read_guest(vcpu, psw->addr, 0, &opcode, 1); 1780492d8642SThomas Huth if (rc) 1781492d8642SThomas Huth return kvm_s390_inject_prog_cond(vcpu, rc); 1782492d8642SThomas Huth psw->addr = __rewind_psw(*psw, -insn_length(opcode)); 1783492d8642SThomas Huth 1784492d8642SThomas Huth return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 1785492d8642SThomas Huth } 1786492d8642SThomas Huth 17873fb4c40fSThomas Huth static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason) 17883fb4c40fSThomas Huth { 178924eb3a82SDominik Dingel int rc = -1; 17902b29a9fdSDominik Dingel 17912b29a9fdSDominik Dingel VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", 17922b29a9fdSDominik Dingel vcpu->arch.sie_block->icptcode); 17932b29a9fdSDominik Dingel trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); 17942b29a9fdSDominik Dingel 179527291e21SDavid Hildenbrand if (guestdbg_enabled(vcpu)) 179627291e21SDavid Hildenbrand kvm_s390_restore_guest_per_regs(vcpu); 179727291e21SDavid Hildenbrand 17983fb4c40fSThomas Huth if (exit_reason >= 0) { 17997c470539SMartin Schwidefsky rc = 0; 1800210b1607SThomas Huth } else if (kvm_is_ucontrol(vcpu->kvm)) { 1801210b1607SThomas Huth vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL; 1802210b1607SThomas Huth vcpu->run->s390_ucontrol.trans_exc_code = 1803210b1607SThomas Huth current->thread.gmap_addr; 1804210b1607SThomas Huth vcpu->run->s390_ucontrol.pgm_code = 0x10; 1805210b1607SThomas Huth rc = -EREMOTE; 180624eb3a82SDominik Dingel 180724eb3a82SDominik Dingel } else if (current->thread.gmap_pfault) { 18083c038e6bSDominik Dingel trace_kvm_s390_major_guest_pfault(vcpu); 180924eb3a82SDominik Dingel current->thread.gmap_pfault = 0; 1810fa576c58SThomas Huth if (kvm_arch_setup_async_pf(vcpu)) { 181124eb3a82SDominik Dingel rc = 0; 1812fa576c58SThomas Huth } else { 1813fa576c58SThomas Huth gpa_t gpa = current->thread.gmap_addr; 1814fa576c58SThomas Huth rc = kvm_arch_fault_in_page(vcpu, gpa, 1); 1815fa576c58SThomas Huth } 181624eb3a82SDominik Dingel } 181724eb3a82SDominik Dingel 1818492d8642SThomas Huth if (rc == -1) 1819492d8642SThomas Huth rc = vcpu_post_run_fault_in_sie(vcpu); 1820b0c632dbSHeiko Carstens 18215a32c1afSChristian Borntraeger memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); 18223fb4c40fSThomas Huth 1823a76ccff6SThomas Huth if (rc == 0) { 1824a76ccff6SThomas Huth if (kvm_is_ucontrol(vcpu->kvm)) 18252955c83fSChristian Borntraeger /* Don't exit for host interrupts. */ 18262955c83fSChristian Borntraeger rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0; 1827a76ccff6SThomas Huth else 1828a76ccff6SThomas Huth rc = kvm_handle_sie_intercept(vcpu); 1829a76ccff6SThomas Huth } 1830a76ccff6SThomas Huth 18313fb4c40fSThomas Huth return rc; 18323fb4c40fSThomas Huth } 18333fb4c40fSThomas Huth 18343fb4c40fSThomas Huth static int __vcpu_run(struct kvm_vcpu *vcpu) 18353fb4c40fSThomas Huth { 18363fb4c40fSThomas Huth int rc, exit_reason; 18373fb4c40fSThomas Huth 1838800c1065SThomas Huth /* 1839800c1065SThomas Huth * We try to hold kvm->srcu during most of vcpu_run (except when run- 1840800c1065SThomas Huth * ning the guest), so that memslots (and other stuff) are protected 1841800c1065SThomas Huth */ 1842800c1065SThomas Huth vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 1843800c1065SThomas Huth 1844a76ccff6SThomas Huth do { 18453fb4c40fSThomas Huth rc = vcpu_pre_run(vcpu); 18463fb4c40fSThomas Huth if (rc) 1847a76ccff6SThomas Huth break; 18483fb4c40fSThomas Huth 1849800c1065SThomas Huth srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 18503fb4c40fSThomas Huth /* 1851a76ccff6SThomas Huth * As PF_VCPU will be used in fault handler, between 1852a76ccff6SThomas Huth * guest_enter and guest_exit should be no uaccess. 18533fb4c40fSThomas Huth */ 18543fb4c40fSThomas Huth preempt_disable(); 18553fb4c40fSThomas Huth kvm_guest_enter(); 18563fb4c40fSThomas Huth preempt_enable(); 1857a76ccff6SThomas Huth exit_reason = sie64a(vcpu->arch.sie_block, 1858a76ccff6SThomas Huth vcpu->run->s.regs.gprs); 18593fb4c40fSThomas Huth kvm_guest_exit(); 1860800c1065SThomas Huth vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 18613fb4c40fSThomas Huth 18623fb4c40fSThomas Huth rc = vcpu_post_run(vcpu, exit_reason); 186327291e21SDavid Hildenbrand } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc); 18643fb4c40fSThomas Huth 1865800c1065SThomas Huth srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 1866e168bf8dSCarsten Otte return rc; 1867b0c632dbSHeiko Carstens } 1868b0c632dbSHeiko Carstens 1869b028ee3eSDavid Hildenbrand static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1870b028ee3eSDavid Hildenbrand { 1871b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; 1872b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; 1873b028ee3eSDavid Hildenbrand if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) 1874b028ee3eSDavid Hildenbrand kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix); 1875b028ee3eSDavid Hildenbrand if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) { 1876b028ee3eSDavid Hildenbrand memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128); 1877d3d692c8SDavid Hildenbrand /* some control register changes require a tlb flush */ 1878d3d692c8SDavid Hildenbrand kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 1879b028ee3eSDavid Hildenbrand } 1880b028ee3eSDavid Hildenbrand if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) { 1881b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm; 1882b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc; 1883b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr; 1884b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->pp = kvm_run->s.regs.pp; 1885b028ee3eSDavid Hildenbrand vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea; 1886b028ee3eSDavid Hildenbrand } 1887b028ee3eSDavid Hildenbrand if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) { 1888b028ee3eSDavid Hildenbrand vcpu->arch.pfault_token = kvm_run->s.regs.pft; 1889b028ee3eSDavid Hildenbrand vcpu->arch.pfault_select = kvm_run->s.regs.pfs; 1890b028ee3eSDavid Hildenbrand vcpu->arch.pfault_compare = kvm_run->s.regs.pfc; 18919fbd8082SDavid Hildenbrand if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) 18929fbd8082SDavid Hildenbrand kvm_clear_async_pf_completion_queue(vcpu); 1893b028ee3eSDavid Hildenbrand } 1894b028ee3eSDavid Hildenbrand kvm_run->kvm_dirty_regs = 0; 1895b028ee3eSDavid Hildenbrand } 1896b028ee3eSDavid Hildenbrand 1897b028ee3eSDavid Hildenbrand static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1898b028ee3eSDavid Hildenbrand { 1899b028ee3eSDavid Hildenbrand kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; 1900b028ee3eSDavid Hildenbrand kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; 1901b028ee3eSDavid Hildenbrand kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu); 1902b028ee3eSDavid Hildenbrand memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); 1903b028ee3eSDavid Hildenbrand kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm; 1904b028ee3eSDavid Hildenbrand kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc; 1905b028ee3eSDavid Hildenbrand kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr; 1906b028ee3eSDavid Hildenbrand kvm_run->s.regs.pp = vcpu->arch.sie_block->pp; 1907b028ee3eSDavid Hildenbrand kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea; 1908b028ee3eSDavid Hildenbrand kvm_run->s.regs.pft = vcpu->arch.pfault_token; 1909b028ee3eSDavid Hildenbrand kvm_run->s.regs.pfs = vcpu->arch.pfault_select; 1910b028ee3eSDavid Hildenbrand kvm_run->s.regs.pfc = vcpu->arch.pfault_compare; 1911b028ee3eSDavid Hildenbrand } 1912b028ee3eSDavid Hildenbrand 1913b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 1914b0c632dbSHeiko Carstens { 19158f2abe6aSChristian Borntraeger int rc; 1916b0c632dbSHeiko Carstens sigset_t sigsaved; 1917b0c632dbSHeiko Carstens 191827291e21SDavid Hildenbrand if (guestdbg_exit_pending(vcpu)) { 191927291e21SDavid Hildenbrand kvm_s390_prepare_debug_exit(vcpu); 192027291e21SDavid Hildenbrand return 0; 192127291e21SDavid Hildenbrand } 192227291e21SDavid Hildenbrand 1923b0c632dbSHeiko Carstens if (vcpu->sigset_active) 1924b0c632dbSHeiko Carstens sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); 1925b0c632dbSHeiko Carstens 19266352e4d2SDavid Hildenbrand if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { 19276852d7b6SDavid Hildenbrand kvm_s390_vcpu_start(vcpu); 19286352e4d2SDavid Hildenbrand } else if (is_vcpu_stopped(vcpu)) { 19296352e4d2SDavid Hildenbrand pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n", 19306352e4d2SDavid Hildenbrand vcpu->vcpu_id); 19316352e4d2SDavid Hildenbrand return -EINVAL; 19326352e4d2SDavid Hildenbrand } 1933b0c632dbSHeiko Carstens 1934b028ee3eSDavid Hildenbrand sync_regs(vcpu, kvm_run); 1935d7b0b5ebSCarsten Otte 1936dab4079dSHeiko Carstens might_fault(); 1937e168bf8dSCarsten Otte rc = __vcpu_run(vcpu); 19389ace903dSChristian Ehrhardt 1939b1d16c49SChristian Ehrhardt if (signal_pending(current) && !rc) { 1940b1d16c49SChristian Ehrhardt kvm_run->exit_reason = KVM_EXIT_INTR; 19418f2abe6aSChristian Borntraeger rc = -EINTR; 1942b1d16c49SChristian Ehrhardt } 19438f2abe6aSChristian Borntraeger 194427291e21SDavid Hildenbrand if (guestdbg_exit_pending(vcpu) && !rc) { 194527291e21SDavid Hildenbrand kvm_s390_prepare_debug_exit(vcpu); 194627291e21SDavid Hildenbrand rc = 0; 194727291e21SDavid Hildenbrand } 194827291e21SDavid Hildenbrand 1949b8e660b8SHeiko Carstens if (rc == -EOPNOTSUPP) { 19508f2abe6aSChristian Borntraeger /* intercept cannot be handled in-kernel, prepare kvm-run */ 19518f2abe6aSChristian Borntraeger kvm_run->exit_reason = KVM_EXIT_S390_SIEIC; 19528f2abe6aSChristian Borntraeger kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; 19538f2abe6aSChristian Borntraeger kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; 19548f2abe6aSChristian Borntraeger kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; 19558f2abe6aSChristian Borntraeger rc = 0; 19568f2abe6aSChristian Borntraeger } 19578f2abe6aSChristian Borntraeger 19588f2abe6aSChristian Borntraeger if (rc == -EREMOTE) { 19598f2abe6aSChristian Borntraeger /* intercept was handled, but userspace support is needed 19608f2abe6aSChristian Borntraeger * kvm_run has been prepared by the handler */ 19618f2abe6aSChristian Borntraeger rc = 0; 19628f2abe6aSChristian Borntraeger } 19638f2abe6aSChristian Borntraeger 1964b028ee3eSDavid Hildenbrand store_regs(vcpu, kvm_run); 1965d7b0b5ebSCarsten Otte 1966b0c632dbSHeiko Carstens if (vcpu->sigset_active) 1967b0c632dbSHeiko Carstens sigprocmask(SIG_SETMASK, &sigsaved, NULL); 1968b0c632dbSHeiko Carstens 1969b0c632dbSHeiko Carstens vcpu->stat.exit_userspace++; 19707e8e6ab4SHeiko Carstens return rc; 1971b0c632dbSHeiko Carstens } 1972b0c632dbSHeiko Carstens 1973b0c632dbSHeiko Carstens /* 1974b0c632dbSHeiko Carstens * store status at address 1975b0c632dbSHeiko Carstens * we use have two special cases: 1976b0c632dbSHeiko Carstens * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit 1977b0c632dbSHeiko Carstens * KVM_S390_STORE_STATUS_PREFIXED: -> prefix 1978b0c632dbSHeiko Carstens */ 1979d0bce605SHeiko Carstens int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa) 1980b0c632dbSHeiko Carstens { 1981092670cdSCarsten Otte unsigned char archmode = 1; 1982fda902cbSMichael Mueller unsigned int px; 1983178bd789SThomas Huth u64 clkcomp; 1984d0bce605SHeiko Carstens int rc; 1985b0c632dbSHeiko Carstens 1986d0bce605SHeiko Carstens if (gpa == KVM_S390_STORE_STATUS_NOADDR) { 1987d0bce605SHeiko Carstens if (write_guest_abs(vcpu, 163, &archmode, 1)) 1988b0c632dbSHeiko Carstens return -EFAULT; 1989d0bce605SHeiko Carstens gpa = SAVE_AREA_BASE; 1990d0bce605SHeiko Carstens } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) { 1991d0bce605SHeiko Carstens if (write_guest_real(vcpu, 163, &archmode, 1)) 1992b0c632dbSHeiko Carstens return -EFAULT; 1993d0bce605SHeiko Carstens gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE); 1994d0bce605SHeiko Carstens } 1995d0bce605SHeiko Carstens rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs), 1996d0bce605SHeiko Carstens vcpu->arch.guest_fpregs.fprs, 128); 1997d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs), 1998d0bce605SHeiko Carstens vcpu->run->s.regs.gprs, 128); 1999d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw), 2000d0bce605SHeiko Carstens &vcpu->arch.sie_block->gpsw, 16); 2001fda902cbSMichael Mueller px = kvm_s390_get_prefix(vcpu); 2002d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg), 2003fda902cbSMichael Mueller &px, 4); 2004d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, 2005d0bce605SHeiko Carstens gpa + offsetof(struct save_area, fp_ctrl_reg), 2006d0bce605SHeiko Carstens &vcpu->arch.guest_fpregs.fpc, 4); 2007d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg), 2008d0bce605SHeiko Carstens &vcpu->arch.sie_block->todpr, 4); 2009d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer), 2010d0bce605SHeiko Carstens &vcpu->arch.sie_block->cputm, 8); 2011178bd789SThomas Huth clkcomp = vcpu->arch.sie_block->ckc >> 8; 2012d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp), 2013d0bce605SHeiko Carstens &clkcomp, 8); 2014d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs), 2015d0bce605SHeiko Carstens &vcpu->run->s.regs.acrs, 64); 2016d0bce605SHeiko Carstens rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs), 2017d0bce605SHeiko Carstens &vcpu->arch.sie_block->gcr, 128); 2018d0bce605SHeiko Carstens return rc ? -EFAULT : 0; 2019b0c632dbSHeiko Carstens } 2020b0c632dbSHeiko Carstens 2021e879892cSThomas Huth int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) 2022e879892cSThomas Huth { 2023e879892cSThomas Huth /* 2024e879892cSThomas Huth * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy 2025e879892cSThomas Huth * copying in vcpu load/put. Lets update our copies before we save 2026e879892cSThomas Huth * it into the save area 2027e879892cSThomas Huth */ 2028e879892cSThomas Huth save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); 2029e879892cSThomas Huth save_fp_regs(vcpu->arch.guest_fpregs.fprs); 2030e879892cSThomas Huth save_access_regs(vcpu->run->s.regs.acrs); 2031e879892cSThomas Huth 2032e879892cSThomas Huth return kvm_s390_store_status_unloaded(vcpu, addr); 2033e879892cSThomas Huth } 2034e879892cSThomas Huth 2035bc17de7cSEric Farman /* 2036bc17de7cSEric Farman * store additional status at address 2037bc17de7cSEric Farman */ 2038bc17de7cSEric Farman int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu, 2039bc17de7cSEric Farman unsigned long gpa) 2040bc17de7cSEric Farman { 2041bc17de7cSEric Farman /* Only bits 0-53 are used for address formation */ 2042bc17de7cSEric Farman if (!(gpa & ~0x3ff)) 2043bc17de7cSEric Farman return 0; 2044bc17de7cSEric Farman 2045bc17de7cSEric Farman return write_guest_abs(vcpu, gpa & ~0x3ff, 2046bc17de7cSEric Farman (void *)&vcpu->run->s.regs.vrs, 512); 2047bc17de7cSEric Farman } 2048bc17de7cSEric Farman 2049bc17de7cSEric Farman int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr) 2050bc17de7cSEric Farman { 2051bc17de7cSEric Farman if (!test_kvm_facility(vcpu->kvm, 129)) 2052bc17de7cSEric Farman return 0; 2053bc17de7cSEric Farman 2054bc17de7cSEric Farman /* 2055bc17de7cSEric Farman * The guest VXRS are in the host VXRs due to the lazy 2056bc17de7cSEric Farman * copying in vcpu load/put. Let's update our copies before we save 2057bc17de7cSEric Farman * it into the save area. 2058bc17de7cSEric Farman */ 2059bc17de7cSEric Farman save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); 2060bc17de7cSEric Farman 2061bc17de7cSEric Farman return kvm_s390_store_adtl_status_unloaded(vcpu, addr); 2062bc17de7cSEric Farman } 2063bc17de7cSEric Farman 20648ad35755SDavid Hildenbrand static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) 20658ad35755SDavid Hildenbrand { 20668ad35755SDavid Hildenbrand kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu); 20678ad35755SDavid Hildenbrand kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu); 20688ad35755SDavid Hildenbrand exit_sie_sync(vcpu); 20698ad35755SDavid Hildenbrand } 20708ad35755SDavid Hildenbrand 20718ad35755SDavid Hildenbrand static void __disable_ibs_on_all_vcpus(struct kvm *kvm) 20728ad35755SDavid Hildenbrand { 20738ad35755SDavid Hildenbrand unsigned int i; 20748ad35755SDavid Hildenbrand struct kvm_vcpu *vcpu; 20758ad35755SDavid Hildenbrand 20768ad35755SDavid Hildenbrand kvm_for_each_vcpu(i, vcpu, kvm) { 20778ad35755SDavid Hildenbrand __disable_ibs_on_vcpu(vcpu); 20788ad35755SDavid Hildenbrand } 20798ad35755SDavid Hildenbrand } 20808ad35755SDavid Hildenbrand 20818ad35755SDavid Hildenbrand static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu) 20828ad35755SDavid Hildenbrand { 20838ad35755SDavid Hildenbrand kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu); 20848ad35755SDavid Hildenbrand kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu); 20858ad35755SDavid Hildenbrand exit_sie_sync(vcpu); 20868ad35755SDavid Hildenbrand } 20878ad35755SDavid Hildenbrand 20886852d7b6SDavid Hildenbrand void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) 20896852d7b6SDavid Hildenbrand { 20908ad35755SDavid Hildenbrand int i, online_vcpus, started_vcpus = 0; 20918ad35755SDavid Hildenbrand 20928ad35755SDavid Hildenbrand if (!is_vcpu_stopped(vcpu)) 20938ad35755SDavid Hildenbrand return; 20948ad35755SDavid Hildenbrand 20956852d7b6SDavid Hildenbrand trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1); 20968ad35755SDavid Hildenbrand /* Only one cpu at a time may enter/leave the STOPPED state. */ 2097433b9ee4SDavid Hildenbrand spin_lock(&vcpu->kvm->arch.start_stop_lock); 20988ad35755SDavid Hildenbrand online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); 20998ad35755SDavid Hildenbrand 21008ad35755SDavid Hildenbrand for (i = 0; i < online_vcpus; i++) { 21018ad35755SDavid Hildenbrand if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) 21028ad35755SDavid Hildenbrand started_vcpus++; 21038ad35755SDavid Hildenbrand } 21048ad35755SDavid Hildenbrand 21058ad35755SDavid Hildenbrand if (started_vcpus == 0) { 21068ad35755SDavid Hildenbrand /* we're the only active VCPU -> speed it up */ 21078ad35755SDavid Hildenbrand __enable_ibs_on_vcpu(vcpu); 21088ad35755SDavid Hildenbrand } else if (started_vcpus == 1) { 21098ad35755SDavid Hildenbrand /* 21108ad35755SDavid Hildenbrand * As we are starting a second VCPU, we have to disable 21118ad35755SDavid Hildenbrand * the IBS facility on all VCPUs to remove potentially 21128ad35755SDavid Hildenbrand * oustanding ENABLE requests. 21138ad35755SDavid Hildenbrand */ 21148ad35755SDavid Hildenbrand __disable_ibs_on_all_vcpus(vcpu->kvm); 21158ad35755SDavid Hildenbrand } 21168ad35755SDavid Hildenbrand 21176852d7b6SDavid Hildenbrand atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); 21188ad35755SDavid Hildenbrand /* 21198ad35755SDavid Hildenbrand * Another VCPU might have used IBS while we were offline. 21208ad35755SDavid Hildenbrand * Let's play safe and flush the VCPU at startup. 21218ad35755SDavid Hildenbrand */ 2122d3d692c8SDavid Hildenbrand kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 2123433b9ee4SDavid Hildenbrand spin_unlock(&vcpu->kvm->arch.start_stop_lock); 21248ad35755SDavid Hildenbrand return; 21256852d7b6SDavid Hildenbrand } 21266852d7b6SDavid Hildenbrand 21276852d7b6SDavid Hildenbrand void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) 21286852d7b6SDavid Hildenbrand { 21298ad35755SDavid Hildenbrand int i, online_vcpus, started_vcpus = 0; 21308ad35755SDavid Hildenbrand struct kvm_vcpu *started_vcpu = NULL; 21318ad35755SDavid Hildenbrand 21328ad35755SDavid Hildenbrand if (is_vcpu_stopped(vcpu)) 21338ad35755SDavid Hildenbrand return; 21348ad35755SDavid Hildenbrand 21356852d7b6SDavid Hildenbrand trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0); 21368ad35755SDavid Hildenbrand /* Only one cpu at a time may enter/leave the STOPPED state. */ 2137433b9ee4SDavid Hildenbrand spin_lock(&vcpu->kvm->arch.start_stop_lock); 21388ad35755SDavid Hildenbrand online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); 21398ad35755SDavid Hildenbrand 214032f5ff63SDavid Hildenbrand /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */ 21416cddd432SDavid Hildenbrand kvm_s390_clear_stop_irq(vcpu); 214232f5ff63SDavid Hildenbrand 21436cddd432SDavid Hildenbrand atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); 21448ad35755SDavid Hildenbrand __disable_ibs_on_vcpu(vcpu); 21458ad35755SDavid Hildenbrand 21468ad35755SDavid Hildenbrand for (i = 0; i < online_vcpus; i++) { 21478ad35755SDavid Hildenbrand if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) { 21488ad35755SDavid Hildenbrand started_vcpus++; 21498ad35755SDavid Hildenbrand started_vcpu = vcpu->kvm->vcpus[i]; 21508ad35755SDavid Hildenbrand } 21518ad35755SDavid Hildenbrand } 21528ad35755SDavid Hildenbrand 21538ad35755SDavid Hildenbrand if (started_vcpus == 1) { 21548ad35755SDavid Hildenbrand /* 21558ad35755SDavid Hildenbrand * As we only have one VCPU left, we want to enable the 21568ad35755SDavid Hildenbrand * IBS facility for that VCPU to speed it up. 21578ad35755SDavid Hildenbrand */ 21588ad35755SDavid Hildenbrand __enable_ibs_on_vcpu(started_vcpu); 21598ad35755SDavid Hildenbrand } 21608ad35755SDavid Hildenbrand 2161433b9ee4SDavid Hildenbrand spin_unlock(&vcpu->kvm->arch.start_stop_lock); 21628ad35755SDavid Hildenbrand return; 21636852d7b6SDavid Hildenbrand } 21646852d7b6SDavid Hildenbrand 2165d6712df9SCornelia Huck static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, 2166d6712df9SCornelia Huck struct kvm_enable_cap *cap) 2167d6712df9SCornelia Huck { 2168d6712df9SCornelia Huck int r; 2169d6712df9SCornelia Huck 2170d6712df9SCornelia Huck if (cap->flags) 2171d6712df9SCornelia Huck return -EINVAL; 2172d6712df9SCornelia Huck 2173d6712df9SCornelia Huck switch (cap->cap) { 2174fa6b7fe9SCornelia Huck case KVM_CAP_S390_CSS_SUPPORT: 2175fa6b7fe9SCornelia Huck if (!vcpu->kvm->arch.css_support) { 2176fa6b7fe9SCornelia Huck vcpu->kvm->arch.css_support = 1; 2177fa6b7fe9SCornelia Huck trace_kvm_s390_enable_css(vcpu->kvm); 2178fa6b7fe9SCornelia Huck } 2179fa6b7fe9SCornelia Huck r = 0; 2180fa6b7fe9SCornelia Huck break; 2181d6712df9SCornelia Huck default: 2182d6712df9SCornelia Huck r = -EINVAL; 2183d6712df9SCornelia Huck break; 2184d6712df9SCornelia Huck } 2185d6712df9SCornelia Huck return r; 2186d6712df9SCornelia Huck } 2187d6712df9SCornelia Huck 2188b0c632dbSHeiko Carstens long kvm_arch_vcpu_ioctl(struct file *filp, 2189b0c632dbSHeiko Carstens unsigned int ioctl, unsigned long arg) 2190b0c632dbSHeiko Carstens { 2191b0c632dbSHeiko Carstens struct kvm_vcpu *vcpu = filp->private_data; 2192b0c632dbSHeiko Carstens void __user *argp = (void __user *)arg; 2193800c1065SThomas Huth int idx; 2194bc923cc9SAvi Kivity long r; 2195b0c632dbSHeiko Carstens 219693736624SAvi Kivity switch (ioctl) { 219793736624SAvi Kivity case KVM_S390_INTERRUPT: { 2198ba5c1e9bSCarsten Otte struct kvm_s390_interrupt s390int; 2199383d0b05SJens Freimann struct kvm_s390_irq s390irq; 2200ba5c1e9bSCarsten Otte 220193736624SAvi Kivity r = -EFAULT; 2202ba5c1e9bSCarsten Otte if (copy_from_user(&s390int, argp, sizeof(s390int))) 220393736624SAvi Kivity break; 2204383d0b05SJens Freimann if (s390int_to_s390irq(&s390int, &s390irq)) 2205383d0b05SJens Freimann return -EINVAL; 2206383d0b05SJens Freimann r = kvm_s390_inject_vcpu(vcpu, &s390irq); 220793736624SAvi Kivity break; 2208ba5c1e9bSCarsten Otte } 2209b0c632dbSHeiko Carstens case KVM_S390_STORE_STATUS: 2210800c1065SThomas Huth idx = srcu_read_lock(&vcpu->kvm->srcu); 2211bc923cc9SAvi Kivity r = kvm_s390_vcpu_store_status(vcpu, arg); 2212800c1065SThomas Huth srcu_read_unlock(&vcpu->kvm->srcu, idx); 2213bc923cc9SAvi Kivity break; 2214b0c632dbSHeiko Carstens case KVM_S390_SET_INITIAL_PSW: { 2215b0c632dbSHeiko Carstens psw_t psw; 2216b0c632dbSHeiko Carstens 2217bc923cc9SAvi Kivity r = -EFAULT; 2218b0c632dbSHeiko Carstens if (copy_from_user(&psw, argp, sizeof(psw))) 2219bc923cc9SAvi Kivity break; 2220bc923cc9SAvi Kivity r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw); 2221bc923cc9SAvi Kivity break; 2222b0c632dbSHeiko Carstens } 2223b0c632dbSHeiko Carstens case KVM_S390_INITIAL_RESET: 2224bc923cc9SAvi Kivity r = kvm_arch_vcpu_ioctl_initial_reset(vcpu); 2225bc923cc9SAvi Kivity break; 222614eebd91SCarsten Otte case KVM_SET_ONE_REG: 222714eebd91SCarsten Otte case KVM_GET_ONE_REG: { 222814eebd91SCarsten Otte struct kvm_one_reg reg; 222914eebd91SCarsten Otte r = -EFAULT; 223014eebd91SCarsten Otte if (copy_from_user(®, argp, sizeof(reg))) 223114eebd91SCarsten Otte break; 223214eebd91SCarsten Otte if (ioctl == KVM_SET_ONE_REG) 223314eebd91SCarsten Otte r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, ®); 223414eebd91SCarsten Otte else 223514eebd91SCarsten Otte r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, ®); 223614eebd91SCarsten Otte break; 223714eebd91SCarsten Otte } 223827e0393fSCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL 223927e0393fSCarsten Otte case KVM_S390_UCAS_MAP: { 224027e0393fSCarsten Otte struct kvm_s390_ucas_mapping ucasmap; 224127e0393fSCarsten Otte 224227e0393fSCarsten Otte if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) { 224327e0393fSCarsten Otte r = -EFAULT; 224427e0393fSCarsten Otte break; 224527e0393fSCarsten Otte } 224627e0393fSCarsten Otte 224727e0393fSCarsten Otte if (!kvm_is_ucontrol(vcpu->kvm)) { 224827e0393fSCarsten Otte r = -EINVAL; 224927e0393fSCarsten Otte break; 225027e0393fSCarsten Otte } 225127e0393fSCarsten Otte 225227e0393fSCarsten Otte r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr, 225327e0393fSCarsten Otte ucasmap.vcpu_addr, ucasmap.length); 225427e0393fSCarsten Otte break; 225527e0393fSCarsten Otte } 225627e0393fSCarsten Otte case KVM_S390_UCAS_UNMAP: { 225727e0393fSCarsten Otte struct kvm_s390_ucas_mapping ucasmap; 225827e0393fSCarsten Otte 225927e0393fSCarsten Otte if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) { 226027e0393fSCarsten Otte r = -EFAULT; 226127e0393fSCarsten Otte break; 226227e0393fSCarsten Otte } 226327e0393fSCarsten Otte 226427e0393fSCarsten Otte if (!kvm_is_ucontrol(vcpu->kvm)) { 226527e0393fSCarsten Otte r = -EINVAL; 226627e0393fSCarsten Otte break; 226727e0393fSCarsten Otte } 226827e0393fSCarsten Otte 226927e0393fSCarsten Otte r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr, 227027e0393fSCarsten Otte ucasmap.length); 227127e0393fSCarsten Otte break; 227227e0393fSCarsten Otte } 227327e0393fSCarsten Otte #endif 2274ccc7910fSCarsten Otte case KVM_S390_VCPU_FAULT: { 2275527e30b4SMartin Schwidefsky r = gmap_fault(vcpu->arch.gmap, arg, 0); 2276ccc7910fSCarsten Otte break; 2277ccc7910fSCarsten Otte } 2278d6712df9SCornelia Huck case KVM_ENABLE_CAP: 2279d6712df9SCornelia Huck { 2280d6712df9SCornelia Huck struct kvm_enable_cap cap; 2281d6712df9SCornelia Huck r = -EFAULT; 2282d6712df9SCornelia Huck if (copy_from_user(&cap, argp, sizeof(cap))) 2283d6712df9SCornelia Huck break; 2284d6712df9SCornelia Huck r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); 2285d6712df9SCornelia Huck break; 2286d6712df9SCornelia Huck } 2287b0c632dbSHeiko Carstens default: 22883e6afcf1SCarsten Otte r = -ENOTTY; 2289b0c632dbSHeiko Carstens } 2290bc923cc9SAvi Kivity return r; 2291b0c632dbSHeiko Carstens } 2292b0c632dbSHeiko Carstens 22935b1c1493SCarsten Otte int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) 22945b1c1493SCarsten Otte { 22955b1c1493SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL 22965b1c1493SCarsten Otte if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET) 22975b1c1493SCarsten Otte && (kvm_is_ucontrol(vcpu->kvm))) { 22985b1c1493SCarsten Otte vmf->page = virt_to_page(vcpu->arch.sie_block); 22995b1c1493SCarsten Otte get_page(vmf->page); 23005b1c1493SCarsten Otte return 0; 23015b1c1493SCarsten Otte } 23025b1c1493SCarsten Otte #endif 23035b1c1493SCarsten Otte return VM_FAULT_SIGBUS; 23045b1c1493SCarsten Otte } 23055b1c1493SCarsten Otte 23065587027cSAneesh Kumar K.V int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, 23075587027cSAneesh Kumar K.V unsigned long npages) 2308db3fe4ebSTakuya Yoshikawa { 2309db3fe4ebSTakuya Yoshikawa return 0; 2310db3fe4ebSTakuya Yoshikawa } 2311db3fe4ebSTakuya Yoshikawa 2312b0c632dbSHeiko Carstens /* Section: memory related */ 2313f7784b8eSMarcelo Tosatti int kvm_arch_prepare_memory_region(struct kvm *kvm, 2314f7784b8eSMarcelo Tosatti struct kvm_memory_slot *memslot, 23157b6195a9STakuya Yoshikawa struct kvm_userspace_memory_region *mem, 23167b6195a9STakuya Yoshikawa enum kvm_mr_change change) 2317b0c632dbSHeiko Carstens { 2318dd2887e7SNick Wang /* A few sanity checks. We can have memory slots which have to be 2319dd2887e7SNick Wang located/ended at a segment boundary (1MB). The memory in userland is 2320dd2887e7SNick Wang ok to be fragmented into various different vmas. It is okay to mmap() 2321dd2887e7SNick Wang and munmap() stuff in this slot after doing this call at any time */ 2322b0c632dbSHeiko Carstens 2323598841caSCarsten Otte if (mem->userspace_addr & 0xffffful) 2324b0c632dbSHeiko Carstens return -EINVAL; 2325b0c632dbSHeiko Carstens 2326598841caSCarsten Otte if (mem->memory_size & 0xffffful) 2327b0c632dbSHeiko Carstens return -EINVAL; 2328b0c632dbSHeiko Carstens 2329f7784b8eSMarcelo Tosatti return 0; 2330f7784b8eSMarcelo Tosatti } 2331f7784b8eSMarcelo Tosatti 2332f7784b8eSMarcelo Tosatti void kvm_arch_commit_memory_region(struct kvm *kvm, 2333f7784b8eSMarcelo Tosatti struct kvm_userspace_memory_region *mem, 23348482644aSTakuya Yoshikawa const struct kvm_memory_slot *old, 23358482644aSTakuya Yoshikawa enum kvm_mr_change change) 2336f7784b8eSMarcelo Tosatti { 2337f7850c92SCarsten Otte int rc; 2338f7784b8eSMarcelo Tosatti 23392cef4debSChristian Borntraeger /* If the basics of the memslot do not change, we do not want 23402cef4debSChristian Borntraeger * to update the gmap. Every update causes several unnecessary 23412cef4debSChristian Borntraeger * segment translation exceptions. This is usually handled just 23422cef4debSChristian Borntraeger * fine by the normal fault handler + gmap, but it will also 23432cef4debSChristian Borntraeger * cause faults on the prefix page of running guest CPUs. 23442cef4debSChristian Borntraeger */ 23452cef4debSChristian Borntraeger if (old->userspace_addr == mem->userspace_addr && 23462cef4debSChristian Borntraeger old->base_gfn * PAGE_SIZE == mem->guest_phys_addr && 23472cef4debSChristian Borntraeger old->npages * PAGE_SIZE == mem->memory_size) 23482cef4debSChristian Borntraeger return; 2349598841caSCarsten Otte 2350598841caSCarsten Otte rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, 2351598841caSCarsten Otte mem->guest_phys_addr, mem->memory_size); 2352598841caSCarsten Otte if (rc) 2353f7850c92SCarsten Otte printk(KERN_WARNING "kvm-s390: failed to commit memory region\n"); 2354598841caSCarsten Otte return; 2355b0c632dbSHeiko Carstens } 2356b0c632dbSHeiko Carstens 2357b0c632dbSHeiko Carstens static int __init kvm_s390_init(void) 2358b0c632dbSHeiko Carstens { 23599d8d5786SMichael Mueller return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); 2360b0c632dbSHeiko Carstens } 2361b0c632dbSHeiko Carstens 2362b0c632dbSHeiko Carstens static void __exit kvm_s390_exit(void) 2363b0c632dbSHeiko Carstens { 2364b0c632dbSHeiko Carstens kvm_exit(); 2365b0c632dbSHeiko Carstens } 2366b0c632dbSHeiko Carstens 2367b0c632dbSHeiko Carstens module_init(kvm_s390_init); 2368b0c632dbSHeiko Carstens module_exit(kvm_s390_exit); 2369566af940SCornelia Huck 2370566af940SCornelia Huck /* 2371566af940SCornelia Huck * Enable autoloading of the kvm module. 2372566af940SCornelia Huck * Note that we add the module alias here instead of virt/kvm/kvm_main.c 2373566af940SCornelia Huck * since x86 takes a different approach. 2374566af940SCornelia Huck */ 2375566af940SCornelia Huck #include <linux/miscdevice.h> 2376566af940SCornelia Huck MODULE_ALIAS_MISCDEV(KVM_MINOR); 2377566af940SCornelia Huck MODULE_ALIAS("devname:kvm"); 2378