1b0c632dbSHeiko Carstens /* 2a53c8fabSHeiko Carstens * hosting zSeries kernel virtual machines 3b0c632dbSHeiko Carstens * 4628eb9b8SChristian Ehrhardt * Copyright IBM Corp. 2008, 2009 5b0c632dbSHeiko Carstens * 6b0c632dbSHeiko Carstens * This program is free software; you can redistribute it and/or modify 7b0c632dbSHeiko Carstens * it under the terms of the GNU General Public License (version 2 only) 8b0c632dbSHeiko Carstens * as published by the Free Software Foundation. 9b0c632dbSHeiko Carstens * 10b0c632dbSHeiko Carstens * Author(s): Carsten Otte <cotte@de.ibm.com> 11b0c632dbSHeiko Carstens * Christian Borntraeger <borntraeger@de.ibm.com> 12b0c632dbSHeiko Carstens * Heiko Carstens <heiko.carstens@de.ibm.com> 13628eb9b8SChristian Ehrhardt * Christian Ehrhardt <ehrhardt@de.ibm.com> 1415f36ebdSJason J. Herne * Jason J. Herne <jjherne@us.ibm.com> 15b0c632dbSHeiko Carstens */ 16b0c632dbSHeiko Carstens 17b0c632dbSHeiko Carstens #include <linux/compiler.h> 18b0c632dbSHeiko Carstens #include <linux/err.h> 19b0c632dbSHeiko Carstens #include <linux/fs.h> 20ca872302SChristian Borntraeger #include <linux/hrtimer.h> 21b0c632dbSHeiko Carstens #include <linux/init.h> 22b0c632dbSHeiko Carstens #include <linux/kvm.h> 23b0c632dbSHeiko Carstens #include <linux/kvm_host.h> 24b0c632dbSHeiko Carstens #include <linux/module.h> 25b0c632dbSHeiko Carstens #include <linux/slab.h> 26ba5c1e9bSCarsten Otte #include <linux/timer.h> 27cbb870c8SHeiko Carstens #include <asm/asm-offsets.h> 28b0c632dbSHeiko Carstens #include <asm/lowcore.h> 29b0c632dbSHeiko Carstens #include <asm/pgtable.h> 30f5daba1dSHeiko Carstens #include <asm/nmi.h> 31a0616cdeSDavid Howells #include <asm/switch_to.h> 3278c4b59fSMichael Mueller #include <asm/facility.h> 331526bf9cSChristian Borntraeger #include <asm/sclp.h> 348f2abe6aSChristian Borntraeger #include "kvm-s390.h" 35b0c632dbSHeiko Carstens #include "gaccess.h" 36b0c632dbSHeiko Carstens 375786fffaSCornelia Huck #define CREATE_TRACE_POINTS 385786fffaSCornelia Huck #include "trace.h" 39ade38c31SCornelia Huck #include "trace-s390.h" 405786fffaSCornelia Huck 41b0c632dbSHeiko Carstens #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU 42b0c632dbSHeiko Carstens 43b0c632dbSHeiko Carstens struct kvm_stats_debugfs_item debugfs_entries[] = { 44b0c632dbSHeiko Carstens { "userspace_handled", VCPU_STAT(exit_userspace) }, 450eaeafa1SChristian Borntraeger { "exit_null", VCPU_STAT(exit_null) }, 468f2abe6aSChristian Borntraeger { "exit_validity", VCPU_STAT(exit_validity) }, 478f2abe6aSChristian Borntraeger { "exit_stop_request", VCPU_STAT(exit_stop_request) }, 488f2abe6aSChristian Borntraeger { "exit_external_request", VCPU_STAT(exit_external_request) }, 498f2abe6aSChristian Borntraeger { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) }, 50ba5c1e9bSCarsten Otte { "exit_instruction", VCPU_STAT(exit_instruction) }, 51ba5c1e9bSCarsten Otte { "exit_program_interruption", VCPU_STAT(exit_program_interruption) }, 52ba5c1e9bSCarsten Otte { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) }, 53ce2e4f0bSDavid Hildenbrand { "halt_wakeup", VCPU_STAT(halt_wakeup) }, 54f5e10b09SChristian Borntraeger { "instruction_lctlg", VCPU_STAT(instruction_lctlg) }, 55ba5c1e9bSCarsten Otte { "instruction_lctl", VCPU_STAT(instruction_lctl) }, 56aba07508SDavid Hildenbrand { "instruction_stctl", VCPU_STAT(instruction_stctl) }, 57aba07508SDavid Hildenbrand { "instruction_stctg", VCPU_STAT(instruction_stctg) }, 58ba5c1e9bSCarsten Otte { "deliver_emergency_signal", 
VCPU_STAT(deliver_emergency_signal) }, 597697e71fSChristian Ehrhardt { "deliver_external_call", VCPU_STAT(deliver_external_call) }, 60ba5c1e9bSCarsten Otte { "deliver_service_signal", VCPU_STAT(deliver_service_signal) }, 61ba5c1e9bSCarsten Otte { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) }, 62ba5c1e9bSCarsten Otte { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) }, 63ba5c1e9bSCarsten Otte { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) }, 64ba5c1e9bSCarsten Otte { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) }, 65ba5c1e9bSCarsten Otte { "deliver_program_interruption", VCPU_STAT(deliver_program_int) }, 66ba5c1e9bSCarsten Otte { "exit_wait_state", VCPU_STAT(exit_wait_state) }, 6769d0d3a3SChristian Borntraeger { "instruction_pfmf", VCPU_STAT(instruction_pfmf) }, 68453423dcSChristian Borntraeger { "instruction_stidp", VCPU_STAT(instruction_stidp) }, 69453423dcSChristian Borntraeger { "instruction_spx", VCPU_STAT(instruction_spx) }, 70453423dcSChristian Borntraeger { "instruction_stpx", VCPU_STAT(instruction_stpx) }, 71453423dcSChristian Borntraeger { "instruction_stap", VCPU_STAT(instruction_stap) }, 72453423dcSChristian Borntraeger { "instruction_storage_key", VCPU_STAT(instruction_storage_key) }, 738a242234SHeiko Carstens { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) }, 74453423dcSChristian Borntraeger { "instruction_stsch", VCPU_STAT(instruction_stsch) }, 75453423dcSChristian Borntraeger { "instruction_chsc", VCPU_STAT(instruction_chsc) }, 76b31288faSKonstantin Weitz { "instruction_essa", VCPU_STAT(instruction_essa) }, 77453423dcSChristian Borntraeger { "instruction_stsi", VCPU_STAT(instruction_stsi) }, 78453423dcSChristian Borntraeger { "instruction_stfl", VCPU_STAT(instruction_stfl) }, 79bb25b9baSChristian Borntraeger { "instruction_tprot", VCPU_STAT(instruction_tprot) }, 805288fbf0SChristian Borntraeger { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) }, 81bd59d3a4SCornelia Huck { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) }, 827697e71fSChristian Ehrhardt { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) }, 835288fbf0SChristian Borntraeger { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) }, 8442cb0c9fSDavid Hildenbrand { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) }, 8542cb0c9fSDavid Hildenbrand { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) }, 865288fbf0SChristian Borntraeger { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) }, 8742cb0c9fSDavid Hildenbrand { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) }, 8842cb0c9fSDavid Hildenbrand { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) }, 895288fbf0SChristian Borntraeger { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) }, 905288fbf0SChristian Borntraeger { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) }, 915288fbf0SChristian Borntraeger { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) }, 9242cb0c9fSDavid Hildenbrand { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) }, 9342cb0c9fSDavid Hildenbrand { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) }, 9442cb0c9fSDavid Hildenbrand { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) }, 95388186bcSChristian Borntraeger { "diagnose_10", VCPU_STAT(diagnose_10) }, 96e28acfeaSChristian 
Borntraeger { "diagnose_44", VCPU_STAT(diagnose_44) }, 9741628d33SKonstantin Weitz { "diagnose_9c", VCPU_STAT(diagnose_9c) }, 98b0c632dbSHeiko Carstens { NULL } 99b0c632dbSHeiko Carstens }; 100b0c632dbSHeiko Carstens 10178c4b59fSMichael Mueller unsigned long *vfacilities; 1022c70fe44SChristian Borntraeger static struct gmap_notifier gmap_notifier; 103b0c632dbSHeiko Carstens 10478c4b59fSMichael Mueller /* test availability of vfacility */ 105280ef0f1SHeiko Carstens int test_vfacility(unsigned long nr) 10678c4b59fSMichael Mueller { 10778c4b59fSMichael Mueller return __test_facility(nr, (void *) vfacilities); 10878c4b59fSMichael Mueller } 10978c4b59fSMichael Mueller 110b0c632dbSHeiko Carstens /* Section: not file related */ 11113a34e06SRadim Krčmář int kvm_arch_hardware_enable(void) 112b0c632dbSHeiko Carstens { 113b0c632dbSHeiko Carstens /* every s390 is virtualization enabled ;-) */ 11410474ae8SAlexander Graf return 0; 115b0c632dbSHeiko Carstens } 116b0c632dbSHeiko Carstens 1172c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address); 1182c70fe44SChristian Borntraeger 119b0c632dbSHeiko Carstens int kvm_arch_hardware_setup(void) 120b0c632dbSHeiko Carstens { 1212c70fe44SChristian Borntraeger gmap_notifier.notifier_call = kvm_gmap_notifier; 1222c70fe44SChristian Borntraeger gmap_register_ipte_notifier(&gmap_notifier); 123b0c632dbSHeiko Carstens return 0; 124b0c632dbSHeiko Carstens } 125b0c632dbSHeiko Carstens 126b0c632dbSHeiko Carstens void kvm_arch_hardware_unsetup(void) 127b0c632dbSHeiko Carstens { 1282c70fe44SChristian Borntraeger gmap_unregister_ipte_notifier(&gmap_notifier); 129b0c632dbSHeiko Carstens } 130b0c632dbSHeiko Carstens 131b0c632dbSHeiko Carstens int kvm_arch_init(void *opaque) 132b0c632dbSHeiko Carstens { 13384877d93SCornelia Huck /* Register floating interrupt controller interface. 
*/ 13484877d93SCornelia Huck return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC); 135b0c632dbSHeiko Carstens } 136b0c632dbSHeiko Carstens 137b0c632dbSHeiko Carstens /* Section: device related */ 138b0c632dbSHeiko Carstens long kvm_arch_dev_ioctl(struct file *filp, 139b0c632dbSHeiko Carstens unsigned int ioctl, unsigned long arg) 140b0c632dbSHeiko Carstens { 141b0c632dbSHeiko Carstens if (ioctl == KVM_S390_ENABLE_SIE) 142b0c632dbSHeiko Carstens return s390_enable_sie(); 143b0c632dbSHeiko Carstens return -EINVAL; 144b0c632dbSHeiko Carstens } 145b0c632dbSHeiko Carstens 146784aa3d7SAlexander Graf int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) 147b0c632dbSHeiko Carstens { 148d7b0b5ebSCarsten Otte int r; 149d7b0b5ebSCarsten Otte 1502bd0ac4eSCarsten Otte switch (ext) { 151d7b0b5ebSCarsten Otte case KVM_CAP_S390_PSW: 152b6cf8788SChristian Borntraeger case KVM_CAP_S390_GMAP: 15352e16b18SChristian Borntraeger case KVM_CAP_SYNC_MMU: 1541efd0f59SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL 1551efd0f59SCarsten Otte case KVM_CAP_S390_UCONTROL: 1561efd0f59SCarsten Otte #endif 1573c038e6bSDominik Dingel case KVM_CAP_ASYNC_PF: 15860b413c9SChristian Borntraeger case KVM_CAP_SYNC_REGS: 15914eebd91SCarsten Otte case KVM_CAP_ONE_REG: 160d6712df9SCornelia Huck case KVM_CAP_ENABLE_CAP: 161fa6b7fe9SCornelia Huck case KVM_CAP_S390_CSS_SUPPORT: 162ebc32262SCornelia Huck case KVM_CAP_IRQFD: 16310ccaa1eSCornelia Huck case KVM_CAP_IOEVENTFD: 164c05c4186SJens Freimann case KVM_CAP_DEVICE_CTRL: 165d938dc55SCornelia Huck case KVM_CAP_ENABLE_CAP_VM: 16678599d90SCornelia Huck case KVM_CAP_S390_IRQCHIP: 167f2061656SDominik Dingel case KVM_CAP_VM_ATTRIBUTES: 1686352e4d2SDavid Hildenbrand case KVM_CAP_MP_STATE: 169d7b0b5ebSCarsten Otte r = 1; 170d7b0b5ebSCarsten Otte break; 171e726b1bdSChristian Borntraeger case KVM_CAP_NR_VCPUS: 172e726b1bdSChristian Borntraeger case KVM_CAP_MAX_VCPUS: 173e726b1bdSChristian Borntraeger r = KVM_MAX_VCPUS; 174e726b1bdSChristian Borntraeger break; 175e1e2e605SNick Wang case KVM_CAP_NR_MEMSLOTS: 176e1e2e605SNick Wang r = KVM_USER_MEM_SLOTS; 177e1e2e605SNick Wang break; 1781526bf9cSChristian Borntraeger case KVM_CAP_S390_COW: 179abf09bedSMartin Schwidefsky r = MACHINE_HAS_ESOP; 1801526bf9cSChristian Borntraeger break; 1812bd0ac4eSCarsten Otte default: 182d7b0b5ebSCarsten Otte r = 0; 183b0c632dbSHeiko Carstens } 184d7b0b5ebSCarsten Otte return r; 1852bd0ac4eSCarsten Otte } 186b0c632dbSHeiko Carstens 18715f36ebdSJason J. Herne static void kvm_s390_sync_dirty_log(struct kvm *kvm, 18815f36ebdSJason J. Herne struct kvm_memory_slot *memslot) 18915f36ebdSJason J. Herne { 19015f36ebdSJason J. Herne gfn_t cur_gfn, last_gfn; 19115f36ebdSJason J. Herne unsigned long address; 19215f36ebdSJason J. Herne struct gmap *gmap = kvm->arch.gmap; 19315f36ebdSJason J. Herne 19415f36ebdSJason J. Herne down_read(&gmap->mm->mmap_sem); 19515f36ebdSJason J. Herne /* Loop over all guest pages */ 19615f36ebdSJason J. Herne last_gfn = memslot->base_gfn + memslot->npages; 19715f36ebdSJason J. Herne for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) { 19815f36ebdSJason J. Herne address = gfn_to_hva_memslot(memslot, cur_gfn); 19915f36ebdSJason J. Herne 20015f36ebdSJason J. Herne if (gmap_test_and_clear_dirty(address, gmap)) 20115f36ebdSJason J. Herne mark_page_dirty(kvm, cur_gfn); 20215f36ebdSJason J. Herne } 20315f36ebdSJason J. Herne up_read(&gmap->mm->mmap_sem); 20415f36ebdSJason J. Herne } 20515f36ebdSJason J. 
Herne 206b0c632dbSHeiko Carstens /* Section: vm related */ 207b0c632dbSHeiko Carstens /* 208b0c632dbSHeiko Carstens * Get (and clear) the dirty memory log for a memory slot. 209b0c632dbSHeiko Carstens */ 210b0c632dbSHeiko Carstens int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, 211b0c632dbSHeiko Carstens struct kvm_dirty_log *log) 212b0c632dbSHeiko Carstens { 21315f36ebdSJason J. Herne int r; 21415f36ebdSJason J. Herne unsigned long n; 21515f36ebdSJason J. Herne struct kvm_memory_slot *memslot; 21615f36ebdSJason J. Herne int is_dirty = 0; 21715f36ebdSJason J. Herne 21815f36ebdSJason J. Herne mutex_lock(&kvm->slots_lock); 21915f36ebdSJason J. Herne 22015f36ebdSJason J. Herne r = -EINVAL; 22115f36ebdSJason J. Herne if (log->slot >= KVM_USER_MEM_SLOTS) 22215f36ebdSJason J. Herne goto out; 22315f36ebdSJason J. Herne 22415f36ebdSJason J. Herne memslot = id_to_memslot(kvm->memslots, log->slot); 22515f36ebdSJason J. Herne r = -ENOENT; 22615f36ebdSJason J. Herne if (!memslot->dirty_bitmap) 22715f36ebdSJason J. Herne goto out; 22815f36ebdSJason J. Herne 22915f36ebdSJason J. Herne kvm_s390_sync_dirty_log(kvm, memslot); 23015f36ebdSJason J. Herne r = kvm_get_dirty_log(kvm, log, &is_dirty); 23115f36ebdSJason J. Herne if (r) 23215f36ebdSJason J. Herne goto out; 23315f36ebdSJason J. Herne 23415f36ebdSJason J. Herne /* Clear the dirty log */ 23515f36ebdSJason J. Herne if (is_dirty) { 23615f36ebdSJason J. Herne n = kvm_dirty_bitmap_bytes(memslot); 23715f36ebdSJason J. Herne memset(memslot->dirty_bitmap, 0, n); 23815f36ebdSJason J. Herne } 23915f36ebdSJason J. Herne r = 0; 24015f36ebdSJason J. Herne out: 24115f36ebdSJason J. Herne mutex_unlock(&kvm->slots_lock); 24215f36ebdSJason J. Herne return r; 243b0c632dbSHeiko Carstens } 244b0c632dbSHeiko Carstens 245d938dc55SCornelia Huck static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) 246d938dc55SCornelia Huck { 247d938dc55SCornelia Huck int r; 248d938dc55SCornelia Huck 249d938dc55SCornelia Huck if (cap->flags) 250d938dc55SCornelia Huck return -EINVAL; 251d938dc55SCornelia Huck 252d938dc55SCornelia Huck switch (cap->cap) { 25384223598SCornelia Huck case KVM_CAP_S390_IRQCHIP: 25484223598SCornelia Huck kvm->arch.use_irqchip = 1; 25584223598SCornelia Huck r = 0; 25684223598SCornelia Huck break; 257d938dc55SCornelia Huck default: 258d938dc55SCornelia Huck r = -EINVAL; 259d938dc55SCornelia Huck break; 260d938dc55SCornelia Huck } 261d938dc55SCornelia Huck return r; 262d938dc55SCornelia Huck } 263d938dc55SCornelia Huck 2648c0a7ce6SDominik Dingel static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) 2658c0a7ce6SDominik Dingel { 2668c0a7ce6SDominik Dingel int ret; 2678c0a7ce6SDominik Dingel 2688c0a7ce6SDominik Dingel switch (attr->attr) { 2698c0a7ce6SDominik Dingel case KVM_S390_VM_MEM_LIMIT_SIZE: 2708c0a7ce6SDominik Dingel ret = 0; 2718c0a7ce6SDominik Dingel if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr)) 2728c0a7ce6SDominik Dingel ret = -EFAULT; 2738c0a7ce6SDominik Dingel break; 2748c0a7ce6SDominik Dingel default: 2758c0a7ce6SDominik Dingel ret = -ENXIO; 2768c0a7ce6SDominik Dingel break; 2778c0a7ce6SDominik Dingel } 2788c0a7ce6SDominik Dingel return ret; 2798c0a7ce6SDominik Dingel } 2808c0a7ce6SDominik Dingel 2818c0a7ce6SDominik Dingel static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) 2824f718eabSDominik Dingel { 2834f718eabSDominik Dingel int ret; 2844f718eabSDominik Dingel unsigned int idx; 2854f718eabSDominik Dingel switch (attr->attr) { 2864f718eabSDominik 
Dingel case KVM_S390_VM_MEM_ENABLE_CMMA: 2874f718eabSDominik Dingel ret = -EBUSY; 2884f718eabSDominik Dingel mutex_lock(&kvm->lock); 2894f718eabSDominik Dingel if (atomic_read(&kvm->online_vcpus) == 0) { 2904f718eabSDominik Dingel kvm->arch.use_cmma = 1; 2914f718eabSDominik Dingel ret = 0; 2924f718eabSDominik Dingel } 2934f718eabSDominik Dingel mutex_unlock(&kvm->lock); 2944f718eabSDominik Dingel break; 2954f718eabSDominik Dingel case KVM_S390_VM_MEM_CLR_CMMA: 2964f718eabSDominik Dingel mutex_lock(&kvm->lock); 2974f718eabSDominik Dingel idx = srcu_read_lock(&kvm->srcu); 298a13cff31SDominik Dingel s390_reset_cmma(kvm->arch.gmap->mm); 2994f718eabSDominik Dingel srcu_read_unlock(&kvm->srcu, idx); 3004f718eabSDominik Dingel mutex_unlock(&kvm->lock); 3014f718eabSDominik Dingel ret = 0; 3024f718eabSDominik Dingel break; 3038c0a7ce6SDominik Dingel case KVM_S390_VM_MEM_LIMIT_SIZE: { 3048c0a7ce6SDominik Dingel unsigned long new_limit; 3058c0a7ce6SDominik Dingel 3068c0a7ce6SDominik Dingel if (kvm_is_ucontrol(kvm)) 3078c0a7ce6SDominik Dingel return -EINVAL; 3088c0a7ce6SDominik Dingel 3098c0a7ce6SDominik Dingel if (get_user(new_limit, (u64 __user *)attr->addr)) 3108c0a7ce6SDominik Dingel return -EFAULT; 3118c0a7ce6SDominik Dingel 3128c0a7ce6SDominik Dingel if (new_limit > kvm->arch.gmap->asce_end) 3138c0a7ce6SDominik Dingel return -E2BIG; 3148c0a7ce6SDominik Dingel 3158c0a7ce6SDominik Dingel ret = -EBUSY; 3168c0a7ce6SDominik Dingel mutex_lock(&kvm->lock); 3178c0a7ce6SDominik Dingel if (atomic_read(&kvm->online_vcpus) == 0) { 3188c0a7ce6SDominik Dingel /* gmap_alloc will round the limit up */ 3198c0a7ce6SDominik Dingel struct gmap *new = gmap_alloc(current->mm, new_limit); 3208c0a7ce6SDominik Dingel 3218c0a7ce6SDominik Dingel if (!new) { 3228c0a7ce6SDominik Dingel ret = -ENOMEM; 3238c0a7ce6SDominik Dingel } else { 3248c0a7ce6SDominik Dingel gmap_free(kvm->arch.gmap); 3258c0a7ce6SDominik Dingel new->private = kvm; 3268c0a7ce6SDominik Dingel kvm->arch.gmap = new; 3278c0a7ce6SDominik Dingel ret = 0; 3288c0a7ce6SDominik Dingel } 3298c0a7ce6SDominik Dingel } 3308c0a7ce6SDominik Dingel mutex_unlock(&kvm->lock); 3318c0a7ce6SDominik Dingel break; 3328c0a7ce6SDominik Dingel } 3334f718eabSDominik Dingel default: 3344f718eabSDominik Dingel ret = -ENXIO; 3354f718eabSDominik Dingel break; 3364f718eabSDominik Dingel } 3374f718eabSDominik Dingel return ret; 3384f718eabSDominik Dingel } 3394f718eabSDominik Dingel 340f2061656SDominik Dingel static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr) 341f2061656SDominik Dingel { 342f2061656SDominik Dingel int ret; 343f2061656SDominik Dingel 344f2061656SDominik Dingel switch (attr->group) { 3454f718eabSDominik Dingel case KVM_S390_VM_MEM_CTRL: 3468c0a7ce6SDominik Dingel ret = kvm_s390_set_mem_control(kvm, attr); 3474f718eabSDominik Dingel break; 348f2061656SDominik Dingel default: 349f2061656SDominik Dingel ret = -ENXIO; 350f2061656SDominik Dingel break; 351f2061656SDominik Dingel } 352f2061656SDominik Dingel 353f2061656SDominik Dingel return ret; 354f2061656SDominik Dingel } 355f2061656SDominik Dingel 356f2061656SDominik Dingel static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr) 357f2061656SDominik Dingel { 3588c0a7ce6SDominik Dingel int ret; 3598c0a7ce6SDominik Dingel 3608c0a7ce6SDominik Dingel switch (attr->group) { 3618c0a7ce6SDominik Dingel case KVM_S390_VM_MEM_CTRL: 3628c0a7ce6SDominik Dingel ret = kvm_s390_get_mem_control(kvm, attr); 3638c0a7ce6SDominik Dingel break; 3648c0a7ce6SDominik Dingel default: 
3658c0a7ce6SDominik Dingel ret = -ENXIO; 3668c0a7ce6SDominik Dingel break; 3678c0a7ce6SDominik Dingel } 3688c0a7ce6SDominik Dingel 3698c0a7ce6SDominik Dingel return ret; 370f2061656SDominik Dingel } 371f2061656SDominik Dingel 372f2061656SDominik Dingel static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) 373f2061656SDominik Dingel { 374f2061656SDominik Dingel int ret; 375f2061656SDominik Dingel 376f2061656SDominik Dingel switch (attr->group) { 3774f718eabSDominik Dingel case KVM_S390_VM_MEM_CTRL: 3784f718eabSDominik Dingel switch (attr->attr) { 3794f718eabSDominik Dingel case KVM_S390_VM_MEM_ENABLE_CMMA: 3804f718eabSDominik Dingel case KVM_S390_VM_MEM_CLR_CMMA: 3818c0a7ce6SDominik Dingel case KVM_S390_VM_MEM_LIMIT_SIZE: 3824f718eabSDominik Dingel ret = 0; 3834f718eabSDominik Dingel break; 3844f718eabSDominik Dingel default: 3854f718eabSDominik Dingel ret = -ENXIO; 3864f718eabSDominik Dingel break; 3874f718eabSDominik Dingel } 3884f718eabSDominik Dingel break; 389f2061656SDominik Dingel default: 390f2061656SDominik Dingel ret = -ENXIO; 391f2061656SDominik Dingel break; 392f2061656SDominik Dingel } 393f2061656SDominik Dingel 394f2061656SDominik Dingel return ret; 395f2061656SDominik Dingel } 396f2061656SDominik Dingel 397b0c632dbSHeiko Carstens long kvm_arch_vm_ioctl(struct file *filp, 398b0c632dbSHeiko Carstens unsigned int ioctl, unsigned long arg) 399b0c632dbSHeiko Carstens { 400b0c632dbSHeiko Carstens struct kvm *kvm = filp->private_data; 401b0c632dbSHeiko Carstens void __user *argp = (void __user *)arg; 402f2061656SDominik Dingel struct kvm_device_attr attr; 403b0c632dbSHeiko Carstens int r; 404b0c632dbSHeiko Carstens 405b0c632dbSHeiko Carstens switch (ioctl) { 406ba5c1e9bSCarsten Otte case KVM_S390_INTERRUPT: { 407ba5c1e9bSCarsten Otte struct kvm_s390_interrupt s390int; 408ba5c1e9bSCarsten Otte 409ba5c1e9bSCarsten Otte r = -EFAULT; 410ba5c1e9bSCarsten Otte if (copy_from_user(&s390int, argp, sizeof(s390int))) 411ba5c1e9bSCarsten Otte break; 412ba5c1e9bSCarsten Otte r = kvm_s390_inject_vm(kvm, &s390int); 413ba5c1e9bSCarsten Otte break; 414ba5c1e9bSCarsten Otte } 415d938dc55SCornelia Huck case KVM_ENABLE_CAP: { 416d938dc55SCornelia Huck struct kvm_enable_cap cap; 417d938dc55SCornelia Huck r = -EFAULT; 418d938dc55SCornelia Huck if (copy_from_user(&cap, argp, sizeof(cap))) 419d938dc55SCornelia Huck break; 420d938dc55SCornelia Huck r = kvm_vm_ioctl_enable_cap(kvm, &cap); 421d938dc55SCornelia Huck break; 422d938dc55SCornelia Huck } 42384223598SCornelia Huck case KVM_CREATE_IRQCHIP: { 42484223598SCornelia Huck struct kvm_irq_routing_entry routing; 42584223598SCornelia Huck 42684223598SCornelia Huck r = -EINVAL; 42784223598SCornelia Huck if (kvm->arch.use_irqchip) { 42884223598SCornelia Huck /* Set up dummy routing. 
*/ 42984223598SCornelia Huck memset(&routing, 0, sizeof(routing)); 43084223598SCornelia Huck kvm_set_irq_routing(kvm, &routing, 0, 0); 43184223598SCornelia Huck r = 0; 43284223598SCornelia Huck } 43384223598SCornelia Huck break; 43484223598SCornelia Huck } 435f2061656SDominik Dingel case KVM_SET_DEVICE_ATTR: { 436f2061656SDominik Dingel r = -EFAULT; 437f2061656SDominik Dingel if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 438f2061656SDominik Dingel break; 439f2061656SDominik Dingel r = kvm_s390_vm_set_attr(kvm, &attr); 440f2061656SDominik Dingel break; 441f2061656SDominik Dingel } 442f2061656SDominik Dingel case KVM_GET_DEVICE_ATTR: { 443f2061656SDominik Dingel r = -EFAULT; 444f2061656SDominik Dingel if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 445f2061656SDominik Dingel break; 446f2061656SDominik Dingel r = kvm_s390_vm_get_attr(kvm, &attr); 447f2061656SDominik Dingel break; 448f2061656SDominik Dingel } 449f2061656SDominik Dingel case KVM_HAS_DEVICE_ATTR: { 450f2061656SDominik Dingel r = -EFAULT; 451f2061656SDominik Dingel if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 452f2061656SDominik Dingel break; 453f2061656SDominik Dingel r = kvm_s390_vm_has_attr(kvm, &attr); 454f2061656SDominik Dingel break; 455f2061656SDominik Dingel } 456b0c632dbSHeiko Carstens default: 457367e1319SAvi Kivity r = -ENOTTY; 458b0c632dbSHeiko Carstens } 459b0c632dbSHeiko Carstens 460b0c632dbSHeiko Carstens return r; 461b0c632dbSHeiko Carstens } 462b0c632dbSHeiko Carstens 4635102ee87STony Krowiak static int kvm_s390_crypto_init(struct kvm *kvm) 4645102ee87STony Krowiak { 4655102ee87STony Krowiak if (!test_vfacility(76)) 4665102ee87STony Krowiak return 0; 4675102ee87STony Krowiak 4685102ee87STony Krowiak kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb), 4695102ee87STony Krowiak GFP_KERNEL | GFP_DMA); 4705102ee87STony Krowiak if (!kvm->arch.crypto.crycb) 4715102ee87STony Krowiak return -ENOMEM; 4725102ee87STony Krowiak 4735102ee87STony Krowiak kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb | 4745102ee87STony Krowiak CRYCB_FORMAT1; 4755102ee87STony Krowiak 4765102ee87STony Krowiak return 0; 4775102ee87STony Krowiak } 4785102ee87STony Krowiak 479e08b9637SCarsten Otte int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) 480b0c632dbSHeiko Carstens { 481b0c632dbSHeiko Carstens int rc; 482b0c632dbSHeiko Carstens char debug_name[16]; 483f6c137ffSChristian Borntraeger static unsigned long sca_offset; 484b0c632dbSHeiko Carstens 485e08b9637SCarsten Otte rc = -EINVAL; 486e08b9637SCarsten Otte #ifdef CONFIG_KVM_S390_UCONTROL 487e08b9637SCarsten Otte if (type & ~KVM_VM_S390_UCONTROL) 488e08b9637SCarsten Otte goto out_err; 489e08b9637SCarsten Otte if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN))) 490e08b9637SCarsten Otte goto out_err; 491e08b9637SCarsten Otte #else 492e08b9637SCarsten Otte if (type) 493e08b9637SCarsten Otte goto out_err; 494e08b9637SCarsten Otte #endif 495e08b9637SCarsten Otte 496b0c632dbSHeiko Carstens rc = s390_enable_sie(); 497b0c632dbSHeiko Carstens if (rc) 498d89f5effSJan Kiszka goto out_err; 499b0c632dbSHeiko Carstens 500b290411aSCarsten Otte rc = -ENOMEM; 501b290411aSCarsten Otte 502b0c632dbSHeiko Carstens kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL); 503b0c632dbSHeiko Carstens if (!kvm->arch.sca) 504d89f5effSJan Kiszka goto out_err; 505f6c137ffSChristian Borntraeger spin_lock(&kvm_lock); 506f6c137ffSChristian Borntraeger sca_offset = (sca_offset + 16) & 0x7f0; 507f6c137ffSChristian 
Borntraeger kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset); 508f6c137ffSChristian Borntraeger spin_unlock(&kvm_lock); 509b0c632dbSHeiko Carstens 510b0c632dbSHeiko Carstens sprintf(debug_name, "kvm-%u", current->pid); 511b0c632dbSHeiko Carstens 512b0c632dbSHeiko Carstens kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long)); 513b0c632dbSHeiko Carstens if (!kvm->arch.dbf) 514b0c632dbSHeiko Carstens goto out_nodbf; 515b0c632dbSHeiko Carstens 5165102ee87STony Krowiak if (kvm_s390_crypto_init(kvm) < 0) 5175102ee87STony Krowiak goto out_crypto; 5185102ee87STony Krowiak 519ba5c1e9bSCarsten Otte spin_lock_init(&kvm->arch.float_int.lock); 520ba5c1e9bSCarsten Otte INIT_LIST_HEAD(&kvm->arch.float_int.list); 5218a242234SHeiko Carstens init_waitqueue_head(&kvm->arch.ipte_wq); 522a6b7e459SThomas Huth mutex_init(&kvm->arch.ipte_mutex); 523ba5c1e9bSCarsten Otte 524b0c632dbSHeiko Carstens debug_register_view(kvm->arch.dbf, &debug_sprintf_view); 525b0c632dbSHeiko Carstens VM_EVENT(kvm, 3, "%s", "vm created"); 526b0c632dbSHeiko Carstens 527e08b9637SCarsten Otte if (type & KVM_VM_S390_UCONTROL) { 528e08b9637SCarsten Otte kvm->arch.gmap = NULL; 529e08b9637SCarsten Otte } else { 5300349985aSChristian Borntraeger kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1); 531598841caSCarsten Otte if (!kvm->arch.gmap) 532598841caSCarsten Otte goto out_nogmap; 5332c70fe44SChristian Borntraeger kvm->arch.gmap->private = kvm; 53424eb3a82SDominik Dingel kvm->arch.gmap->pfault_enabled = 0; 535e08b9637SCarsten Otte } 536fa6b7fe9SCornelia Huck 537fa6b7fe9SCornelia Huck kvm->arch.css_support = 0; 53884223598SCornelia Huck kvm->arch.use_irqchip = 0; 539fa6b7fe9SCornelia Huck 5408ad35755SDavid Hildenbrand spin_lock_init(&kvm->arch.start_stop_lock); 5418ad35755SDavid Hildenbrand 542d89f5effSJan Kiszka return 0; 543598841caSCarsten Otte out_nogmap: 5445102ee87STony Krowiak kfree(kvm->arch.crypto.crycb); 5455102ee87STony Krowiak out_crypto: 546598841caSCarsten Otte debug_unregister(kvm->arch.dbf); 547b0c632dbSHeiko Carstens out_nodbf: 548b0c632dbSHeiko Carstens free_page((unsigned long)(kvm->arch.sca)); 549d89f5effSJan Kiszka out_err: 550d89f5effSJan Kiszka return rc; 551b0c632dbSHeiko Carstens } 552b0c632dbSHeiko Carstens 553d329c035SChristian Borntraeger void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) 554d329c035SChristian Borntraeger { 555d329c035SChristian Borntraeger VCPU_EVENT(vcpu, 3, "%s", "free cpu"); 556ade38c31SCornelia Huck trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id); 55767335e63SChristian Borntraeger kvm_s390_clear_local_irqs(vcpu); 5583c038e6bSDominik Dingel kvm_clear_async_pf_completion_queue(vcpu); 55958f9460bSCarsten Otte if (!kvm_is_ucontrol(vcpu->kvm)) { 56058f9460bSCarsten Otte clear_bit(63 - vcpu->vcpu_id, 56158f9460bSCarsten Otte (unsigned long *) &vcpu->kvm->arch.sca->mcn); 562abf4a71eSCarsten Otte if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda == 563abf4a71eSCarsten Otte (__u64) vcpu->arch.sie_block) 564abf4a71eSCarsten Otte vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0; 56558f9460bSCarsten Otte } 566abf4a71eSCarsten Otte smp_mb(); 56727e0393fSCarsten Otte 56827e0393fSCarsten Otte if (kvm_is_ucontrol(vcpu->kvm)) 56927e0393fSCarsten Otte gmap_free(vcpu->arch.gmap); 57027e0393fSCarsten Otte 571b31605c1SDominik Dingel if (kvm_s390_cmma_enabled(vcpu->kvm)) 572b31605c1SDominik Dingel kvm_s390_vcpu_unsetup_cmma(vcpu); 573d329c035SChristian Borntraeger free_page((unsigned long)(vcpu->arch.sie_block)); 574b31288faSKonstantin Weitz 5756692cef3SChristian Borntraeger 
kvm_vcpu_uninit(vcpu); 576b110feafSMichael Mueller kmem_cache_free(kvm_vcpu_cache, vcpu); 577d329c035SChristian Borntraeger } 578d329c035SChristian Borntraeger 579d329c035SChristian Borntraeger static void kvm_free_vcpus(struct kvm *kvm) 580d329c035SChristian Borntraeger { 581d329c035SChristian Borntraeger unsigned int i; 582988a2caeSGleb Natapov struct kvm_vcpu *vcpu; 583d329c035SChristian Borntraeger 584988a2caeSGleb Natapov kvm_for_each_vcpu(i, vcpu, kvm) 585988a2caeSGleb Natapov kvm_arch_vcpu_destroy(vcpu); 586988a2caeSGleb Natapov 587988a2caeSGleb Natapov mutex_lock(&kvm->lock); 588988a2caeSGleb Natapov for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) 589d329c035SChristian Borntraeger kvm->vcpus[i] = NULL; 590988a2caeSGleb Natapov 591988a2caeSGleb Natapov atomic_set(&kvm->online_vcpus, 0); 592988a2caeSGleb Natapov mutex_unlock(&kvm->lock); 593d329c035SChristian Borntraeger } 594d329c035SChristian Borntraeger 595b0c632dbSHeiko Carstens void kvm_arch_destroy_vm(struct kvm *kvm) 596b0c632dbSHeiko Carstens { 597d329c035SChristian Borntraeger kvm_free_vcpus(kvm); 598b0c632dbSHeiko Carstens free_page((unsigned long)(kvm->arch.sca)); 599d329c035SChristian Borntraeger debug_unregister(kvm->arch.dbf); 6005102ee87STony Krowiak kfree(kvm->arch.crypto.crycb); 60127e0393fSCarsten Otte if (!kvm_is_ucontrol(kvm)) 602598841caSCarsten Otte gmap_free(kvm->arch.gmap); 603841b91c5SCornelia Huck kvm_s390_destroy_adapters(kvm); 60467335e63SChristian Borntraeger kvm_s390_clear_float_irqs(kvm); 605b0c632dbSHeiko Carstens } 606b0c632dbSHeiko Carstens 607b0c632dbSHeiko Carstens /* Section: vcpu related */ 608dafd032aSDominik Dingel static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu) 609b0c632dbSHeiko Carstens { 610c6c956b8SMartin Schwidefsky vcpu->arch.gmap = gmap_alloc(current->mm, -1UL); 61127e0393fSCarsten Otte if (!vcpu->arch.gmap) 61227e0393fSCarsten Otte return -ENOMEM; 6132c70fe44SChristian Borntraeger vcpu->arch.gmap->private = vcpu->kvm; 614dafd032aSDominik Dingel 61527e0393fSCarsten Otte return 0; 61627e0393fSCarsten Otte } 61727e0393fSCarsten Otte 618dafd032aSDominik Dingel int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) 619dafd032aSDominik Dingel { 620dafd032aSDominik Dingel vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; 621dafd032aSDominik Dingel kvm_clear_async_pf_completion_queue(vcpu); 62259674c1aSChristian Borntraeger vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX | 62359674c1aSChristian Borntraeger KVM_SYNC_GPRS | 6249eed0735SChristian Borntraeger KVM_SYNC_ACRS | 625b028ee3eSDavid Hildenbrand KVM_SYNC_CRS | 626b028ee3eSDavid Hildenbrand KVM_SYNC_ARCH0 | 627b028ee3eSDavid Hildenbrand KVM_SYNC_PFAULT; 628dafd032aSDominik Dingel 629dafd032aSDominik Dingel if (kvm_is_ucontrol(vcpu->kvm)) 630dafd032aSDominik Dingel return __kvm_ucontrol_vcpu_init(vcpu); 631dafd032aSDominik Dingel 632b0c632dbSHeiko Carstens return 0; 633b0c632dbSHeiko Carstens } 634b0c632dbSHeiko Carstens 635b0c632dbSHeiko Carstens void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 636b0c632dbSHeiko Carstens { 6374725c860SMartin Schwidefsky save_fp_ctl(&vcpu->arch.host_fpregs.fpc); 6384725c860SMartin Schwidefsky save_fp_regs(vcpu->arch.host_fpregs.fprs); 639b0c632dbSHeiko Carstens save_access_regs(vcpu->arch.host_acrs); 6404725c860SMartin Schwidefsky restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc); 6414725c860SMartin Schwidefsky restore_fp_regs(vcpu->arch.guest_fpregs.fprs); 64259674c1aSChristian Borntraeger restore_access_regs(vcpu->run->s.regs.acrs); 643480e5926SChristian Borntraeger 
gmap_enable(vcpu->arch.gmap); 6449e6dabefSCornelia Huck atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); 645b0c632dbSHeiko Carstens } 646b0c632dbSHeiko Carstens 647b0c632dbSHeiko Carstens void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 648b0c632dbSHeiko Carstens { 6499e6dabefSCornelia Huck atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); 650480e5926SChristian Borntraeger gmap_disable(vcpu->arch.gmap); 6514725c860SMartin Schwidefsky save_fp_ctl(&vcpu->arch.guest_fpregs.fpc); 6524725c860SMartin Schwidefsky save_fp_regs(vcpu->arch.guest_fpregs.fprs); 65359674c1aSChristian Borntraeger save_access_regs(vcpu->run->s.regs.acrs); 6544725c860SMartin Schwidefsky restore_fp_ctl(&vcpu->arch.host_fpregs.fpc); 6554725c860SMartin Schwidefsky restore_fp_regs(vcpu->arch.host_fpregs.fprs); 656b0c632dbSHeiko Carstens restore_access_regs(vcpu->arch.host_acrs); 657b0c632dbSHeiko Carstens } 658b0c632dbSHeiko Carstens 659b0c632dbSHeiko Carstens static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) 660b0c632dbSHeiko Carstens { 661b0c632dbSHeiko Carstens /* this equals initial cpu reset in pop, but we don't switch to ESA */ 662b0c632dbSHeiko Carstens vcpu->arch.sie_block->gpsw.mask = 0UL; 663b0c632dbSHeiko Carstens vcpu->arch.sie_block->gpsw.addr = 0UL; 6648d26cf7bSChristian Borntraeger kvm_s390_set_prefix(vcpu, 0); 665b0c632dbSHeiko Carstens vcpu->arch.sie_block->cputm = 0UL; 666b0c632dbSHeiko Carstens vcpu->arch.sie_block->ckc = 0UL; 667b0c632dbSHeiko Carstens vcpu->arch.sie_block->todpr = 0; 668b0c632dbSHeiko Carstens memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64)); 669b0c632dbSHeiko Carstens vcpu->arch.sie_block->gcr[0] = 0xE0UL; 670b0c632dbSHeiko Carstens vcpu->arch.sie_block->gcr[14] = 0xC2000000UL; 671b0c632dbSHeiko Carstens vcpu->arch.guest_fpregs.fpc = 0; 672b0c632dbSHeiko Carstens asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc)); 673b0c632dbSHeiko Carstens vcpu->arch.sie_block->gbea = 1; 674672550fbSChristian Borntraeger vcpu->arch.sie_block->pp = 0; 6753c038e6bSDominik Dingel vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; 6763c038e6bSDominik Dingel kvm_clear_async_pf_completion_queue(vcpu); 6776352e4d2SDavid Hildenbrand if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) 6786852d7b6SDavid Hildenbrand kvm_s390_vcpu_stop(vcpu); 6792ed10cc1SJens Freimann kvm_s390_clear_local_irqs(vcpu); 680b0c632dbSHeiko Carstens } 681b0c632dbSHeiko Carstens 68231928aa5SDominik Dingel void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) 68342897d86SMarcelo Tosatti { 684dafd032aSDominik Dingel if (!kvm_is_ucontrol(vcpu->kvm)) 685dafd032aSDominik Dingel vcpu->arch.gmap = vcpu->kvm->arch.gmap; 68642897d86SMarcelo Tosatti } 68742897d86SMarcelo Tosatti 6885102ee87STony Krowiak static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu) 6895102ee87STony Krowiak { 6905102ee87STony Krowiak if (!test_vfacility(76)) 6915102ee87STony Krowiak return; 6925102ee87STony Krowiak 6935102ee87STony Krowiak vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; 6945102ee87STony Krowiak } 6955102ee87STony Krowiak 696b31605c1SDominik Dingel void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu) 697b31605c1SDominik Dingel { 698b31605c1SDominik Dingel free_page(vcpu->arch.sie_block->cbrlo); 699b31605c1SDominik Dingel vcpu->arch.sie_block->cbrlo = 0; 700b31605c1SDominik Dingel } 701b31605c1SDominik Dingel 702b31605c1SDominik Dingel int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu) 703b31605c1SDominik Dingel { 704b31605c1SDominik Dingel vcpu->arch.sie_block->cbrlo = 
get_zeroed_page(GFP_KERNEL); 705b31605c1SDominik Dingel if (!vcpu->arch.sie_block->cbrlo) 706b31605c1SDominik Dingel return -ENOMEM; 707b31605c1SDominik Dingel 708b31605c1SDominik Dingel vcpu->arch.sie_block->ecb2 |= 0x80; 709b31605c1SDominik Dingel vcpu->arch.sie_block->ecb2 &= ~0x08; 710b31605c1SDominik Dingel return 0; 711b31605c1SDominik Dingel } 712b31605c1SDominik Dingel 713b0c632dbSHeiko Carstens int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) 714b0c632dbSHeiko Carstens { 715b31605c1SDominik Dingel int rc = 0; 716b31288faSKonstantin Weitz 7179e6dabefSCornelia Huck atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | 7189e6dabefSCornelia Huck CPUSTAT_SM | 71969d0d3a3SChristian Borntraeger CPUSTAT_STOPPED | 72069d0d3a3SChristian Borntraeger CPUSTAT_GED); 721fc34531dSChristian Borntraeger vcpu->arch.sie_block->ecb = 6; 7227feb6bb8SMichael Mueller if (test_vfacility(50) && test_vfacility(73)) 7237feb6bb8SMichael Mueller vcpu->arch.sie_block->ecb |= 0x10; 7247feb6bb8SMichael Mueller 72569d0d3a3SChristian Borntraeger vcpu->arch.sie_block->ecb2 = 8; 726*ea5f4969SDavid Hildenbrand vcpu->arch.sie_block->eca = 0xC1002000U; 727217a4406SHeiko Carstens if (sclp_has_siif()) 728217a4406SHeiko Carstens vcpu->arch.sie_block->eca |= 1; 729*ea5f4969SDavid Hildenbrand if (sclp_has_sigpif()) 730*ea5f4969SDavid Hildenbrand vcpu->arch.sie_block->eca |= 0x10000000U; 73178c4b59fSMichael Mueller vcpu->arch.sie_block->fac = (int) (long) vfacilities; 7325a5e6536SMatthew Rosato vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE | 7335a5e6536SMatthew Rosato ICTL_TPROT; 7345a5e6536SMatthew Rosato 735b31605c1SDominik Dingel if (kvm_s390_cmma_enabled(vcpu->kvm)) { 736b31605c1SDominik Dingel rc = kvm_s390_vcpu_setup_cmma(vcpu); 737b31605c1SDominik Dingel if (rc) 738b31605c1SDominik Dingel return rc; 739b31288faSKonstantin Weitz } 7400ac96cafSDavid Hildenbrand hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 741ca872302SChristian Borntraeger vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; 742453423dcSChristian Borntraeger get_cpu_id(&vcpu->arch.cpu_id); 74392e6ecf3SChristian Borntraeger vcpu->arch.cpu_id.version = 0xff; 7445102ee87STony Krowiak 7455102ee87STony Krowiak kvm_s390_vcpu_crypto_setup(vcpu); 7465102ee87STony Krowiak 747b31605c1SDominik Dingel return rc; 748b0c632dbSHeiko Carstens } 749b0c632dbSHeiko Carstens 750b0c632dbSHeiko Carstens struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, 751b0c632dbSHeiko Carstens unsigned int id) 752b0c632dbSHeiko Carstens { 7534d47555aSCarsten Otte struct kvm_vcpu *vcpu; 7547feb6bb8SMichael Mueller struct sie_page *sie_page; 7554d47555aSCarsten Otte int rc = -EINVAL; 756b0c632dbSHeiko Carstens 7574d47555aSCarsten Otte if (id >= KVM_MAX_VCPUS) 7584d47555aSCarsten Otte goto out; 7594d47555aSCarsten Otte 7604d47555aSCarsten Otte rc = -ENOMEM; 7614d47555aSCarsten Otte 762b110feafSMichael Mueller vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); 763b0c632dbSHeiko Carstens if (!vcpu) 7644d47555aSCarsten Otte goto out; 765b0c632dbSHeiko Carstens 7667feb6bb8SMichael Mueller sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL); 7677feb6bb8SMichael Mueller if (!sie_page) 768b0c632dbSHeiko Carstens goto out_free_cpu; 769b0c632dbSHeiko Carstens 7707feb6bb8SMichael Mueller vcpu->arch.sie_block = &sie_page->sie_block; 7717feb6bb8SMichael Mueller vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb; 7727feb6bb8SMichael Mueller 773b0c632dbSHeiko Carstens vcpu->arch.sie_block->icpua = id; 77458f9460bSCarsten Otte if 
(!kvm_is_ucontrol(kvm)) { 77558f9460bSCarsten Otte if (!kvm->arch.sca) { 77658f9460bSCarsten Otte WARN_ON_ONCE(1); 77758f9460bSCarsten Otte goto out_free_cpu; 77858f9460bSCarsten Otte } 779abf4a71eSCarsten Otte if (!kvm->arch.sca->cpu[id].sda) 78058f9460bSCarsten Otte kvm->arch.sca->cpu[id].sda = 78158f9460bSCarsten Otte (__u64) vcpu->arch.sie_block; 78258f9460bSCarsten Otte vcpu->arch.sie_block->scaoh = 78358f9460bSCarsten Otte (__u32)(((__u64)kvm->arch.sca) >> 32); 784b0c632dbSHeiko Carstens vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca; 785fc34531dSChristian Borntraeger set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn); 78658f9460bSCarsten Otte } 787b0c632dbSHeiko Carstens 788ba5c1e9bSCarsten Otte spin_lock_init(&vcpu->arch.local_int.lock); 789ba5c1e9bSCarsten Otte vcpu->arch.local_int.float_int = &kvm->arch.float_int; 790d0321a24SChristian Borntraeger vcpu->arch.local_int.wq = &vcpu->wq; 7915288fbf0SChristian Borntraeger vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags; 792ba5c1e9bSCarsten Otte 793b0c632dbSHeiko Carstens rc = kvm_vcpu_init(vcpu, kvm, id); 794b0c632dbSHeiko Carstens if (rc) 7957b06bf2fSWei Yongjun goto out_free_sie_block; 796b0c632dbSHeiko Carstens VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu, 797b0c632dbSHeiko Carstens vcpu->arch.sie_block); 798ade38c31SCornelia Huck trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block); 799b0c632dbSHeiko Carstens 800b0c632dbSHeiko Carstens return vcpu; 8017b06bf2fSWei Yongjun out_free_sie_block: 8027b06bf2fSWei Yongjun free_page((unsigned long)(vcpu->arch.sie_block)); 803b0c632dbSHeiko Carstens out_free_cpu: 804b110feafSMichael Mueller kmem_cache_free(kvm_vcpu_cache, vcpu); 8054d47555aSCarsten Otte out: 806b0c632dbSHeiko Carstens return ERR_PTR(rc); 807b0c632dbSHeiko Carstens } 808b0c632dbSHeiko Carstens 809b0c632dbSHeiko Carstens int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) 810b0c632dbSHeiko Carstens { 8119a022067SDavid Hildenbrand return kvm_s390_vcpu_has_irq(vcpu, 0); 812b0c632dbSHeiko Carstens } 813b0c632dbSHeiko Carstens 81449b99e1eSChristian Borntraeger void s390_vcpu_block(struct kvm_vcpu *vcpu) 81549b99e1eSChristian Borntraeger { 81649b99e1eSChristian Borntraeger atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); 81749b99e1eSChristian Borntraeger } 81849b99e1eSChristian Borntraeger 81949b99e1eSChristian Borntraeger void s390_vcpu_unblock(struct kvm_vcpu *vcpu) 82049b99e1eSChristian Borntraeger { 82149b99e1eSChristian Borntraeger atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); 82249b99e1eSChristian Borntraeger } 82349b99e1eSChristian Borntraeger 82449b99e1eSChristian Borntraeger /* 82549b99e1eSChristian Borntraeger * Kick a guest cpu out of SIE and wait until SIE is not running. 82649b99e1eSChristian Borntraeger * If the CPU is not running (e.g. waiting as idle) the function will 82749b99e1eSChristian Borntraeger * return immediately. 
*/ 82849b99e1eSChristian Borntraeger void exit_sie(struct kvm_vcpu *vcpu) 82949b99e1eSChristian Borntraeger { 83049b99e1eSChristian Borntraeger atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags); 83149b99e1eSChristian Borntraeger while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) 83249b99e1eSChristian Borntraeger cpu_relax(); 83349b99e1eSChristian Borntraeger } 83449b99e1eSChristian Borntraeger 83549b99e1eSChristian Borntraeger /* Kick a guest cpu out of SIE and prevent SIE-reentry */ 83649b99e1eSChristian Borntraeger void exit_sie_sync(struct kvm_vcpu *vcpu) 83749b99e1eSChristian Borntraeger { 83849b99e1eSChristian Borntraeger s390_vcpu_block(vcpu); 83949b99e1eSChristian Borntraeger exit_sie(vcpu); 84049b99e1eSChristian Borntraeger } 84149b99e1eSChristian Borntraeger 8422c70fe44SChristian Borntraeger static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address) 8432c70fe44SChristian Borntraeger { 8442c70fe44SChristian Borntraeger int i; 8452c70fe44SChristian Borntraeger struct kvm *kvm = gmap->private; 8462c70fe44SChristian Borntraeger struct kvm_vcpu *vcpu; 8472c70fe44SChristian Borntraeger 8482c70fe44SChristian Borntraeger kvm_for_each_vcpu(i, vcpu, kvm) { 8492c70fe44SChristian Borntraeger /* match against both prefix pages */ 850fda902cbSMichael Mueller if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) { 8512c70fe44SChristian Borntraeger VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address); 8522c70fe44SChristian Borntraeger kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); 8532c70fe44SChristian Borntraeger exit_sie_sync(vcpu); 8542c70fe44SChristian Borntraeger } 8552c70fe44SChristian Borntraeger } 8562c70fe44SChristian Borntraeger } 8572c70fe44SChristian Borntraeger 858b6d33834SChristoffer Dall int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) 859b6d33834SChristoffer Dall { 860b6d33834SChristoffer Dall /* kvm common code refers to this, but never calls it */ 861b6d33834SChristoffer Dall BUG(); 862b6d33834SChristoffer Dall return 0; 863b6d33834SChristoffer Dall } 864b6d33834SChristoffer Dall 86514eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, 86614eebd91SCarsten Otte struct kvm_one_reg *reg) 86714eebd91SCarsten Otte { 86814eebd91SCarsten Otte int r = -EINVAL; 86914eebd91SCarsten Otte 87014eebd91SCarsten Otte switch (reg->id) { 87129b7c71bSCarsten Otte case KVM_REG_S390_TODPR: 87229b7c71bSCarsten Otte r = put_user(vcpu->arch.sie_block->todpr, 87329b7c71bSCarsten Otte (u32 __user *)reg->addr); 87429b7c71bSCarsten Otte break; 87529b7c71bSCarsten Otte case KVM_REG_S390_EPOCHDIFF: 87629b7c71bSCarsten Otte r = put_user(vcpu->arch.sie_block->epoch, 87729b7c71bSCarsten Otte (u64 __user *)reg->addr); 87829b7c71bSCarsten Otte break; 87946a6dd1cSJason J. herne case KVM_REG_S390_CPU_TIMER: 88046a6dd1cSJason J. herne r = put_user(vcpu->arch.sie_block->cputm, 88146a6dd1cSJason J. herne (u64 __user *)reg->addr); 88246a6dd1cSJason J. herne break; 88346a6dd1cSJason J. herne case KVM_REG_S390_CLOCK_COMP: 88446a6dd1cSJason J. herne r = put_user(vcpu->arch.sie_block->ckc, 88546a6dd1cSJason J. herne (u64 __user *)reg->addr); 88646a6dd1cSJason J. 
herne break; 887536336c2SDominik Dingel case KVM_REG_S390_PFTOKEN: 888536336c2SDominik Dingel r = put_user(vcpu->arch.pfault_token, 889536336c2SDominik Dingel (u64 __user *)reg->addr); 890536336c2SDominik Dingel break; 891536336c2SDominik Dingel case KVM_REG_S390_PFCOMPARE: 892536336c2SDominik Dingel r = put_user(vcpu->arch.pfault_compare, 893536336c2SDominik Dingel (u64 __user *)reg->addr); 894536336c2SDominik Dingel break; 895536336c2SDominik Dingel case KVM_REG_S390_PFSELECT: 896536336c2SDominik Dingel r = put_user(vcpu->arch.pfault_select, 897536336c2SDominik Dingel (u64 __user *)reg->addr); 898536336c2SDominik Dingel break; 899672550fbSChristian Borntraeger case KVM_REG_S390_PP: 900672550fbSChristian Borntraeger r = put_user(vcpu->arch.sie_block->pp, 901672550fbSChristian Borntraeger (u64 __user *)reg->addr); 902672550fbSChristian Borntraeger break; 903afa45ff5SChristian Borntraeger case KVM_REG_S390_GBEA: 904afa45ff5SChristian Borntraeger r = put_user(vcpu->arch.sie_block->gbea, 905afa45ff5SChristian Borntraeger (u64 __user *)reg->addr); 906afa45ff5SChristian Borntraeger break; 90714eebd91SCarsten Otte default: 90814eebd91SCarsten Otte break; 90914eebd91SCarsten Otte } 91014eebd91SCarsten Otte 91114eebd91SCarsten Otte return r; 91214eebd91SCarsten Otte } 91314eebd91SCarsten Otte 91414eebd91SCarsten Otte static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, 91514eebd91SCarsten Otte struct kvm_one_reg *reg) 91614eebd91SCarsten Otte { 91714eebd91SCarsten Otte int r = -EINVAL; 91814eebd91SCarsten Otte 91914eebd91SCarsten Otte switch (reg->id) { 92029b7c71bSCarsten Otte case KVM_REG_S390_TODPR: 92129b7c71bSCarsten Otte r = get_user(vcpu->arch.sie_block->todpr, 92229b7c71bSCarsten Otte (u32 __user *)reg->addr); 92329b7c71bSCarsten Otte break; 92429b7c71bSCarsten Otte case KVM_REG_S390_EPOCHDIFF: 92529b7c71bSCarsten Otte r = get_user(vcpu->arch.sie_block->epoch, 92629b7c71bSCarsten Otte (u64 __user *)reg->addr); 92729b7c71bSCarsten Otte break; 92846a6dd1cSJason J. herne case KVM_REG_S390_CPU_TIMER: 92946a6dd1cSJason J. herne r = get_user(vcpu->arch.sie_block->cputm, 93046a6dd1cSJason J. herne (u64 __user *)reg->addr); 93146a6dd1cSJason J. herne break; 93246a6dd1cSJason J. herne case KVM_REG_S390_CLOCK_COMP: 93346a6dd1cSJason J. herne r = get_user(vcpu->arch.sie_block->ckc, 93446a6dd1cSJason J. herne (u64 __user *)reg->addr); 93546a6dd1cSJason J. 
herne break; 936536336c2SDominik Dingel case KVM_REG_S390_PFTOKEN: 937536336c2SDominik Dingel r = get_user(vcpu->arch.pfault_token, 938536336c2SDominik Dingel (u64 __user *)reg->addr); 939536336c2SDominik Dingel break; 940536336c2SDominik Dingel case KVM_REG_S390_PFCOMPARE: 941536336c2SDominik Dingel r = get_user(vcpu->arch.pfault_compare, 942536336c2SDominik Dingel (u64 __user *)reg->addr); 943536336c2SDominik Dingel break; 944536336c2SDominik Dingel case KVM_REG_S390_PFSELECT: 945536336c2SDominik Dingel r = get_user(vcpu->arch.pfault_select, 946536336c2SDominik Dingel (u64 __user *)reg->addr); 947536336c2SDominik Dingel break; 948672550fbSChristian Borntraeger case KVM_REG_S390_PP: 949672550fbSChristian Borntraeger r = get_user(vcpu->arch.sie_block->pp, 950672550fbSChristian Borntraeger (u64 __user *)reg->addr); 951672550fbSChristian Borntraeger break; 952afa45ff5SChristian Borntraeger case KVM_REG_S390_GBEA: 953afa45ff5SChristian Borntraeger r = get_user(vcpu->arch.sie_block->gbea, 954afa45ff5SChristian Borntraeger (u64 __user *)reg->addr); 955afa45ff5SChristian Borntraeger break; 95614eebd91SCarsten Otte default: 95714eebd91SCarsten Otte break; 95814eebd91SCarsten Otte } 95914eebd91SCarsten Otte 96014eebd91SCarsten Otte return r; 96114eebd91SCarsten Otte } 962b6d33834SChristoffer Dall 963b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu) 964b0c632dbSHeiko Carstens { 965b0c632dbSHeiko Carstens kvm_s390_vcpu_initial_reset(vcpu); 966b0c632dbSHeiko Carstens return 0; 967b0c632dbSHeiko Carstens } 968b0c632dbSHeiko Carstens 969b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 970b0c632dbSHeiko Carstens { 9715a32c1afSChristian Borntraeger memcpy(&vcpu->run->s.regs.gprs, ®s->gprs, sizeof(regs->gprs)); 972b0c632dbSHeiko Carstens return 0; 973b0c632dbSHeiko Carstens } 974b0c632dbSHeiko Carstens 975b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) 976b0c632dbSHeiko Carstens { 9775a32c1afSChristian Borntraeger memcpy(®s->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs)); 978b0c632dbSHeiko Carstens return 0; 979b0c632dbSHeiko Carstens } 980b0c632dbSHeiko Carstens 981b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 982b0c632dbSHeiko Carstens struct kvm_sregs *sregs) 983b0c632dbSHeiko Carstens { 98459674c1aSChristian Borntraeger memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs)); 985b0c632dbSHeiko Carstens memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); 98659674c1aSChristian Borntraeger restore_access_regs(vcpu->run->s.regs.acrs); 987b0c632dbSHeiko Carstens return 0; 988b0c632dbSHeiko Carstens } 989b0c632dbSHeiko Carstens 990b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, 991b0c632dbSHeiko Carstens struct kvm_sregs *sregs) 992b0c632dbSHeiko Carstens { 99359674c1aSChristian Borntraeger memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs)); 994b0c632dbSHeiko Carstens memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs)); 995b0c632dbSHeiko Carstens return 0; 996b0c632dbSHeiko Carstens } 997b0c632dbSHeiko Carstens 998b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 999b0c632dbSHeiko Carstens { 10004725c860SMartin Schwidefsky if (test_fp_ctl(fpu->fpc)) 10014725c860SMartin Schwidefsky return -EINVAL; 1002b0c632dbSHeiko Carstens memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, 
sizeof(fpu->fprs)); 10034725c860SMartin Schwidefsky vcpu->arch.guest_fpregs.fpc = fpu->fpc; 10044725c860SMartin Schwidefsky restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc); 10054725c860SMartin Schwidefsky restore_fp_regs(vcpu->arch.guest_fpregs.fprs); 1006b0c632dbSHeiko Carstens return 0; 1007b0c632dbSHeiko Carstens } 1008b0c632dbSHeiko Carstens 1009b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 1010b0c632dbSHeiko Carstens { 1011b0c632dbSHeiko Carstens memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs)); 1012b0c632dbSHeiko Carstens fpu->fpc = vcpu->arch.guest_fpregs.fpc; 1013b0c632dbSHeiko Carstens return 0; 1014b0c632dbSHeiko Carstens } 1015b0c632dbSHeiko Carstens 1016b0c632dbSHeiko Carstens static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw) 1017b0c632dbSHeiko Carstens { 1018b0c632dbSHeiko Carstens int rc = 0; 1019b0c632dbSHeiko Carstens 10207a42fdc2SDavid Hildenbrand if (!is_vcpu_stopped(vcpu)) 1021b0c632dbSHeiko Carstens rc = -EBUSY; 1022d7b0b5ebSCarsten Otte else { 1023d7b0b5ebSCarsten Otte vcpu->run->psw_mask = psw.mask; 1024d7b0b5ebSCarsten Otte vcpu->run->psw_addr = psw.addr; 1025d7b0b5ebSCarsten Otte } 1026b0c632dbSHeiko Carstens return rc; 1027b0c632dbSHeiko Carstens } 1028b0c632dbSHeiko Carstens 1029b0c632dbSHeiko Carstens int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, 1030b0c632dbSHeiko Carstens struct kvm_translation *tr) 1031b0c632dbSHeiko Carstens { 1032b0c632dbSHeiko Carstens return -EINVAL; /* not implemented yet */ 1033b0c632dbSHeiko Carstens } 1034b0c632dbSHeiko Carstens 103527291e21SDavid Hildenbrand #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \ 103627291e21SDavid Hildenbrand KVM_GUESTDBG_USE_HW_BP | \ 103727291e21SDavid Hildenbrand KVM_GUESTDBG_ENABLE) 103827291e21SDavid Hildenbrand 1039d0bfb940SJan Kiszka int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 1040d0bfb940SJan Kiszka struct kvm_guest_debug *dbg) 1041b0c632dbSHeiko Carstens { 104227291e21SDavid Hildenbrand int rc = 0; 104327291e21SDavid Hildenbrand 104427291e21SDavid Hildenbrand vcpu->guest_debug = 0; 104527291e21SDavid Hildenbrand kvm_s390_clear_bp_data(vcpu); 104627291e21SDavid Hildenbrand 10472de3bfc2SDavid Hildenbrand if (dbg->control & ~VALID_GUESTDBG_FLAGS) 104827291e21SDavid Hildenbrand return -EINVAL; 104927291e21SDavid Hildenbrand 105027291e21SDavid Hildenbrand if (dbg->control & KVM_GUESTDBG_ENABLE) { 105127291e21SDavid Hildenbrand vcpu->guest_debug = dbg->control; 105227291e21SDavid Hildenbrand /* enforce guest PER */ 105327291e21SDavid Hildenbrand atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); 105427291e21SDavid Hildenbrand 105527291e21SDavid Hildenbrand if (dbg->control & KVM_GUESTDBG_USE_HW_BP) 105627291e21SDavid Hildenbrand rc = kvm_s390_import_bp_data(vcpu, dbg); 105727291e21SDavid Hildenbrand } else { 105827291e21SDavid Hildenbrand atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); 105927291e21SDavid Hildenbrand vcpu->arch.guestdbg.last_bp = 0; 106027291e21SDavid Hildenbrand } 106127291e21SDavid Hildenbrand 106227291e21SDavid Hildenbrand if (rc) { 106327291e21SDavid Hildenbrand vcpu->guest_debug = 0; 106427291e21SDavid Hildenbrand kvm_s390_clear_bp_data(vcpu); 106527291e21SDavid Hildenbrand atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags); 106627291e21SDavid Hildenbrand } 106727291e21SDavid Hildenbrand 106827291e21SDavid Hildenbrand return rc; 1069b0c632dbSHeiko Carstens } 1070b0c632dbSHeiko Carstens 107162d9f0dbSMarcelo 
Tosatti int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 107262d9f0dbSMarcelo Tosatti struct kvm_mp_state *mp_state) 107362d9f0dbSMarcelo Tosatti { 10746352e4d2SDavid Hildenbrand /* CHECK_STOP and LOAD are not supported yet */ 10756352e4d2SDavid Hildenbrand return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED : 10766352e4d2SDavid Hildenbrand KVM_MP_STATE_OPERATING; 107762d9f0dbSMarcelo Tosatti } 107862d9f0dbSMarcelo Tosatti 107962d9f0dbSMarcelo Tosatti int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, 108062d9f0dbSMarcelo Tosatti struct kvm_mp_state *mp_state) 108162d9f0dbSMarcelo Tosatti { 10826352e4d2SDavid Hildenbrand int rc = 0; 10836352e4d2SDavid Hildenbrand 10846352e4d2SDavid Hildenbrand /* user space knows about this interface - let it control the state */ 10856352e4d2SDavid Hildenbrand vcpu->kvm->arch.user_cpu_state_ctrl = 1; 10866352e4d2SDavid Hildenbrand 10876352e4d2SDavid Hildenbrand switch (mp_state->mp_state) { 10886352e4d2SDavid Hildenbrand case KVM_MP_STATE_STOPPED: 10896352e4d2SDavid Hildenbrand kvm_s390_vcpu_stop(vcpu); 10906352e4d2SDavid Hildenbrand break; 10916352e4d2SDavid Hildenbrand case KVM_MP_STATE_OPERATING: 10926352e4d2SDavid Hildenbrand kvm_s390_vcpu_start(vcpu); 10936352e4d2SDavid Hildenbrand break; 10946352e4d2SDavid Hildenbrand case KVM_MP_STATE_LOAD: 10956352e4d2SDavid Hildenbrand case KVM_MP_STATE_CHECK_STOP: 10966352e4d2SDavid Hildenbrand /* fall through - CHECK_STOP and LOAD are not supported yet */ 10976352e4d2SDavid Hildenbrand default: 10986352e4d2SDavid Hildenbrand rc = -ENXIO; 10996352e4d2SDavid Hildenbrand } 11006352e4d2SDavid Hildenbrand 11016352e4d2SDavid Hildenbrand return rc; 110262d9f0dbSMarcelo Tosatti } 110362d9f0dbSMarcelo Tosatti 1104b31605c1SDominik Dingel bool kvm_s390_cmma_enabled(struct kvm *kvm) 1105b31605c1SDominik Dingel { 1106b31605c1SDominik Dingel if (!MACHINE_IS_LPAR) 1107b31605c1SDominik Dingel return false; 1108b31605c1SDominik Dingel /* only enable for z10 and later */ 1109b31605c1SDominik Dingel if (!MACHINE_HAS_EDAT1) 1110b31605c1SDominik Dingel return false; 1111b31605c1SDominik Dingel if (!kvm->arch.use_cmma) 1112b31605c1SDominik Dingel return false; 1113b31605c1SDominik Dingel return true; 1114b31605c1SDominik Dingel } 1115b31605c1SDominik Dingel 11168ad35755SDavid Hildenbrand static bool ibs_enabled(struct kvm_vcpu *vcpu) 11178ad35755SDavid Hildenbrand { 11188ad35755SDavid Hildenbrand return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS; 11198ad35755SDavid Hildenbrand } 11208ad35755SDavid Hildenbrand 11212c70fe44SChristian Borntraeger static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) 11222c70fe44SChristian Borntraeger { 11238ad35755SDavid Hildenbrand retry: 11248ad35755SDavid Hildenbrand s390_vcpu_unblock(vcpu); 11252c70fe44SChristian Borntraeger /* 11262c70fe44SChristian Borntraeger * We use MMU_RELOAD just to re-arm the ipte notifier for the 11272c70fe44SChristian Borntraeger * guest prefix page. gmap_ipte_notify will wait on the ptl lock. 11282c70fe44SChristian Borntraeger * This ensures that the ipte instruction for this request has 11292c70fe44SChristian Borntraeger * already finished. We might race against a second unmapper that 11302c70fe44SChristian Borntraeger * wants to set the blocking bit. Lets just retry the request loop. 
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to clean up
	 */
	return true;
}

static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;
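
	/*
	 * All conditions for an async page fault are met: translate the
	 * faulting guest address (saved by the host fault handler in
	 * current->thread.gmap_addr) to a host virtual address and read the
	 * pfault token from guest memory before arming the async work item.
	 */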

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the housekeeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			gpa_t gpa = current->thread.gmap_addr;
			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in the fault handler, there must be
		 * no uaccess between guest_enter and guest_exit.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
	}
	kvm_run->kvm_dirty_regs = 0;
}

static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put.
	 * Let's update our copies before we save
	 * them into the save area.
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
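
/*
 * Illustrative sketch (not part of this file): the alignment rules enforced
 * in kvm_arch_prepare_memory_region() below mean a userspace VMM has to
 * register guest memory in 1MB-aligned chunks, roughly like this:
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size = ram_size,	// multiple of 1MB
 *		.userspace_addr = (__u64) ram,	// 1MB-aligned mmap() result
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 *
 * vm_fd, ram and ram_size are assumed to come from the usual KVM_CREATE_VM
 * and mmap() setup; error handling is omitted.
 */
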
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. Memory slots have to start and end at a
	   segment boundary (1MB). The memory in userland may be fragmented
	   into various different vmas. It is okay to mmap() and munmap()
	   stuff in this slot after doing this call at any time. */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	vfacilities[0] &= 0xff82fffbf47c2000UL;
	vfacilities[1] &= 0x005c000000000000UL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");