/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 *            Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_USER_SIGP:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (new_limit > kvm->arch.gmap->asce_end)
			return -E2BIG;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_vfacility(76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *cur_vcpu;
	unsigned int vcpu_idx;
	u64 host_tod, gtod;
	int r;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	kvm->arch.epoch = gtod - host_tod;
	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		exit_sie(cur_vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 host_tod, gtod;
	int r;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	gtod = host_tod + kvm->arch.epoch;
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc;

	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"ipm %0\n"
		"srl %0,28\n"
		: "=r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(2) && test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_vfacility(76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm_s390_set_crycb_format(kvm);

	/* Disable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 0;
	kvm->arch.crypto.dea_kw = 0;

	return 0;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_crypto;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_nogmap:
	kfree(kvm->arch.crypto.crycb);
out_crypto:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_vfacility(76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	if (sclp_has_sigpif())
		vcpu->arch.sie_block->eca |= 0x10000000U;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
				      ICTL_TPROT;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
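
/*
 * Illustrative userspace access pattern (a sketch, not part of the original
 * file): general purpose registers are read and written via KVM_GET_REGS/
 * KVM_SET_REGS, access and control registers via KVM_GET_SREGS/KVM_SET_SREGS,
 * floating point state via KVM_GET_FPU/KVM_SET_FPU, and the remaining s390
 * specific state via KVM_GET_ONE_REG/KVM_SET_ONE_REG handled above, e.g.
 *
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)&cpu_timer,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 *
 * where cpu_timer is a u64 in the caller holding the new CPU timer value.
 */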
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}

bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
	if (!MACHINE_IS_LPAR)
		return false;
	/* only enable for z10 and later */
	if (!MACHINE_HAS_EDAT1)
		return false;
	if (!kvm->arch.use_cmma)
		return false;
	return true;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
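	 *
	 * Note that the retry label above restarts the whole request scan,
	 * so every pending request is re-evaluated once the notifier has
	 * been re-armed.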
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to clean up
	 */
	return true;
}

static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;
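
	/*
	 * All preconditions are met: pfault handling is enabled and armed,
	 * external interrupts are not masked and nothing else is pending.
	 * Translate the faulting guest address to a host virtual address
	 * and hand it to the common async page fault code.
	 */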
	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the housekeeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			gpa_t gpa = current->thread.gmap_addr;
			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in the fault handler, there
		 * should be no uaccess between guest_enter and guest_exit.
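		 *
		 * Preemption is only disabled around kvm_guest_enter() for
		 * the accounting update; the actual SIE entry via sie64a()
		 * below runs with preemption enabled again.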
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	kvm_run->kvm_dirty_regs = 0;
}

static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
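
/*
 * Illustrative note (not from the original source): userspace reaches the
 * store status code below through the KVM_S390_STORE_STATUS vcpu ioctl,
 * for example
 *
 *	ioctl(vcpu_fd, KVM_S390_STORE_STATUS, KVM_S390_STORE_STATUS_NOADDR);
 *
 * where the NOADDR and PREFIXED special values select the fixed save area
 * at 0x1200 respectively the save area relative to the vcpu prefix, as
 * handled in kvm_s390_store_status_unloaded().
 */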

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * it into the save area.
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
			 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
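/*
 * Illustrative note (a sketch, not from the original source): both the
 * userspace address and the size of a memory slot must be 1 MB aligned,
 * so a KVM_SET_USER_MEMORY_REGION request with, say, memory_size = 0x180000
 * (1.5 MB) is rejected with -EINVAL by kvm_arch_prepare_memory_region()
 * below.
 */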
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. Memory slots have to start and end at a
	   segment boundary (1MB). The memory in userland may be fragmented
	   into various different vmas. It is okay to mmap() and munmap()
	   stuff in this slot after doing this call at any time */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
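	 *
	 * (vfacilities below is a copy of the host facility list reported
	 * by STFLE, masked down to the facility bits this implementation
	 * is known to handle.)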
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	vfacilities[0] &= 0xff82fffbf4fc2000UL;
	vfacilities[1] &= 0x005c000000000000UL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");