Lines Matching +full:- +full:kvm
1 // SPDX-License-Identifier: GPL-2.0
13 #define KMSG_COMPONENT "kvm-s390"
21 #include <linux/kvm.h>
36 #include <asm/asm-offsets.h>
49 #include "kvm-s390.h"
55 #include "trace-s390.h"
195 /* allow nested virtualization in KVM (if enabled by user space) */
222 * the feature is opt-in anyway
237 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
256 /* available cpu features supported by kvm */
270 static int sca_switch_to_extended(struct kvm *kvm);
278 * -delta to the epoch. in kvm_clock_sync_scb()
280 delta = -delta; in kvm_clock_sync_scb()
282 /* sign-extension - we're adding to signed values below */ in kvm_clock_sync_scb()
284 delta_idx = -1; in kvm_clock_sync_scb()
286 scb->epoch += delta; in kvm_clock_sync_scb()
287 if (scb->ecd & ECD_MEF) { in kvm_clock_sync_scb()
288 scb->epdx += delta_idx; in kvm_clock_sync_scb()
289 if (scb->epoch < delta) in kvm_clock_sync_scb()
290 scb->epdx += 1; in kvm_clock_sync_scb()
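The epoch adjustment above treats (epdx:epoch) as one wide signed value: the delta is sign-extended into the extension byte and an unsigned wrap of the low 64 bits carries into it. A minimal standalone sketch of that idea, with made-up struct and function names and assuming an 8-bit extension as under the multiple-epoch facility:

#include <stdint.h>

struct epoch128 {
	uint64_t epoch;	/* low 64 bits of the guest epoch */
	uint8_t  epdx;	/* epoch extension (multiple-epoch facility) */
};

/* Add a signed 64-bit delta to (epdx:epoch), propagating carry/borrow. */
static void epoch_add(struct epoch128 *e, int64_t delta)
{
	/* sign-extend the delta into the extension byte: 0x00 or 0xff */
	uint8_t delta_idx = delta < 0 ? (uint8_t)-1 : 0;

	e->epoch += (uint64_t)delta;
	e->epdx += delta_idx;
	/* an unsigned wrap of the low part means a carry into the extension */
	if (e->epoch < (uint64_t)delta)
		e->epdx += 1;
}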
297 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
303 struct kvm *kvm; in kvm_clock_sync() local
308 list_for_each_entry(kvm, &vm_list, vm_list) { in kvm_clock_sync()
309 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_clock_sync()
310 kvm_clock_sync_scb(vcpu->arch.sie_block, *delta); in kvm_clock_sync()
312 kvm->arch.epoch = vcpu->arch.sie_block->epoch; in kvm_clock_sync()
313 kvm->arch.epdx = vcpu->arch.sie_block->epdx; in kvm_clock_sync()
315 if (vcpu->arch.cputm_enabled) in kvm_clock_sync()
316 vcpu->arch.cputm_start += *delta; in kvm_clock_sync()
317 if (vcpu->arch.vsie_block) in kvm_clock_sync()
318 kvm_clock_sync_scb(vcpu->arch.vsie_block, in kvm_clock_sync()
375 if (test_facility(28)) /* TOD-clock steering */ in kvm_s390_cpu_feat_init()
471 int rc = -ENOMEM; in __kvm_s390_init()
473 kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long)); in __kvm_s390_init()
475 return -ENOMEM; in __kvm_s390_init()
477 kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long)); in __kvm_s390_init()
547 return -EINVAL; in kvm_arch_dev_ioctl()
550 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) in kvm_vm_ioctl_check_extension() argument
592 if (hpage && !kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_check_extension()
671 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) in kvm_arch_sync_dirty_log() argument
676 struct gmap *gmap = kvm->arch.gmap; in kvm_arch_sync_dirty_log()
680 cur_gfn = memslot->base_gfn; in kvm_arch_sync_dirty_log()
681 last_gfn = memslot->base_gfn + memslot->npages; in kvm_arch_sync_dirty_log()
692 mark_page_dirty(kvm, cur_gfn + i); in kvm_arch_sync_dirty_log()
707 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, in kvm_vm_ioctl_get_dirty_log() argument
715 if (kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_get_dirty_log()
716 return -EINVAL; in kvm_vm_ioctl_get_dirty_log()
718 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
720 r = -EINVAL; in kvm_vm_ioctl_get_dirty_log()
721 if (log->slot >= KVM_USER_MEM_SLOTS) in kvm_vm_ioctl_get_dirty_log()
724 r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot); in kvm_vm_ioctl_get_dirty_log()
731 memset(memslot->dirty_bitmap, 0, n); in kvm_vm_ioctl_get_dirty_log()
735 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
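For context, this handler is reached from userspace through the VM-level KVM_GET_DIRTY_LOG ioctl. A hedged usage sketch (fetch_dirty_log and its parameters are illustrative, not from this file), assuming the caller sized the bitmap to at least ALIGN(npages, 64) / 8 bytes:

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* vm_fd: KVM VM file descriptor; bitmap: one bit per guest page in the slot */
int fetch_dirty_log(int vm_fd, uint32_t slot, void *bitmap)
{
	struct kvm_dirty_log log = {
		.slot = slot,
		.dirty_bitmap = bitmap,
	};

	/* 0 on success; pages dirtied since the previous call are set in bitmap */
	return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
}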
739 static void icpt_operexc_on_all_vcpus(struct kvm *kvm) in icpt_operexc_on_all_vcpus() argument
744 kvm_for_each_vcpu(i, vcpu, kvm) { in icpt_operexc_on_all_vcpus()
749 int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) in kvm_vm_ioctl_enable_cap() argument
753 if (cap->flags) in kvm_vm_ioctl_enable_cap()
754 return -EINVAL; in kvm_vm_ioctl_enable_cap()
756 switch (cap->cap) { in kvm_vm_ioctl_enable_cap()
758 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP"); in kvm_vm_ioctl_enable_cap()
759 kvm->arch.use_irqchip = 1; in kvm_vm_ioctl_enable_cap()
763 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP"); in kvm_vm_ioctl_enable_cap()
764 kvm->arch.user_sigp = 1; in kvm_vm_ioctl_enable_cap()
768 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
769 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
770 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
772 set_kvm_facility(kvm->arch.model.fac_mask, 129); in kvm_vm_ioctl_enable_cap()
773 set_kvm_facility(kvm->arch.model.fac_list, 129); in kvm_vm_ioctl_enable_cap()
775 set_kvm_facility(kvm->arch.model.fac_mask, 134); in kvm_vm_ioctl_enable_cap()
776 set_kvm_facility(kvm->arch.model.fac_list, 134); in kvm_vm_ioctl_enable_cap()
779 set_kvm_facility(kvm->arch.model.fac_mask, 135); in kvm_vm_ioctl_enable_cap()
780 set_kvm_facility(kvm->arch.model.fac_list, 135); in kvm_vm_ioctl_enable_cap()
783 set_kvm_facility(kvm->arch.model.fac_mask, 148); in kvm_vm_ioctl_enable_cap()
784 set_kvm_facility(kvm->arch.model.fac_list, 148); in kvm_vm_ioctl_enable_cap()
787 set_kvm_facility(kvm->arch.model.fac_mask, 152); in kvm_vm_ioctl_enable_cap()
788 set_kvm_facility(kvm->arch.model.fac_list, 152); in kvm_vm_ioctl_enable_cap()
791 set_kvm_facility(kvm->arch.model.fac_mask, 192); in kvm_vm_ioctl_enable_cap()
792 set_kvm_facility(kvm->arch.model.fac_list, 192); in kvm_vm_ioctl_enable_cap()
796 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
797 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
798 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s", in kvm_vm_ioctl_enable_cap()
802 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
803 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
804 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
805 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
807 set_kvm_facility(kvm->arch.model.fac_mask, 64); in kvm_vm_ioctl_enable_cap()
808 set_kvm_facility(kvm->arch.model.fac_list, 64); in kvm_vm_ioctl_enable_cap()
811 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
812 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s", in kvm_vm_ioctl_enable_cap()
816 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
817 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
818 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
820 set_kvm_facility(kvm->arch.model.fac_mask, 72); in kvm_vm_ioctl_enable_cap()
821 set_kvm_facility(kvm->arch.model.fac_list, 72); in kvm_vm_ioctl_enable_cap()
824 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
825 VM_EVENT(kvm, 3, "ENABLE: AIS %s", in kvm_vm_ioctl_enable_cap()
829 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
830 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
831 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
832 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
834 set_kvm_facility(kvm->arch.model.fac_mask, 133); in kvm_vm_ioctl_enable_cap()
835 set_kvm_facility(kvm->arch.model.fac_list, 133); in kvm_vm_ioctl_enable_cap()
838 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
839 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s", in kvm_vm_ioctl_enable_cap()
843 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
844 if (kvm->created_vcpus) in kvm_vm_ioctl_enable_cap()
845 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
846 else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_enable_cap()
847 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
850 mmap_write_lock(kvm->mm); in kvm_vm_ioctl_enable_cap()
851 kvm->mm->context.allow_gmap_hpage_1m = 1; in kvm_vm_ioctl_enable_cap()
852 mmap_write_unlock(kvm->mm); in kvm_vm_ioctl_enable_cap()
858 kvm->arch.use_skf = 0; in kvm_vm_ioctl_enable_cap()
859 kvm->arch.use_pfmfi = 0; in kvm_vm_ioctl_enable_cap()
861 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
862 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s", in kvm_vm_ioctl_enable_cap()
866 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI"); in kvm_vm_ioctl_enable_cap()
867 kvm->arch.user_stsi = 1; in kvm_vm_ioctl_enable_cap()
871 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0"); in kvm_vm_ioctl_enable_cap()
872 kvm->arch.user_instr0 = 1; in kvm_vm_ioctl_enable_cap()
873 icpt_operexc_on_all_vcpus(kvm); in kvm_vm_ioctl_enable_cap()
877 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
878 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
879 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
880 r = -EBUSY; in kvm_vm_ioctl_enable_cap()
882 set_kvm_facility(kvm->arch.model.fac_mask, 11); in kvm_vm_ioctl_enable_cap()
883 set_kvm_facility(kvm->arch.model.fac_list, 11); in kvm_vm_ioctl_enable_cap()
886 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
887 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_CPU_TOPOLOGY %s", in kvm_vm_ioctl_enable_cap()
891 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
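All of the capabilities handled above are enabled from userspace with the VM-level KVM_ENABLE_CAP ioctl; note that cap->flags must be zero or the handler returns -EINVAL. A hedged sketch (enable_user_sigp is an illustrative name) for KVM_CAP_S390_USER_SIGP:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

int enable_user_sigp(int vm_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));	/* flags and args stay zero */
	cap.cap = KVM_CAP_S390_USER_SIGP;

	/* returns 0 on success, -1 with errno set otherwise */
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}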
897 static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_mem_control() argument
901 switch (attr->attr) { in kvm_s390_get_mem_control()
904 VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes", in kvm_s390_get_mem_control()
905 kvm->arch.mem_limit); in kvm_s390_get_mem_control()
906 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr)) in kvm_s390_get_mem_control()
907 ret = -EFAULT; in kvm_s390_get_mem_control()
910 ret = -ENXIO; in kvm_s390_get_mem_control()
916 static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_mem_control() argument
920 switch (attr->attr) { in kvm_s390_set_mem_control()
922 ret = -ENXIO; in kvm_s390_set_mem_control()
926 VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support"); in kvm_s390_set_mem_control()
927 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
928 if (kvm->created_vcpus) in kvm_s390_set_mem_control()
929 ret = -EBUSY; in kvm_s390_set_mem_control()
930 else if (kvm->mm->context.allow_gmap_hpage_1m) in kvm_s390_set_mem_control()
931 ret = -EINVAL; in kvm_s390_set_mem_control()
933 kvm->arch.use_cmma = 1; in kvm_s390_set_mem_control()
935 kvm->arch.use_pfmfi = 0; in kvm_s390_set_mem_control()
938 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
941 ret = -ENXIO; in kvm_s390_set_mem_control()
944 ret = -EINVAL; in kvm_s390_set_mem_control()
945 if (!kvm->arch.use_cmma) in kvm_s390_set_mem_control()
948 VM_EVENT(kvm, 3, "%s", "RESET: CMMA states"); in kvm_s390_set_mem_control()
949 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
950 idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_mem_control()
951 s390_reset_cmma(kvm->arch.gmap->mm); in kvm_s390_set_mem_control()
952 srcu_read_unlock(&kvm->srcu, idx); in kvm_s390_set_mem_control()
953 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
959 if (kvm_is_ucontrol(kvm)) in kvm_s390_set_mem_control()
960 return -EINVAL; in kvm_s390_set_mem_control()
962 if (get_user(new_limit, (u64 __user *)attr->addr)) in kvm_s390_set_mem_control()
963 return -EFAULT; in kvm_s390_set_mem_control()
965 if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT && in kvm_s390_set_mem_control()
966 new_limit > kvm->arch.mem_limit) in kvm_s390_set_mem_control()
967 return -E2BIG; in kvm_s390_set_mem_control()
970 return -EINVAL; in kvm_s390_set_mem_control()
974 new_limit -= 1; in kvm_s390_set_mem_control()
976 ret = -EBUSY; in kvm_s390_set_mem_control()
977 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
978 if (!kvm->created_vcpus) { in kvm_s390_set_mem_control()
980 struct gmap *new = gmap_create(current->mm, new_limit); in kvm_s390_set_mem_control()
983 ret = -ENOMEM; in kvm_s390_set_mem_control()
985 gmap_remove(kvm->arch.gmap); in kvm_s390_set_mem_control()
986 new->private = kvm; in kvm_s390_set_mem_control()
987 kvm->arch.gmap = new; in kvm_s390_set_mem_control()
991 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
992 VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit); in kvm_s390_set_mem_control()
993 VM_EVENT(kvm, 3, "New guest asce: 0x%pK", in kvm_s390_set_mem_control()
994 (void *) kvm->arch.gmap->asce); in kvm_s390_set_mem_control()
998 ret = -ENXIO; in kvm_s390_set_mem_control()
1006 void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm) in kvm_s390_vcpu_crypto_reset_all() argument
1011 kvm_s390_vcpu_block_all(kvm); in kvm_s390_vcpu_crypto_reset_all()
1013 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_s390_vcpu_crypto_reset_all()
1019 kvm_s390_vcpu_unblock_all(kvm); in kvm_s390_vcpu_crypto_reset_all()
1022 static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_set_crypto() argument
1024 mutex_lock(&kvm->lock); in kvm_s390_vm_set_crypto()
1025 switch (attr->attr) { in kvm_s390_vm_set_crypto()
1027 if (!test_kvm_facility(kvm, 76)) { in kvm_s390_vm_set_crypto()
1028 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1029 return -EINVAL; in kvm_s390_vm_set_crypto()
1032 kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_vm_set_crypto()
1033 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1034 kvm->arch.crypto.aes_kw = 1; in kvm_s390_vm_set_crypto()
1035 VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support"); in kvm_s390_vm_set_crypto()
1038 if (!test_kvm_facility(kvm, 76)) { in kvm_s390_vm_set_crypto()
1039 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1040 return -EINVAL; in kvm_s390_vm_set_crypto()
1043 kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_vm_set_crypto()
1044 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1045 kvm->arch.crypto.dea_kw = 1; in kvm_s390_vm_set_crypto()
1046 VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support"); in kvm_s390_vm_set_crypto()
1049 if (!test_kvm_facility(kvm, 76)) { in kvm_s390_vm_set_crypto()
1050 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1051 return -EINVAL; in kvm_s390_vm_set_crypto()
1053 kvm->arch.crypto.aes_kw = 0; in kvm_s390_vm_set_crypto()
1054 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
1055 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1056 VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support"); in kvm_s390_vm_set_crypto()
1059 if (!test_kvm_facility(kvm, 76)) { in kvm_s390_vm_set_crypto()
1060 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1061 return -EINVAL; in kvm_s390_vm_set_crypto()
1063 kvm->arch.crypto.dea_kw = 0; in kvm_s390_vm_set_crypto()
1064 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
1065 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1066 VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support"); in kvm_s390_vm_set_crypto()
1070 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1071 return -EOPNOTSUPP; in kvm_s390_vm_set_crypto()
1073 kvm->arch.crypto.apie = 1; in kvm_s390_vm_set_crypto()
1077 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1078 return -EOPNOTSUPP; in kvm_s390_vm_set_crypto()
1080 kvm->arch.crypto.apie = 0; in kvm_s390_vm_set_crypto()
1083 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1084 return -ENXIO; in kvm_s390_vm_set_crypto()
1087 kvm_s390_vcpu_crypto_reset_all(kvm); in kvm_s390_vm_set_crypto()
1088 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1095 if (!vcpu->kvm->arch.use_zpci_interp) in kvm_s390_vcpu_pci_setup()
1098 vcpu->arch.sie_block->ecb2 |= ECB2_ZPCI_LSI; in kvm_s390_vcpu_pci_setup()
1099 vcpu->arch.sie_block->ecb3 |= ECB3_AISII + ECB3_AISI; in kvm_s390_vcpu_pci_setup()
1102 void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm) in kvm_s390_vcpu_pci_enable_interp() argument
1107 lockdep_assert_held(&kvm->lock); in kvm_s390_vcpu_pci_enable_interp()
1116 kvm->arch.use_zpci_interp = 1; in kvm_s390_vcpu_pci_enable_interp()
1118 kvm_s390_vcpu_block_all(kvm); in kvm_s390_vcpu_pci_enable_interp()
1120 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_s390_vcpu_pci_enable_interp()
1125 kvm_s390_vcpu_unblock_all(kvm); in kvm_s390_vcpu_pci_enable_interp()
1128 static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req) in kvm_s390_sync_request_broadcast() argument
1133 kvm_for_each_vcpu(cx, vcpu, kvm) in kvm_s390_sync_request_broadcast()
1138 * Must be called with kvm->srcu held to avoid races on memslots, and with
1139 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
1141 static int kvm_s390_vm_start_migration(struct kvm *kvm) in kvm_s390_vm_start_migration() argument
1149 if (kvm->arch.migration_mode) in kvm_s390_vm_start_migration()
1151 slots = kvm_memslots(kvm); in kvm_s390_vm_start_migration()
1153 return -EINVAL; in kvm_s390_vm_start_migration()
1155 if (!kvm->arch.use_cmma) { in kvm_s390_vm_start_migration()
1156 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
1161 if (!ms->dirty_bitmap) in kvm_s390_vm_start_migration()
1162 return -EINVAL; in kvm_s390_vm_start_migration()
1170 ram_pages += ms->npages; in kvm_s390_vm_start_migration()
1172 atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages); in kvm_s390_vm_start_migration()
1173 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
1174 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION); in kvm_s390_vm_start_migration()
1179 * Must be called with kvm->slots_lock to avoid races with ourselves and
1182 static int kvm_s390_vm_stop_migration(struct kvm *kvm) in kvm_s390_vm_stop_migration() argument
1185 if (!kvm->arch.migration_mode) in kvm_s390_vm_stop_migration()
1187 kvm->arch.migration_mode = 0; in kvm_s390_vm_stop_migration()
1188 if (kvm->arch.use_cmma) in kvm_s390_vm_stop_migration()
1189 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION); in kvm_s390_vm_stop_migration()
1193 static int kvm_s390_vm_set_migration(struct kvm *kvm, in kvm_s390_vm_set_migration() argument
1196 int res = -ENXIO; in kvm_s390_vm_set_migration()
1198 mutex_lock(&kvm->slots_lock); in kvm_s390_vm_set_migration()
1199 switch (attr->attr) { in kvm_s390_vm_set_migration()
1201 res = kvm_s390_vm_start_migration(kvm); in kvm_s390_vm_set_migration()
1204 res = kvm_s390_vm_stop_migration(kvm); in kvm_s390_vm_set_migration()
1209 mutex_unlock(&kvm->slots_lock); in kvm_s390_vm_set_migration()
1214 static int kvm_s390_vm_get_migration(struct kvm *kvm, in kvm_s390_vm_get_migration() argument
1217 u64 mig = kvm->arch.migration_mode; in kvm_s390_vm_get_migration()
1219 if (attr->attr != KVM_S390_VM_MIGRATION_STATUS) in kvm_s390_vm_get_migration()
1220 return -ENXIO; in kvm_s390_vm_get_migration()
1222 if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig))) in kvm_s390_vm_get_migration()
1223 return -EFAULT; in kvm_s390_vm_get_migration()
1227 static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
1229 static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod_ext() argument
1233 if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod))) in kvm_s390_set_tod_ext()
1234 return -EFAULT; in kvm_s390_set_tod_ext()
1236 if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx) in kvm_s390_set_tod_ext()
1237 return -EINVAL; in kvm_s390_set_tod_ext()
1238 __kvm_s390_set_tod_clock(kvm, &gtod); in kvm_s390_set_tod_ext()
1240 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx", in kvm_s390_set_tod_ext()
1246 static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod_high() argument
1250 if (copy_from_user(&gtod_high, (void __user *)attr->addr, in kvm_s390_set_tod_high()
1252 return -EFAULT; in kvm_s390_set_tod_high()
1255 return -EINVAL; in kvm_s390_set_tod_high()
1256 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high); in kvm_s390_set_tod_high()
1261 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod_low() argument
1265 if (copy_from_user(&gtod.tod, (void __user *)attr->addr, in kvm_s390_set_tod_low()
1267 return -EFAULT; in kvm_s390_set_tod_low()
1269 __kvm_s390_set_tod_clock(kvm, &gtod); in kvm_s390_set_tod_low()
1270 VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod); in kvm_s390_set_tod_low()
1274 static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod() argument
1278 if (attr->flags) in kvm_s390_set_tod()
1279 return -EINVAL; in kvm_s390_set_tod()
1281 mutex_lock(&kvm->lock); in kvm_s390_set_tod()
1286 if (kvm_s390_pv_is_protected(kvm)) { in kvm_s390_set_tod()
1287 ret = -EOPNOTSUPP; in kvm_s390_set_tod()
1291 switch (attr->attr) { in kvm_s390_set_tod()
1293 ret = kvm_s390_set_tod_ext(kvm, attr); in kvm_s390_set_tod()
1296 ret = kvm_s390_set_tod_high(kvm, attr); in kvm_s390_set_tod()
1299 ret = kvm_s390_set_tod_low(kvm, attr); in kvm_s390_set_tod()
1302 ret = -ENXIO; in kvm_s390_set_tod()
1307 mutex_unlock(&kvm->lock); in kvm_s390_set_tod()
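The TOD setters above are reached through the KVM_SET_DEVICE_ATTR vm ioctl with the KVM_S390_VM_TOD attribute group. A hedged userspace sketch for the low 64 bits (set_guest_tod_low is an illustrative name; the group/attr constants come from the s390 KVM uapi headers):

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

int set_guest_tod_low(int vm_fd, uint64_t tod)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_TOD,
		.attr  = KVM_S390_VM_TOD_LOW,
		.addr  = (uint64_t)(uintptr_t)&tod,	/* read with copy_from_user() */
	};

	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}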
1311 static void kvm_s390_get_tod_clock(struct kvm *kvm, in kvm_s390_get_tod_clock() argument
1320 gtod->tod = clk.tod + kvm->arch.epoch; in kvm_s390_get_tod_clock()
1321 gtod->epoch_idx = 0; in kvm_s390_get_tod_clock()
1322 if (test_kvm_facility(kvm, 139)) { in kvm_s390_get_tod_clock()
1323 gtod->epoch_idx = clk.ei + kvm->arch.epdx; in kvm_s390_get_tod_clock()
1324 if (gtod->tod < clk.tod) in kvm_s390_get_tod_clock()
1325 gtod->epoch_idx += 1; in kvm_s390_get_tod_clock()
1331 static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod_ext() argument
1336 kvm_s390_get_tod_clock(kvm, &gtod); in kvm_s390_get_tod_ext()
1337 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod))) in kvm_s390_get_tod_ext()
1338 return -EFAULT; in kvm_s390_get_tod_ext()
1340 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx", in kvm_s390_get_tod_ext()
1345 static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod_high() argument
1349 if (copy_to_user((void __user *)attr->addr, &gtod_high, in kvm_s390_get_tod_high()
1351 return -EFAULT; in kvm_s390_get_tod_high()
1352 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high); in kvm_s390_get_tod_high()
1357 static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod_low() argument
1361 gtod = kvm_s390_get_tod_clock_fast(kvm); in kvm_s390_get_tod_low()
1362 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod))) in kvm_s390_get_tod_low()
1363 return -EFAULT; in kvm_s390_get_tod_low()
1364 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod); in kvm_s390_get_tod_low()
1369 static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod() argument
1373 if (attr->flags) in kvm_s390_get_tod()
1374 return -EINVAL; in kvm_s390_get_tod()
1376 switch (attr->attr) { in kvm_s390_get_tod()
1378 ret = kvm_s390_get_tod_ext(kvm, attr); in kvm_s390_get_tod()
1381 ret = kvm_s390_get_tod_high(kvm, attr); in kvm_s390_get_tod()
1384 ret = kvm_s390_get_tod_low(kvm, attr); in kvm_s390_get_tod()
1387 ret = -ENXIO; in kvm_s390_get_tod()
1393 static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_processor() argument
1399 mutex_lock(&kvm->lock); in kvm_s390_set_processor()
1400 if (kvm->created_vcpus) { in kvm_s390_set_processor()
1401 ret = -EBUSY; in kvm_s390_set_processor()
1406 ret = -ENOMEM; in kvm_s390_set_processor()
1409 if (!copy_from_user(proc, (void __user *)attr->addr, in kvm_s390_set_processor()
1411 kvm->arch.model.cpuid = proc->cpuid; in kvm_s390_set_processor()
1414 if (lowest_ibc && proc->ibc) { in kvm_s390_set_processor()
1415 if (proc->ibc > unblocked_ibc) in kvm_s390_set_processor()
1416 kvm->arch.model.ibc = unblocked_ibc; in kvm_s390_set_processor()
1417 else if (proc->ibc < lowest_ibc) in kvm_s390_set_processor()
1418 kvm->arch.model.ibc = lowest_ibc; in kvm_s390_set_processor()
1420 kvm->arch.model.ibc = proc->ibc; in kvm_s390_set_processor()
1422 memcpy(kvm->arch.model.fac_list, proc->fac_list, in kvm_s390_set_processor()
1424 VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx", in kvm_s390_set_processor()
1425 kvm->arch.model.ibc, in kvm_s390_set_processor()
1426 kvm->arch.model.cpuid); in kvm_s390_set_processor()
1427 VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx", in kvm_s390_set_processor()
1428 kvm->arch.model.fac_list[0], in kvm_s390_set_processor()
1429 kvm->arch.model.fac_list[1], in kvm_s390_set_processor()
1430 kvm->arch.model.fac_list[2]); in kvm_s390_set_processor()
1432 ret = -EFAULT; in kvm_s390_set_processor()
1435 mutex_unlock(&kvm->lock); in kvm_s390_set_processor()
1439 static int kvm_s390_set_processor_feat(struct kvm *kvm, in kvm_s390_set_processor_feat() argument
1444 if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data))) in kvm_s390_set_processor_feat()
1445 return -EFAULT; in kvm_s390_set_processor_feat()
1449 return -EINVAL; in kvm_s390_set_processor_feat()
1451 mutex_lock(&kvm->lock); in kvm_s390_set_processor_feat()
1452 if (kvm->created_vcpus) { in kvm_s390_set_processor_feat()
1453 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_feat()
1454 return -EBUSY; in kvm_s390_set_processor_feat()
1456 bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS); in kvm_s390_set_processor_feat()
1457 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_feat()
1458 VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx", in kvm_s390_set_processor_feat()
1465 static int kvm_s390_set_processor_subfunc(struct kvm *kvm, in kvm_s390_set_processor_subfunc() argument
1468 mutex_lock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1469 if (kvm->created_vcpus) { in kvm_s390_set_processor_subfunc()
1470 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1471 return -EBUSY; in kvm_s390_set_processor_subfunc()
1474 if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr, in kvm_s390_set_processor_subfunc()
1476 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1477 return -EFAULT; in kvm_s390_set_processor_subfunc()
1479 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1481 VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1482 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0], in kvm_s390_set_processor_subfunc()
1483 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1], in kvm_s390_set_processor_subfunc()
1484 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2], in kvm_s390_set_processor_subfunc()
1485 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]); in kvm_s390_set_processor_subfunc()
1486 VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1487 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0], in kvm_s390_set_processor_subfunc()
1488 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]); in kvm_s390_set_processor_subfunc()
1489 VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1490 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0], in kvm_s390_set_processor_subfunc()
1491 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]); in kvm_s390_set_processor_subfunc()
1492 VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1493 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0], in kvm_s390_set_processor_subfunc()
1494 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]); in kvm_s390_set_processor_subfunc()
1495 VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1496 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0], in kvm_s390_set_processor_subfunc()
1497 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]); in kvm_s390_set_processor_subfunc()
1498 VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1499 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0], in kvm_s390_set_processor_subfunc()
1500 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]); in kvm_s390_set_processor_subfunc()
1501 VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1502 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0], in kvm_s390_set_processor_subfunc()
1503 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]); in kvm_s390_set_processor_subfunc()
1504 VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1505 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0], in kvm_s390_set_processor_subfunc()
1506 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]); in kvm_s390_set_processor_subfunc()
1507 VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1508 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0], in kvm_s390_set_processor_subfunc()
1509 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]); in kvm_s390_set_processor_subfunc()
1510 VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1511 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0], in kvm_s390_set_processor_subfunc()
1512 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]); in kvm_s390_set_processor_subfunc()
1513 VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1514 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0], in kvm_s390_set_processor_subfunc()
1515 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]); in kvm_s390_set_processor_subfunc()
1516 VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1517 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0], in kvm_s390_set_processor_subfunc()
1518 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]); in kvm_s390_set_processor_subfunc()
1519 VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1520 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0], in kvm_s390_set_processor_subfunc()
1521 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]); in kvm_s390_set_processor_subfunc()
1522 VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1523 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0], in kvm_s390_set_processor_subfunc()
1524 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]); in kvm_s390_set_processor_subfunc()
1525 VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1526 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0], in kvm_s390_set_processor_subfunc()
1527 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]); in kvm_s390_set_processor_subfunc()
1528 VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1529 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0], in kvm_s390_set_processor_subfunc()
1530 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1], in kvm_s390_set_processor_subfunc()
1531 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2], in kvm_s390_set_processor_subfunc()
1532 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]); in kvm_s390_set_processor_subfunc()
1533 VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1534 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0], in kvm_s390_set_processor_subfunc()
1535 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], in kvm_s390_set_processor_subfunc()
1536 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], in kvm_s390_set_processor_subfunc()
1537 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); in kvm_s390_set_processor_subfunc()
1551 static int kvm_s390_set_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_uv_feat() argument
1553 struct kvm_s390_vm_cpu_uv_feat __user *ptr = (void __user *)attr->addr; in kvm_s390_set_uv_feat()
1557 if (get_user(data, &ptr->feat)) in kvm_s390_set_uv_feat()
1558 return -EFAULT; in kvm_s390_set_uv_feat()
1560 return -EINVAL; in kvm_s390_set_uv_feat()
1562 mutex_lock(&kvm->lock); in kvm_s390_set_uv_feat()
1563 if (kvm->created_vcpus) { in kvm_s390_set_uv_feat()
1564 mutex_unlock(&kvm->lock); in kvm_s390_set_uv_feat()
1565 return -EBUSY; in kvm_s390_set_uv_feat()
1567 kvm->arch.model.uv_feat_guest.feat = data; in kvm_s390_set_uv_feat()
1568 mutex_unlock(&kvm->lock); in kvm_s390_set_uv_feat()
1570 VM_EVENT(kvm, 3, "SET: guest UV-feat: 0x%16.16lx", data); in kvm_s390_set_uv_feat()
1575 static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_cpu_model() argument
1577 int ret = -ENXIO; in kvm_s390_set_cpu_model()
1579 switch (attr->attr) { in kvm_s390_set_cpu_model()
1581 ret = kvm_s390_set_processor(kvm, attr); in kvm_s390_set_cpu_model()
1584 ret = kvm_s390_set_processor_feat(kvm, attr); in kvm_s390_set_cpu_model()
1587 ret = kvm_s390_set_processor_subfunc(kvm, attr); in kvm_s390_set_cpu_model()
1590 ret = kvm_s390_set_uv_feat(kvm, attr); in kvm_s390_set_cpu_model()
1596 static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_processor() argument
1603 ret = -ENOMEM; in kvm_s390_get_processor()
1606 proc->cpuid = kvm->arch.model.cpuid; in kvm_s390_get_processor()
1607 proc->ibc = kvm->arch.model.ibc; in kvm_s390_get_processor()
1608 memcpy(&proc->fac_list, kvm->arch.model.fac_list, in kvm_s390_get_processor()
1610 VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx", in kvm_s390_get_processor()
1611 kvm->arch.model.ibc, in kvm_s390_get_processor()
1612 kvm->arch.model.cpuid); in kvm_s390_get_processor()
1613 VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx", in kvm_s390_get_processor()
1614 kvm->arch.model.fac_list[0], in kvm_s390_get_processor()
1615 kvm->arch.model.fac_list[1], in kvm_s390_get_processor()
1616 kvm->arch.model.fac_list[2]); in kvm_s390_get_processor()
1617 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc))) in kvm_s390_get_processor()
1618 ret = -EFAULT; in kvm_s390_get_processor()
1624 static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_machine() argument
1631 ret = -ENOMEM; in kvm_s390_get_machine()
1634 get_cpu_id((struct cpuid *) &mach->cpuid); in kvm_s390_get_machine()
1635 mach->ibc = sclp.ibc; in kvm_s390_get_machine()
1636 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask, in kvm_s390_get_machine()
1638 memcpy((unsigned long *)&mach->fac_list, stfle_fac_list, in kvm_s390_get_machine()
1640 VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx", in kvm_s390_get_machine()
1641 kvm->arch.model.ibc, in kvm_s390_get_machine()
1642 kvm->arch.model.cpuid); in kvm_s390_get_machine()
1643 VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx", in kvm_s390_get_machine()
1644 mach->fac_mask[0], in kvm_s390_get_machine()
1645 mach->fac_mask[1], in kvm_s390_get_machine()
1646 mach->fac_mask[2]); in kvm_s390_get_machine()
1647 VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx", in kvm_s390_get_machine()
1648 mach->fac_list[0], in kvm_s390_get_machine()
1649 mach->fac_list[1], in kvm_s390_get_machine()
1650 mach->fac_list[2]); in kvm_s390_get_machine()
1651 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach))) in kvm_s390_get_machine()
1652 ret = -EFAULT; in kvm_s390_get_machine()
1658 static int kvm_s390_get_processor_feat(struct kvm *kvm, in kvm_s390_get_processor_feat() argument
1663 bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS); in kvm_s390_get_processor_feat()
1664 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data))) in kvm_s390_get_processor_feat()
1665 return -EFAULT; in kvm_s390_get_processor_feat()
1666 VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx", in kvm_s390_get_processor_feat()
1673 static int kvm_s390_get_machine_feat(struct kvm *kvm, in kvm_s390_get_machine_feat() argument
1679 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data))) in kvm_s390_get_machine_feat()
1680 return -EFAULT; in kvm_s390_get_machine_feat()
1681 VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx", in kvm_s390_get_machine_feat()
1688 static int kvm_s390_get_processor_subfunc(struct kvm *kvm, in kvm_s390_get_processor_subfunc() argument
1691 if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs, in kvm_s390_get_processor_subfunc()
1693 return -EFAULT; in kvm_s390_get_processor_subfunc()
1695 VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1696 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0], in kvm_s390_get_processor_subfunc()
1697 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1], in kvm_s390_get_processor_subfunc()
1698 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2], in kvm_s390_get_processor_subfunc()
1699 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]); in kvm_s390_get_processor_subfunc()
1700 VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1701 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0], in kvm_s390_get_processor_subfunc()
1702 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]); in kvm_s390_get_processor_subfunc()
1703 VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1704 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0], in kvm_s390_get_processor_subfunc()
1705 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]); in kvm_s390_get_processor_subfunc()
1706 VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1707 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0], in kvm_s390_get_processor_subfunc()
1708 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]); in kvm_s390_get_processor_subfunc()
1709 VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1710 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0], in kvm_s390_get_processor_subfunc()
1711 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]); in kvm_s390_get_processor_subfunc()
1712 VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1713 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0], in kvm_s390_get_processor_subfunc()
1714 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]); in kvm_s390_get_processor_subfunc()
1715 VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1716 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0], in kvm_s390_get_processor_subfunc()
1717 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]); in kvm_s390_get_processor_subfunc()
1718 VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1719 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0], in kvm_s390_get_processor_subfunc()
1720 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]); in kvm_s390_get_processor_subfunc()
1721 VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1722 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0], in kvm_s390_get_processor_subfunc()
1723 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]); in kvm_s390_get_processor_subfunc()
1724 VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1725 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0], in kvm_s390_get_processor_subfunc()
1726 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]); in kvm_s390_get_processor_subfunc()
1727 VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1728 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0], in kvm_s390_get_processor_subfunc()
1729 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]); in kvm_s390_get_processor_subfunc()
1730 VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1731 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0], in kvm_s390_get_processor_subfunc()
1732 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]); in kvm_s390_get_processor_subfunc()
1733 VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1734 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0], in kvm_s390_get_processor_subfunc()
1735 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]); in kvm_s390_get_processor_subfunc()
1736 VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1737 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0], in kvm_s390_get_processor_subfunc()
1738 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]); in kvm_s390_get_processor_subfunc()
1739 VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1740 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0], in kvm_s390_get_processor_subfunc()
1741 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]); in kvm_s390_get_processor_subfunc()
1742 VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1743 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0], in kvm_s390_get_processor_subfunc()
1744 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1], in kvm_s390_get_processor_subfunc()
1745 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2], in kvm_s390_get_processor_subfunc()
1746 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]); in kvm_s390_get_processor_subfunc()
1747 VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1748 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0], in kvm_s390_get_processor_subfunc()
1749 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], in kvm_s390_get_processor_subfunc()
1750 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], in kvm_s390_get_processor_subfunc()
1751 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); in kvm_s390_get_processor_subfunc()
1756 static int kvm_s390_get_machine_subfunc(struct kvm *kvm, in kvm_s390_get_machine_subfunc() argument
1759 if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc, in kvm_s390_get_machine_subfunc()
1761 return -EFAULT; in kvm_s390_get_machine_subfunc()
1763 VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1768 VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1771 VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1774 VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1777 VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1780 VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1783 VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1786 VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1789 VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1792 VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1795 VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1798 VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1801 VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1804 VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1807 VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1810 VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1815 VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1824 static int kvm_s390_get_processor_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_processor_uv_feat() argument
1826 struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr; in kvm_s390_get_processor_uv_feat()
1827 unsigned long feat = kvm->arch.model.uv_feat_guest.feat; in kvm_s390_get_processor_uv_feat()
1829 if (put_user(feat, &dst->feat)) in kvm_s390_get_processor_uv_feat()
1830 return -EFAULT; in kvm_s390_get_processor_uv_feat()
1831 VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat); in kvm_s390_get_processor_uv_feat()
1836 static int kvm_s390_get_machine_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_machine_uv_feat() argument
1838 struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr; in kvm_s390_get_machine_uv_feat()
1844 if (put_user(feat, &dst->feat)) in kvm_s390_get_machine_uv_feat()
1845 return -EFAULT; in kvm_s390_get_machine_uv_feat()
1846 VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat); in kvm_s390_get_machine_uv_feat()
1851 static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_cpu_model() argument
1853 int ret = -ENXIO; in kvm_s390_get_cpu_model()
1855 switch (attr->attr) { in kvm_s390_get_cpu_model()
1857 ret = kvm_s390_get_processor(kvm, attr); in kvm_s390_get_cpu_model()
1860 ret = kvm_s390_get_machine(kvm, attr); in kvm_s390_get_cpu_model()
1863 ret = kvm_s390_get_processor_feat(kvm, attr); in kvm_s390_get_cpu_model()
1866 ret = kvm_s390_get_machine_feat(kvm, attr); in kvm_s390_get_cpu_model()
1869 ret = kvm_s390_get_processor_subfunc(kvm, attr); in kvm_s390_get_cpu_model()
1872 ret = kvm_s390_get_machine_subfunc(kvm, attr); in kvm_s390_get_cpu_model()
1875 ret = kvm_s390_get_processor_uv_feat(kvm, attr); in kvm_s390_get_cpu_model()
1878 ret = kvm_s390_get_machine_uv_feat(kvm, attr); in kvm_s390_get_cpu_model()
1885 * kvm_s390_update_topology_change_report - update CPU topology change report
1886 * @kvm: guest KVM description
1889 * Updates the Multiprocessor Topology-Change-Report bit to signal
1895 static void kvm_s390_update_topology_change_report(struct kvm *kvm, bool val) in kvm_s390_update_topology_change_report() argument
1900 read_lock(&kvm->arch.sca_lock); in kvm_s390_update_topology_change_report()
1901 sca = kvm->arch.sca; in kvm_s390_update_topology_change_report()
1903 old = READ_ONCE(sca->utility); in kvm_s390_update_topology_change_report()
1906 } while (cmpxchg(&sca->utility.val, old.val, new.val) != old.val); in kvm_s390_update_topology_change_report()
1907 read_unlock(&kvm->arch.sca_lock); in kvm_s390_update_topology_change_report()
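The loop above is a lock-free read-modify-write: re-read the utility word and retry the compare-and-swap until no concurrent writer intervened. A minimal sketch of the same pattern in plain C11 atomics (the MTCR bit position here is a placeholder, not the real SCA layout):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define UTILITY_MTCR (1u << 15)	/* hypothetical bit position */

static void set_mtcr(_Atomic uint16_t *utility, bool val)
{
	uint16_t old = atomic_load(utility);
	uint16_t new;

	do {
		new = val ? (old | UTILITY_MTCR) : (old & ~UTILITY_MTCR);
		/* on failure, old is refreshed with the current value and we retry */
	} while (!atomic_compare_exchange_weak(utility, &old, new));
}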
1910 static int kvm_s390_set_topo_change_indication(struct kvm *kvm, in kvm_s390_set_topo_change_indication() argument
1913 if (!test_kvm_facility(kvm, 11)) in kvm_s390_set_topo_change_indication()
1914 return -ENXIO; in kvm_s390_set_topo_change_indication()
1916 kvm_s390_update_topology_change_report(kvm, !!attr->attr); in kvm_s390_set_topo_change_indication()
1920 static int kvm_s390_get_topo_change_indication(struct kvm *kvm, in kvm_s390_get_topo_change_indication() argument
1925 if (!test_kvm_facility(kvm, 11)) in kvm_s390_get_topo_change_indication()
1926 return -ENXIO; in kvm_s390_get_topo_change_indication()
1928 read_lock(&kvm->arch.sca_lock); in kvm_s390_get_topo_change_indication()
1929 topo = ((struct bsca_block *)kvm->arch.sca)->utility.mtcr; in kvm_s390_get_topo_change_indication()
1930 read_unlock(&kvm->arch.sca_lock); in kvm_s390_get_topo_change_indication()
1932 return put_user(topo, (u8 __user *)attr->addr); in kvm_s390_get_topo_change_indication()
1935 static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_set_attr() argument
1939 switch (attr->group) { in kvm_s390_vm_set_attr()
1941 ret = kvm_s390_set_mem_control(kvm, attr); in kvm_s390_vm_set_attr()
1944 ret = kvm_s390_set_tod(kvm, attr); in kvm_s390_vm_set_attr()
1947 ret = kvm_s390_set_cpu_model(kvm, attr); in kvm_s390_vm_set_attr()
1950 ret = kvm_s390_vm_set_crypto(kvm, attr); in kvm_s390_vm_set_attr()
1953 ret = kvm_s390_vm_set_migration(kvm, attr); in kvm_s390_vm_set_attr()
1956 ret = kvm_s390_set_topo_change_indication(kvm, attr); in kvm_s390_vm_set_attr()
1959 ret = -ENXIO; in kvm_s390_vm_set_attr()
1966 static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_get_attr() argument
1970 switch (attr->group) { in kvm_s390_vm_get_attr()
1972 ret = kvm_s390_get_mem_control(kvm, attr); in kvm_s390_vm_get_attr()
1975 ret = kvm_s390_get_tod(kvm, attr); in kvm_s390_vm_get_attr()
1978 ret = kvm_s390_get_cpu_model(kvm, attr); in kvm_s390_vm_get_attr()
1981 ret = kvm_s390_vm_get_migration(kvm, attr); in kvm_s390_vm_get_attr()
1984 ret = kvm_s390_get_topo_change_indication(kvm, attr); in kvm_s390_vm_get_attr()
1987 ret = -ENXIO; in kvm_s390_vm_get_attr()
1994 static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_has_attr() argument
1998 switch (attr->group) { in kvm_s390_vm_has_attr()
2000 switch (attr->attr) { in kvm_s390_vm_has_attr()
2003 ret = sclp.has_cmma ? 0 : -ENXIO; in kvm_s390_vm_has_attr()
2009 ret = -ENXIO; in kvm_s390_vm_has_attr()
2014 switch (attr->attr) { in kvm_s390_vm_has_attr()
2020 ret = -ENXIO; in kvm_s390_vm_has_attr()
2025 switch (attr->attr) { in kvm_s390_vm_has_attr()
2037 ret = -ENXIO; in kvm_s390_vm_has_attr()
2042 switch (attr->attr) { in kvm_s390_vm_has_attr()
2051 ret = ap_instructions_available() ? 0 : -ENXIO; in kvm_s390_vm_has_attr()
2054 ret = -ENXIO; in kvm_s390_vm_has_attr()
2062 ret = test_kvm_facility(kvm, 11) ? 0 : -ENXIO; in kvm_s390_vm_has_attr()
2065 ret = -ENXIO; in kvm_s390_vm_has_attr()
2072 static int kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) in kvm_s390_get_skeys() argument
2078 if (args->flags != 0) in kvm_s390_get_skeys()
2079 return -EINVAL; in kvm_s390_get_skeys()
2082 if (!mm_uses_skeys(current->mm)) in kvm_s390_get_skeys()
2086 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) in kvm_s390_get_skeys()
2087 return -EINVAL; in kvm_s390_get_skeys()
2089 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT); in kvm_s390_get_skeys()
2091 return -ENOMEM; in kvm_s390_get_skeys()
2093 mmap_read_lock(current->mm); in kvm_s390_get_skeys()
2094 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_get_skeys()
2095 for (i = 0; i < args->count; i++) { in kvm_s390_get_skeys()
2096 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_get_skeys()
2098 r = -EFAULT; in kvm_s390_get_skeys()
2102 r = get_guest_storage_key(current->mm, hva, &keys[i]); in kvm_s390_get_skeys()
2106 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_get_skeys()
2107 mmap_read_unlock(current->mm); in kvm_s390_get_skeys()
2110 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys, in kvm_s390_get_skeys()
2111 sizeof(uint8_t) * args->count); in kvm_s390_get_skeys()
2113 r = -EFAULT; in kvm_s390_get_skeys()
2120 static int kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) in kvm_s390_set_skeys() argument
2127 if (args->flags != 0) in kvm_s390_set_skeys()
2128 return -EINVAL; in kvm_s390_set_skeys()
2131 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) in kvm_s390_set_skeys()
2132 return -EINVAL; in kvm_s390_set_skeys()
2134 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT); in kvm_s390_set_skeys()
2136 return -ENOMEM; in kvm_s390_set_skeys()
2138 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr, in kvm_s390_set_skeys()
2139 sizeof(uint8_t) * args->count); in kvm_s390_set_skeys()
2141 r = -EFAULT; in kvm_s390_set_skeys()
2151 mmap_read_lock(current->mm); in kvm_s390_set_skeys()
2152 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_skeys()
2153 while (i < args->count) { in kvm_s390_set_skeys()
2155 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_skeys()
2157 r = -EFAULT; in kvm_s390_set_skeys()
2163 r = -EINVAL; in kvm_s390_set_skeys()
2167 r = set_guest_storage_key(current->mm, hva, keys[i], 0); in kvm_s390_set_skeys()
2169 r = fixup_user_fault(current->mm, hva, in kvm_s390_set_skeys()
2177 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_set_skeys()
2178 mmap_read_unlock(current->mm); in kvm_s390_set_skeys()
2193 static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args, in kvm_s390_peek_cmma() argument
2196 unsigned long pgstev, hva, cur_gfn = args->start_gfn; in kvm_s390_peek_cmma()
2198 args->count = 0; in kvm_s390_peek_cmma()
2199 while (args->count < bufsize) { in kvm_s390_peek_cmma()
2200 hva = gfn_to_hva(kvm, cur_gfn); in kvm_s390_peek_cmma()
2206 return args->count ? 0 : -EFAULT; in kvm_s390_peek_cmma()
2207 if (get_pgste(kvm->mm, hva, &pgstev) < 0) in kvm_s390_peek_cmma()
2209 res[args->count++] = (pgstev >> 24) & 0x43; in kvm_s390_peek_cmma()
2226 unsigned long ofs = cur_gfn - ms->base_gfn; in kvm_s390_next_dirty_cmma()
2227 struct rb_node *mnode = &ms->gfn_node[slots->node_idx]; in kvm_s390_next_dirty_cmma()
2229 if (ms->base_gfn + ms->npages <= cur_gfn) { in kvm_s390_next_dirty_cmma()
2233 mnode = rb_first(&slots->gfn_tree); in kvm_s390_next_dirty_cmma()
2235 ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]); in kvm_s390_next_dirty_cmma()
2239 if (cur_gfn < ms->base_gfn) in kvm_s390_next_dirty_cmma()
2242 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs); in kvm_s390_next_dirty_cmma()
2243 while (ofs >= ms->npages && (mnode = rb_next(mnode))) { in kvm_s390_next_dirty_cmma()
2244 ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]); in kvm_s390_next_dirty_cmma()
2245 ofs = find_first_bit(kvm_second_dirty_bitmap(ms), ms->npages); in kvm_s390_next_dirty_cmma()
2247 return ms->base_gfn + ofs; in kvm_s390_next_dirty_cmma()
2250 static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args, in kvm_s390_get_cmma() argument
2254 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_s390_get_cmma()
2260 cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn); in kvm_s390_get_cmma()
2261 ms = gfn_to_memslot(kvm, cur_gfn); in kvm_s390_get_cmma()
2262 args->count = 0; in kvm_s390_get_cmma()
2263 args->start_gfn = cur_gfn; in kvm_s390_get_cmma()
2269 while (args->count < bufsize) { in kvm_s390_get_cmma()
2270 hva = gfn_to_hva(kvm, cur_gfn); in kvm_s390_get_cmma()
2274 if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms))) in kvm_s390_get_cmma()
2275 atomic64_dec(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma()
2276 if (get_pgste(kvm->mm, hva, &pgstev) < 0) in kvm_s390_get_cmma()
2279 res[args->count++] = (pgstev >> 24) & 0x43; in kvm_s390_get_cmma()
2288 (next_gfn - args->start_gfn >= bufsize)) in kvm_s390_get_cmma()
2292 if (cur_gfn - ms->base_gfn >= ms->npages) { in kvm_s390_get_cmma()
2293 ms = gfn_to_memslot(kvm, cur_gfn); in kvm_s390_get_cmma()
2309 static int kvm_s390_get_cmma_bits(struct kvm *kvm, in kvm_s390_get_cmma_bits() argument
2316 if (!kvm->arch.use_cmma) in kvm_s390_get_cmma_bits()
2317 return -ENXIO; in kvm_s390_get_cmma_bits()
2319 if (args->flags & ~KVM_S390_CMMA_PEEK) in kvm_s390_get_cmma_bits()
2320 return -EINVAL; in kvm_s390_get_cmma_bits()
2322 peek = !!(args->flags & KVM_S390_CMMA_PEEK); in kvm_s390_get_cmma_bits()
2323 if (!peek && !kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
2324 return -EINVAL; in kvm_s390_get_cmma_bits()
2326 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX); in kvm_s390_get_cmma_bits()
2327 if (!bufsize || !kvm->mm->context.uses_cmm) { in kvm_s390_get_cmma_bits()
2332 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) { in kvm_s390_get_cmma_bits()
2339 return -ENOMEM; in kvm_s390_get_cmma_bits()
2341 mmap_read_lock(kvm->mm); in kvm_s390_get_cmma_bits()
2342 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_get_cmma_bits()
2344 ret = kvm_s390_peek_cmma(kvm, args, values, bufsize); in kvm_s390_get_cmma_bits()
2346 ret = kvm_s390_get_cmma(kvm, args, values, bufsize); in kvm_s390_get_cmma_bits()
2347 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_get_cmma_bits()
2348 mmap_read_unlock(kvm->mm); in kvm_s390_get_cmma_bits()
2350 if (kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
2351 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma_bits()
2353 args->remaining = 0; in kvm_s390_get_cmma_bits()
2355 if (copy_to_user((void __user *)args->values, values, args->count)) in kvm_s390_get_cmma_bits()
2356 ret = -EFAULT; in kvm_s390_get_cmma_bits()
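This handler backs the KVM_S390_GET_CMMA_BITS vm ioctl; on return, start_gfn points at the first value written, count is the number of values written and remaining is the number of still-dirty pages. A hedged userspace sketch (pull_cmma_values and CMMA_CHUNK are illustrative names):

#include <linux/kvm.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>

#define CMMA_CHUNK (256 * 1024)	/* illustrative per-call buffer size */

int pull_cmma_values(int vm_fd)
{
	uint8_t *buf = malloc(CMMA_CHUNK);
	struct kvm_s390_cmma_log log = {
		.start_gfn = 0,
		.flags = 0,	/* 0 consumes dirty state; KVM_S390_CMMA_PEEK only reads */
		.values = (uint64_t)(uintptr_t)buf,
	};
	int ret;

	if (!buf)
		return -1;
	do {
		log.count = CMMA_CHUNK;
		ret = ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log);
		if (ret)
			break;
		/* log.count CMMA values for gfns starting at log.start_gfn are in buf */
		log.start_gfn += log.count;
	} while (log.remaining);

	free(buf);
	return ret;
}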
2365 * set and the mm->context.uses_cmm flag is set.
2367 static int kvm_s390_set_cmma_bits(struct kvm *kvm, in kvm_s390_set_cmma_bits() argument
2374 mask = args->mask; in kvm_s390_set_cmma_bits()
2376 if (!kvm->arch.use_cmma) in kvm_s390_set_cmma_bits()
2377 return -ENXIO; in kvm_s390_set_cmma_bits()
2379 if (args->flags != 0) in kvm_s390_set_cmma_bits()
2380 return -EINVAL; in kvm_s390_set_cmma_bits()
2382 if (args->count > KVM_S390_CMMA_SIZE_MAX) in kvm_s390_set_cmma_bits()
2383 return -EINVAL; in kvm_s390_set_cmma_bits()
2385 if (args->count == 0) in kvm_s390_set_cmma_bits()
2388 bits = vmalloc(array_size(sizeof(*bits), args->count)); in kvm_s390_set_cmma_bits()
2390 return -ENOMEM; in kvm_s390_set_cmma_bits()
2392 r = copy_from_user(bits, (void __user *)args->values, args->count); in kvm_s390_set_cmma_bits()
2394 r = -EFAULT; in kvm_s390_set_cmma_bits()
2398 mmap_read_lock(kvm->mm); in kvm_s390_set_cmma_bits()
2399 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_cmma_bits()
2400 for (i = 0; i < args->count; i++) { in kvm_s390_set_cmma_bits()
2401 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_cmma_bits()
2403 r = -EFAULT; in kvm_s390_set_cmma_bits()
2410 set_pgste_bits(kvm->mm, hva, mask, pgstev); in kvm_s390_set_cmma_bits()
2412 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_set_cmma_bits()
2413 mmap_read_unlock(kvm->mm); in kvm_s390_set_cmma_bits()
2415 if (!kvm->mm->context.uses_cmm) { in kvm_s390_set_cmma_bits()
2416 mmap_write_lock(kvm->mm); in kvm_s390_set_cmma_bits()
2417 kvm->mm->context.uses_cmm = 1; in kvm_s390_set_cmma_bits()
2418 mmap_write_unlock(kvm->mm); in kvm_s390_set_cmma_bits()
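On the destination side the collected values are replayed through KVM_S390_SET_CMMA_BITS, which is what kvm_s390_set_cmma_bits() above implements. A hedged sketch, where vm_fd, start_gfn and the value buffer are assumptions supplied by the caller:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int replay_cmma(int vm_fd, uint64_t start_gfn, uint8_t *values, uint32_t count)
{
	struct kvm_s390_cmma_log log = {
		.start_gfn = start_gfn,
		.count = count,
		.flags = 0,
		.mask = ~0ULL,			/* apply every bit of each value byte */
		.values = (uint64_t)(uintptr_t)values,
	};

	return ioctl(vm_fd, KVM_S390_SET_CMMA_BITS, &log);
}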
2426 * kvm_s390_cpus_from_pv - Convert all protected vCPUs in a protected VM to
2428 * @kvm: the VM whose protected vCPUs are to be converted
2436 * Return: 0 in case of success, otherwise -EIO
2438 int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc) in kvm_s390_cpus_from_pv() argument
2453 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_s390_cpus_from_pv()
2454 mutex_lock(&vcpu->mutex); in kvm_s390_cpus_from_pv()
2458 ret = -EIO; in kvm_s390_cpus_from_pv()
2460 mutex_unlock(&vcpu->mutex); in kvm_s390_cpus_from_pv()
2462 /* Ensure that we re-enable gisa if the non-PV guest used it but the PV guest did not. */ in kvm_s390_cpus_from_pv()
2464 kvm_s390_gisa_enable(kvm); in kvm_s390_cpus_from_pv()
2469 * kvm_s390_cpus_to_pv - Convert all non-protected vCPUs in a protected VM
2471 * @kvm: the VM whose protected vCPUs are to be converted
2477 * Return: 0 in case of success, otherwise -EIO
2479 static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc) in kvm_s390_cpus_to_pv() argument
2489 kvm_s390_gisa_disable(kvm); in kvm_s390_cpus_to_pv()
2491 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_s390_cpus_to_pv()
2492 mutex_lock(&vcpu->mutex); in kvm_s390_cpus_to_pv()
2494 mutex_unlock(&vcpu->mutex); in kvm_s390_cpus_to_pv()
2499 kvm_s390_cpus_from_pv(kvm, &dummy, &dummy); in kvm_s390_cpus_to_pv()
2515 switch (info->header.id) { in kvm_s390_handle_pv_info()
2517 len_min = sizeof(info->header) + sizeof(info->vm); in kvm_s390_handle_pv_info()
2519 if (info->header.len_max < len_min) in kvm_s390_handle_pv_info()
2520 return -EINVAL; in kvm_s390_handle_pv_info()
2522 memcpy(info->vm.inst_calls_list, in kvm_s390_handle_pv_info()
2527 info->vm.max_cpus = uv_info.max_guest_cpu_id + 1; in kvm_s390_handle_pv_info()
2528 info->vm.max_guests = uv_info.max_num_sec_conf; in kvm_s390_handle_pv_info()
2529 info->vm.max_guest_addr = uv_info.max_sec_stor_addr; in kvm_s390_handle_pv_info()
2530 info->vm.feature_indication = uv_info.uv_feature_indications; in kvm_s390_handle_pv_info()
2535 len_min = sizeof(info->header) + sizeof(info->dump); in kvm_s390_handle_pv_info()
2537 if (info->header.len_max < len_min) in kvm_s390_handle_pv_info()
2538 return -EINVAL; in kvm_s390_handle_pv_info()
2540 info->dump.dump_cpu_buffer_len = uv_info.guest_cpu_stor_len; in kvm_s390_handle_pv_info()
2541 info->dump.dump_config_mem_buffer_per_1m = uv_info.conf_dump_storage_state_len; in kvm_s390_handle_pv_info()
2542 info->dump.dump_config_finalize_len = uv_info.conf_dump_finalize_len; in kvm_s390_handle_pv_info()
2546 return -EINVAL; in kvm_s390_handle_pv_info()
2550 static int kvm_s390_pv_dmp(struct kvm *kvm, struct kvm_pv_cmd *cmd, in kvm_s390_pv_dmp() argument
2553 int r = -EINVAL; in kvm_s390_pv_dmp()
2558 if (kvm->arch.pv.dumping) in kvm_s390_pv_dmp()
2565 kvm_s390_vcpu_block_all(kvm); in kvm_s390_pv_dmp()
2567 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), in kvm_s390_pv_dmp()
2568 UVC_CMD_DUMP_INIT, &cmd->rc, &cmd->rrc); in kvm_s390_pv_dmp()
2569 KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP INIT: rc %x rrc %x", in kvm_s390_pv_dmp()
2570 cmd->rc, cmd->rrc); in kvm_s390_pv_dmp()
2572 kvm->arch.pv.dumping = true; in kvm_s390_pv_dmp()
2574 kvm_s390_vcpu_unblock_all(kvm); in kvm_s390_pv_dmp()
2575 r = -EINVAL; in kvm_s390_pv_dmp()
2580 if (!kvm->arch.pv.dumping) in kvm_s390_pv_dmp()
2588 r = kvm_s390_pv_dump_stor_state(kvm, result_buff, &dmp.gaddr, dmp.buff_len, in kvm_s390_pv_dmp()
2589 &cmd->rc, &cmd->rrc); in kvm_s390_pv_dmp()
2593 if (!kvm->arch.pv.dumping) in kvm_s390_pv_dmp()
2596 r = -EINVAL; in kvm_s390_pv_dmp()
2600 r = kvm_s390_pv_dump_complete(kvm, result_buff, in kvm_s390_pv_dmp()
2601 &cmd->rc, &cmd->rrc); in kvm_s390_pv_dmp()
2605 r = -ENOTTY; in kvm_s390_pv_dmp()
2612 static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd) in kvm_s390_handle_pv() argument
2614 const bool need_lock = (cmd->cmd != KVM_PV_ASYNC_CLEANUP_PERFORM); in kvm_s390_handle_pv()
2615 void __user *argp = (void __user *)cmd->data; in kvm_s390_handle_pv()
2620 mutex_lock(&kvm->lock); in kvm_s390_handle_pv()
2622 switch (cmd->cmd) { in kvm_s390_handle_pv()
2624 r = -EINVAL; in kvm_s390_handle_pv()
2625 if (kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2632 r = sca_switch_to_extended(kvm); in kvm_s390_handle_pv()
2640 r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2644 r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2646 kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy); in kvm_s390_handle_pv()
2649 set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2653 r = -EINVAL; in kvm_s390_handle_pv()
2654 if (!kvm_s390_pv_is_protected(kvm) || !async_destroy) in kvm_s390_handle_pv()
2657 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2665 r = kvm_s390_pv_set_aside(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2668 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2671 r = -EINVAL; in kvm_s390_handle_pv()
2674 /* kvm->lock must not be held; this is asserted inside the function. */ in kvm_s390_handle_pv()
2675 r = kvm_s390_pv_deinit_aside_vm(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2678 r = -EINVAL; in kvm_s390_handle_pv()
2679 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2682 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2690 r = kvm_s390_pv_deinit_cleanup_all(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2693 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2700 r = -EINVAL; in kvm_s390_handle_pv()
2701 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2704 r = -EFAULT; in kvm_s390_handle_pv()
2709 r = -EINVAL; in kvm_s390_handle_pv()
2713 r = -ENOMEM; in kvm_s390_handle_pv()
2718 r = -EFAULT; in kvm_s390_handle_pv()
2721 r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length, in kvm_s390_handle_pv()
2722 &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2730 r = -EINVAL; in kvm_s390_handle_pv()
2731 if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm)) in kvm_s390_handle_pv()
2734 r = -EFAULT; in kvm_s390_handle_pv()
2738 r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak, in kvm_s390_handle_pv()
2739 &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2743 r = -EINVAL; in kvm_s390_handle_pv()
2744 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2747 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), in kvm_s390_handle_pv()
2748 UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2749 KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc, in kvm_s390_handle_pv()
2750 cmd->rrc); in kvm_s390_handle_pv()
2754 r = -EINVAL; in kvm_s390_handle_pv()
2755 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2758 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), in kvm_s390_handle_pv()
2759 UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2760 KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x", in kvm_s390_handle_pv()
2761 cmd->rc, cmd->rrc); in kvm_s390_handle_pv()
2765 r = -EINVAL; in kvm_s390_handle_pv()
2766 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2769 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), in kvm_s390_handle_pv()
2770 UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2771 KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x", in kvm_s390_handle_pv()
2772 cmd->rc, cmd->rrc); in kvm_s390_handle_pv()
2788 r = -EFAULT; in kvm_s390_handle_pv()
2792 r = -EINVAL; in kvm_s390_handle_pv()
2808 r = -EFAULT; in kvm_s390_handle_pv()
2818 r = -EINVAL; in kvm_s390_handle_pv()
2819 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2822 r = -EFAULT; in kvm_s390_handle_pv()
2826 r = kvm_s390_pv_dmp(kvm, cmd, dmp); in kvm_s390_handle_pv()
2831 r = -EFAULT; in kvm_s390_handle_pv()
2838 r = -ENOTTY; in kvm_s390_handle_pv()
2841 mutex_unlock(&kvm->lock); in kvm_s390_handle_pv()
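kvm_s390_handle_pv() is reached through the KVM_S390_PV_COMMAND vm ioctl. A minimal sketch of the first step userspace would take, issuing KVM_PV_ENABLE before the guest image is unpacked and verified; vm_fd is an assumed open VM file descriptor and error handling is collapsed to a single message:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int pv_enable(int vm_fd)
{
	struct kvm_pv_cmd cmd = { .cmd = KVM_PV_ENABLE };

	if (ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd)) {
		/* rc/rrc carry the Ultravisor return and reason codes */
		fprintf(stderr, "KVM_PV_ENABLE failed: rc=%#x rrc=%#x\n", cmd.rc, cmd.rrc);
		return -1;
	}
	return 0;
}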
2848 if (mop->flags & ~supported_flags || !mop->size) in mem_op_validate_common()
2849 return -EINVAL; in mem_op_validate_common()
2850 if (mop->size > MEM_OP_MAX_SIZE) in mem_op_validate_common()
2851 return -E2BIG; in mem_op_validate_common()
2852 if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) { in mem_op_validate_common()
2853 if (mop->key > 0xf) in mem_op_validate_common()
2854 return -EINVAL; in mem_op_validate_common()
2856 mop->key = 0; in mem_op_validate_common()
2861 static int kvm_s390_vm_mem_op_abs(struct kvm *kvm, struct kvm_s390_mem_op *mop) in kvm_s390_vm_mem_op_abs() argument
2863 void __user *uaddr = (void __user *)mop->buf; in kvm_s390_vm_mem_op_abs()
2873 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) { in kvm_s390_vm_mem_op_abs()
2874 tmpbuf = vmalloc(mop->size); in kvm_s390_vm_mem_op_abs()
2876 return -ENOMEM; in kvm_s390_vm_mem_op_abs()
2879 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_vm_mem_op_abs()
2881 if (kvm_is_error_gpa(kvm, mop->gaddr)) { in kvm_s390_vm_mem_op_abs()
2886 acc_mode = mop->op == KVM_S390_MEMOP_ABSOLUTE_READ ? GACC_FETCH : GACC_STORE; in kvm_s390_vm_mem_op_abs()
2887 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { in kvm_s390_vm_mem_op_abs()
2888 r = check_gpa_range(kvm, mop->gaddr, mop->size, acc_mode, mop->key); in kvm_s390_vm_mem_op_abs()
2892 r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf, in kvm_s390_vm_mem_op_abs()
2893 mop->size, GACC_FETCH, mop->key); in kvm_s390_vm_mem_op_abs()
2896 if (copy_to_user(uaddr, tmpbuf, mop->size)) in kvm_s390_vm_mem_op_abs()
2897 r = -EFAULT; in kvm_s390_vm_mem_op_abs()
2899 if (copy_from_user(tmpbuf, uaddr, mop->size)) { in kvm_s390_vm_mem_op_abs()
2900 r = -EFAULT; in kvm_s390_vm_mem_op_abs()
2903 r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf, in kvm_s390_vm_mem_op_abs()
2904 mop->size, GACC_STORE, mop->key); in kvm_s390_vm_mem_op_abs()
2908 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_vm_mem_op_abs()
2914 static int kvm_s390_vm_mem_op_cmpxchg(struct kvm *kvm, struct kvm_s390_mem_op *mop) in kvm_s390_vm_mem_op_cmpxchg() argument
2916 void __user *uaddr = (void __user *)mop->buf; in kvm_s390_vm_mem_op_cmpxchg()
2917 void __user *old_addr = (void __user *)mop->old_addr; in kvm_s390_vm_mem_op_cmpxchg()
2922 unsigned int off_in_quad = sizeof(new) - mop->size; in kvm_s390_vm_mem_op_cmpxchg()
2934 if (mop->size > sizeof(new)) in kvm_s390_vm_mem_op_cmpxchg()
2935 return -EINVAL; in kvm_s390_vm_mem_op_cmpxchg()
2936 if (copy_from_user(&new.raw[off_in_quad], uaddr, mop->size)) in kvm_s390_vm_mem_op_cmpxchg()
2937 return -EFAULT; in kvm_s390_vm_mem_op_cmpxchg()
2938 if (copy_from_user(&old.raw[off_in_quad], old_addr, mop->size)) in kvm_s390_vm_mem_op_cmpxchg()
2939 return -EFAULT; in kvm_s390_vm_mem_op_cmpxchg()
2941 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_vm_mem_op_cmpxchg()
2943 if (kvm_is_error_gpa(kvm, mop->gaddr)) { in kvm_s390_vm_mem_op_cmpxchg()
2948 r = cmpxchg_guest_abs_with_key(kvm, mop->gaddr, mop->size, &old.quad, in kvm_s390_vm_mem_op_cmpxchg()
2949 new.quad, mop->key, &success); in kvm_s390_vm_mem_op_cmpxchg()
2950 if (!success && copy_to_user(old_addr, &old.raw[off_in_quad], mop->size)) in kvm_s390_vm_mem_op_cmpxchg()
2951 r = -EFAULT; in kvm_s390_vm_mem_op_cmpxchg()
2954 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_vm_mem_op_cmpxchg()
2958 static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop) in kvm_s390_vm_mem_op() argument
2961 * This is technically a heuristic only, if the kvm->lock is not in kvm_s390_vm_mem_op()
2962 * taken, it is not guaranteed that the vm is/remains non-protected. in kvm_s390_vm_mem_op()
2964 * on the access, -EFAULT is returned and the vm may crash the in kvm_s390_vm_mem_op()
2969 if (kvm_s390_pv_get_handle(kvm)) in kvm_s390_vm_mem_op()
2970 return -EINVAL; in kvm_s390_vm_mem_op()
2972 switch (mop->op) { in kvm_s390_vm_mem_op()
2975 return kvm_s390_vm_mem_op_abs(kvm, mop); in kvm_s390_vm_mem_op()
2977 return kvm_s390_vm_mem_op_cmpxchg(kvm, mop); in kvm_s390_vm_mem_op()
2979 return -EINVAL; in kvm_s390_vm_mem_op()
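The dispatcher above serves the VM-scoped KVM_S390_MEM_OP ioctl (advertised via KVM_CAP_S390_MEM_OP_EXTENSION). A hedged sketch of an absolute read with storage-key checking; vm_fd, the guest address and the key value are illustrative assumptions, and error semantics follow the KVM_S390_MEM_OP documentation:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_guest_absolute(int vm_fd, uint64_t gaddr, void *buf, uint32_t len)
{
	struct kvm_s390_mem_op op = {
		.gaddr = gaddr,
		.size = len,
		.op = KVM_S390_MEMOP_ABSOLUTE_READ,
		.buf = (uint64_t)(uintptr_t)buf,
		.flags = KVM_S390_MEMOP_F_SKEY_PROTECTION,
		.key = 9,			/* storage key the access is checked against */
	};

	return ioctl(vm_fd, KVM_S390_MEM_OP, &op);
}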
2985 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl() local
2994 r = -EFAULT; in kvm_arch_vm_ioctl()
2997 r = kvm_s390_inject_vm(kvm, &s390int); in kvm_arch_vm_ioctl()
3003 r = -EINVAL; in kvm_arch_vm_ioctl()
3004 if (kvm->arch.use_irqchip) { in kvm_arch_vm_ioctl()
3007 r = kvm_set_irq_routing(kvm, &routing, 0, 0); in kvm_arch_vm_ioctl()
3012 r = -EFAULT; in kvm_arch_vm_ioctl()
3015 r = kvm_s390_vm_set_attr(kvm, &attr); in kvm_arch_vm_ioctl()
3019 r = -EFAULT; in kvm_arch_vm_ioctl()
3022 r = kvm_s390_vm_get_attr(kvm, &attr); in kvm_arch_vm_ioctl()
3026 r = -EFAULT; in kvm_arch_vm_ioctl()
3029 r = kvm_s390_vm_has_attr(kvm, &attr); in kvm_arch_vm_ioctl()
3035 r = -EFAULT; in kvm_arch_vm_ioctl()
3039 r = kvm_s390_get_skeys(kvm, &args); in kvm_arch_vm_ioctl()
3045 r = -EFAULT; in kvm_arch_vm_ioctl()
3049 r = kvm_s390_set_skeys(kvm, &args); in kvm_arch_vm_ioctl()
3055 r = -EFAULT; in kvm_arch_vm_ioctl()
3058 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3059 r = kvm_s390_get_cmma_bits(kvm, &args); in kvm_arch_vm_ioctl()
3060 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3064 r = -EFAULT; in kvm_arch_vm_ioctl()
3071 r = -EFAULT; in kvm_arch_vm_ioctl()
3074 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3075 r = kvm_s390_set_cmma_bits(kvm, &args); in kvm_arch_vm_ioctl()
3076 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3083 kvm_s390_set_user_cpu_state_ctrl(kvm); in kvm_arch_vm_ioctl()
3086 r = -EINVAL; in kvm_arch_vm_ioctl()
3090 r = -EFAULT; in kvm_arch_vm_ioctl()
3094 r = -EINVAL; in kvm_arch_vm_ioctl()
3097 /* must be called without kvm->lock */ in kvm_arch_vm_ioctl()
3098 r = kvm_s390_handle_pv(kvm, &args); in kvm_arch_vm_ioctl()
3100 r = -EFAULT; in kvm_arch_vm_ioctl()
3109 r = kvm_s390_vm_mem_op(kvm, &mem_op); in kvm_arch_vm_ioctl()
3111 r = -EFAULT; in kvm_arch_vm_ioctl()
3117 r = -EINVAL; in kvm_arch_vm_ioctl()
3121 r = -EFAULT; in kvm_arch_vm_ioctl()
3124 r = kvm_s390_pci_zpci_op(kvm, &args); in kvm_arch_vm_ioctl()
3128 r = -ENOTTY; in kvm_arch_vm_ioctl()
3154 static void kvm_s390_set_crycb_format(struct kvm *kvm) in kvm_s390_set_crycb_format() argument
3156 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb; in kvm_s390_set_crycb_format()
3158 /* Clear the CRYCB format bits - i.e., set format 0 by default */ in kvm_s390_set_crycb_format()
3159 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK); in kvm_s390_set_crycb_format()
3162 if (!test_kvm_facility(kvm, 76)) in kvm_s390_set_crycb_format()
3166 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2; in kvm_s390_set_crycb_format()
3168 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; in kvm_s390_set_crycb_format()
3174 * @kvm: pointer to the target guest's KVM struct containing the crypto masks
3181 * which the KVM guest is granted access.
3183 * Note: The kvm->lock mutex must be locked by the caller before invoking this
3186 void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm, in kvm_arch_crypto_set_masks() argument
3189 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb; in kvm_arch_crypto_set_masks()
3191 kvm_s390_vcpu_block_all(kvm); in kvm_arch_crypto_set_masks()
3193 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) { in kvm_arch_crypto_set_masks()
3195 memcpy(crycb->apcb1.apm, apm, 32); in kvm_arch_crypto_set_masks()
3196 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx", in kvm_arch_crypto_set_masks()
3198 memcpy(crycb->apcb1.aqm, aqm, 32); in kvm_arch_crypto_set_masks()
3199 VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx", in kvm_arch_crypto_set_masks()
3201 memcpy(crycb->apcb1.adm, adm, 32); in kvm_arch_crypto_set_masks()
3202 VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx", in kvm_arch_crypto_set_masks()
3207 memcpy(crycb->apcb0.apm, apm, 8); in kvm_arch_crypto_set_masks()
3208 memcpy(crycb->apcb0.aqm, aqm, 2); in kvm_arch_crypto_set_masks()
3209 memcpy(crycb->apcb0.adm, adm, 2); in kvm_arch_crypto_set_masks()
3210 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x", in kvm_arch_crypto_set_masks()
3219 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART); in kvm_arch_crypto_set_masks()
3220 kvm_s390_vcpu_unblock_all(kvm); in kvm_arch_crypto_set_masks()
3227 * @kvm: pointer to the target guest's KVM struct containing the crypto masks
3231 * which the KVM guest is granted access.
3233 * Note: The kvm->lock mutex must be locked by the caller before invoking this
3236 void kvm_arch_crypto_clear_masks(struct kvm *kvm) in kvm_arch_crypto_clear_masks() argument
3238 kvm_s390_vcpu_block_all(kvm); in kvm_arch_crypto_clear_masks()
3240 memset(&kvm->arch.crypto.crycb->apcb0, 0, in kvm_arch_crypto_clear_masks()
3241 sizeof(kvm->arch.crypto.crycb->apcb0)); in kvm_arch_crypto_clear_masks()
3242 memset(&kvm->arch.crypto.crycb->apcb1, 0, in kvm_arch_crypto_clear_masks()
3243 sizeof(kvm->arch.crypto.crycb->apcb1)); in kvm_arch_crypto_clear_masks()
3245 VM_EVENT(kvm, 3, "%s", "CLR CRYCB:"); in kvm_arch_crypto_clear_masks()
3247 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART); in kvm_arch_crypto_clear_masks()
3248 kvm_s390_vcpu_unblock_all(kvm); in kvm_arch_crypto_clear_masks()
3261 static void kvm_s390_crypto_init(struct kvm *kvm) in kvm_s390_crypto_init() argument
3263 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb; in kvm_s390_crypto_init()
3264 kvm_s390_set_crycb_format(kvm); in kvm_s390_crypto_init()
3265 init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem); in kvm_s390_crypto_init()
3267 if (!test_kvm_facility(kvm, 76)) in kvm_s390_crypto_init()
3271 kvm->arch.crypto.aes_kw = 1; in kvm_s390_crypto_init()
3272 kvm->arch.crypto.dea_kw = 1; in kvm_s390_crypto_init()
3273 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_crypto_init()
3274 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_crypto_init()
3275 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_crypto_init()
3276 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_crypto_init()
3279 static void sca_dispose(struct kvm *kvm) in sca_dispose() argument
3281 if (kvm->arch.use_esca) in sca_dispose()
3282 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block)); in sca_dispose()
3284 free_page((unsigned long)(kvm->arch.sca)); in sca_dispose()
3285 kvm->arch.sca = NULL; in sca_dispose()
3288 void kvm_arch_free_vm(struct kvm *kvm) in kvm_arch_free_vm() argument
3291 kvm_s390_pci_clear_list(kvm); in kvm_arch_free_vm()
3293 __kvm_arch_free_vm(kvm); in kvm_arch_free_vm()
3296 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) in kvm_arch_init_vm() argument
3303 rc = -EINVAL; in kvm_arch_init_vm()
3318 rc = -ENOMEM; in kvm_arch_init_vm()
3322 rwlock_init(&kvm->arch.sca_lock); in kvm_arch_init_vm()
3324 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags); in kvm_arch_init_vm()
3325 if (!kvm->arch.sca) in kvm_arch_init_vm()
3331 kvm->arch.sca = (struct bsca_block *) in kvm_arch_init_vm()
3332 ((char *) kvm->arch.sca + sca_offset); in kvm_arch_init_vm()
3335 sprintf(debug_name, "kvm-%u", current->pid); in kvm_arch_init_vm()
3337 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long)); in kvm_arch_init_vm()
3338 if (!kvm->arch.dbf) in kvm_arch_init_vm()
3342 kvm->arch.sie_page2 = in kvm_arch_init_vm()
3344 if (!kvm->arch.sie_page2) in kvm_arch_init_vm()
3347 kvm->arch.sie_page2->kvm = kvm; in kvm_arch_init_vm()
3348 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list; in kvm_arch_init_vm()
3351 kvm->arch.model.fac_mask[i] = stfle_fac_list[i] & in kvm_arch_init_vm()
3354 kvm->arch.model.fac_list[i] = stfle_fac_list[i] & in kvm_arch_init_vm()
3357 kvm->arch.model.subfuncs = kvm_s390_available_subfunc; in kvm_arch_init_vm()
3359 /* we are always in czam mode - even on pre z14 machines */ in kvm_arch_init_vm()
3360 set_kvm_facility(kvm->arch.model.fac_mask, 138); in kvm_arch_init_vm()
3361 set_kvm_facility(kvm->arch.model.fac_list, 138); in kvm_arch_init_vm()
3362 /* we emulate STHYI in kvm */ in kvm_arch_init_vm()
3363 set_kvm_facility(kvm->arch.model.fac_mask, 74); in kvm_arch_init_vm()
3364 set_kvm_facility(kvm->arch.model.fac_list, 74); in kvm_arch_init_vm()
3366 set_kvm_facility(kvm->arch.model.fac_mask, 147); in kvm_arch_init_vm()
3367 set_kvm_facility(kvm->arch.model.fac_list, 147); in kvm_arch_init_vm()
3371 set_kvm_facility(kvm->arch.model.fac_mask, 65); in kvm_arch_init_vm()
3373 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid(); in kvm_arch_init_vm()
3374 kvm->arch.model.ibc = sclp.ibc & 0x0fff; in kvm_arch_init_vm()
3376 kvm->arch.model.uv_feat_guest.feat = 0; in kvm_arch_init_vm()
3378 kvm_s390_crypto_init(kvm); in kvm_arch_init_vm()
3381 mutex_lock(&kvm->lock); in kvm_arch_init_vm()
3382 kvm_s390_pci_init_list(kvm); in kvm_arch_init_vm()
3383 kvm_s390_vcpu_pci_enable_interp(kvm); in kvm_arch_init_vm()
3384 mutex_unlock(&kvm->lock); in kvm_arch_init_vm()
3387 mutex_init(&kvm->arch.float_int.ais_lock); in kvm_arch_init_vm()
3388 spin_lock_init(&kvm->arch.float_int.lock); in kvm_arch_init_vm()
3390 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); in kvm_arch_init_vm()
3391 init_waitqueue_head(&kvm->arch.ipte_wq); in kvm_arch_init_vm()
3392 mutex_init(&kvm->arch.ipte_mutex); in kvm_arch_init_vm()
3394 debug_register_view(kvm->arch.dbf, &debug_sprintf_view); in kvm_arch_init_vm()
3395 VM_EVENT(kvm, 3, "vm created with type %lu", type); in kvm_arch_init_vm()
3398 kvm->arch.gmap = NULL; in kvm_arch_init_vm()
3399 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT; in kvm_arch_init_vm()
3402 kvm->arch.mem_limit = TASK_SIZE_MAX; in kvm_arch_init_vm()
3404 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX, in kvm_arch_init_vm()
3406 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1); in kvm_arch_init_vm()
3407 if (!kvm->arch.gmap) in kvm_arch_init_vm()
3409 kvm->arch.gmap->private = kvm; in kvm_arch_init_vm()
3410 kvm->arch.gmap->pfault_enabled = 0; in kvm_arch_init_vm()
3413 kvm->arch.use_pfmfi = sclp.has_pfmfi; in kvm_arch_init_vm()
3414 kvm->arch.use_skf = sclp.has_skey; in kvm_arch_init_vm()
3415 spin_lock_init(&kvm->arch.start_stop_lock); in kvm_arch_init_vm()
3416 kvm_s390_vsie_init(kvm); in kvm_arch_init_vm()
3418 kvm_s390_gisa_init(kvm); in kvm_arch_init_vm()
3419 INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup); in kvm_arch_init_vm()
3420 kvm->arch.pv.set_aside = NULL; in kvm_arch_init_vm()
3421 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid); in kvm_arch_init_vm()
3425 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_init_vm()
3426 debug_unregister(kvm->arch.dbf); in kvm_arch_init_vm()
3427 sca_dispose(kvm); in kvm_arch_init_vm()
3437 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id); in kvm_arch_vcpu_destroy()
3440 if (!kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
3442 kvm_s390_update_topology_change_report(vcpu->kvm, 1); in kvm_arch_vcpu_destroy()
3444 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
3445 gmap_remove(vcpu->arch.gmap); in kvm_arch_vcpu_destroy()
3447 if (vcpu->kvm->arch.use_cmma) in kvm_arch_vcpu_destroy()
3452 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_destroy()
3455 void kvm_arch_destroy_vm(struct kvm *kvm) in kvm_arch_destroy_vm() argument
3459 kvm_destroy_vcpus(kvm); in kvm_arch_destroy_vm()
3460 sca_dispose(kvm); in kvm_arch_destroy_vm()
3461 kvm_s390_gisa_destroy(kvm); in kvm_arch_destroy_vm()
3463 * We are already at the end of life and kvm->lock is not taken. in kvm_arch_destroy_vm()
3467 kvm_s390_pv_deinit_cleanup_all(kvm, &rc, &rrc); in kvm_arch_destroy_vm()
3469 * Remove the mmu notifier only when the whole KVM VM is torn down, in kvm_arch_destroy_vm()
3474 if (kvm->arch.pv.mmu_notifier.ops) in kvm_arch_destroy_vm()
3475 mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm); in kvm_arch_destroy_vm()
3477 debug_unregister(kvm->arch.dbf); in kvm_arch_destroy_vm()
3478 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_destroy_vm()
3479 if (!kvm_is_ucontrol(kvm)) in kvm_arch_destroy_vm()
3480 gmap_remove(kvm->arch.gmap); in kvm_arch_destroy_vm()
3481 kvm_s390_destroy_adapters(kvm); in kvm_arch_destroy_vm()
3482 kvm_s390_clear_float_irqs(kvm); in kvm_arch_destroy_vm()
3483 kvm_s390_vsie_destroy(kvm); in kvm_arch_destroy_vm()
3484 KVM_EVENT(3, "vm 0x%pK destroyed", kvm); in kvm_arch_destroy_vm()
3490 vcpu->arch.gmap = gmap_create(current->mm, -1UL); in __kvm_ucontrol_vcpu_init()
3491 if (!vcpu->arch.gmap) in __kvm_ucontrol_vcpu_init()
3492 return -ENOMEM; in __kvm_ucontrol_vcpu_init()
3493 vcpu->arch.gmap->private = vcpu->kvm; in __kvm_ucontrol_vcpu_init()
3502 read_lock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
3503 if (vcpu->kvm->arch.use_esca) { in sca_del_vcpu()
3504 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
3506 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn); in sca_del_vcpu()
3507 sca->cpu[vcpu->vcpu_id].sda = 0; in sca_del_vcpu()
3509 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
3511 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn); in sca_del_vcpu()
3512 sca->cpu[vcpu->vcpu_id].sda = 0; in sca_del_vcpu()
3514 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
3520 phys_addr_t sca_phys = virt_to_phys(vcpu->kvm->arch.sca); in sca_add_vcpu()
3523 vcpu->arch.sie_block->scaoh = sca_phys >> 32; in sca_add_vcpu()
3524 vcpu->arch.sie_block->scaol = sca_phys; in sca_add_vcpu()
3527 read_lock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
3528 if (vcpu->kvm->arch.use_esca) { in sca_add_vcpu()
3529 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
3532 sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block); in sca_add_vcpu()
3533 vcpu->arch.sie_block->scaoh = sca_phys >> 32; in sca_add_vcpu()
3534 vcpu->arch.sie_block->scaol = sca_phys & ESCA_SCAOL_MASK; in sca_add_vcpu()
3535 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; in sca_add_vcpu()
3536 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn); in sca_add_vcpu()
3538 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
3541 sca->cpu[vcpu->vcpu_id].sda = virt_to_phys(vcpu->arch.sie_block); in sca_add_vcpu()
3542 vcpu->arch.sie_block->scaoh = sca_phys >> 32; in sca_add_vcpu()
3543 vcpu->arch.sie_block->scaol = sca_phys; in sca_add_vcpu()
3544 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn); in sca_add_vcpu()
3546 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
3552 d->sda = s->sda; in sca_copy_entry()
3553 d->sigp_ctrl.c = s->sigp_ctrl.c; in sca_copy_entry()
3554 d->sigp_ctrl.scn = s->sigp_ctrl.scn; in sca_copy_entry()
3561 d->ipte_control = s->ipte_control; in sca_copy_b_to_e()
3562 d->mcn[0] = s->mcn; in sca_copy_b_to_e()
3564 sca_copy_entry(&d->cpu[i], &s->cpu[i]); in sca_copy_b_to_e()
3567 static int sca_switch_to_extended(struct kvm *kvm) in sca_switch_to_extended() argument
3569 struct bsca_block *old_sca = kvm->arch.sca; in sca_switch_to_extended()
3576 if (kvm->arch.use_esca) in sca_switch_to_extended()
3581 return -ENOMEM; in sca_switch_to_extended()
3587 kvm_s390_vcpu_block_all(kvm); in sca_switch_to_extended()
3588 write_lock(&kvm->arch.sca_lock); in sca_switch_to_extended()
3592 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) { in sca_switch_to_extended()
3593 vcpu->arch.sie_block->scaoh = scaoh; in sca_switch_to_extended()
3594 vcpu->arch.sie_block->scaol = scaol; in sca_switch_to_extended()
3595 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA; in sca_switch_to_extended()
3597 kvm->arch.sca = new_sca; in sca_switch_to_extended()
3598 kvm->arch.use_esca = 1; in sca_switch_to_extended()
3600 write_unlock(&kvm->arch.sca_lock); in sca_switch_to_extended()
3601 kvm_s390_vcpu_unblock_all(kvm); in sca_switch_to_extended()
3605 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)", in sca_switch_to_extended()
3606 old_sca, kvm->arch.sca); in sca_switch_to_extended()
3610 static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id) in sca_can_add_vcpu() argument
3624 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm); in sca_can_add_vcpu()
3632 WARN_ON_ONCE(vcpu->arch.cputm_start != 0); in __start_cpu_timer_accounting()
3633 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in __start_cpu_timer_accounting()
3634 vcpu->arch.cputm_start = get_tod_clock_fast(); in __start_cpu_timer_accounting()
3635 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in __start_cpu_timer_accounting()
3641 WARN_ON_ONCE(vcpu->arch.cputm_start == 0); in __stop_cpu_timer_accounting()
3642 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in __stop_cpu_timer_accounting()
3643 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start; in __stop_cpu_timer_accounting()
3644 vcpu->arch.cputm_start = 0; in __stop_cpu_timer_accounting()
3645 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in __stop_cpu_timer_accounting()
3651 WARN_ON_ONCE(vcpu->arch.cputm_enabled); in __enable_cpu_timer_accounting()
3652 vcpu->arch.cputm_enabled = true; in __enable_cpu_timer_accounting()
3659 WARN_ON_ONCE(!vcpu->arch.cputm_enabled); in __disable_cpu_timer_accounting()
3661 vcpu->arch.cputm_enabled = false; in __disable_cpu_timer_accounting()
3678 /* set the cpu timer - may only be called from the VCPU thread itself */
3682 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount); in kvm_s390_set_cpu_timer()
3683 if (vcpu->arch.cputm_enabled) in kvm_s390_set_cpu_timer()
3684 vcpu->arch.cputm_start = get_tod_clock_fast(); in kvm_s390_set_cpu_timer()
3685 vcpu->arch.sie_block->cputm = cputm; in kvm_s390_set_cpu_timer()
3686 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount); in kvm_s390_set_cpu_timer()
3690 /* update and get the cpu timer - can also be called from other VCPU threads */
3696 if (unlikely(!vcpu->arch.cputm_enabled)) in kvm_s390_get_cpu_timer()
3697 return vcpu->arch.sie_block->cputm; in kvm_s390_get_cpu_timer()
3701 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount); in kvm_s390_get_cpu_timer()
3706 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu); in kvm_s390_get_cpu_timer()
3707 value = vcpu->arch.sie_block->cputm; in kvm_s390_get_cpu_timer()
3709 if (likely(vcpu->arch.cputm_start)) in kvm_s390_get_cpu_timer()
3710 value -= get_tod_clock_fast() - vcpu->arch.cputm_start; in kvm_s390_get_cpu_timer()
3711 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1)); in kvm_s390_get_cpu_timer()
3719 gmap_enable(vcpu->arch.enabled_gmap); in kvm_arch_vcpu_load()
3721 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) in kvm_arch_vcpu_load()
3723 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
3728 vcpu->cpu = -1; in kvm_arch_vcpu_put()
3729 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu)) in kvm_arch_vcpu_put()
3732 vcpu->arch.enabled_gmap = gmap_get_enabled(); in kvm_arch_vcpu_put()
3733 gmap_disable(vcpu->arch.enabled_gmap); in kvm_arch_vcpu_put()
3739 mutex_lock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
3741 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; in kvm_arch_vcpu_postcreate()
3742 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx; in kvm_arch_vcpu_postcreate()
3744 mutex_unlock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
3745 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_postcreate()
3746 vcpu->arch.gmap = vcpu->kvm->arch.gmap; in kvm_arch_vcpu_postcreate()
3749 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0) in kvm_arch_vcpu_postcreate()
3750 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC; in kvm_arch_vcpu_postcreate()
3752 vcpu->arch.enabled_gmap = vcpu->arch.gmap; in kvm_arch_vcpu_postcreate()
3755 static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr) in kvm_has_pckmo_subfunc() argument
3757 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) && in kvm_has_pckmo_subfunc()
3763 static bool kvm_has_pckmo_ecc(struct kvm *kvm) in kvm_has_pckmo_ecc() argument
3766 return kvm_has_pckmo_subfunc(kvm, 32) || in kvm_has_pckmo_ecc()
3767 kvm_has_pckmo_subfunc(kvm, 33) || in kvm_has_pckmo_ecc()
3768 kvm_has_pckmo_subfunc(kvm, 34) || in kvm_has_pckmo_ecc()
3769 kvm_has_pckmo_subfunc(kvm, 40) || in kvm_has_pckmo_ecc()
3770 kvm_has_pckmo_subfunc(kvm, 41); in kvm_has_pckmo_ecc()
3780 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76)) in kvm_s390_vcpu_crypto_setup()
3783 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; in kvm_s390_vcpu_crypto_setup()
3784 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA); in kvm_s390_vcpu_crypto_setup()
3785 vcpu->arch.sie_block->eca &= ~ECA_APIE; in kvm_s390_vcpu_crypto_setup()
3786 vcpu->arch.sie_block->ecd &= ~ECD_ECC; in kvm_s390_vcpu_crypto_setup()
3788 if (vcpu->kvm->arch.crypto.apie) in kvm_s390_vcpu_crypto_setup()
3789 vcpu->arch.sie_block->eca |= ECA_APIE; in kvm_s390_vcpu_crypto_setup()
3792 if (vcpu->kvm->arch.crypto.aes_kw) { in kvm_s390_vcpu_crypto_setup()
3793 vcpu->arch.sie_block->ecb3 |= ECB3_AES; in kvm_s390_vcpu_crypto_setup()
3795 if (kvm_has_pckmo_ecc(vcpu->kvm)) in kvm_s390_vcpu_crypto_setup()
3796 vcpu->arch.sie_block->ecd |= ECD_ECC; in kvm_s390_vcpu_crypto_setup()
3799 if (vcpu->kvm->arch.crypto.dea_kw) in kvm_s390_vcpu_crypto_setup()
3800 vcpu->arch.sie_block->ecb3 |= ECB3_DEA; in kvm_s390_vcpu_crypto_setup()
3805 free_page((unsigned long)phys_to_virt(vcpu->arch.sie_block->cbrlo)); in kvm_s390_vcpu_unsetup_cmma()
3806 vcpu->arch.sie_block->cbrlo = 0; in kvm_s390_vcpu_unsetup_cmma()
3814 return -ENOMEM; in kvm_s390_vcpu_setup_cmma()
3816 vcpu->arch.sie_block->cbrlo = virt_to_phys(cbrlo_page); in kvm_s390_vcpu_setup_cmma()
3822 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model; in kvm_s390_vcpu_setup_model()
3824 vcpu->arch.sie_block->ibc = model->ibc; in kvm_s390_vcpu_setup_model()
3825 if (test_kvm_facility(vcpu->kvm, 7)) in kvm_s390_vcpu_setup_model()
3826 vcpu->arch.sie_block->fac = virt_to_phys(model->fac_list); in kvm_s390_vcpu_setup_model()
3834 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | in kvm_s390_vcpu_setup()
3838 if (test_kvm_facility(vcpu->kvm, 78)) in kvm_s390_vcpu_setup()
3840 else if (test_kvm_facility(vcpu->kvm, 8)) in kvm_s390_vcpu_setup()
3847 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT; in kvm_s390_vcpu_setup()
3848 if (test_kvm_facility(vcpu->kvm, 9)) in kvm_s390_vcpu_setup()
3849 vcpu->arch.sie_block->ecb |= ECB_SRSI; in kvm_s390_vcpu_setup()
3850 if (test_kvm_facility(vcpu->kvm, 11)) in kvm_s390_vcpu_setup()
3851 vcpu->arch.sie_block->ecb |= ECB_PTF; in kvm_s390_vcpu_setup()
3852 if (test_kvm_facility(vcpu->kvm, 73)) in kvm_s390_vcpu_setup()
3853 vcpu->arch.sie_block->ecb |= ECB_TE; in kvm_s390_vcpu_setup()
3854 if (!kvm_is_ucontrol(vcpu->kvm)) in kvm_s390_vcpu_setup()
3855 vcpu->arch.sie_block->ecb |= ECB_SPECI; in kvm_s390_vcpu_setup()
3857 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi) in kvm_s390_vcpu_setup()
3858 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI; in kvm_s390_vcpu_setup()
3859 if (test_kvm_facility(vcpu->kvm, 130)) in kvm_s390_vcpu_setup()
3860 vcpu->arch.sie_block->ecb2 |= ECB2_IEP; in kvm_s390_vcpu_setup()
3861 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI; in kvm_s390_vcpu_setup()
3863 vcpu->arch.sie_block->eca |= ECA_CEI; in kvm_s390_vcpu_setup()
3865 vcpu->arch.sie_block->eca |= ECA_IB; in kvm_s390_vcpu_setup()
3867 vcpu->arch.sie_block->eca |= ECA_SII; in kvm_s390_vcpu_setup()
3869 vcpu->arch.sie_block->eca |= ECA_SIGPI; in kvm_s390_vcpu_setup()
3870 if (test_kvm_facility(vcpu->kvm, 129)) { in kvm_s390_vcpu_setup()
3871 vcpu->arch.sie_block->eca |= ECA_VX; in kvm_s390_vcpu_setup()
3872 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; in kvm_s390_vcpu_setup()
3874 if (test_kvm_facility(vcpu->kvm, 139)) in kvm_s390_vcpu_setup()
3875 vcpu->arch.sie_block->ecd |= ECD_MEF; in kvm_s390_vcpu_setup()
3876 if (test_kvm_facility(vcpu->kvm, 156)) in kvm_s390_vcpu_setup()
3877 vcpu->arch.sie_block->ecd |= ECD_ETOKENF; in kvm_s390_vcpu_setup()
3878 if (vcpu->arch.sie_block->gd) { in kvm_s390_vcpu_setup()
3879 vcpu->arch.sie_block->eca |= ECA_AIV; in kvm_s390_vcpu_setup()
3880 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u", in kvm_s390_vcpu_setup()
3881 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id); in kvm_s390_vcpu_setup()
3883 vcpu->arch.sie_block->sdnxo = virt_to_phys(&vcpu->run->s.regs.sdnx) | SDNXC; in kvm_s390_vcpu_setup()
3884 vcpu->arch.sie_block->riccbd = virt_to_phys(&vcpu->run->s.regs.riccb); in kvm_s390_vcpu_setup()
3889 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE; in kvm_s390_vcpu_setup()
3891 if (vcpu->kvm->arch.use_cmma) { in kvm_s390_vcpu_setup()
3896 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in kvm_s390_vcpu_setup()
3897 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; in kvm_s390_vcpu_setup()
3899 vcpu->arch.sie_block->hpid = HPID_KVM; in kvm_s390_vcpu_setup()
3905 mutex_lock(&vcpu->kvm->lock); in kvm_s390_vcpu_setup()
3906 if (kvm_s390_pv_is_protected(vcpu->kvm)) { in kvm_s390_vcpu_setup()
3911 mutex_unlock(&vcpu->kvm->lock); in kvm_s390_vcpu_setup()
3916 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) in kvm_arch_vcpu_precreate() argument
3918 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id)) in kvm_arch_vcpu_precreate()
3919 return -EINVAL; in kvm_arch_vcpu_precreate()
3931 return -ENOMEM; in kvm_arch_vcpu_create()
3933 vcpu->arch.sie_block = &sie_page->sie_block; in kvm_arch_vcpu_create()
3934 vcpu->arch.sie_block->itdba = virt_to_phys(&sie_page->itdb); in kvm_arch_vcpu_create()
3937 vcpu->arch.sie_block->mso = 0; in kvm_arch_vcpu_create()
3938 vcpu->arch.sie_block->msl = sclp.hamax; in kvm_arch_vcpu_create()
3940 vcpu->arch.sie_block->icpua = vcpu->vcpu_id; in kvm_arch_vcpu_create()
3941 spin_lock_init(&vcpu->arch.local_int.lock); in kvm_arch_vcpu_create()
3942 vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm); in kvm_arch_vcpu_create()
3943 seqcount_init(&vcpu->arch.cputm_seqcount); in kvm_arch_vcpu_create()
3945 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; in kvm_arch_vcpu_create()
3947 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX | in kvm_arch_vcpu_create()
3955 if (test_kvm_facility(vcpu->kvm, 64)) in kvm_arch_vcpu_create()
3956 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB; in kvm_arch_vcpu_create()
3957 if (test_kvm_facility(vcpu->kvm, 82)) in kvm_arch_vcpu_create()
3958 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC; in kvm_arch_vcpu_create()
3959 if (test_kvm_facility(vcpu->kvm, 133)) in kvm_arch_vcpu_create()
3960 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB; in kvm_arch_vcpu_create()
3961 if (test_kvm_facility(vcpu->kvm, 156)) in kvm_arch_vcpu_create()
3962 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN; in kvm_arch_vcpu_create()
3967 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS; in kvm_arch_vcpu_create()
3969 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS; in kvm_arch_vcpu_create()
3971 if (kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_create()
3977 VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", in kvm_arch_vcpu_create()
3978 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block); in kvm_arch_vcpu_create()
3979 trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block); in kvm_arch_vcpu_create()
3985 kvm_s390_update_topology_change_report(vcpu->kvm, 1); in kvm_arch_vcpu_create()
3989 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_create()
3990 gmap_remove(vcpu->arch.gmap); in kvm_arch_vcpu_create()
3992 free_page((unsigned long)(vcpu->arch.sie_block)); in kvm_arch_vcpu_create()
3998 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask); in kvm_arch_vcpu_runnable()
4004 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE); in kvm_arch_vcpu_in_kernel()
4009 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_block()
4015 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_unblock()
4020 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_request()
4026 return atomic_read(&vcpu->arch.sie_block->prog20) & in kvm_s390_vcpu_sie_inhibited()
4032 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20); in kvm_s390_vcpu_request_handled()
4043 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE) in exit_sie()
4057 struct kvm *kvm = gmap->private; in kvm_gmap_notifier() local
4067 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_gmap_notifier()
4070 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) { in kvm_gmap_notifier()
4071 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx", in kvm_gmap_notifier()
4083 vcpu->stat.halt_no_poll_steal++; in kvm_arch_no_poll()
4091 /* kvm common code refers to this, but never calls it */ in kvm_arch_vcpu_should_kick()
4099 int r = -EINVAL; in kvm_arch_vcpu_ioctl_get_one_reg()
4101 switch (reg->id) { in kvm_arch_vcpu_ioctl_get_one_reg()
4103 r = put_user(vcpu->arch.sie_block->todpr, in kvm_arch_vcpu_ioctl_get_one_reg()
4104 (u32 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4107 r = put_user(vcpu->arch.sie_block->epoch, in kvm_arch_vcpu_ioctl_get_one_reg()
4108 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4112 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4115 r = put_user(vcpu->arch.sie_block->ckc, in kvm_arch_vcpu_ioctl_get_one_reg()
4116 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4119 r = put_user(vcpu->arch.pfault_token, in kvm_arch_vcpu_ioctl_get_one_reg()
4120 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4123 r = put_user(vcpu->arch.pfault_compare, in kvm_arch_vcpu_ioctl_get_one_reg()
4124 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4127 r = put_user(vcpu->arch.pfault_select, in kvm_arch_vcpu_ioctl_get_one_reg()
4128 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4131 r = put_user(vcpu->arch.sie_block->pp, in kvm_arch_vcpu_ioctl_get_one_reg()
4132 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4135 r = put_user(vcpu->arch.sie_block->gbea, in kvm_arch_vcpu_ioctl_get_one_reg()
4136 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_get_one_reg()
4148 int r = -EINVAL; in kvm_arch_vcpu_ioctl_set_one_reg()
4151 switch (reg->id) { in kvm_arch_vcpu_ioctl_set_one_reg()
4153 r = get_user(vcpu->arch.sie_block->todpr, in kvm_arch_vcpu_ioctl_set_one_reg()
4154 (u32 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4157 r = get_user(vcpu->arch.sie_block->epoch, in kvm_arch_vcpu_ioctl_set_one_reg()
4158 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4161 r = get_user(val, (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4166 r = get_user(vcpu->arch.sie_block->ckc, in kvm_arch_vcpu_ioctl_set_one_reg()
4167 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4170 r = get_user(vcpu->arch.pfault_token, in kvm_arch_vcpu_ioctl_set_one_reg()
4171 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4172 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in kvm_arch_vcpu_ioctl_set_one_reg()
4176 r = get_user(vcpu->arch.pfault_compare, in kvm_arch_vcpu_ioctl_set_one_reg()
4177 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4180 r = get_user(vcpu->arch.pfault_select, in kvm_arch_vcpu_ioctl_set_one_reg()
4181 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4184 r = get_user(vcpu->arch.sie_block->pp, in kvm_arch_vcpu_ioctl_set_one_reg()
4185 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
4188 r = get_user(vcpu->arch.sie_block->gbea, in kvm_arch_vcpu_ioctl_set_one_reg()
4189 (u64 __user *)reg->addr); in kvm_arch_vcpu_ioctl_set_one_reg()
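These two handlers implement the generic KVM_GET_ONE_REG/KVM_SET_ONE_REG interface for the s390-specific registers listed above. A minimal sketch reading the guest CPU timer; vcpu_fd is an assumed open vCPU file descriptor:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int get_cpu_timer(int vcpu_fd, uint64_t *cputm)
{
	struct kvm_one_reg reg = {
		.id = KVM_REG_S390_CPU_TIMER,
		.addr = (uint64_t)(uintptr_t)cputm,	/* kernel writes the value here */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}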
4200 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI; in kvm_arch_vcpu_ioctl_normal_reset()
4201 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; in kvm_arch_vcpu_ioctl_normal_reset()
4202 memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb)); in kvm_arch_vcpu_ioctl_normal_reset()
4205 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) in kvm_arch_vcpu_ioctl_normal_reset()
4219 vcpu->arch.sie_block->gpsw.mask = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4220 vcpu->arch.sie_block->gpsw.addr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4223 vcpu->arch.sie_block->ckc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4224 memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr)); in kvm_arch_vcpu_ioctl_initial_reset()
4225 vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
4226 vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
4229 memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs)); in kvm_arch_vcpu_ioctl_initial_reset()
4230 vcpu->run->s.regs.ckc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4231 vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
4232 vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK; in kvm_arch_vcpu_ioctl_initial_reset()
4233 vcpu->run->psw_addr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4234 vcpu->run->psw_mask = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4235 vcpu->run->s.regs.todpr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4236 vcpu->run->s.regs.cputm = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4237 vcpu->run->s.regs.ckc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4238 vcpu->run->s.regs.pp = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4239 vcpu->run->s.regs.gbea = 1; in kvm_arch_vcpu_ioctl_initial_reset()
4240 vcpu->run->s.regs.fpc = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4247 vcpu->arch.sie_block->gbea = 1; in kvm_arch_vcpu_ioctl_initial_reset()
4248 vcpu->arch.sie_block->pp = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4249 vcpu->arch.sie_block->fpf &= ~FPF_BPBC; in kvm_arch_vcpu_ioctl_initial_reset()
4250 vcpu->arch.sie_block->todpr = 0; in kvm_arch_vcpu_ioctl_initial_reset()
4256 struct kvm_sync_regs *regs = &vcpu->run->s.regs; in kvm_arch_vcpu_ioctl_clear_reset()
4261 memset(&regs->gprs, 0, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_clear_reset()
4262 memset(&regs->vrs, 0, sizeof(regs->vrs)); in kvm_arch_vcpu_ioctl_clear_reset()
4263 memset(&regs->acrs, 0, sizeof(regs->acrs)); in kvm_arch_vcpu_ioctl_clear_reset()
4264 memset(&regs->gscb, 0, sizeof(regs->gscb)); in kvm_arch_vcpu_ioctl_clear_reset()

4266 regs->etoken = 0; in kvm_arch_vcpu_ioctl_clear_reset()
4267 regs->etoken_extension = 0; in kvm_arch_vcpu_ioctl_clear_reset()
4273 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_set_regs()
4281 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs)); in kvm_arch_vcpu_ioctl_get_regs()
4291 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs)); in kvm_arch_vcpu_ioctl_set_sregs()
4292 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs)); in kvm_arch_vcpu_ioctl_set_sregs()
4303 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs)); in kvm_arch_vcpu_ioctl_get_sregs()
4304 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs)); in kvm_arch_vcpu_ioctl_get_sregs()
4316 vcpu->run->s.regs.fpc = fpu->fpc; in kvm_arch_vcpu_ioctl_set_fpu()
4318 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs, in kvm_arch_vcpu_ioctl_set_fpu()
4319 (freg_t *) fpu->fprs); in kvm_arch_vcpu_ioctl_set_fpu()
4321 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs)); in kvm_arch_vcpu_ioctl_set_fpu()
4334 convert_vx_to_fp((freg_t *) fpu->fprs, in kvm_arch_vcpu_ioctl_get_fpu()
4335 (__vector128 *) vcpu->run->s.regs.vrs); in kvm_arch_vcpu_ioctl_get_fpu()
4337 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs)); in kvm_arch_vcpu_ioctl_get_fpu()
4338 fpu->fpc = vcpu->run->s.regs.fpc; in kvm_arch_vcpu_ioctl_get_fpu()
4349 rc = -EBUSY; in kvm_arch_vcpu_ioctl_set_initial_psw()
4351 vcpu->run->psw_mask = psw.mask; in kvm_arch_vcpu_ioctl_set_initial_psw()
4352 vcpu->run->psw_addr = psw.addr; in kvm_arch_vcpu_ioctl_set_initial_psw()
4360 return -EINVAL; /* not implemented yet */ in kvm_arch_vcpu_ioctl_translate()
4374 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
4377 if (dbg->control & ~VALID_GUESTDBG_FLAGS) { in kvm_arch_vcpu_ioctl_set_guest_debug()
4378 rc = -EINVAL; in kvm_arch_vcpu_ioctl_set_guest_debug()
4382 rc = -EINVAL; in kvm_arch_vcpu_ioctl_set_guest_debug()
4386 if (dbg->control & KVM_GUESTDBG_ENABLE) { in kvm_arch_vcpu_ioctl_set_guest_debug()
4387 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
4391 if (dbg->control & KVM_GUESTDBG_USE_HW_BP) in kvm_arch_vcpu_ioctl_set_guest_debug()
4395 vcpu->arch.guestdbg.last_bp = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
4399 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
4431 /* user space knows about this interface - let it control the state */ in kvm_arch_vcpu_ioctl_set_mpstate()
4432 kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm); in kvm_arch_vcpu_ioctl_set_mpstate()
4434 switch (mp_state->mp_state) { in kvm_arch_vcpu_ioctl_set_mpstate()
4443 rc = -ENXIO; in kvm_arch_vcpu_ioctl_set_mpstate()
4451 rc = -ENXIO; in kvm_arch_vcpu_ioctl_set_mpstate()
4470 * If the guest prefix changed, re-arm the ipte notifier for the in kvm_s390_handle_requests()
4478 rc = gmap_mprotect_notify(vcpu->arch.gmap, in kvm_s390_handle_requests()
4489 vcpu->arch.sie_block->ihcpu = 0xffff; in kvm_s390_handle_requests()
4495 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1); in kvm_s390_handle_requests()
4503 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0); in kvm_s390_handle_requests()
4510 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC; in kvm_s390_handle_requests()
4520 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA; in kvm_s390_handle_requests()
4526 * Re-enable CMM virtualization if CMMA is available and in kvm_s390_handle_requests()
4529 if ((vcpu->kvm->arch.use_cmma) && in kvm_s390_handle_requests()
4530 (vcpu->kvm->mm->context.uses_cmm)) in kvm_s390_handle_requests()
4531 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA; in kvm_s390_handle_requests()
4541 static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod) in __kvm_s390_set_tod_clock() argument
4551 kvm->arch.epoch = gtod->tod - clk.tod; in __kvm_s390_set_tod_clock()
4552 kvm->arch.epdx = 0; in __kvm_s390_set_tod_clock()
4553 if (test_kvm_facility(kvm, 139)) { in __kvm_s390_set_tod_clock()
4554 kvm->arch.epdx = gtod->epoch_idx - clk.ei; in __kvm_s390_set_tod_clock()
4555 if (kvm->arch.epoch > gtod->tod) in __kvm_s390_set_tod_clock()
4556 kvm->arch.epdx -= 1; in __kvm_s390_set_tod_clock()
4559 kvm_s390_vcpu_block_all(kvm); in __kvm_s390_set_tod_clock()
4560 kvm_for_each_vcpu(i, vcpu, kvm) { in __kvm_s390_set_tod_clock()
4561 vcpu->arch.sie_block->epoch = kvm->arch.epoch; in __kvm_s390_set_tod_clock()
4562 vcpu->arch.sie_block->epdx = kvm->arch.epdx; in __kvm_s390_set_tod_clock()
4565 kvm_s390_vcpu_unblock_all(kvm); in __kvm_s390_set_tod_clock()
4569 int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod) in kvm_s390_try_set_tod_clock() argument
4571 if (!mutex_trylock(&kvm->lock)) in kvm_s390_try_set_tod_clock()
4573 __kvm_s390_set_tod_clock(kvm, gtod); in kvm_s390_try_set_tod_clock()
4574 mutex_unlock(&kvm->lock); in kvm_s390_try_set_tod_clock()
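__kvm_s390_set_tod_clock() computes the guest epoch as a 72-bit difference: the low 64 bits wrap on subtraction, and the 8-bit epoch index takes a borrow whenever they do. A standalone illustration of that arithmetic (not kernel code; all names are made up for the example):

#include <stdint.h>

static void guest_epoch(uint64_t guest_tod, uint8_t guest_ei,
			uint64_t host_tod, uint8_t host_ei,
			uint64_t *epoch, uint8_t *epdx)
{
	*epoch = guest_tod - host_tod;		/* low 64 bits, wraps modulo 2^64 */
	*epdx = guest_ei - host_ei;		/* high 8 bits (epoch index) */
	if (*epoch > guest_tod)			/* the low-word subtraction borrowed */
		*epdx -= 1;
}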
4579 * kvm_arch_fault_in_page - fault-in guest page if necessary
4584 * Make sure that a guest page has been faulted-in on the host.
4590 return gmap_fault(vcpu->arch.gmap, gpa, in kvm_arch_fault_in_page()
4607 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); in __kvm_inject_pfault_token()
4614 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token); in kvm_arch_async_page_not_present()
4615 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token); in kvm_arch_async_page_not_present()
4623 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token); in kvm_arch_async_page_present()
4624 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token); in kvm_arch_async_page_present()
4647 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in kvm_arch_setup_async_pf()
4649 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) != in kvm_arch_setup_async_pf()
4650 vcpu->arch.pfault_compare) in kvm_arch_setup_async_pf()
4656 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK)) in kvm_arch_setup_async_pf()
4658 if (!vcpu->arch.gmap->pfault_enabled) in kvm_arch_setup_async_pf()
4661 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr)); in kvm_arch_setup_async_pf()
4662 hva += current->thread.gmap_addr & ~PAGE_MASK; in kvm_arch_setup_async_pf()
4663 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8)) in kvm_arch_setup_async_pf()
4666 return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch); in kvm_arch_setup_async_pf()
4680 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14]; in vcpu_pre_run()
4681 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15]; in vcpu_pre_run()
4686 if (!kvm_is_ucontrol(vcpu->kvm)) { in vcpu_pre_run()
4701 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask); in vcpu_pre_run()
4703 vcpu->arch.sie_block->icptcode = 0; in vcpu_pre_run()
4704 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags); in vcpu_pre_run()
4730 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1); in vcpu_post_run_fault_in_sie()
4735 /* Instruction-Fetching Exceptions - we can't detect the ilen. in vcpu_post_run_fault_in_sie()
4739 pgm_info = vcpu->arch.pgm; in vcpu_post_run_fault_in_sie()
4753 vcpu->arch.sie_block->icptcode); in vcpu_post_run()
4754 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); in vcpu_post_run()
4759 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14; in vcpu_post_run()
4760 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15; in vcpu_post_run()
4762 if (exit_reason == -EINTR) { in vcpu_post_run()
4764 sie_page = container_of(vcpu->arch.sie_block, in vcpu_post_run()
4766 mcck_info = &sie_page->mcck_info; in vcpu_post_run()
4771 if (vcpu->arch.sie_block->icptcode > 0) { in vcpu_post_run()
4774 if (rc != -EOPNOTSUPP) in vcpu_post_run()
4776 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC; in vcpu_post_run()
4777 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode; in vcpu_post_run()
4778 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa; in vcpu_post_run()
4779 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb; in vcpu_post_run()
4780 return -EREMOTE; in vcpu_post_run()
4781 } else if (exit_reason != -EFAULT) { in vcpu_post_run()
4782 vcpu->stat.exit_null++; in vcpu_post_run()
4784 } else if (kvm_is_ucontrol(vcpu->kvm)) { in vcpu_post_run()
4785 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL; in vcpu_post_run()
4786 vcpu->run->s390_ucontrol.trans_exc_code = in vcpu_post_run()
4787 current->thread.gmap_addr; in vcpu_post_run()
4788 vcpu->run->s390_ucontrol.pgm_code = 0x10; in vcpu_post_run()
4789 return -EREMOTE; in vcpu_post_run()
4790 } else if (current->thread.gmap_pfault) { in vcpu_post_run()
4792 current->thread.gmap_pfault = 0; in vcpu_post_run()
4795 vcpu->stat.pfault_sync++; in vcpu_post_run()
4796 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1); in vcpu_post_run()
4805 struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block; in __vcpu_run()
4808 * We try to hold kvm->srcu during most of vcpu_run (except when run- in __vcpu_run()
4828 memcpy(sie_page->pv_grregs, in __vcpu_run()
4829 vcpu->run->s.regs.gprs, in __vcpu_run()
4830 sizeof(sie_page->pv_grregs)); in __vcpu_run()
4834 exit_reason = sie64a(vcpu->arch.sie_block, in __vcpu_run()
4835 vcpu->run->s.regs.gprs); in __vcpu_run()
4837 memcpy(vcpu->run->s.regs.gprs, in __vcpu_run()
4838 sie_page->pv_grregs, in __vcpu_run()
4839 sizeof(sie_page->pv_grregs)); in __vcpu_run()
4842 * that leave the guest state in an "in-between" state in __vcpu_run()
4846 if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR || in __vcpu_run()
4847 vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) { in __vcpu_run()
4848 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK; in __vcpu_run()
4866 struct kvm_run *kvm_run = vcpu->run; in sync_regs_fmt2()
4870 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb; in sync_regs_fmt2()
4871 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb; in sync_regs_fmt2()
4872 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; in sync_regs_fmt2()
4873 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; in sync_regs_fmt2()
4874 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) { in sync_regs_fmt2()
4875 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr; in sync_regs_fmt2()
4876 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp; in sync_regs_fmt2()
4877 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea; in sync_regs_fmt2()
4879 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) { in sync_regs_fmt2()
4880 vcpu->arch.pfault_token = kvm_run->s.regs.pft; in sync_regs_fmt2()
4881 vcpu->arch.pfault_select = kvm_run->s.regs.pfs; in sync_regs_fmt2()
4882 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc; in sync_regs_fmt2()
4883 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID) in sync_regs_fmt2()
4886 if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) { in sync_regs_fmt2()
4887 vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318; in sync_regs_fmt2()
4888 vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc; in sync_regs_fmt2()
4889 VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc); in sync_regs_fmt2()
4895 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) && in sync_regs_fmt2()
4896 test_kvm_facility(vcpu->kvm, 64) && in sync_regs_fmt2()
4897 riccb->v && in sync_regs_fmt2()
4898 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) { in sync_regs_fmt2()
4900 vcpu->arch.sie_block->ecb3 |= ECB3_RI; in sync_regs_fmt2()
4903 * If userspace sets the gscb (e.g. after migration) to non-zero, in sync_regs_fmt2()
4906 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) && in sync_regs_fmt2()
4907 test_kvm_facility(vcpu->kvm, 133) && in sync_regs_fmt2()
4908 gscb->gssm && in sync_regs_fmt2()
4909 !vcpu->arch.gs_enabled) { in sync_regs_fmt2()
4911 vcpu->arch.sie_block->ecb |= ECB_GS; in sync_regs_fmt2()
4912 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT; in sync_regs_fmt2()
4913 vcpu->arch.gs_enabled = 1; in sync_regs_fmt2()
4915 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) && in sync_regs_fmt2()
4916 test_kvm_facility(vcpu->kvm, 82)) { in sync_regs_fmt2()
4917 vcpu->arch.sie_block->fpf &= ~FPF_BPBC; in sync_regs_fmt2()
4918 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0; in sync_regs_fmt2()
4923 if (current->thread.gs_cb) { in sync_regs_fmt2()
4924 vcpu->arch.host_gscb = current->thread.gs_cb; in sync_regs_fmt2()
4925 save_gs_cb(vcpu->arch.host_gscb); in sync_regs_fmt2()
4927 if (vcpu->arch.gs_enabled) { in sync_regs_fmt2()
4928 current->thread.gs_cb = (struct gs_cb *) in sync_regs_fmt2()
4929 &vcpu->run->s.regs.gscb; in sync_regs_fmt2()
4930 restore_gs_cb(current->thread.gs_cb); in sync_regs_fmt2()
4939 struct kvm_run *kvm_run = vcpu->run; in sync_regs()
4941 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) in sync_regs()
4942 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix); in sync_regs()
4943 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) { in sync_regs()
4944 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128); in sync_regs()
4948 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) { in sync_regs()
4949 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm); in sync_regs()
4950 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc; in sync_regs()
4952 save_access_regs(vcpu->arch.host_acrs); in sync_regs()
4953 restore_access_regs(vcpu->run->s.regs.acrs); in sync_regs()
4956 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc; in sync_regs()
4957 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs; in sync_regs()
4959 current->thread.fpu.regs = vcpu->run->s.regs.vrs; in sync_regs()
4961 current->thread.fpu.regs = vcpu->run->s.regs.fprs; in sync_regs()
4962 current->thread.fpu.fpc = vcpu->run->s.regs.fpc; in sync_regs()
4963 if (test_fp_ctl(current->thread.fpu.fpc)) in sync_regs()
4965 current->thread.fpu.fpc = 0; in sync_regs()
4980 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC; in sync_regs()
4981 vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask & in sync_regs()
4985 kvm_run->kvm_dirty_regs = 0; in sync_regs()
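sync_regs()/sync_regs_fmt2() above apply the synced-register area of kvm_run: userspace flags what it changed in kvm_run->kvm_dirty_regs before KVM_RUN, the kernel clears the mask once the values are applied, and store_regs() refreshes the area on the way out. A minimal userspace-side sketch of that handshake on s390 (assumes a vcpu fd with its kvm_run structure already mmap()ed; error handling omitted):

/* Push a new prefix into the vcpu through the synced-register area. */
#include <linux/kvm.h>
#include <sys/ioctl.h>

int run_with_new_prefix(int vcpu_fd, struct kvm_run *run, unsigned int new_prefix)
{
	run->s.regs.prefix = new_prefix;          /* value for sync_regs() to apply */
	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;   /* mark the field as dirty */

	int rc = ioctl(vcpu_fd, KVM_RUN, 0);      /* kernel clears kvm_dirty_regs */

	/* after the exit, store_regs() has refreshed run->s.regs */
	return rc;
}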
4990 struct kvm_run *kvm_run = vcpu->run; in store_regs_fmt2()
4992 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr; in store_regs_fmt2()
4993 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp; in store_regs_fmt2()
4994 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea; in store_regs_fmt2()
4995 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC; in store_regs_fmt2()
4996 kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val; in store_regs_fmt2()
5000 if (vcpu->arch.gs_enabled) in store_regs_fmt2()
5001 save_gs_cb(current->thread.gs_cb); in store_regs_fmt2()
5002 current->thread.gs_cb = vcpu->arch.host_gscb; in store_regs_fmt2()
5003 restore_gs_cb(vcpu->arch.host_gscb); in store_regs_fmt2()
5004 if (!vcpu->arch.host_gscb) in store_regs_fmt2()
5006 vcpu->arch.host_gscb = NULL; in store_regs_fmt2()
5014 struct kvm_run *kvm_run = vcpu->run; in store_regs()
5016 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; in store_regs()
5017 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; in store_regs()
5018 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu); in store_regs()
5019 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128); in store_regs()
5020 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu); in store_regs()
5021 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc; in store_regs()
5022 kvm_run->s.regs.pft = vcpu->arch.pfault_token; in store_regs()
5023 kvm_run->s.regs.pfs = vcpu->arch.pfault_select; in store_regs()
5024 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare; in store_regs()
5025 save_access_regs(vcpu->run->s.regs.acrs); in store_regs()
5026 restore_access_regs(vcpu->arch.host_acrs); in store_regs()
5029 vcpu->run->s.regs.fpc = current->thread.fpu.fpc; in store_regs()
5031 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc; in store_regs()
5032 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs; in store_regs()
5039 struct kvm_run *kvm_run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
5048 if (vcpu->kvm->arch.pv.dumping) in kvm_arch_vcpu_ioctl_run()
5049 return -EINVAL; in kvm_arch_vcpu_ioctl_run()
5051 if (kvm_run->immediate_exit) in kvm_arch_vcpu_ioctl_run()
5052 return -EINTR; in kvm_arch_vcpu_ioctl_run()
5054 if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS || in kvm_arch_vcpu_ioctl_run()
5055 kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS) in kvm_arch_vcpu_ioctl_run()
5056 return -EINVAL; in kvm_arch_vcpu_ioctl_run()
5072 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { in kvm_arch_vcpu_ioctl_run()
5076 vcpu->vcpu_id); in kvm_arch_vcpu_ioctl_run()
5077 rc = -EINVAL; in kvm_arch_vcpu_ioctl_run()
5088 kvm_run->exit_reason = KVM_EXIT_INTR; in kvm_arch_vcpu_ioctl_run()
5089 rc = -EINTR; in kvm_arch_vcpu_ioctl_run()
5097 if (rc == -EREMOTE) { in kvm_arch_vcpu_ioctl_run()
5107 vcpu->stat.exit_userspace++; in kvm_arch_vcpu_ioctl_run()
5116 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
5117 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
5130 return -EFAULT; in kvm_s390_store_status_unloaded()
5134 return -EFAULT; in kvm_s390_store_status_unloaded()
5137 gpa -= __LC_FPREGS_SAVE_AREA; in kvm_s390_store_status_unloaded()
5141 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs); in kvm_s390_store_status_unloaded()
5146 vcpu->run->s.regs.fprs, 128); in kvm_s390_store_status_unloaded()
5149 vcpu->run->s.regs.gprs, 128); in kvm_s390_store_status_unloaded()
5151 &vcpu->arch.sie_block->gpsw, 16); in kvm_s390_store_status_unloaded()
5155 &vcpu->run->s.regs.fpc, 4); in kvm_s390_store_status_unloaded()
5157 &vcpu->arch.sie_block->todpr, 4); in kvm_s390_store_status_unloaded()
5161 clkcomp = vcpu->arch.sie_block->ckc >> 8; in kvm_s390_store_status_unloaded()
5165 &vcpu->run->s.regs.acrs, 64); in kvm_s390_store_status_unloaded()
5167 &vcpu->arch.sie_block->gcr, 128); in kvm_s390_store_status_unloaded()
5168 return rc ? -EFAULT : 0; in kvm_s390_store_status_unloaded()
5179 vcpu->run->s.regs.fpc = current->thread.fpu.fpc; in kvm_s390_vcpu_store_status()
5180 save_access_regs(vcpu->run->s.regs.acrs); in kvm_s390_vcpu_store_status()
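kvm_s390_vcpu_store_status() and kvm_s390_store_status_unloaded() above implement the KVM_S390_STORE_STATUS vcpu ioctl, whose argument is the target guest address (with the NOADDR/PREFIXED special values handled as the comment above notes). A minimal userspace sketch, assuming a stopped vcpu and leaving error handling out:

/* Ask KVM to store the vcpu status into guest memory at "addr". */
#include <linux/kvm.h>
#include <sys/ioctl.h>

int store_vcpu_status(int vcpu_fd, unsigned long addr)
{
	return ioctl(vcpu_fd, KVM_S390_STORE_STATUS, addr);
}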
5191 static void __disable_ibs_on_all_vcpus(struct kvm *kvm) in __disable_ibs_on_all_vcpus() argument
5196 kvm_for_each_vcpu(i, vcpu, kvm) { in __disable_ibs_on_all_vcpus()
5216 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1); in kvm_s390_vcpu_start()
5218 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
5219 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_start()
5225 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
5231 if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i))) in kvm_s390_vcpu_start()
5236 /* we're the only active VCPU -> speed it up */ in kvm_s390_vcpu_start()
5244 __disable_ibs_on_all_vcpus(vcpu->kvm); in kvm_s390_vcpu_start()
5254 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK; in kvm_s390_vcpu_start()
5260 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
5272 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0); in kvm_s390_vcpu_stop()
5274 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
5275 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_stop()
5281 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
5298 struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i); in kvm_s390_vcpu_stop()
5314 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
5323 if (cap->flags) in kvm_vcpu_ioctl_enable_cap()
5324 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
5326 switch (cap->cap) { in kvm_vcpu_ioctl_enable_cap()
5328 if (!vcpu->kvm->arch.css_support) { in kvm_vcpu_ioctl_enable_cap()
5329 vcpu->kvm->arch.css_support = 1; in kvm_vcpu_ioctl_enable_cap()
5330 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support"); in kvm_vcpu_ioctl_enable_cap()
5331 trace_kvm_s390_enable_css(vcpu->kvm); in kvm_vcpu_ioctl_enable_cap()
5336 r = -EINVAL; in kvm_vcpu_ioctl_enable_cap()
5345 void __user *uaddr = (void __user *)mop->buf; in kvm_s390_vcpu_sida_op()
5349 if (mop->flags || !mop->size) in kvm_s390_vcpu_sida_op()
5350 return -EINVAL; in kvm_s390_vcpu_sida_op()
5351 if (mop->size + mop->sida_offset < mop->size) in kvm_s390_vcpu_sida_op()
5352 return -EINVAL; in kvm_s390_vcpu_sida_op()
5353 if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block)) in kvm_s390_vcpu_sida_op()
5354 return -E2BIG; in kvm_s390_vcpu_sida_op()
5356 return -EINVAL; in kvm_s390_vcpu_sida_op()
5358 sida_addr = (char *)sida_addr(vcpu->arch.sie_block) + mop->sida_offset; in kvm_s390_vcpu_sida_op()
5360 switch (mop->op) { in kvm_s390_vcpu_sida_op()
5362 if (copy_to_user(uaddr, sida_addr, mop->size)) in kvm_s390_vcpu_sida_op()
5363 r = -EFAULT; in kvm_s390_vcpu_sida_op()
5367 if (copy_from_user(sida_addr, uaddr, mop->size)) in kvm_s390_vcpu_sida_op()
5368 r = -EFAULT; in kvm_s390_vcpu_sida_op()
5377 void __user *uaddr = (void __user *)mop->buf; in kvm_s390_vcpu_mem_op()
5387 if (mop->ar >= NUM_ACRS) in kvm_s390_vcpu_mem_op()
5388 return -EINVAL; in kvm_s390_vcpu_mem_op()
5390 return -EINVAL; in kvm_s390_vcpu_mem_op()
5391 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) { in kvm_s390_vcpu_mem_op()
5392 tmpbuf = vmalloc(mop->size); in kvm_s390_vcpu_mem_op()
5394 return -ENOMEM; in kvm_s390_vcpu_mem_op()
5397 acc_mode = mop->op == KVM_S390_MEMOP_LOGICAL_READ ? GACC_FETCH : GACC_STORE; in kvm_s390_vcpu_mem_op()
5398 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { in kvm_s390_vcpu_mem_op()
5399 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, in kvm_s390_vcpu_mem_op()
5400 acc_mode, mop->key); in kvm_s390_vcpu_mem_op()
5404 r = read_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf, in kvm_s390_vcpu_mem_op()
5405 mop->size, mop->key); in kvm_s390_vcpu_mem_op()
5408 if (copy_to_user(uaddr, tmpbuf, mop->size)) { in kvm_s390_vcpu_mem_op()
5409 r = -EFAULT; in kvm_s390_vcpu_mem_op()
5413 if (copy_from_user(tmpbuf, uaddr, mop->size)) { in kvm_s390_vcpu_mem_op()
5414 r = -EFAULT; in kvm_s390_vcpu_mem_op()
5417 r = write_guest_with_key(vcpu, mop->gaddr, mop->ar, tmpbuf, in kvm_s390_vcpu_mem_op()
5418 mop->size, mop->key); in kvm_s390_vcpu_mem_op()
5422 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0) in kvm_s390_vcpu_mem_op()
5423 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); in kvm_s390_vcpu_mem_op()
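kvm_s390_vcpu_mem_op() above backs the KVM_S390_MEM_OP vcpu ioctl: check-only probes skip the bounce buffer, real reads and writes go through a vmalloc'd temporary, and access exceptions can optionally be injected back into the guest. A hedged userspace sketch of a logical read; the field names (gaddr, size, op, buf, ar) are taken from the uapi struct kvm_s390_mem_op as I understand it, so verify them against linux/kvm.h:

/* Read "len" bytes from a guest logical address via KVM_S390_MEM_OP. */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <string.h>
#include <stdint.h>

int read_guest_logical(int vcpu_fd, uint64_t guest_addr, void *dst, uint32_t len)
{
	struct kvm_s390_mem_op op;

	memset(&op, 0, sizeof(op));
	op.gaddr = guest_addr;                   /* guest logical address */
	op.size  = len;
	op.op    = KVM_S390_MEMOP_LOGICAL_READ;  /* translate via current vcpu state */
	op.buf   = (uint64_t)(uintptr_t)dst;     /* userspace destination buffer */
	op.ar    = 0;                            /* access register number */

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}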
5435 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_s390_vcpu_memsida_op()
5437 switch (mop->op) { in kvm_s390_vcpu_memsida_op()
5444 /* we are locked against sida going away by the vcpu->mutex */ in kvm_s390_vcpu_memsida_op()
5448 r = -EINVAL; in kvm_s390_vcpu_memsida_op()
5451 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvm_s390_vcpu_memsida_op()
5458 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl()
5467 return -EFAULT; in kvm_arch_vcpu_async_ioctl()
5476 return -EFAULT; in kvm_arch_vcpu_async_ioctl()
5478 return -EINVAL; in kvm_arch_vcpu_async_ioctl()
5483 rc = -ENOIOCTLCMD; in kvm_arch_vcpu_async_ioctl()
5488 * To simplify single stepping of userspace-emulated instructions, in kvm_arch_vcpu_async_ioctl()
5495 vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING; in kvm_arch_vcpu_async_ioctl()
5508 if (!vcpu->kvm->arch.pv.dumping) in kvm_s390_handle_pv_vcpu_dump()
5509 return -EINVAL; in kvm_s390_handle_pv_vcpu_dump()
5511 if (copy_from_user(&dmp, (__u8 __user *)cmd->data, sizeof(dmp))) in kvm_s390_handle_pv_vcpu_dump()
5512 return -EFAULT; in kvm_s390_handle_pv_vcpu_dump()
5516 return -EINVAL; in kvm_s390_handle_pv_vcpu_dump()
5520 return -EINVAL; in kvm_s390_handle_pv_vcpu_dump()
5524 return -ENOMEM; in kvm_s390_handle_pv_vcpu_dump()
5526 ret = kvm_s390_pv_dump_cpu(vcpu, data, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv_vcpu_dump()
5529 vcpu->vcpu_id, cmd->rc, cmd->rrc); in kvm_s390_handle_pv_vcpu_dump()
5532 ret = -EINVAL; in kvm_s390_handle_pv_vcpu_dump()
5536 ret = -EFAULT; in kvm_s390_handle_pv_vcpu_dump()
5545 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
5555 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5557 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
5562 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5602 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5605 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5619 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5623 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
5624 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5628 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr, in kvm_arch_vcpu_ioctl()
5636 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5640 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
5641 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5645 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr, in kvm_arch_vcpu_ioctl()
5651 r = gmap_fault(vcpu->arch.gmap, arg, 0); in kvm_arch_vcpu_ioctl()
5657 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5669 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5675 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5681 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5693 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5697 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5709 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5713 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5717 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5730 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5734 r = -ENOTTY; in kvm_arch_vcpu_ioctl()
5744 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET) in kvm_arch_vcpu_fault()
5745 && (kvm_is_ucontrol(vcpu->kvm))) { in kvm_arch_vcpu_fault()
5746 vmf->page = virt_to_page(vcpu->arch.sie_block); in kvm_arch_vcpu_fault()
5747 get_page(vmf->page); in kvm_arch_vcpu_fault()
5754 bool kvm_arch_irqchip_in_kernel(struct kvm *kvm) in kvm_arch_irqchip_in_kernel() argument
5760 int kvm_arch_prepare_memory_region(struct kvm *kvm, in kvm_arch_prepare_memory_region() argument
5768 if (kvm_s390_pv_get_handle(kvm)) in kvm_arch_prepare_memory_region()
5769 return -EINVAL; in kvm_arch_prepare_memory_region()
5779 if (new->userspace_addr & 0xffffful) in kvm_arch_prepare_memory_region()
5780 return -EINVAL; in kvm_arch_prepare_memory_region()
5782 size = new->npages * PAGE_SIZE; in kvm_arch_prepare_memory_region()
5784 return -EINVAL; in kvm_arch_prepare_memory_region()
5786 if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit) in kvm_arch_prepare_memory_region()
5787 return -EINVAL; in kvm_arch_prepare_memory_region()
5790 if (!kvm->arch.migration_mode) in kvm_arch_prepare_memory_region()
5795 * - userspace creates a new memslot with dirty logging off, in kvm_arch_prepare_memory_region()
5796 * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and in kvm_arch_prepare_memory_region()
5802 !(new->flags & KVM_MEM_LOG_DIRTY_PAGES)) in kvm_arch_prepare_memory_region()
5803 WARN(kvm_s390_vm_stop_migration(kvm), in kvm_arch_prepare_memory_region()
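kvm_arch_prepare_memory_region() above enforces the s390 memslot constraints visible in the listing: the userspace address must be 1 MiB aligned and the slot must fit below the VM's memory limit. A minimal userspace sketch of registering such a slot through the generic KVM memslot API (assumes a 1 MiB-aligned mmap()ed backing; error handling omitted):

/* Register guest memory starting at guest physical address 0. */
#include <linux/kvm.h>
#include <sys/ioctl.h>

int add_slot(int vm_fd, void *host_mem, unsigned long long size)
{
	struct kvm_userspace_memory_region reg = {
		.slot            = 0,
		.flags           = 0,
		.guest_phys_addr = 0,                              /* must stay below mem_limit */
		.memory_size     = size,                           /* multiple of 1 MiB */
		.userspace_addr  = (unsigned long long)host_mem,   /* 1 MiB aligned */
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &reg);
}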
5809 void kvm_arch_commit_memory_region(struct kvm *kvm, in kvm_arch_commit_memory_region() argument
5818 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
5819 old->npages * PAGE_SIZE); in kvm_arch_commit_memory_region()
5822 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
5823 old->npages * PAGE_SIZE); in kvm_arch_commit_memory_region()
5828 rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr, in kvm_arch_commit_memory_region()
5829 new->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
5830 new->npages * PAGE_SIZE); in kvm_arch_commit_memory_region()
5835 WARN(1, "Unknown KVM MR CHANGE: %d\n", change); in kvm_arch_commit_memory_region()
5855 return -ENODEV; in kvm_s390_init()
5859 pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n"); in kvm_s390_init()
5860 return -EINVAL; in kvm_s390_init()
5890 * Enable autoloading of the kvm module.
5891 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
5896 MODULE_ALIAS("devname:kvm");