Lines Matching refs:kvm
270 static int sca_switch_to_extended(struct kvm *kvm);
303 struct kvm *kvm; in kvm_clock_sync() local
308 list_for_each_entry(kvm, &vm_list, vm_list) { in kvm_clock_sync()
309 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_clock_sync()
312 kvm->arch.epoch = vcpu->arch.sie_block->epoch; in kvm_clock_sync()
313 kvm->arch.epdx = vcpu->arch.sie_block->epdx; in kvm_clock_sync()
550 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) in kvm_vm_ioctl_check_extension() argument
592 if (hpage && !kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_check_extension()
671 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) in kvm_arch_sync_dirty_log() argument
676 struct gmap *gmap = kvm->arch.gmap; in kvm_arch_sync_dirty_log()
692 mark_page_dirty(kvm, cur_gfn + i); in kvm_arch_sync_dirty_log()
707 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, in kvm_vm_ioctl_get_dirty_log() argument
715 if (kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_get_dirty_log()
718 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
724 r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot); in kvm_vm_ioctl_get_dirty_log()
735 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
739 static void icpt_operexc_on_all_vcpus(struct kvm *kvm) in icpt_operexc_on_all_vcpus() argument
744 kvm_for_each_vcpu(i, vcpu, kvm) { in icpt_operexc_on_all_vcpus()
749 int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) in kvm_vm_ioctl_enable_cap() argument
758 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP"); in kvm_vm_ioctl_enable_cap()
759 kvm->arch.use_irqchip = 1; in kvm_vm_ioctl_enable_cap()
763 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP"); in kvm_vm_ioctl_enable_cap()
764 kvm->arch.user_sigp = 1; in kvm_vm_ioctl_enable_cap()
768 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
769 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
772 set_kvm_facility(kvm->arch.model.fac_mask, 129); in kvm_vm_ioctl_enable_cap()
773 set_kvm_facility(kvm->arch.model.fac_list, 129); in kvm_vm_ioctl_enable_cap()
775 set_kvm_facility(kvm->arch.model.fac_mask, 134); in kvm_vm_ioctl_enable_cap()
776 set_kvm_facility(kvm->arch.model.fac_list, 134); in kvm_vm_ioctl_enable_cap()
779 set_kvm_facility(kvm->arch.model.fac_mask, 135); in kvm_vm_ioctl_enable_cap()
780 set_kvm_facility(kvm->arch.model.fac_list, 135); in kvm_vm_ioctl_enable_cap()
783 set_kvm_facility(kvm->arch.model.fac_mask, 148); in kvm_vm_ioctl_enable_cap()
784 set_kvm_facility(kvm->arch.model.fac_list, 148); in kvm_vm_ioctl_enable_cap()
787 set_kvm_facility(kvm->arch.model.fac_mask, 152); in kvm_vm_ioctl_enable_cap()
788 set_kvm_facility(kvm->arch.model.fac_list, 152); in kvm_vm_ioctl_enable_cap()
791 set_kvm_facility(kvm->arch.model.fac_mask, 192); in kvm_vm_ioctl_enable_cap()
792 set_kvm_facility(kvm->arch.model.fac_list, 192); in kvm_vm_ioctl_enable_cap()
797 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
798 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s", in kvm_vm_ioctl_enable_cap()
803 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
804 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
807 set_kvm_facility(kvm->arch.model.fac_mask, 64); in kvm_vm_ioctl_enable_cap()
808 set_kvm_facility(kvm->arch.model.fac_list, 64); in kvm_vm_ioctl_enable_cap()
811 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
812 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s", in kvm_vm_ioctl_enable_cap()
816 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
817 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
820 set_kvm_facility(kvm->arch.model.fac_mask, 72); in kvm_vm_ioctl_enable_cap()
821 set_kvm_facility(kvm->arch.model.fac_list, 72); in kvm_vm_ioctl_enable_cap()
824 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
825 VM_EVENT(kvm, 3, "ENABLE: AIS %s", in kvm_vm_ioctl_enable_cap()
830 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
831 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
834 set_kvm_facility(kvm->arch.model.fac_mask, 133); in kvm_vm_ioctl_enable_cap()
835 set_kvm_facility(kvm->arch.model.fac_list, 133); in kvm_vm_ioctl_enable_cap()
838 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
839 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s", in kvm_vm_ioctl_enable_cap()
843 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
844 if (kvm->created_vcpus) in kvm_vm_ioctl_enable_cap()
846 else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm)) in kvm_vm_ioctl_enable_cap()
850 mmap_write_lock(kvm->mm); in kvm_vm_ioctl_enable_cap()
851 kvm->mm->context.allow_gmap_hpage_1m = 1; in kvm_vm_ioctl_enable_cap()
852 mmap_write_unlock(kvm->mm); in kvm_vm_ioctl_enable_cap()
858 kvm->arch.use_skf = 0; in kvm_vm_ioctl_enable_cap()
859 kvm->arch.use_pfmfi = 0; in kvm_vm_ioctl_enable_cap()
861 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
862 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s", in kvm_vm_ioctl_enable_cap()
866 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI"); in kvm_vm_ioctl_enable_cap()
867 kvm->arch.user_stsi = 1; in kvm_vm_ioctl_enable_cap()
871 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0"); in kvm_vm_ioctl_enable_cap()
872 kvm->arch.user_instr0 = 1; in kvm_vm_ioctl_enable_cap()
873 icpt_operexc_on_all_vcpus(kvm); in kvm_vm_ioctl_enable_cap()
878 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
879 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
882 set_kvm_facility(kvm->arch.model.fac_mask, 11); in kvm_vm_ioctl_enable_cap()
883 set_kvm_facility(kvm->arch.model.fac_list, 11); in kvm_vm_ioctl_enable_cap()
886 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
887 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_CPU_TOPOLOGY %s", in kvm_vm_ioctl_enable_cap()
897 static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_mem_control() argument
904 VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes", in kvm_s390_get_mem_control()
905 kvm->arch.mem_limit); in kvm_s390_get_mem_control()
906 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr)) in kvm_s390_get_mem_control()
916 static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_mem_control() argument
926 VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support"); in kvm_s390_set_mem_control()
927 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
928 if (kvm->created_vcpus) in kvm_s390_set_mem_control()
930 else if (kvm->mm->context.allow_gmap_hpage_1m) in kvm_s390_set_mem_control()
933 kvm->arch.use_cmma = 1; in kvm_s390_set_mem_control()
935 kvm->arch.use_pfmfi = 0; in kvm_s390_set_mem_control()
938 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
945 if (!kvm->arch.use_cmma) in kvm_s390_set_mem_control()
948 VM_EVENT(kvm, 3, "%s", "RESET: CMMA states"); in kvm_s390_set_mem_control()
949 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
950 idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_mem_control()
951 s390_reset_cmma(kvm->arch.gmap->mm); in kvm_s390_set_mem_control()
952 srcu_read_unlock(&kvm->srcu, idx); in kvm_s390_set_mem_control()
953 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
959 if (kvm_is_ucontrol(kvm)) in kvm_s390_set_mem_control()
965 if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT && in kvm_s390_set_mem_control()
966 new_limit > kvm->arch.mem_limit) in kvm_s390_set_mem_control()
977 mutex_lock(&kvm->lock); in kvm_s390_set_mem_control()
978 if (!kvm->created_vcpus) { in kvm_s390_set_mem_control()
985 gmap_remove(kvm->arch.gmap); in kvm_s390_set_mem_control()
986 new->private = kvm; in kvm_s390_set_mem_control()
987 kvm->arch.gmap = new; in kvm_s390_set_mem_control()
991 mutex_unlock(&kvm->lock); in kvm_s390_set_mem_control()
992 VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit); in kvm_s390_set_mem_control()
993 VM_EVENT(kvm, 3, "New guest asce: 0x%pK", in kvm_s390_set_mem_control()
994 (void *) kvm->arch.gmap->asce); in kvm_s390_set_mem_control()
1006 void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm) in kvm_s390_vcpu_crypto_reset_all() argument
1011 kvm_s390_vcpu_block_all(kvm); in kvm_s390_vcpu_crypto_reset_all()
1013 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_s390_vcpu_crypto_reset_all()
1019 kvm_s390_vcpu_unblock_all(kvm); in kvm_s390_vcpu_crypto_reset_all()
1022 static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_set_crypto() argument
1024 mutex_lock(&kvm->lock); in kvm_s390_vm_set_crypto()
1027 if (!test_kvm_facility(kvm, 76)) { in kvm_s390_vm_set_crypto()
1028 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1032 kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_vm_set_crypto()
1033 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1034 kvm->arch.crypto.aes_kw = 1; in kvm_s390_vm_set_crypto()
1035 VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support"); in kvm_s390_vm_set_crypto()
1038 if (!test_kvm_facility(kvm, 76)) { in kvm_s390_vm_set_crypto()
1039 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1043 kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_vm_set_crypto()
1044 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1045 kvm->arch.crypto.dea_kw = 1; in kvm_s390_vm_set_crypto()
1046 VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support"); in kvm_s390_vm_set_crypto()
1049 if (!test_kvm_facility(kvm, 76)) { in kvm_s390_vm_set_crypto()
1050 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1053 kvm->arch.crypto.aes_kw = 0; in kvm_s390_vm_set_crypto()
1054 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
1055 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1056 VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support"); in kvm_s390_vm_set_crypto()
1059 if (!test_kvm_facility(kvm, 76)) { in kvm_s390_vm_set_crypto()
1060 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1063 kvm->arch.crypto.dea_kw = 0; in kvm_s390_vm_set_crypto()
1064 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0, in kvm_s390_vm_set_crypto()
1065 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_vm_set_crypto()
1066 VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support"); in kvm_s390_vm_set_crypto()
1070 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1073 kvm->arch.crypto.apie = 1; in kvm_s390_vm_set_crypto()
1077 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1080 kvm->arch.crypto.apie = 0; in kvm_s390_vm_set_crypto()
1083 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1087 kvm_s390_vcpu_crypto_reset_all(kvm); in kvm_s390_vm_set_crypto()
1088 mutex_unlock(&kvm->lock); in kvm_s390_vm_set_crypto()
1095 if (!vcpu->kvm->arch.use_zpci_interp) in kvm_s390_vcpu_pci_setup()
1102 void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm) in kvm_s390_vcpu_pci_enable_interp() argument
1107 lockdep_assert_held(&kvm->lock); in kvm_s390_vcpu_pci_enable_interp()
1116 kvm->arch.use_zpci_interp = 1; in kvm_s390_vcpu_pci_enable_interp()
1118 kvm_s390_vcpu_block_all(kvm); in kvm_s390_vcpu_pci_enable_interp()
1120 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_s390_vcpu_pci_enable_interp()
1125 kvm_s390_vcpu_unblock_all(kvm); in kvm_s390_vcpu_pci_enable_interp()
1128 static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req) in kvm_s390_sync_request_broadcast() argument
1133 kvm_for_each_vcpu(cx, vcpu, kvm) in kvm_s390_sync_request_broadcast()
1141 static int kvm_s390_vm_start_migration(struct kvm *kvm) in kvm_s390_vm_start_migration() argument
1149 if (kvm->arch.migration_mode) in kvm_s390_vm_start_migration()
1151 slots = kvm_memslots(kvm); in kvm_s390_vm_start_migration()
1155 if (!kvm->arch.use_cmma) { in kvm_s390_vm_start_migration()
1156 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
1172 atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages); in kvm_s390_vm_start_migration()
1173 kvm->arch.migration_mode = 1; in kvm_s390_vm_start_migration()
1174 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION); in kvm_s390_vm_start_migration()
1182 static int kvm_s390_vm_stop_migration(struct kvm *kvm) in kvm_s390_vm_stop_migration() argument
1185 if (!kvm->arch.migration_mode) in kvm_s390_vm_stop_migration()
1187 kvm->arch.migration_mode = 0; in kvm_s390_vm_stop_migration()
1188 if (kvm->arch.use_cmma) in kvm_s390_vm_stop_migration()
1189 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION); in kvm_s390_vm_stop_migration()
1193 static int kvm_s390_vm_set_migration(struct kvm *kvm, in kvm_s390_vm_set_migration() argument
1198 mutex_lock(&kvm->slots_lock); in kvm_s390_vm_set_migration()
1201 res = kvm_s390_vm_start_migration(kvm); in kvm_s390_vm_set_migration()
1204 res = kvm_s390_vm_stop_migration(kvm); in kvm_s390_vm_set_migration()
1209 mutex_unlock(&kvm->slots_lock); in kvm_s390_vm_set_migration()
1214 static int kvm_s390_vm_get_migration(struct kvm *kvm, in kvm_s390_vm_get_migration() argument
1217 u64 mig = kvm->arch.migration_mode; in kvm_s390_vm_get_migration()
1227 static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
1229 static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod_ext() argument
1236 if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx) in kvm_s390_set_tod_ext()
1238 __kvm_s390_set_tod_clock(kvm, &gtod); in kvm_s390_set_tod_ext()
1240 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx", in kvm_s390_set_tod_ext()
1246 static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod_high() argument
1256 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high); in kvm_s390_set_tod_high()
1261 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod_low() argument
1269 __kvm_s390_set_tod_clock(kvm, &gtod); in kvm_s390_set_tod_low()
1270 VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod); in kvm_s390_set_tod_low()
1274 static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_tod() argument
1281 mutex_lock(&kvm->lock); in kvm_s390_set_tod()
1286 if (kvm_s390_pv_is_protected(kvm)) { in kvm_s390_set_tod()
1293 ret = kvm_s390_set_tod_ext(kvm, attr); in kvm_s390_set_tod()
1296 ret = kvm_s390_set_tod_high(kvm, attr); in kvm_s390_set_tod()
1299 ret = kvm_s390_set_tod_low(kvm, attr); in kvm_s390_set_tod()
1307 mutex_unlock(&kvm->lock); in kvm_s390_set_tod()
1311 static void kvm_s390_get_tod_clock(struct kvm *kvm, in kvm_s390_get_tod_clock() argument
1320 gtod->tod = clk.tod + kvm->arch.epoch; in kvm_s390_get_tod_clock()
1322 if (test_kvm_facility(kvm, 139)) { in kvm_s390_get_tod_clock()
1323 gtod->epoch_idx = clk.ei + kvm->arch.epdx; in kvm_s390_get_tod_clock()
1331 static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod_ext() argument
1336 kvm_s390_get_tod_clock(kvm, &gtod); in kvm_s390_get_tod_ext()
1340 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx", in kvm_s390_get_tod_ext()
1345 static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod_high() argument
1352 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high); in kvm_s390_get_tod_high()
1357 static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod_low() argument
1361 gtod = kvm_s390_get_tod_clock_fast(kvm); in kvm_s390_get_tod_low()
1364 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod); in kvm_s390_get_tod_low()
1369 static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_tod() argument
1378 ret = kvm_s390_get_tod_ext(kvm, attr); in kvm_s390_get_tod()
1381 ret = kvm_s390_get_tod_high(kvm, attr); in kvm_s390_get_tod()
1384 ret = kvm_s390_get_tod_low(kvm, attr); in kvm_s390_get_tod()
1393 static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_processor() argument
1399 mutex_lock(&kvm->lock); in kvm_s390_set_processor()
1400 if (kvm->created_vcpus) { in kvm_s390_set_processor()
1411 kvm->arch.model.cpuid = proc->cpuid; in kvm_s390_set_processor()
1416 kvm->arch.model.ibc = unblocked_ibc; in kvm_s390_set_processor()
1418 kvm->arch.model.ibc = lowest_ibc; in kvm_s390_set_processor()
1420 kvm->arch.model.ibc = proc->ibc; in kvm_s390_set_processor()
1422 memcpy(kvm->arch.model.fac_list, proc->fac_list, in kvm_s390_set_processor()
1424 VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx", in kvm_s390_set_processor()
1425 kvm->arch.model.ibc, in kvm_s390_set_processor()
1426 kvm->arch.model.cpuid); in kvm_s390_set_processor()
1427 VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx", in kvm_s390_set_processor()
1428 kvm->arch.model.fac_list[0], in kvm_s390_set_processor()
1429 kvm->arch.model.fac_list[1], in kvm_s390_set_processor()
1430 kvm->arch.model.fac_list[2]); in kvm_s390_set_processor()
1435 mutex_unlock(&kvm->lock); in kvm_s390_set_processor()
1439 static int kvm_s390_set_processor_feat(struct kvm *kvm, in kvm_s390_set_processor_feat() argument
1451 mutex_lock(&kvm->lock); in kvm_s390_set_processor_feat()
1452 if (kvm->created_vcpus) { in kvm_s390_set_processor_feat()
1453 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_feat()
1456 bitmap_from_arr64(kvm->arch.cpu_feat, data.feat, KVM_S390_VM_CPU_FEAT_NR_BITS); in kvm_s390_set_processor_feat()
1457 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_feat()
1458 VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx", in kvm_s390_set_processor_feat()
1465 static int kvm_s390_set_processor_subfunc(struct kvm *kvm, in kvm_s390_set_processor_subfunc() argument
1468 mutex_lock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1469 if (kvm->created_vcpus) { in kvm_s390_set_processor_subfunc()
1470 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1474 if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr, in kvm_s390_set_processor_subfunc()
1476 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1479 mutex_unlock(&kvm->lock); in kvm_s390_set_processor_subfunc()
1481 VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1482 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0], in kvm_s390_set_processor_subfunc()
1483 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1], in kvm_s390_set_processor_subfunc()
1484 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2], in kvm_s390_set_processor_subfunc()
1485 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]); in kvm_s390_set_processor_subfunc()
1486 VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1487 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0], in kvm_s390_set_processor_subfunc()
1488 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]); in kvm_s390_set_processor_subfunc()
1489 VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1490 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0], in kvm_s390_set_processor_subfunc()
1491 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]); in kvm_s390_set_processor_subfunc()
1492 VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1493 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0], in kvm_s390_set_processor_subfunc()
1494 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]); in kvm_s390_set_processor_subfunc()
1495 VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1496 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0], in kvm_s390_set_processor_subfunc()
1497 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]); in kvm_s390_set_processor_subfunc()
1498 VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1499 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0], in kvm_s390_set_processor_subfunc()
1500 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]); in kvm_s390_set_processor_subfunc()
1501 VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1502 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0], in kvm_s390_set_processor_subfunc()
1503 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]); in kvm_s390_set_processor_subfunc()
1504 VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1505 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0], in kvm_s390_set_processor_subfunc()
1506 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]); in kvm_s390_set_processor_subfunc()
1507 VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1508 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0], in kvm_s390_set_processor_subfunc()
1509 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]); in kvm_s390_set_processor_subfunc()
1510 VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1511 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0], in kvm_s390_set_processor_subfunc()
1512 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]); in kvm_s390_set_processor_subfunc()
1513 VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1514 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0], in kvm_s390_set_processor_subfunc()
1515 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]); in kvm_s390_set_processor_subfunc()
1516 VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1517 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0], in kvm_s390_set_processor_subfunc()
1518 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]); in kvm_s390_set_processor_subfunc()
1519 VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1520 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0], in kvm_s390_set_processor_subfunc()
1521 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]); in kvm_s390_set_processor_subfunc()
1522 VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1523 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0], in kvm_s390_set_processor_subfunc()
1524 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]); in kvm_s390_set_processor_subfunc()
1525 VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1526 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0], in kvm_s390_set_processor_subfunc()
1527 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]); in kvm_s390_set_processor_subfunc()
1528 VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1529 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0], in kvm_s390_set_processor_subfunc()
1530 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1], in kvm_s390_set_processor_subfunc()
1531 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2], in kvm_s390_set_processor_subfunc()
1532 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]); in kvm_s390_set_processor_subfunc()
1533 VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_set_processor_subfunc()
1534 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0], in kvm_s390_set_processor_subfunc()
1535 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], in kvm_s390_set_processor_subfunc()
1536 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], in kvm_s390_set_processor_subfunc()
1537 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); in kvm_s390_set_processor_subfunc()
1551 static int kvm_s390_set_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_uv_feat() argument
1562 mutex_lock(&kvm->lock); in kvm_s390_set_uv_feat()
1563 if (kvm->created_vcpus) { in kvm_s390_set_uv_feat()
1564 mutex_unlock(&kvm->lock); in kvm_s390_set_uv_feat()
1567 kvm->arch.model.uv_feat_guest.feat = data; in kvm_s390_set_uv_feat()
1568 mutex_unlock(&kvm->lock); in kvm_s390_set_uv_feat()
1570 VM_EVENT(kvm, 3, "SET: guest UV-feat: 0x%16.16lx", data); in kvm_s390_set_uv_feat()
1575 static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_set_cpu_model() argument
1581 ret = kvm_s390_set_processor(kvm, attr); in kvm_s390_set_cpu_model()
1584 ret = kvm_s390_set_processor_feat(kvm, attr); in kvm_s390_set_cpu_model()
1587 ret = kvm_s390_set_processor_subfunc(kvm, attr); in kvm_s390_set_cpu_model()
1590 ret = kvm_s390_set_uv_feat(kvm, attr); in kvm_s390_set_cpu_model()
1596 static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_processor() argument
1606 proc->cpuid = kvm->arch.model.cpuid; in kvm_s390_get_processor()
1607 proc->ibc = kvm->arch.model.ibc; in kvm_s390_get_processor()
1608 memcpy(&proc->fac_list, kvm->arch.model.fac_list, in kvm_s390_get_processor()
1610 VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx", in kvm_s390_get_processor()
1611 kvm->arch.model.ibc, in kvm_s390_get_processor()
1612 kvm->arch.model.cpuid); in kvm_s390_get_processor()
1613 VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx", in kvm_s390_get_processor()
1614 kvm->arch.model.fac_list[0], in kvm_s390_get_processor()
1615 kvm->arch.model.fac_list[1], in kvm_s390_get_processor()
1616 kvm->arch.model.fac_list[2]); in kvm_s390_get_processor()
1624 static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_machine() argument
1636 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask, in kvm_s390_get_machine()
1640 VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx", in kvm_s390_get_machine()
1641 kvm->arch.model.ibc, in kvm_s390_get_machine()
1642 kvm->arch.model.cpuid); in kvm_s390_get_machine()
1643 VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx", in kvm_s390_get_machine()
1647 VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx", in kvm_s390_get_machine()
1658 static int kvm_s390_get_processor_feat(struct kvm *kvm, in kvm_s390_get_processor_feat() argument
1663 bitmap_to_arr64(data.feat, kvm->arch.cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS); in kvm_s390_get_processor_feat()
1666 VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx", in kvm_s390_get_processor_feat()
1673 static int kvm_s390_get_machine_feat(struct kvm *kvm, in kvm_s390_get_machine_feat() argument
1681 VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx", in kvm_s390_get_machine_feat()
1688 static int kvm_s390_get_processor_subfunc(struct kvm *kvm, in kvm_s390_get_processor_subfunc() argument
1691 if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs, in kvm_s390_get_processor_subfunc()
1695 VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1696 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0], in kvm_s390_get_processor_subfunc()
1697 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1], in kvm_s390_get_processor_subfunc()
1698 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2], in kvm_s390_get_processor_subfunc()
1699 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]); in kvm_s390_get_processor_subfunc()
1700 VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1701 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0], in kvm_s390_get_processor_subfunc()
1702 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]); in kvm_s390_get_processor_subfunc()
1703 VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1704 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0], in kvm_s390_get_processor_subfunc()
1705 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]); in kvm_s390_get_processor_subfunc()
1706 VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1707 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0], in kvm_s390_get_processor_subfunc()
1708 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]); in kvm_s390_get_processor_subfunc()
1709 VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1710 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0], in kvm_s390_get_processor_subfunc()
1711 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]); in kvm_s390_get_processor_subfunc()
1712 VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1713 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0], in kvm_s390_get_processor_subfunc()
1714 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]); in kvm_s390_get_processor_subfunc()
1715 VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1716 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0], in kvm_s390_get_processor_subfunc()
1717 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]); in kvm_s390_get_processor_subfunc()
1718 VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1719 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0], in kvm_s390_get_processor_subfunc()
1720 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]); in kvm_s390_get_processor_subfunc()
1721 VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1722 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0], in kvm_s390_get_processor_subfunc()
1723 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]); in kvm_s390_get_processor_subfunc()
1724 VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1725 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0], in kvm_s390_get_processor_subfunc()
1726 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]); in kvm_s390_get_processor_subfunc()
1727 VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1728 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0], in kvm_s390_get_processor_subfunc()
1729 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]); in kvm_s390_get_processor_subfunc()
1730 VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1731 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0], in kvm_s390_get_processor_subfunc()
1732 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]); in kvm_s390_get_processor_subfunc()
1733 VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1734 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0], in kvm_s390_get_processor_subfunc()
1735 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]); in kvm_s390_get_processor_subfunc()
1736 VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1737 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0], in kvm_s390_get_processor_subfunc()
1738 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]); in kvm_s390_get_processor_subfunc()
1739 VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1740 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0], in kvm_s390_get_processor_subfunc()
1741 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]); in kvm_s390_get_processor_subfunc()
1742 VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1743 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0], in kvm_s390_get_processor_subfunc()
1744 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1], in kvm_s390_get_processor_subfunc()
1745 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2], in kvm_s390_get_processor_subfunc()
1746 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]); in kvm_s390_get_processor_subfunc()
1747 VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_processor_subfunc()
1748 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0], in kvm_s390_get_processor_subfunc()
1749 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], in kvm_s390_get_processor_subfunc()
1750 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], in kvm_s390_get_processor_subfunc()
1751 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); in kvm_s390_get_processor_subfunc()
1756 static int kvm_s390_get_machine_subfunc(struct kvm *kvm, in kvm_s390_get_machine_subfunc() argument
1763 VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1768 VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1771 VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1774 VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1777 VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1780 VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1783 VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1786 VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1789 VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1792 VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1795 VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1798 VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1801 VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1804 VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1807 VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1810 VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1815 VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx", in kvm_s390_get_machine_subfunc()
1824 static int kvm_s390_get_processor_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_processor_uv_feat() argument
1827 unsigned long feat = kvm->arch.model.uv_feat_guest.feat; in kvm_s390_get_processor_uv_feat()
1831 VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat); in kvm_s390_get_processor_uv_feat()
1836 static int kvm_s390_get_machine_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_machine_uv_feat() argument
1846 VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat); in kvm_s390_get_machine_uv_feat()
1851 static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_get_cpu_model() argument
1857 ret = kvm_s390_get_processor(kvm, attr); in kvm_s390_get_cpu_model()
1860 ret = kvm_s390_get_machine(kvm, attr); in kvm_s390_get_cpu_model()
1863 ret = kvm_s390_get_processor_feat(kvm, attr); in kvm_s390_get_cpu_model()
1866 ret = kvm_s390_get_machine_feat(kvm, attr); in kvm_s390_get_cpu_model()
1869 ret = kvm_s390_get_processor_subfunc(kvm, attr); in kvm_s390_get_cpu_model()
1872 ret = kvm_s390_get_machine_subfunc(kvm, attr); in kvm_s390_get_cpu_model()
1875 ret = kvm_s390_get_processor_uv_feat(kvm, attr); in kvm_s390_get_cpu_model()
1878 ret = kvm_s390_get_machine_uv_feat(kvm, attr); in kvm_s390_get_cpu_model()
1895 static void kvm_s390_update_topology_change_report(struct kvm *kvm, bool val) in kvm_s390_update_topology_change_report() argument
1900 read_lock(&kvm->arch.sca_lock); in kvm_s390_update_topology_change_report()
1901 sca = kvm->arch.sca; in kvm_s390_update_topology_change_report()
1907 read_unlock(&kvm->arch.sca_lock); in kvm_s390_update_topology_change_report()
1910 static int kvm_s390_set_topo_change_indication(struct kvm *kvm, in kvm_s390_set_topo_change_indication() argument
1913 if (!test_kvm_facility(kvm, 11)) in kvm_s390_set_topo_change_indication()
1916 kvm_s390_update_topology_change_report(kvm, !!attr->attr); in kvm_s390_set_topo_change_indication()
1920 static int kvm_s390_get_topo_change_indication(struct kvm *kvm, in kvm_s390_get_topo_change_indication() argument
1925 if (!test_kvm_facility(kvm, 11)) in kvm_s390_get_topo_change_indication()
1928 read_lock(&kvm->arch.sca_lock); in kvm_s390_get_topo_change_indication()
1929 topo = ((struct bsca_block *)kvm->arch.sca)->utility.mtcr; in kvm_s390_get_topo_change_indication()
1930 read_unlock(&kvm->arch.sca_lock); in kvm_s390_get_topo_change_indication()
1935 static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_set_attr() argument
1941 ret = kvm_s390_set_mem_control(kvm, attr); in kvm_s390_vm_set_attr()
1944 ret = kvm_s390_set_tod(kvm, attr); in kvm_s390_vm_set_attr()
1947 ret = kvm_s390_set_cpu_model(kvm, attr); in kvm_s390_vm_set_attr()
1950 ret = kvm_s390_vm_set_crypto(kvm, attr); in kvm_s390_vm_set_attr()
1953 ret = kvm_s390_vm_set_migration(kvm, attr); in kvm_s390_vm_set_attr()
1956 ret = kvm_s390_set_topo_change_indication(kvm, attr); in kvm_s390_vm_set_attr()
1966 static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_get_attr() argument
1972 ret = kvm_s390_get_mem_control(kvm, attr); in kvm_s390_vm_get_attr()
1975 ret = kvm_s390_get_tod(kvm, attr); in kvm_s390_vm_get_attr()
1978 ret = kvm_s390_get_cpu_model(kvm, attr); in kvm_s390_vm_get_attr()
1981 ret = kvm_s390_vm_get_migration(kvm, attr); in kvm_s390_vm_get_attr()
1984 ret = kvm_s390_get_topo_change_indication(kvm, attr); in kvm_s390_vm_get_attr()
1994 static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) in kvm_s390_vm_has_attr() argument
2062 ret = test_kvm_facility(kvm, 11) ? 0 : -ENXIO; in kvm_s390_vm_has_attr()
2072 static int kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) in kvm_s390_get_skeys() argument
2094 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_get_skeys()
2096 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_get_skeys()
2106 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_get_skeys()
2120 static int kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) in kvm_s390_set_skeys() argument
2152 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_skeys()
2155 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_skeys()
2177 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_set_skeys()
2193 static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args, in kvm_s390_peek_cmma() argument
2200 hva = gfn_to_hva(kvm, cur_gfn); in kvm_s390_peek_cmma()
2207 if (get_pgste(kvm->mm, hva, &pgstev) < 0) in kvm_s390_peek_cmma()
2250 static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args, in kvm_s390_get_cmma() argument
2254 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_s390_get_cmma()
2261 ms = gfn_to_memslot(kvm, cur_gfn); in kvm_s390_get_cmma()
2270 hva = gfn_to_hva(kvm, cur_gfn); in kvm_s390_get_cmma()
2275 atomic64_dec(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma()
2276 if (get_pgste(kvm->mm, hva, &pgstev) < 0) in kvm_s390_get_cmma()
2293 ms = gfn_to_memslot(kvm, cur_gfn); in kvm_s390_get_cmma()
2309 static int kvm_s390_get_cmma_bits(struct kvm *kvm, in kvm_s390_get_cmma_bits() argument
2316 if (!kvm->arch.use_cmma) in kvm_s390_get_cmma_bits()
2323 if (!peek && !kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
2327 if (!bufsize || !kvm->mm->context.uses_cmm) { in kvm_s390_get_cmma_bits()
2332 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) { in kvm_s390_get_cmma_bits()
2341 mmap_read_lock(kvm->mm); in kvm_s390_get_cmma_bits()
2342 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_get_cmma_bits()
2344 ret = kvm_s390_peek_cmma(kvm, args, values, bufsize); in kvm_s390_get_cmma_bits()
2346 ret = kvm_s390_get_cmma(kvm, args, values, bufsize); in kvm_s390_get_cmma_bits()
2347 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_get_cmma_bits()
2348 mmap_read_unlock(kvm->mm); in kvm_s390_get_cmma_bits()
2350 if (kvm->arch.migration_mode) in kvm_s390_get_cmma_bits()
2351 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages); in kvm_s390_get_cmma_bits()
2367 static int kvm_s390_set_cmma_bits(struct kvm *kvm, in kvm_s390_set_cmma_bits() argument
2376 if (!kvm->arch.use_cmma) in kvm_s390_set_cmma_bits()
2398 mmap_read_lock(kvm->mm); in kvm_s390_set_cmma_bits()
2399 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_set_cmma_bits()
2401 hva = gfn_to_hva(kvm, args->start_gfn + i); in kvm_s390_set_cmma_bits()
2410 set_pgste_bits(kvm->mm, hva, mask, pgstev); in kvm_s390_set_cmma_bits()
2412 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_set_cmma_bits()
2413 mmap_read_unlock(kvm->mm); in kvm_s390_set_cmma_bits()
2415 if (!kvm->mm->context.uses_cmm) { in kvm_s390_set_cmma_bits()
2416 mmap_write_lock(kvm->mm); in kvm_s390_set_cmma_bits()
2417 kvm->mm->context.uses_cmm = 1; in kvm_s390_set_cmma_bits()
2418 mmap_write_unlock(kvm->mm); in kvm_s390_set_cmma_bits()
2438 int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc) in kvm_s390_cpus_from_pv() argument
2453 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_s390_cpus_from_pv()
2464 kvm_s390_gisa_enable(kvm); in kvm_s390_cpus_from_pv()
2479 static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc) in kvm_s390_cpus_to_pv() argument
2489 kvm_s390_gisa_disable(kvm); in kvm_s390_cpus_to_pv()
2491 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_s390_cpus_to_pv()
2499 kvm_s390_cpus_from_pv(kvm, &dummy, &dummy); in kvm_s390_cpus_to_pv()
2550 static int kvm_s390_pv_dmp(struct kvm *kvm, struct kvm_pv_cmd *cmd, in kvm_s390_pv_dmp() argument
2558 if (kvm->arch.pv.dumping) in kvm_s390_pv_dmp()
2565 kvm_s390_vcpu_block_all(kvm); in kvm_s390_pv_dmp()
2567 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), in kvm_s390_pv_dmp()
2569 KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP INIT: rc %x rrc %x", in kvm_s390_pv_dmp()
2572 kvm->arch.pv.dumping = true; in kvm_s390_pv_dmp()
2574 kvm_s390_vcpu_unblock_all(kvm); in kvm_s390_pv_dmp()
2580 if (!kvm->arch.pv.dumping) in kvm_s390_pv_dmp()
2588 r = kvm_s390_pv_dump_stor_state(kvm, result_buff, &dmp.gaddr, dmp.buff_len, in kvm_s390_pv_dmp()
2593 if (!kvm->arch.pv.dumping) in kvm_s390_pv_dmp()
2600 r = kvm_s390_pv_dump_complete(kvm, result_buff, in kvm_s390_pv_dmp()
2612 static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd) in kvm_s390_handle_pv() argument
2620 mutex_lock(&kvm->lock); in kvm_s390_handle_pv()
2625 if (kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2632 r = sca_switch_to_extended(kvm); in kvm_s390_handle_pv()
2640 r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2644 r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2646 kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy); in kvm_s390_handle_pv()
2649 set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2654 if (!kvm_s390_pv_is_protected(kvm) || !async_destroy) in kvm_s390_handle_pv()
2657 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2665 r = kvm_s390_pv_set_aside(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2668 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2675 r = kvm_s390_pv_deinit_aside_vm(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2679 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2682 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2690 r = kvm_s390_pv_deinit_cleanup_all(kvm, &cmd->rc, &cmd->rrc); in kvm_s390_handle_pv()
2693 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs); in kvm_s390_handle_pv()
2701 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2721 r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length, in kvm_s390_handle_pv()
2731 if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm)) in kvm_s390_handle_pv()
2738 r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak, in kvm_s390_handle_pv()
2744 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2747 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), in kvm_s390_handle_pv()
2749 KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc, in kvm_s390_handle_pv()
2755 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2758 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), in kvm_s390_handle_pv()
2760 KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x", in kvm_s390_handle_pv()
2766 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2769 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), in kvm_s390_handle_pv()
2771 KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x", in kvm_s390_handle_pv()
2819 if (!kvm_s390_pv_is_protected(kvm)) in kvm_s390_handle_pv()
2826 r = kvm_s390_pv_dmp(kvm, cmd, dmp); in kvm_s390_handle_pv()
2841 mutex_unlock(&kvm->lock); in kvm_s390_handle_pv()
2861 static int kvm_s390_vm_mem_op_abs(struct kvm *kvm, struct kvm_s390_mem_op *mop) in kvm_s390_vm_mem_op_abs() argument
2879 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_vm_mem_op_abs()
2881 if (kvm_is_error_gpa(kvm, mop->gaddr)) { in kvm_s390_vm_mem_op_abs()
2888 r = check_gpa_range(kvm, mop->gaddr, mop->size, acc_mode, mop->key); in kvm_s390_vm_mem_op_abs()
2892 r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf, in kvm_s390_vm_mem_op_abs()
2903 r = access_guest_abs_with_key(kvm, mop->gaddr, tmpbuf, in kvm_s390_vm_mem_op_abs()
2908 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_vm_mem_op_abs()
2914 static int kvm_s390_vm_mem_op_cmpxchg(struct kvm *kvm, struct kvm_s390_mem_op *mop) in kvm_s390_vm_mem_op_cmpxchg() argument
2941 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_s390_vm_mem_op_cmpxchg()
2943 if (kvm_is_error_gpa(kvm, mop->gaddr)) { in kvm_s390_vm_mem_op_cmpxchg()
2948 r = cmpxchg_guest_abs_with_key(kvm, mop->gaddr, mop->size, &old.quad, in kvm_s390_vm_mem_op_cmpxchg()
2954 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_s390_vm_mem_op_cmpxchg()
2958 static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop) in kvm_s390_vm_mem_op() argument
2969 if (kvm_s390_pv_get_handle(kvm)) in kvm_s390_vm_mem_op()
2975 return kvm_s390_vm_mem_op_abs(kvm, mop); in kvm_s390_vm_mem_op()
2977 return kvm_s390_vm_mem_op_cmpxchg(kvm, mop); in kvm_s390_vm_mem_op()
2985 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl() local
2997 r = kvm_s390_inject_vm(kvm, &s390int); in kvm_arch_vm_ioctl()
3004 if (kvm->arch.use_irqchip) { in kvm_arch_vm_ioctl()
3007 r = kvm_set_irq_routing(kvm, &routing, 0, 0); in kvm_arch_vm_ioctl()
3015 r = kvm_s390_vm_set_attr(kvm, &attr); in kvm_arch_vm_ioctl()
3022 r = kvm_s390_vm_get_attr(kvm, &attr); in kvm_arch_vm_ioctl()
3029 r = kvm_s390_vm_has_attr(kvm, &attr); in kvm_arch_vm_ioctl()
3039 r = kvm_s390_get_skeys(kvm, &args); in kvm_arch_vm_ioctl()
3049 r = kvm_s390_set_skeys(kvm, &args); in kvm_arch_vm_ioctl()
3058 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3059 r = kvm_s390_get_cmma_bits(kvm, &args); in kvm_arch_vm_ioctl()
3060 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3074 mutex_lock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3075 r = kvm_s390_set_cmma_bits(kvm, &args); in kvm_arch_vm_ioctl()
3076 mutex_unlock(&kvm->slots_lock); in kvm_arch_vm_ioctl()
3083 kvm_s390_set_user_cpu_state_ctrl(kvm); in kvm_arch_vm_ioctl()
3098 r = kvm_s390_handle_pv(kvm, &args); in kvm_arch_vm_ioctl()
3109 r = kvm_s390_vm_mem_op(kvm, &mem_op); in kvm_arch_vm_ioctl()
3124 r = kvm_s390_pci_zpci_op(kvm, &args); in kvm_arch_vm_ioctl()
3154 static void kvm_s390_set_crycb_format(struct kvm *kvm) in kvm_s390_set_crycb_format() argument
3156 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb; in kvm_s390_set_crycb_format()
3159 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK); in kvm_s390_set_crycb_format()
3162 if (!test_kvm_facility(kvm, 76)) in kvm_s390_set_crycb_format()
3166 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2; in kvm_s390_set_crycb_format()
3168 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1; in kvm_s390_set_crycb_format()
3186 void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm, in kvm_arch_crypto_set_masks() argument
3189 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb; in kvm_arch_crypto_set_masks()
3191 kvm_s390_vcpu_block_all(kvm); in kvm_arch_crypto_set_masks()
3193 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) { in kvm_arch_crypto_set_masks()
3196 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx", in kvm_arch_crypto_set_masks()
3199 VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx", in kvm_arch_crypto_set_masks()
3202 VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx", in kvm_arch_crypto_set_masks()
3210 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x", in kvm_arch_crypto_set_masks()
3219 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART); in kvm_arch_crypto_set_masks()
3220 kvm_s390_vcpu_unblock_all(kvm); in kvm_arch_crypto_set_masks()
3236 void kvm_arch_crypto_clear_masks(struct kvm *kvm) in kvm_arch_crypto_clear_masks() argument
3238 kvm_s390_vcpu_block_all(kvm); in kvm_arch_crypto_clear_masks()
3240 memset(&kvm->arch.crypto.crycb->apcb0, 0, in kvm_arch_crypto_clear_masks()
3241 sizeof(kvm->arch.crypto.crycb->apcb0)); in kvm_arch_crypto_clear_masks()
3242 memset(&kvm->arch.crypto.crycb->apcb1, 0, in kvm_arch_crypto_clear_masks()
3243 sizeof(kvm->arch.crypto.crycb->apcb1)); in kvm_arch_crypto_clear_masks()
3245 VM_EVENT(kvm, 3, "%s", "CLR CRYCB:"); in kvm_arch_crypto_clear_masks()
3247 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART); in kvm_arch_crypto_clear_masks()
3248 kvm_s390_vcpu_unblock_all(kvm); in kvm_arch_crypto_clear_masks()
3261 static void kvm_s390_crypto_init(struct kvm *kvm) in kvm_s390_crypto_init() argument
3263 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb; in kvm_s390_crypto_init()
3264 kvm_s390_set_crycb_format(kvm); in kvm_s390_crypto_init()
3265 init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem); in kvm_s390_crypto_init()
3267 if (!test_kvm_facility(kvm, 76)) in kvm_s390_crypto_init()
3271 kvm->arch.crypto.aes_kw = 1; in kvm_s390_crypto_init()
3272 kvm->arch.crypto.dea_kw = 1; in kvm_s390_crypto_init()
3273 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask, in kvm_s390_crypto_init()
3274 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask)); in kvm_s390_crypto_init()
3275 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask, in kvm_s390_crypto_init()
3276 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask)); in kvm_s390_crypto_init()
3279 static void sca_dispose(struct kvm *kvm) in sca_dispose() argument
3281 if (kvm->arch.use_esca) in sca_dispose()
3282 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block)); in sca_dispose()
3284 free_page((unsigned long)(kvm->arch.sca)); in sca_dispose()
3285 kvm->arch.sca = NULL; in sca_dispose()
3288 void kvm_arch_free_vm(struct kvm *kvm) in kvm_arch_free_vm() argument
3291 kvm_s390_pci_clear_list(kvm); in kvm_arch_free_vm()
3293 __kvm_arch_free_vm(kvm); in kvm_arch_free_vm()
3296 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) in kvm_arch_init_vm() argument
3322 rwlock_init(&kvm->arch.sca_lock); in kvm_arch_init_vm()
3324 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags); in kvm_arch_init_vm()
3325 if (!kvm->arch.sca) in kvm_arch_init_vm()
3331 kvm->arch.sca = (struct bsca_block *) in kvm_arch_init_vm()
3332 ((char *) kvm->arch.sca + sca_offset); in kvm_arch_init_vm()
3337 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long)); in kvm_arch_init_vm()
3338 if (!kvm->arch.dbf) in kvm_arch_init_vm()
3342 kvm->arch.sie_page2 = in kvm_arch_init_vm()
3344 if (!kvm->arch.sie_page2) in kvm_arch_init_vm()
3347 kvm->arch.sie_page2->kvm = kvm; in kvm_arch_init_vm()
3348 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list; in kvm_arch_init_vm()
3351 kvm->arch.model.fac_mask[i] = stfle_fac_list[i] & in kvm_arch_init_vm()
3354 kvm->arch.model.fac_list[i] = stfle_fac_list[i] & in kvm_arch_init_vm()
3357 kvm->arch.model.subfuncs = kvm_s390_available_subfunc; in kvm_arch_init_vm()
3360 set_kvm_facility(kvm->arch.model.fac_mask, 138); in kvm_arch_init_vm()
3361 set_kvm_facility(kvm->arch.model.fac_list, 138); in kvm_arch_init_vm()
3363 set_kvm_facility(kvm->arch.model.fac_mask, 74); in kvm_arch_init_vm()
3364 set_kvm_facility(kvm->arch.model.fac_list, 74); in kvm_arch_init_vm()
3366 set_kvm_facility(kvm->arch.model.fac_mask, 147); in kvm_arch_init_vm()
3367 set_kvm_facility(kvm->arch.model.fac_list, 147); in kvm_arch_init_vm()
3371 set_kvm_facility(kvm->arch.model.fac_mask, 65); in kvm_arch_init_vm()
3373 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid(); in kvm_arch_init_vm()
3374 kvm->arch.model.ibc = sclp.ibc & 0x0fff; in kvm_arch_init_vm()
3376 kvm->arch.model.uv_feat_guest.feat = 0; in kvm_arch_init_vm()
3378 kvm_s390_crypto_init(kvm); in kvm_arch_init_vm()
3381 mutex_lock(&kvm->lock); in kvm_arch_init_vm()
3382 kvm_s390_pci_init_list(kvm); in kvm_arch_init_vm()
3383 kvm_s390_vcpu_pci_enable_interp(kvm); in kvm_arch_init_vm()
3384 mutex_unlock(&kvm->lock); in kvm_arch_init_vm()
3387 mutex_init(&kvm->arch.float_int.ais_lock); in kvm_arch_init_vm()
3388 spin_lock_init(&kvm->arch.float_int.lock); in kvm_arch_init_vm()
3390 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]); in kvm_arch_init_vm()
3391 init_waitqueue_head(&kvm->arch.ipte_wq); in kvm_arch_init_vm()
3392 mutex_init(&kvm->arch.ipte_mutex); in kvm_arch_init_vm()
3394 debug_register_view(kvm->arch.dbf, &debug_sprintf_view); in kvm_arch_init_vm()
3395 VM_EVENT(kvm, 3, "vm created with type %lu", type); in kvm_arch_init_vm()
3398 kvm->arch.gmap = NULL; in kvm_arch_init_vm()
3399 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT; in kvm_arch_init_vm()
3402 kvm->arch.mem_limit = TASK_SIZE_MAX; in kvm_arch_init_vm()
3404 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX, in kvm_arch_init_vm()
3406 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1); in kvm_arch_init_vm()
3407 if (!kvm->arch.gmap) in kvm_arch_init_vm()
3409 kvm->arch.gmap->private = kvm; in kvm_arch_init_vm()
3410 kvm->arch.gmap->pfault_enabled = 0; in kvm_arch_init_vm()
3413 kvm->arch.use_pfmfi = sclp.has_pfmfi; in kvm_arch_init_vm()
3414 kvm->arch.use_skf = sclp.has_skey; in kvm_arch_init_vm()
3415 spin_lock_init(&kvm->arch.start_stop_lock); in kvm_arch_init_vm()
3416 kvm_s390_vsie_init(kvm); in kvm_arch_init_vm()
3418 kvm_s390_gisa_init(kvm); in kvm_arch_init_vm()
3419 INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup); in kvm_arch_init_vm()
3420 kvm->arch.pv.set_aside = NULL; in kvm_arch_init_vm()
3421 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid); in kvm_arch_init_vm()
3425 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_init_vm()
3426 debug_unregister(kvm->arch.dbf); in kvm_arch_init_vm()
3427 sca_dispose(kvm); in kvm_arch_init_vm()
3440 if (!kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
3442 kvm_s390_update_topology_change_report(vcpu->kvm, 1); in kvm_arch_vcpu_destroy()
3444 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_destroy()
3447 if (vcpu->kvm->arch.use_cmma) in kvm_arch_vcpu_destroy()
3455 void kvm_arch_destroy_vm(struct kvm *kvm) in kvm_arch_destroy_vm() argument
3459 kvm_destroy_vcpus(kvm); in kvm_arch_destroy_vm()
3460 sca_dispose(kvm); in kvm_arch_destroy_vm()
3461 kvm_s390_gisa_destroy(kvm); in kvm_arch_destroy_vm()
3467 kvm_s390_pv_deinit_cleanup_all(kvm, &rc, &rrc); in kvm_arch_destroy_vm()
3474 if (kvm->arch.pv.mmu_notifier.ops) in kvm_arch_destroy_vm()
3475 mmu_notifier_unregister(&kvm->arch.pv.mmu_notifier, kvm->mm); in kvm_arch_destroy_vm()
3477 debug_unregister(kvm->arch.dbf); in kvm_arch_destroy_vm()
3478 free_page((unsigned long)kvm->arch.sie_page2); in kvm_arch_destroy_vm()
3479 if (!kvm_is_ucontrol(kvm)) in kvm_arch_destroy_vm()
3480 gmap_remove(kvm->arch.gmap); in kvm_arch_destroy_vm()
3481 kvm_s390_destroy_adapters(kvm); in kvm_arch_destroy_vm()
3482 kvm_s390_clear_float_irqs(kvm); in kvm_arch_destroy_vm()
3483 kvm_s390_vsie_destroy(kvm); in kvm_arch_destroy_vm()
3484 KVM_EVENT(3, "vm 0x%pK destroyed", kvm); in kvm_arch_destroy_vm()
3493 vcpu->arch.gmap->private = vcpu->kvm; in __kvm_ucontrol_vcpu_init()
3502 read_lock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
3503 if (vcpu->kvm->arch.use_esca) { in sca_del_vcpu()
3504 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
3509 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_del_vcpu()
3514 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_del_vcpu()
3520 phys_addr_t sca_phys = virt_to_phys(vcpu->kvm->arch.sca); in sca_add_vcpu()
3527 read_lock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
3528 if (vcpu->kvm->arch.use_esca) { in sca_add_vcpu()
3529 struct esca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
3538 struct bsca_block *sca = vcpu->kvm->arch.sca; in sca_add_vcpu()
3546 read_unlock(&vcpu->kvm->arch.sca_lock); in sca_add_vcpu()
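The two branches above pick between the basic and the extended SCA layout. As a rough, illustrative-only model (field names and exact sizes are assumptions recalled from the kernel's bsca_block/esca_block, where the basic block holds 64 CPU slots and the extended one 248):

#include <stdint.h>
#include <stdio.h>

struct sca_entry_sketch {
        uint64_t reserved;
        uint64_t sda;                    /* SIE block (state description) address */
};

struct bsca_sketch {                     /* basic SCA: up to 64 vCPUs */
        uint64_t ipte_control;
        uint64_t mcn;                    /* one valid bit per CPU slot */
        struct sca_entry_sketch cpu[64];
};

struct esca_sketch {                     /* extended SCA: up to 248 vCPUs */
        uint64_t ipte_control;
        uint64_t mcn[4];                 /* 4 x 64 bits covers all slots */
        struct sca_entry_sketch cpu[248];
};

int main(void)
{
        printf("bsca: %zu bytes, esca: %zu bytes\n",
               sizeof(struct bsca_sketch), sizeof(struct esca_sketch));
        return 0;
}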
3567 static int sca_switch_to_extended(struct kvm *kvm) in sca_switch_to_extended() argument
3569 struct bsca_block *old_sca = kvm->arch.sca; in sca_switch_to_extended()
3576 if (kvm->arch.use_esca) in sca_switch_to_extended()
3587 kvm_s390_vcpu_block_all(kvm); in sca_switch_to_extended()
3588 write_lock(&kvm->arch.sca_lock); in sca_switch_to_extended()
3592 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) { in sca_switch_to_extended()
3597 kvm->arch.sca = new_sca; in sca_switch_to_extended()
3598 kvm->arch.use_esca = 1; in sca_switch_to_extended()
3600 write_unlock(&kvm->arch.sca_lock); in sca_switch_to_extended()
3601 kvm_s390_vcpu_unblock_all(kvm); in sca_switch_to_extended()
3605 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)", in sca_switch_to_extended()
3606 old_sca, kvm->arch.sca); in sca_switch_to_extended()
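sca_switch_to_extended() is the writer side of arch.sca_lock: the per-vCPU users above take the lock for reading, so the one-time BSCA-to-ESCA switch can block all vCPUs, take the lock for writing, copy the old table, and republish the pointer. A minimal user-space model of that pattern (names and sizes are illustrative):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_rwlock_t sca_lock = PTHREAD_RWLOCK_INITIALIZER;
static void *sca;                        /* the live table */
static size_t sca_size = 64;             /* stands in for sizeof(bsca_block) */

static int switch_to_extended_sketch(void)
{
        void *new_sca = calloc(1, 4096); /* stands in for an esca_block */

        if (!new_sca)
                return -1;
        /* kvm_s390_vcpu_block_all() would go here */
        pthread_rwlock_wrlock(&sca_lock);
        memcpy(new_sca, sca, sca_size);  /* carry over existing entries */
        free(sca);
        sca = new_sca;                   /* republish the pointer */
        sca_size = 4096;
        pthread_rwlock_unlock(&sca_lock);
        /* kvm_s390_vcpu_unblock_all() would go here */
        return 0;
}

int main(void)
{
        sca = calloc(1, sca_size);
        return (sca && switch_to_extended_sketch() == 0) ? 0 : 1;
}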
3610 static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id) in sca_can_add_vcpu() argument
3624 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm); in sca_can_add_vcpu()
3739 mutex_lock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
3741 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch; in kvm_arch_vcpu_postcreate()
3742 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx; in kvm_arch_vcpu_postcreate()
3744 mutex_unlock(&vcpu->kvm->lock); in kvm_arch_vcpu_postcreate()
3745 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_postcreate()
3746 vcpu->arch.gmap = vcpu->kvm->arch.gmap; in kvm_arch_vcpu_postcreate()
3749 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0) in kvm_arch_vcpu_postcreate()
3755 static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr) in kvm_has_pckmo_subfunc() argument
3757 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) && in kvm_has_pckmo_subfunc()
3763 static bool kvm_has_pckmo_ecc(struct kvm *kvm) in kvm_has_pckmo_ecc() argument
3766 return kvm_has_pckmo_subfunc(kvm, 32) || in kvm_has_pckmo_ecc()
3767 kvm_has_pckmo_subfunc(kvm, 33) || in kvm_has_pckmo_ecc()
3768 kvm_has_pckmo_subfunc(kvm, 34) || in kvm_has_pckmo_ecc()
3769 kvm_has_pckmo_subfunc(kvm, 40) || in kvm_has_pckmo_ecc()
3770 kvm_has_pckmo_subfunc(kvm, 41); in kvm_has_pckmo_ecc()
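kvm_has_pckmo_subfunc() relies on test_bit_inv(), which follows the s390 convention of numbering bits from the most significant end of each 64-bit word; kvm_has_pckmo_ecc() then just ORs together the checks for PCKMO function codes 32-34, 40 and 41 (the ECC-related ones, per the refs above). A stand-alone sketch of the inverted bit test:

#include <assert.h>
#include <stdint.h>

/* Bit 0 is the MSB of word 0, bit 63 its LSB, bit 64 the MSB of word 1. */
static int test_bit_inv_sketch(unsigned long nr, const uint64_t *addr)
{
        return (addr[nr / 64] >> (63 - (nr % 64))) & 1;
}

int main(void)
{
        uint64_t mask[2] = { 1ULL << 63, 0 };    /* only "bit 0" set, MSB-first */

        assert(test_bit_inv_sketch(0, mask));
        assert(!test_bit_inv_sketch(63, mask));
        return 0;
}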
3780 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76)) in kvm_s390_vcpu_crypto_setup()
3783 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; in kvm_s390_vcpu_crypto_setup()
3788 if (vcpu->kvm->arch.crypto.apie) in kvm_s390_vcpu_crypto_setup()
3792 if (vcpu->kvm->arch.crypto.aes_kw) { in kvm_s390_vcpu_crypto_setup()
3795 if (kvm_has_pckmo_ecc(vcpu->kvm)) in kvm_s390_vcpu_crypto_setup()
3799 if (vcpu->kvm->arch.crypto.dea_kw) in kvm_s390_vcpu_crypto_setup()
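The crypto setup above accumulates SIE control-block bits from VM-wide state, with the ECC bit nested under the AES wrapping-key case (visible in the refs: the kvm_has_pckmo_ecc() check sits inside the aes_kw branch, since ECC keys are wrapped with the AES key). A reduced sketch; the bit names and values are invented stand-ins for the kernel's ECB3_*/ECD_* constants:

#include <stdint.h>

#define ECB3_AES_SK     0x04    /* illustrative values only */
#define ECB3_DEA_SK     0x08
#define ECD_ECC_SK      0x10

struct crypto_state_sketch { int aes_kw, dea_kw, has_pckmo_ecc; };

static void crypto_setup_sketch(const struct crypto_state_sketch *c,
                                uint8_t *ecb3, uint8_t *ecd)
{
        *ecb3 = 0;
        *ecd = 0;
        if (c->aes_kw) {
                *ecb3 |= ECB3_AES_SK;           /* AES wrapping key in use */
                if (c->has_pckmo_ecc)
                        *ecd |= ECD_ECC_SK;     /* ECC only with AES kw */
        }
        if (c->dea_kw)
                *ecb3 |= ECB3_DEA_SK;           /* DEA wrapping key in use */
}

int main(void)
{
        struct crypto_state_sketch c = { 1, 1, 1 };
        uint8_t ecb3, ecd;

        crypto_setup_sketch(&c, &ecb3, &ecd);
        return (ecb3 == (ECB3_AES_SK | ECB3_DEA_SK) && ecd == ECD_ECC_SK) ? 0 : 1;
}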
3822 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model; in kvm_s390_vcpu_setup_model()
3825 if (test_kvm_facility(vcpu->kvm, 7)) in kvm_s390_vcpu_setup_model()
3838 if (test_kvm_facility(vcpu->kvm, 78)) in kvm_s390_vcpu_setup()
3840 else if (test_kvm_facility(vcpu->kvm, 8)) in kvm_s390_vcpu_setup()
3848 if (test_kvm_facility(vcpu->kvm, 9)) in kvm_s390_vcpu_setup()
3850 if (test_kvm_facility(vcpu->kvm, 11)) in kvm_s390_vcpu_setup()
3852 if (test_kvm_facility(vcpu->kvm, 73)) in kvm_s390_vcpu_setup()
3854 if (!kvm_is_ucontrol(vcpu->kvm)) in kvm_s390_vcpu_setup()
3857 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi) in kvm_s390_vcpu_setup()
3859 if (test_kvm_facility(vcpu->kvm, 130)) in kvm_s390_vcpu_setup()
3870 if (test_kvm_facility(vcpu->kvm, 129)) { in kvm_s390_vcpu_setup()
3874 if (test_kvm_facility(vcpu->kvm, 139)) in kvm_s390_vcpu_setup()
3876 if (test_kvm_facility(vcpu->kvm, 156)) in kvm_s390_vcpu_setup()
3891 if (vcpu->kvm->arch.use_cmma) { in kvm_s390_vcpu_setup()
3905 mutex_lock(&vcpu->kvm->lock); in kvm_s390_vcpu_setup()
3906 if (kvm_s390_pv_is_protected(vcpu->kvm)) { in kvm_s390_vcpu_setup()
3911 mutex_unlock(&vcpu->kvm->lock); in kvm_s390_vcpu_setup()
3916 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) in kvm_arch_vcpu_precreate() argument
3918 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id)) in kvm_arch_vcpu_precreate()
3942 vcpu->arch.sie_block->gd = kvm_s390_get_gisa_desc(vcpu->kvm); in kvm_arch_vcpu_create()
3955 if (test_kvm_facility(vcpu->kvm, 64)) in kvm_arch_vcpu_create()
3957 if (test_kvm_facility(vcpu->kvm, 82)) in kvm_arch_vcpu_create()
3959 if (test_kvm_facility(vcpu->kvm, 133)) in kvm_arch_vcpu_create()
3961 if (test_kvm_facility(vcpu->kvm, 156)) in kvm_arch_vcpu_create()
3971 if (kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_create()
3977 VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", in kvm_arch_vcpu_create()
3985 kvm_s390_update_topology_change_report(vcpu->kvm, 1); in kvm_arch_vcpu_create()
3989 if (kvm_is_ucontrol(vcpu->kvm)) in kvm_arch_vcpu_create()
3998 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask); in kvm_arch_vcpu_runnable()
4057 struct kvm *kvm = gmap->private; in kvm_gmap_notifier() local
4067 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_gmap_notifier()
4205 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) in kvm_arch_vcpu_ioctl_normal_reset()
4432 kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm); in kvm_arch_vcpu_ioctl_set_mpstate()
4529 if ((vcpu->kvm->arch.use_cmma) && in kvm_s390_handle_requests()
4530 (vcpu->kvm->mm->context.uses_cmm)) in kvm_s390_handle_requests()
4541 static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod) in __kvm_s390_set_tod_clock() argument
4551 kvm->arch.epoch = gtod->tod - clk.tod; in __kvm_s390_set_tod_clock()
4552 kvm->arch.epdx = 0; in __kvm_s390_set_tod_clock()
4553 if (test_kvm_facility(kvm, 139)) { in __kvm_s390_set_tod_clock()
4554 kvm->arch.epdx = gtod->epoch_idx - clk.ei; in __kvm_s390_set_tod_clock()
4555 if (kvm->arch.epoch > gtod->tod) in __kvm_s390_set_tod_clock()
4556 kvm->arch.epdx -= 1; in __kvm_s390_set_tod_clock()
4559 kvm_s390_vcpu_block_all(kvm); in __kvm_s390_set_tod_clock()
4560 kvm_for_each_vcpu(i, vcpu, kvm) { in __kvm_s390_set_tod_clock()
4561 vcpu->arch.sie_block->epoch = kvm->arch.epoch; in __kvm_s390_set_tod_clock()
4562 vcpu->arch.sie_block->epdx = kvm->arch.epdx; in __kvm_s390_set_tod_clock()
4565 kvm_s390_vcpu_unblock_all(kvm); in __kvm_s390_set_tod_clock()
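The epoch computation above is a two-word subtraction done by hand: the 64-bit difference gtod->tod - clk.tod may wrap, and a wrap is detectable afterwards because the difference then exceeds the minuend, in which case the epoch index (only maintained when facility 139, the multiple-epoch facility, is present) absorbs the borrow. A stand-alone sketch with illustrative field names:

#include <assert.h>
#include <stdint.h>

struct tod_sketch { uint64_t tod; uint8_t ei; };

static void set_epoch_sketch(const struct tod_sketch *guest,
                             const struct tod_sketch *host,
                             uint64_t *epoch, uint8_t *epdx)
{
        *epoch = guest->tod - host->tod;        /* may wrap */
        *epdx = guest->ei - host->ei;
        if (*epoch > guest->tod)                /* a borrow occurred */
                *epdx -= 1;
}

int main(void)
{
        struct tod_sketch guest = { .tod = 5, .ei = 1 };
        struct tod_sketch host = { .tod = 10, .ei = 0 };
        uint64_t epoch;
        uint8_t epdx;

        set_epoch_sketch(&guest, &host, &epoch, &epdx);
        assert(epoch == (uint64_t)-5 && epdx == 0);     /* borrow consumed */
        return 0;
}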
4569 int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod) in kvm_s390_try_set_tod_clock() argument
4571 if (!mutex_trylock(&kvm->lock)) in kvm_s390_try_set_tod_clock()
4573 __kvm_s390_set_tod_clock(kvm, gtod); in kvm_s390_try_set_tod_clock()
4574 mutex_unlock(&kvm->lock); in kvm_s390_try_set_tod_clock()
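kvm_s390_try_set_tod_clock() deliberately uses mutex_trylock(): the caller sits in an interception handler and can back off and let the guest retry, rather than sleeping on kvm->lock. The same pattern in user space:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;

static bool try_set_tod_clock_sketch(void (*set_clock)(void))
{
        if (pthread_mutex_trylock(&vm_lock) != 0)
                return false;           /* contended: caller must retry */
        set_clock();
        pthread_mutex_unlock(&vm_lock);
        return true;
}

static void noop(void) {}

int main(void)
{
        return try_set_tod_clock_sketch(noop) ? 0 : 1;
}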
4607 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti)); in __kvm_inject_pfault_token()
4661 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr)); in kvm_arch_setup_async_pf()
4686 if (!kvm_is_ucontrol(vcpu->kvm)) { in vcpu_pre_run()
4701 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask); in vcpu_pre_run()
4784 } else if (kvm_is_ucontrol(vcpu->kvm)) { in vcpu_post_run()
4896 test_kvm_facility(vcpu->kvm, 64) && in sync_regs_fmt2()
4907 test_kvm_facility(vcpu->kvm, 133) && in sync_regs_fmt2()
4916 test_kvm_facility(vcpu->kvm, 82)) { in sync_regs_fmt2()
5048 if (vcpu->kvm->arch.pv.dumping) in kvm_arch_vcpu_ioctl_run()
5072 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { in kvm_arch_vcpu_ioctl_run()
5191 static void __disable_ibs_on_all_vcpus(struct kvm *kvm) in __disable_ibs_on_all_vcpus() argument
5196 kvm_for_each_vcpu(i, vcpu, kvm) { in __disable_ibs_on_all_vcpus()
5218 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
5219 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_start()
5225 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
5231 if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i))) in kvm_s390_vcpu_start()
5244 __disable_ibs_on_all_vcpus(vcpu->kvm); in kvm_s390_vcpu_start()
5260 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_start()
5274 spin_lock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
5275 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); in kvm_s390_vcpu_stop()
5281 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
5298 struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i); in kvm_s390_vcpu_stop()
5314 spin_unlock(&vcpu->kvm->arch.start_stop_lock); in kvm_s390_vcpu_stop()
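Both kvm_s390_vcpu_start() and kvm_s390_vcpu_stop() count, under arch.start_stop_lock, how many of the online vCPUs are currently started; the count decides whether IBS (the interruption-blocking-state shortcut, which only pays off while a single vCPU runs) gets switched on or off. A stand-alone model of that bookkeeping; names are illustrative, and the real code derives the count by scanning vCPUs with is_vcpu_stopped() rather than keeping a counter:

#include <pthread.h>

static pthread_mutex_t start_stop_lock = PTHREAD_MUTEX_INITIALIZER;
static int started_vcpus;
static int ibs_enabled;

static void vcpu_start_sketch(void)
{
        pthread_mutex_lock(&start_stop_lock);
        if (started_vcpus == 0)
                ibs_enabled = 1;        /* lone runner: IBS pays off */
        else if (started_vcpus == 1)
                ibs_enabled = 0;        /* second runner: drop IBS everywhere */
        started_vcpus++;
        pthread_mutex_unlock(&start_stop_lock);
}

static void vcpu_stop_sketch(void)
{
        pthread_mutex_lock(&start_stop_lock);
        started_vcpus--;
        if (started_vcpus == 1)
                ibs_enabled = 1;        /* back to a lone runner */
        pthread_mutex_unlock(&start_stop_lock);
}

int main(void)
{
        vcpu_start_sketch();
        vcpu_start_sketch();
        vcpu_stop_sketch();
        return ibs_enabled ? 0 : 1;
}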
5328 if (!vcpu->kvm->arch.css_support) { in kvm_vcpu_ioctl_enable_cap()
5329 vcpu->kvm->arch.css_support = 1; in kvm_vcpu_ioctl_enable_cap()
5330 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support"); in kvm_vcpu_ioctl_enable_cap()
5331 trace_kvm_s390_enable_css(vcpu->kvm); in kvm_vcpu_ioctl_enable_cap()
5435 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_s390_vcpu_memsida_op()
5451 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvm_s390_vcpu_memsida_op()
5508 if (!vcpu->kvm->arch.pv.dumping) in kvm_s390_handle_pv_vcpu_dump()
5555 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5557 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
5623 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
5640 if (!kvm_is_ucontrol(vcpu->kvm)) { in kvm_arch_vcpu_ioctl()
5745 && (kvm_is_ucontrol(vcpu->kvm))) { in kvm_arch_vcpu_fault()
5754 bool kvm_arch_irqchip_in_kernel(struct kvm *kvm) in kvm_arch_irqchip_in_kernel() argument
5760 int kvm_arch_prepare_memory_region(struct kvm *kvm, in kvm_arch_prepare_memory_region() argument
5768 if (kvm_s390_pv_get_handle(kvm)) in kvm_arch_prepare_memory_region()
5786 if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit) in kvm_arch_prepare_memory_region()
5790 if (!kvm->arch.migration_mode) in kvm_arch_prepare_memory_region()
5803 WARN(kvm_s390_vm_stop_migration(kvm), in kvm_arch_prepare_memory_region()
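Besides the arch.mem_limit check visible above, kvm_arch_prepare_memory_region() requires (if memory serves, so treat the alignment part as an assumption) that a slot's userspace address and size fall on 1 MB segment boundaries, since gmap maps guest storage at segment granularity. A sketch of the checks; PAGE_SIZE and the limit value are stand-ins:

#include <stdint.h>

#define PAGE_SIZE_SK    4096ULL
#define SEG_MASK        0xfffffULL      /* low 20 bits: 1 MB granularity */

static int prepare_memslot_sketch(uint64_t userspace_addr,
                                  uint64_t base_gfn, uint64_t npages,
                                  uint64_t mem_limit)
{
        uint64_t size = npages * PAGE_SIZE_SK;

        if (userspace_addr & SEG_MASK)
                return -1;              /* not segment aligned */
        if (size & SEG_MASK)
                return -1;              /* does not end on a segment */
        if (base_gfn * PAGE_SIZE_SK + size > mem_limit)
                return -1;              /* beyond arch.mem_limit */
        return 0;
}

int main(void)
{
        /* 1 MB slot at guest frame 0, backed by a 1 MB-aligned mapping */
        return prepare_memslot_sketch(0x100000, 0, 256, 1ULL << 20) ? 1 : 0;
}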
5809 void kvm_arch_commit_memory_region(struct kvm *kvm, in kvm_arch_commit_memory_region() argument
5818 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
5822 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
5828 rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr, in kvm_arch_commit_memory_region()
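The commit side dispatches on the kind of memslot change: a DELETE unmaps the old range, a CREATE maps the new one, and a MOVE is the former falling through into the latter, which matches the two unmap refs and one map ref above. A reduced stand-alone sketch with stand-in helpers and constants for the gmap API:

#include <stdio.h>

enum mr_change_sketch { MR_CREATE, MR_DELETE, MR_MOVE };

static int unmap_segment_sketch(unsigned long gaddr, unsigned long len)
{
        printf("unmap guest 0x%lx (+0x%lx)\n", gaddr, len);
        return 0;
}

static int map_segment_sketch(unsigned long uaddr, unsigned long gaddr,
                              unsigned long len)
{
        printf("map user 0x%lx -> guest 0x%lx (+0x%lx)\n", uaddr, gaddr, len);
        return 0;
}

static int commit_sketch(enum mr_change_sketch change)
{
        int rc = 0;

        switch (change) {
        case MR_DELETE:
                rc = unmap_segment_sketch(0x0, 0x100000);
                break;
        case MR_MOVE:
                rc = unmap_segment_sketch(0x0, 0x100000);
                if (rc)
                        break;
                /* fall through: a move also maps the new location */
        case MR_CREATE:
                rc = map_segment_sketch(0x200000, 0x100000, 0x100000);
                break;
        }
        return rc;
}

int main(void)
{
        return commit_sketch(MR_MOVE);
}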