kvm-s390.c (0b5eca67bd2d0e6f6d0ccdc316aced0cc4bf2e9f) vs. kvm-s390.c (19114beb73f774e466d9e39b8e8b961812c9f881)
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * hosting IBM Z kernel virtual machines (s390x)
4 *
5 * Copyright IBM Corp. 2008, 2018
6 *
1/*
2 * hosting zSeries kernel virtual machines
3 *
4 * Copyright IBM Corp. 2008, 2009
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
14 * Jason J. Herne <jjherne@us.ibm.com>
15 */
16
17#include <linux/compiler.h>

--- 67 unchanged lines hidden ---

85 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
86 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
87 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
88 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
89 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
90 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
91 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
92 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Christian Borntraeger <borntraeger@de.ibm.com>
9 * Heiko Carstens <heiko.carstens@de.ibm.com>
10 * Christian Ehrhardt <ehrhardt@de.ibm.com>
11 * Jason J. Herne <jjherne@us.ibm.com>
12 */
13
14#include <linux/compiler.h>

--- 67 unchanged lines hidden ---

82 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
83 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
84 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
85 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
86 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
87 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
88 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
89 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
90 { "instruction_epsw", VCPU_STAT(instruction_epsw) },
91 { "instruction_gs", VCPU_STAT(instruction_gs) },
92 { "instruction_io_other", VCPU_STAT(instruction_io_other) },
93 { "instruction_lpsw", VCPU_STAT(instruction_lpsw) },
94 { "instruction_lpswe", VCPU_STAT(instruction_lpswe) },
93 { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
95 { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
96 { "instruction_ptff", VCPU_STAT(instruction_ptff) },
94 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
97 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
98 { "instruction_sck", VCPU_STAT(instruction_sck) },
99 { "instruction_sckpf", VCPU_STAT(instruction_sckpf) },
95 { "instruction_spx", VCPU_STAT(instruction_spx) },
96 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
97 { "instruction_stap", VCPU_STAT(instruction_stap) },
100 { "instruction_spx", VCPU_STAT(instruction_spx) },
101 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
102 { "instruction_stap", VCPU_STAT(instruction_stap) },
98 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
103 { "instruction_iske", VCPU_STAT(instruction_iske) },
104 { "instruction_ri", VCPU_STAT(instruction_ri) },
105 { "instruction_rrbe", VCPU_STAT(instruction_rrbe) },
106 { "instruction_sske", VCPU_STAT(instruction_sske) },
99 { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
107 { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
100 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
101 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
102 { "instruction_essa", VCPU_STAT(instruction_essa) },
103 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
104 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
108 { "instruction_essa", VCPU_STAT(instruction_essa) },
109 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
110 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
111 { "instruction_tb", VCPU_STAT(instruction_tb) },
112 { "instruction_tpi", VCPU_STAT(instruction_tpi) },
105 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
113 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
114 { "instruction_tsch", VCPU_STAT(instruction_tsch) },
106 { "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
107 { "instruction_sie", VCPU_STAT(instruction_sie) },
108 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
109 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
110 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
111 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
112 { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
113 { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
114 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
115 { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
116 { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
117 { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
118 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
119 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
120 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
121 { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
122 { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
123 { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
115 { "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
116 { "instruction_sie", VCPU_STAT(instruction_sie) },
117 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
118 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
119 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
120 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
121 { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
122 { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
123 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
124 { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
125 { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
126 { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
127 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
128 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
129 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
130 { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
131 { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
132 { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
124 { "diagnose_10", VCPU_STAT(diagnose_10) },
125 { "diagnose_44", VCPU_STAT(diagnose_44) },
126 { "diagnose_9c", VCPU_STAT(diagnose_9c) },
127 { "diagnose_258", VCPU_STAT(diagnose_258) },
128 { "diagnose_308", VCPU_STAT(diagnose_308) },
129 { "diagnose_500", VCPU_STAT(diagnose_500) },
133 { "instruction_diag_10", VCPU_STAT(diagnose_10) },
134 { "instruction_diag_44", VCPU_STAT(diagnose_44) },
135 { "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
136 { "instruction_diag_258", VCPU_STAT(diagnose_258) },
137 { "instruction_diag_308", VCPU_STAT(diagnose_308) },
138 { "instruction_diag_500", VCPU_STAT(diagnose_500) },
139 { "instruction_diag_other", VCPU_STAT(diagnose_other) },
130 { NULL }
131};
132
133struct kvm_s390_tod_clock_ext {
134 __u8 epoch_idx;
135 __u64 tod;
136 __u8 reserved[7];
137} __packed;

--- 433 unchanged lines hidden ---

571 }
572 mutex_unlock(&kvm->lock);
573 VM_EVENT(kvm, 3, "ENABLE: AIS %s",
574 r ? "(not available)" : "(success)");
575 break;
576 case KVM_CAP_S390_GS:
577 r = -EINVAL;
578 mutex_lock(&kvm->lock);
140 { NULL }
141};
142
143struct kvm_s390_tod_clock_ext {
144 __u8 epoch_idx;
145 __u64 tod;
146 __u8 reserved[7];
147} __packed;

--- 433 unchanged lines hidden ---

581 }
582 mutex_unlock(&kvm->lock);
583 VM_EVENT(kvm, 3, "ENABLE: AIS %s",
584 r ? "(not available)" : "(success)");
585 break;
586 case KVM_CAP_S390_GS:
587 r = -EINVAL;
588 mutex_lock(&kvm->lock);
579 if (atomic_read(&kvm->online_vcpus)) {
589 if (kvm->created_vcpus) {
580 r = -EBUSY;
581 } else if (test_facility(133)) {
582 set_kvm_facility(kvm->arch.model.fac_mask, 133);
583 set_kvm_facility(kvm->arch.model.fac_list, 133);
584 r = 0;
585 }
586 mutex_unlock(&kvm->lock);
587 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",

--- 494 unchanged lines hidden ---

1082 mutex_unlock(&kvm->lock);
1083 return ret;
1084}
1085
1086static int kvm_s390_set_processor_feat(struct kvm *kvm,
1087 struct kvm_device_attr *attr)
1088{
1089 struct kvm_s390_vm_cpu_feat data;
590 r = -EBUSY;
591 } else if (test_facility(133)) {
592 set_kvm_facility(kvm->arch.model.fac_mask, 133);
593 set_kvm_facility(kvm->arch.model.fac_list, 133);
594 r = 0;
595 }
596 mutex_unlock(&kvm->lock);
597 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",

--- 494 unchanged lines hidden ---

1092 mutex_unlock(&kvm->lock);
1093 return ret;
1094}
1095
1096static int kvm_s390_set_processor_feat(struct kvm *kvm,
1097 struct kvm_device_attr *attr)
1098{
1099 struct kvm_s390_vm_cpu_feat data;
1090 int ret = -EBUSY;
1091
1092 if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
1093 return -EFAULT;
1094 if (!bitmap_subset((unsigned long *) data.feat,
1095 kvm_s390_available_cpu_feat,
1096 KVM_S390_VM_CPU_FEAT_NR_BITS))
1097 return -EINVAL;
1098
1099 mutex_lock(&kvm->lock);
1100
1101 if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
1102 return -EFAULT;
1103 if (!bitmap_subset((unsigned long *) data.feat,
1104 kvm_s390_available_cpu_feat,
1105 KVM_S390_VM_CPU_FEAT_NR_BITS))
1106 return -EINVAL;
1107
1108 mutex_lock(&kvm->lock);
1100 if (!atomic_read(&kvm->online_vcpus)) {
1101 bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
1102 KVM_S390_VM_CPU_FEAT_NR_BITS);
1103 ret = 0;
1109 if (kvm->created_vcpus) {
1110 mutex_unlock(&kvm->lock);
1111 return -EBUSY;
1104 }
1112 }
1113 bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
1114 KVM_S390_VM_CPU_FEAT_NR_BITS);
1105 mutex_unlock(&kvm->lock);
1115 mutex_unlock(&kvm->lock);
1106 return ret;
1116 VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1117 data.feat[0],
1118 data.feat[1],
1119 data.feat[2]);
1120 return 0;
1107}
1108
1109static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
1110 struct kvm_device_attr *attr)
1111{
1112 /*
1113 * Once supported by kernel + hw, we have to store the subfunctions
1114 * in kvm->arch and remember that user space configured them.

--- 85 unchanged lines hidden ---

1200 struct kvm_device_attr *attr)
1201{
1202 struct kvm_s390_vm_cpu_feat data;
1203
1204 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
1205 KVM_S390_VM_CPU_FEAT_NR_BITS);
1206 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1207 return -EFAULT;
1121}
1122
1123static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
1124 struct kvm_device_attr *attr)
1125{
1126 /*
1127 * Once supported by kernel + hw, we have to store the subfunctions
1128 * in kvm->arch and remember that user space configured them.

--- 85 unchanged lines hidden ---

1214 struct kvm_device_attr *attr)
1215{
1216 struct kvm_s390_vm_cpu_feat data;
1217
1218 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
1219 KVM_S390_VM_CPU_FEAT_NR_BITS);
1220 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1221 return -EFAULT;
1222 VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1223 data.feat[0],
1224 data.feat[1],
1225 data.feat[2]);
1208 return 0;
1209}
1210
1211static int kvm_s390_get_machine_feat(struct kvm *kvm,
1212 struct kvm_device_attr *attr)
1213{
1214 struct kvm_s390_vm_cpu_feat data;
1215
1216 bitmap_copy((unsigned long *) data.feat,
1217 kvm_s390_available_cpu_feat,
1218 KVM_S390_VM_CPU_FEAT_NR_BITS);
1219 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1220 return -EFAULT;
1226 return 0;
1227}
1228
1229static int kvm_s390_get_machine_feat(struct kvm *kvm,
1230 struct kvm_device_attr *attr)
1231{
1232 struct kvm_s390_vm_cpu_feat data;
1233
1234 bitmap_copy((unsigned long *) data.feat,
1235 kvm_s390_available_cpu_feat,
1236 KVM_S390_VM_CPU_FEAT_NR_BITS);
1237 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1238 return -EFAULT;
1239 VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1240 data.feat[0],
1241 data.feat[1],
1242 data.feat[2]);
1221 return 0;
1222}
1223
1224static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
1225 struct kvm_device_attr *attr)
1226{
1227 /*
1228 * Once we can actually configure subfunctions (kernel + hw support),

--- 672 unchanged lines hidden ---

1901 spin_unlock(&kvm_lock);
1902
1903 sprintf(debug_name, "kvm-%u", current->pid);
1904
1905 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
1906 if (!kvm->arch.dbf)
1907 goto out_err;
1908
1243 return 0;
1244}
1245
1246static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
1247 struct kvm_device_attr *attr)
1248{
1249 /*
1250 * Once we can actually configure subfunctions (kernel + hw support),

--- 672 unchanged lines hidden ---

1923 spin_unlock(&kvm_lock);
1924
1925 sprintf(debug_name, "kvm-%u", current->pid);
1926
1927 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
1928 if (!kvm->arch.dbf)
1929 goto out_err;
1930
1931 BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
1909 kvm->arch.sie_page2 =
1910 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1911 if (!kvm->arch.sie_page2)
1912 goto out_err;
1913
1914 /* Populate the facility mask initially. */
1915 memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
1916 sizeof(S390_lowcore.stfle_fac_list));

--- 385 unchanged lines hidden ---

2302 preempt_enable();
2303 return value;
2304}
2305
2306void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2307{
2308
2309 gmap_enable(vcpu->arch.enabled_gmap);
1932 kvm->arch.sie_page2 =
1933 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1934 if (!kvm->arch.sie_page2)
1935 goto out_err;
1936
1937 /* Populate the facility mask initially. */
1938 memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
1939 sizeof(S390_lowcore.stfle_fac_list));

--- 385 unchanged lines hidden ---

2325 preempt_enable();
2326 return value;
2327}
2328
2329void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2330{
2331
2332 gmap_enable(vcpu->arch.enabled_gmap);
2310 atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
2333 kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
2311 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
2312 __start_cpu_timer_accounting(vcpu);
2313 vcpu->cpu = cpu;
2314}
2315
2316void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2317{
2318 vcpu->cpu = -1;
2319 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
2320 __stop_cpu_timer_accounting(vcpu);
2334 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
2335 __start_cpu_timer_accounting(vcpu);
2336 vcpu->cpu = cpu;
2337}
2338
2339void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2340{
2341 vcpu->cpu = -1;
2342 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
2343 __stop_cpu_timer_accounting(vcpu);
2321 atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
2344 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
2322 vcpu->arch.enabled_gmap = gmap_get_enabled();
2323 gmap_disable(vcpu->arch.enabled_gmap);
2324
2325}
2326
2327static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
2328{
2329 /* this equals initial cpu reset in pop, but we don't switch to ESA */

--- 79 unchanged lines hidden ---

2409{
2410 int rc = 0;
2411
2412 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
2413 CPUSTAT_SM |
2414 CPUSTAT_STOPPED);
2415
2416 if (test_kvm_facility(vcpu->kvm, 78))
2345 vcpu->arch.enabled_gmap = gmap_get_enabled();
2346 gmap_disable(vcpu->arch.enabled_gmap);
2347
2348}
2349
2350static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
2351{
2352 /* this equals initial cpu reset in pop, but we don't switch to ESA */

--- 79 unchanged lines hidden ---
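Throughout the updated version of the file, open-coded atomic_or()/atomic_andnot()/atomic_read() accesses to vcpu->arch.sie_block->cpuflags are replaced by the kvm_s390_set_cpuflags(), kvm_s390_clear_cpuflags() and kvm_s390_test_cpuflags() helpers. A minimal sketch of what such helpers look like, assuming they are inline wrappers in arch/s390/kvm/kvm-s390.h (the exact upstream definitions may differ):

static inline void kvm_s390_set_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
        /* set the given CPUSTAT_* bits atomically in the SIE control block */
        atomic_or(flags, &vcpu->arch.sie_block->cpuflags);
}

static inline void kvm_s390_clear_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
        /* clear the given CPUSTAT_* bits atomically */
        atomic_andnot(flags, &vcpu->arch.sie_block->cpuflags);
}

static inline bool kvm_s390_test_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
        /* true only if all requested CPUSTAT_* bits are currently set */
        return (atomic_read(&vcpu->arch.sie_block->cpuflags) & flags) == flags;
}

Centralizing the flag manipulation this way keeps the atomic access pattern in one place and lets callers name only the CPUSTAT_* bits they care about.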

2432{
2433 int rc = 0;
2434
2435 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
2436 CPUSTAT_SM |
2437 CPUSTAT_STOPPED);
2438
2439 if (test_kvm_facility(vcpu->kvm, 78))
2417 atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
2440 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
2418 else if (test_kvm_facility(vcpu->kvm, 8))
2441 else if (test_kvm_facility(vcpu->kvm, 8))
2419 atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
2442 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
2420
2421 kvm_s390_vcpu_setup_model(vcpu);
2422
2423 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
2424 if (MACHINE_HAS_ESOP)
2425 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
2426 if (test_kvm_facility(vcpu->kvm, 9))
2427 vcpu->arch.sie_block->ecb |= ECB_SRSI;

--- 20 unchanged lines hidden ---

2448 if (test_kvm_facility(vcpu->kvm, 139))
2449 vcpu->arch.sie_block->ecd |= ECD_MEF;
2450
2451 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
2452 | SDNXC;
2453 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
2454
2455 if (sclp.has_kss)
2443
2444 kvm_s390_vcpu_setup_model(vcpu);
2445
2446 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
2447 if (MACHINE_HAS_ESOP)
2448 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
2449 if (test_kvm_facility(vcpu->kvm, 9))
2450 vcpu->arch.sie_block->ecb |= ECB_SRSI;

--- 20 unchanged lines hidden ---

2471 if (test_kvm_facility(vcpu->kvm, 139))
2472 vcpu->arch.sie_block->ecd |= ECD_MEF;
2473
2474 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
2475 | SDNXC;
2476 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
2477
2478 if (sclp.has_kss)
2456 atomic_or(CPUSTAT_KSS, &vcpu->arch.sie_block->cpuflags);
2479 kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
2457 else
2458 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
2459
2460 if (vcpu->kvm->arch.use_cmma) {
2461 rc = kvm_s390_vcpu_setup_cmma(vcpu);
2462 if (rc)
2463 return rc;
2464 }

--- 30 unchanged lines hidden ---

2495 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
2496
2497 /* the real guest size will always be smaller than msl */
2498 vcpu->arch.sie_block->mso = 0;
2499 vcpu->arch.sie_block->msl = sclp.hamax;
2500
2501 vcpu->arch.sie_block->icpua = id;
2502 spin_lock_init(&vcpu->arch.local_int.lock);
2480 else
2481 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
2482
2483 if (vcpu->kvm->arch.use_cmma) {
2484 rc = kvm_s390_vcpu_setup_cmma(vcpu);
2485 if (rc)
2486 return rc;
2487 }

--- 30 unchanged lines hidden ---

2518 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
2519
2520 /* the real guest size will always be smaller than msl */
2521 vcpu->arch.sie_block->mso = 0;
2522 vcpu->arch.sie_block->msl = sclp.hamax;
2523
2524 vcpu->arch.sie_block->icpua = id;
2525 spin_lock_init(&vcpu->arch.local_int.lock);
2503 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
2504 vcpu->arch.local_int.wq = &vcpu->wq;
2505 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
2506 seqcount_init(&vcpu->arch.cputm_seqcount);
2507
2508 rc = kvm_vcpu_init(vcpu, kvm, id);
2509 if (rc)
2510 goto out_free_sie_block;
2511 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
2512 vcpu->arch.sie_block);
2513 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

--- 40 unchanged lines hidden ---

2554}
2555
2556/*
2557 * Kick a guest cpu out of SIE and wait until SIE is not running.
2558 * If the CPU is not running (e.g. waiting as idle) the function will
2559 * return immediately. */
2560void exit_sie(struct kvm_vcpu *vcpu)
2561{
2526 seqcount_init(&vcpu->arch.cputm_seqcount);
2527
2528 rc = kvm_vcpu_init(vcpu, kvm, id);
2529 if (rc)
2530 goto out_free_sie_block;
2531 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
2532 vcpu->arch.sie_block);
2533 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

--- 40 unchanged lines hidden ---

2574}
2575
2576/*
2577 * Kick a guest cpu out of SIE and wait until SIE is not running.
2578 * If the CPU is not running (e.g. waiting as idle) the function will
2579 * return immediately. */
2580void exit_sie(struct kvm_vcpu *vcpu)
2581{
2562 atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
2582 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
2563 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
2564 cpu_relax();
2565}
2566
2567/* Kick a guest cpu out of SIE to process a request synchronously */
2568void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
2569{
2570 kvm_make_request(req, vcpu);

--- 227 unchanged lines hidden ---

2798 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
2799 return -EINVAL;
2800 if (!sclp.has_gpere)
2801 return -EINVAL;
2802
2803 if (dbg->control & KVM_GUESTDBG_ENABLE) {
2804 vcpu->guest_debug = dbg->control;
2805 /* enforce guest PER */
2583 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
2584 cpu_relax();
2585}
2586
2587/* Kick a guest cpu out of SIE to process a request synchronously */
2588void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
2589{
2590 kvm_make_request(req, vcpu);

--- 227 unchanged lines hidden ---

2818 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
2819 return -EINVAL;
2820 if (!sclp.has_gpere)
2821 return -EINVAL;
2822
2823 if (dbg->control & KVM_GUESTDBG_ENABLE) {
2824 vcpu->guest_debug = dbg->control;
2825 /* enforce guest PER */
2806 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
2826 kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
2807
2808 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
2809 rc = kvm_s390_import_bp_data(vcpu, dbg);
2810 } else {
2827
2828 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
2829 rc = kvm_s390_import_bp_data(vcpu, dbg);
2830 } else {
2811 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
2831 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
2812 vcpu->arch.guestdbg.last_bp = 0;
2813 }
2814
2815 if (rc) {
2816 vcpu->guest_debug = 0;
2817 kvm_s390_clear_bp_data(vcpu);
2832 vcpu->arch.guestdbg.last_bp = 0;
2833 }
2834
2835 if (rc) {
2836 vcpu->guest_debug = 0;
2837 kvm_s390_clear_bp_data(vcpu);
2818 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
2838 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
2819 }
2820
2821 return rc;
2822}
2823
2824int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2825 struct kvm_mp_state *mp_state)
2826{

--- 24 unchanged lines hidden ---

2851 rc = -ENXIO;
2852 }
2853
2854 return rc;
2855}
2856
2857static bool ibs_enabled(struct kvm_vcpu *vcpu)
2858{
2839 }
2840
2841 return rc;
2842}
2843
2844int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2845 struct kvm_mp_state *mp_state)
2846{

--- 24 unchanged lines hidden ---

2871 rc = -ENXIO;
2872 }
2873
2874 return rc;
2875}
2876
2877static bool ibs_enabled(struct kvm_vcpu *vcpu)
2878{
2859 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
2879 return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
2860}
2861
2862static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
2863{
2864retry:
2865 kvm_s390_vcpu_request_handled(vcpu);
2866 if (!kvm_request_pending(vcpu))
2867 return 0;

--- 19 unchanged lines hidden ---

2887 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2888 vcpu->arch.sie_block->ihcpu = 0xffff;
2889 goto retry;
2890 }
2891
2892 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
2893 if (!ibs_enabled(vcpu)) {
2894 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
2880}
2881
2882static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
2883{
2884retry:
2885 kvm_s390_vcpu_request_handled(vcpu);
2886 if (!kvm_request_pending(vcpu))
2887 return 0;

--- 19 unchanged lines hidden ---

2907 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2908 vcpu->arch.sie_block->ihcpu = 0xffff;
2909 goto retry;
2910 }
2911
2912 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
2913 if (!ibs_enabled(vcpu)) {
2914 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
2895 atomic_or(CPUSTAT_IBS,
2896 &vcpu->arch.sie_block->cpuflags);
2915 kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
2897 }
2898 goto retry;
2899 }
2900
2901 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
2902 if (ibs_enabled(vcpu)) {
2903 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
2916 }
2917 goto retry;
2918 }
2919
2920 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
2921 if (ibs_enabled(vcpu)) {
2922 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
2904 atomic_andnot(CPUSTAT_IBS,
2905 &vcpu->arch.sie_block->cpuflags);
2923 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
2906 }
2907 goto retry;
2908 }
2909
2910 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
2911 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
2912 goto retry;
2913 }

--- 453 unchanged lines hidden ---

3367 vcpu->arch.host_gscb = NULL;
3368 }
3369
3370}
3371
3372int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3373{
3374 int rc;
2924 }
2925 goto retry;
2926 }
2927
2928 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
2929 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
2930 goto retry;
2931 }

--- 453 unchanged lines hidden ---

3385 vcpu->arch.host_gscb = NULL;
3386 }
3387
3388}
3389
3390int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3391{
3392 int rc;
3375 sigset_t sigsaved;
3376
3377 if (kvm_run->immediate_exit)
3378 return -EINTR;
3379
3380 if (guestdbg_exit_pending(vcpu)) {
3381 kvm_s390_prepare_debug_exit(vcpu);
3382 return 0;
3383 }
3384
3393
3394 if (kvm_run->immediate_exit)
3395 return -EINTR;
3396
3397 if (guestdbg_exit_pending(vcpu)) {
3398 kvm_s390_prepare_debug_exit(vcpu);
3399 return 0;
3400 }
3401
3385 if (vcpu->sigset_active)
3386 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
3402 kvm_sigset_activate(vcpu);
3387
3388 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
3389 kvm_s390_vcpu_start(vcpu);
3390 } else if (is_vcpu_stopped(vcpu)) {
3391 pr_err_ratelimited("can't run stopped vcpu %d\n",
3392 vcpu->vcpu_id);
3393 return -EINVAL;
3394 }

--- 17 unchanged lines hidden ---

3412 if (rc == -EREMOTE) {
3413 /* userspace support is needed, kvm_run has been prepared */
3414 rc = 0;
3415 }
3416
3417 disable_cpu_timer_accounting(vcpu);
3418 store_regs(vcpu, kvm_run);
3419
3403
3404 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
3405 kvm_s390_vcpu_start(vcpu);
3406 } else if (is_vcpu_stopped(vcpu)) {
3407 pr_err_ratelimited("can't run stopped vcpu %d\n",
3408 vcpu->vcpu_id);
3409 return -EINVAL;
3410 }

--- 17 unchanged lines hidden ---

3428 if (rc == -EREMOTE) {
3429 /* userspace support is needed, kvm_run has been prepared */
3430 rc = 0;
3431 }
3432
3433 disable_cpu_timer_accounting(vcpu);
3434 store_regs(vcpu, kvm_run);
3435
3420 if (vcpu->sigset_active)
3421 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
3436 kvm_sigset_deactivate(vcpu);
3422
3423 vcpu->stat.exit_userspace++;
3424 return rc;
3425}
3426
3427/*
3428 * store status at address
3429 * we have two special cases:

--- 114 unchanged lines hidden ---

3544 /*
3545 * As we are starting a second VCPU, we have to disable
3546 * the IBS facility on all VCPUs to remove potentially
3547 * outstanding ENABLE requests.
3548 */
3549 __disable_ibs_on_all_vcpus(vcpu->kvm);
3550 }
3551
3437
3438 vcpu->stat.exit_userspace++;
3439 return rc;
3440}
3441
3442/*
3443 * store status at address
3444 * we have two special cases:

--- 114 unchanged lines hidden ---
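In the kvm_arch_vcpu_ioctl_run() hunks above, the open-coded sigprocmask() save/restore around guest entry is replaced by kvm_sigset_activate()/kvm_sigset_deactivate(). These are common KVM helpers rather than s390-specific code; a rough sketch, assuming they live in virt/kvm/kvm_main.c and use current->real_blocked as the save area (the exact upstream implementation may differ):

void kvm_sigset_activate(struct kvm_vcpu *vcpu)
{
        if (!vcpu->sigset_active)
                return;
        /* block the vcpu's configured signal set while the guest runs,
         * remembering the previous mask in current->real_blocked */
        sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
}

void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
{
        if (!vcpu->sigset_active)
                return;
        /* restore the signal mask that was active before guest entry */
        sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
        sigemptyset(&current->real_blocked);
}

With these helpers, each architecture's run loop no longer needs its own sigsaved local variable and matching restore path.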

3559 /*
3560 * As we are starting a second VCPU, we have to disable
3561 * the IBS facility on all VCPUs to remove potentially
3562 * outstanding ENABLE requests.
3563 */
3564 __disable_ibs_on_all_vcpus(vcpu->kvm);
3565 }
3566
3552 atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
3567 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
3553 /*
3554 * Another VCPU might have used IBS while we were offline.
3555 * Let's play safe and flush the VCPU at startup.
3556 */
3557 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
3558 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
3559 return;
3560}

--- 9 unchanged lines hidden ---

3570 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
3571 /* Only one cpu at a time may enter/leave the STOPPED state. */
3572 spin_lock(&vcpu->kvm->arch.start_stop_lock);
3573 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3574
3575 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
3576 kvm_s390_clear_stop_irq(vcpu);
3577
3568 /*
3569 * Another VCPU might have used IBS while we were offline.
3570 * Let's play safe and flush the VCPU at startup.
3571 */
3572 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
3573 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
3574 return;
3575}

--- 9 unchanged lines hidden ---

3585 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
3586 /* Only one cpu at a time may enter/leave the STOPPED state. */
3587 spin_lock(&vcpu->kvm->arch.start_stop_lock);
3588 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3589
3590 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
3591 kvm_s390_clear_stop_irq(vcpu);
3592
3578 atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
3593 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
3579 __disable_ibs_on_vcpu(vcpu);
3580
3581 for (i = 0; i < online_vcpus; i++) {
3582 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
3583 started_vcpus++;
3584 started_vcpu = vcpu->kvm->vcpus[i];
3585 }
3586 }

--- 219 unchanged lines hidden ---

3806 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3807 break;
3808 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
3809 irq_state.len == 0 ||
3810 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
3811 r = -EINVAL;
3812 break;
3813 }
3594 __disable_ibs_on_vcpu(vcpu);
3595
3596 for (i = 0; i < online_vcpus; i++) {
3597 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
3598 started_vcpus++;
3599 started_vcpu = vcpu->kvm->vcpus[i];
3600 }
3601 }

--- 219 unchanged lines hidden ---

3821 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3822 break;
3823 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
3824 irq_state.len == 0 ||
3825 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
3826 r = -EINVAL;
3827 break;
3828 }
3829 /* do not use irq_state.flags, it will break old QEMUs */
3814 r = kvm_s390_set_irq_state(vcpu,
3815 (void __user *) irq_state.buf,
3816 irq_state.len);
3817 break;
3818 }
3819 case KVM_S390_GET_IRQ_STATE: {
3820 struct kvm_s390_irq_state irq_state;
3821
3822 r = -EFAULT;
3823 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3824 break;
3825 if (irq_state.len == 0) {
3826 r = -EINVAL;
3827 break;
3828 }
3830 r = kvm_s390_set_irq_state(vcpu,
3831 (void __user *) irq_state.buf,
3832 irq_state.len);
3833 break;
3834 }
3835 case KVM_S390_GET_IRQ_STATE: {
3836 struct kvm_s390_irq_state irq_state;
3837
3838 r = -EFAULT;
3839 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3840 break;
3841 if (irq_state.len == 0) {
3842 r = -EINVAL;
3843 break;
3844 }
3845 /* do not use irq_state.flags, it will break old QEMUs */
3829 r = kvm_s390_get_irq_state(vcpu,
3830 (__u8 __user *) irq_state.buf,
3831 irq_state.len);
3832 break;
3833 }
3834 default:
3835 r = -ENOTTY;
3836 }

--- 115 unchanged lines hidden ---
3846 r = kvm_s390_get_irq_state(vcpu,
3847 (__u8 __user *) irq_state.buf,
3848 irq_state.len);
3849 break;
3850 }
3851 default:
3852 r = -ENOTTY;
3853 }

--- 115 unchanged lines hidden ---