--- hyperv.c (e880c6ea55b9805294ecc100ee95e0c9860ae90e)
+++ hyperv.c (cc9cfddb0433961107bb156fa769fdd7eb6718de)
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * KVM Microsoft Hyper-V emulation
  *
  * derived from arch/x86/kvm/x86.c
  *
  * Copyright (C) 2006 Qumranet, Inc.
  * Copyright (C) 2008 Qumranet, Inc.

--- 506 unchanged lines hidden ---

 static u64 get_time_ref_counter(struct kvm *kvm)
 {
 	struct kvm_hv *hv = to_kvm_hv(kvm);
 	struct kvm_vcpu *vcpu;
 	u64 tsc;
 
 	/*
-	 * The guest has not set up the TSC page or the clock isn't
-	 * stable, fall back to get_kvmclock_ns.
+	 * Fall back to get_kvmclock_ns() when TSC page hasn't been set up,
+	 * is broken, disabled or being updated.
 	 */
-	if (!hv->tsc_ref.tsc_sequence)
+	if (hv->hv_tsc_page_status != HV_TSC_PAGE_SET)
 		return div_u64(get_kvmclock_ns(kvm), 100);
 
 	vcpu = kvm_get_vcpu(kvm, 0);
 	tsc = kvm_read_l1_tsc(vcpu, rdtsc());
 	return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
 		+ hv->tsc_ref.tsc_offset;
 }
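The tail of this hunk is the Hyper-V TLFS reference-time formula, ReferenceTime = ((VirtualTsc * TscScale) >> 64) + TscOffset, in 100 ns units (which is also why the kvmclock fallback divides by 100). Below is a minimal standalone sketch of that arithmetic, using a 128-bit intermediate in place of the kernel's mul_u64_u64_shr(); the 2.56 GHz frequency is a hypothetical example picked so the scale divides exactly, not anything the code above relies on:

#include <stdint.h>
#include <stdio.h>

/* (a * b) >> 64 via a 128-bit intermediate, like mul_u64_u64_shr(a, b, 64). */
static uint64_t mul_shr64(uint64_t a, uint64_t b)
{
	return (uint64_t)(((unsigned __int128)a * b) >> 64);
}

int main(void)
{
	uint64_t tsc_hz = 2560000000ULL;  /* hypothetical 2.56 GHz guest TSC */
	/* Scale chosen so (tsc * scale) >> 64 lands in 100 ns (10 MHz) units. */
	uint64_t scale = (uint64_t)(((unsigned __int128)10000000 << 64) / tsc_hz);
	uint64_t tsc = 10 * tsc_hz;       /* pretend 10 seconds of cycles */
	int64_t offset = 0;               /* derived from hv_clock in the real code */

	/* Prints 100000000, i.e. 10 s expressed as 100 ns ticks. */
	printf("%llu\n", (unsigned long long)(mul_shr64(tsc, scale) + (uint64_t)offset));
	return 0;
}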

--- 547 unchanged lines hidden ---

 {
 	struct kvm_hv *hv = to_kvm_hv(kvm);
 	u32 tsc_seq;
 	u64 gfn;
 
 	BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
 	BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);
 
-	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
+	if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
+	    hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
 		return;
 
 	mutex_lock(&hv->hv_lock);
 	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
 		goto out_unlock;
 
 	gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
 	/*
 	 * Because the TSC parameters only vary when there is a
 	 * change in the master clock, do not bother with caching.
 	 */
 	if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
 				    &tsc_seq, sizeof(tsc_seq))))
-		goto out_unlock;
+		goto out_err;
 
 	/*
 	 * While we're computing and writing the parameters, force the
 	 * guest to use the time reference count MSR.
 	 */
 	hv->tsc_ref.tsc_sequence = 0;
 	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
 			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
-		goto out_unlock;
+		goto out_err;
 
 	if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
-		goto out_unlock;
+		goto out_err;
 
 	/* Ensure sequence is zero before writing the rest of the struct. */
 	smp_wmb();
 	if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
-		goto out_unlock;
+		goto out_err;
 
 	/*
 	 * Now switch to the TSC page mechanism by writing the sequence.
 	 */
 	tsc_seq++;
 	if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
 		tsc_seq = 1;
 
 	/* Write the struct entirely before the non-zero sequence. */
 	smp_wmb();
 
 	hv->tsc_ref.tsc_sequence = tsc_seq;
-	kvm_write_guest(kvm, gfn_to_gpa(gfn),
-			&hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
+	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
+			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
+		goto out_err;
+
+	hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
+	goto out_unlock;
+
+out_err:
+	hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
 out_unlock:
 	mutex_unlock(&hv->hv_lock);
 }
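The zero/write/smp_wmb()/non-zero dance above is the writer half of a seqcount-style protocol; the guest is the reader, retrying whenever the sequence changes mid-read and falling back to the time-reference-count MSR while the sequence is invalid (which is exactly what tsc_ref.tsc_sequence = 0 forces during updates, and why tsc_seq skips 0 and 0xFFFFFFFF). A rough reader-side sketch, assuming GCC atomic builtins in place of the guest's real barriers and hypothetical read_tsc()/read_time_ref_count_msr() stubs:

#include <stdint.h>

/* Layout per the BUILD_BUG_ONs above: the sequence must sit at offset 0. */
struct ms_hyperv_tsc_page {
	uint32_t tsc_sequence;	/* 0 or 0xFFFFFFFF: invalid, use the MSR */
	uint32_t reserved1;
	uint64_t tsc_scale;
	int64_t  tsc_offset;
};

/* Hypothetical stand-ins for RDTSC and the time-ref-count MSR read. */
static uint64_t read_tsc(void) { return 0; }
static uint64_t read_time_ref_count_msr(void) { return 0; }

static uint64_t read_reference_time(const struct ms_hyperv_tsc_page *tp)
{
	uint32_t seq;
	uint64_t scale, tsc;
	int64_t offset;

	do {
		seq = __atomic_load_n(&tp->tsc_sequence, __ATOMIC_ACQUIRE);
		if (seq == 0 || seq == 0xFFFFFFFF)
			return read_time_ref_count_msr(); /* page inactive */
		scale = tp->tsc_scale;
		offset = tp->tsc_offset;
		tsc = read_tsc();
		/* Pairs with the writer's smp_wmb(); retry if seq moved. */
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
	} while (seq != __atomic_load_n(&tp->tsc_sequence, __ATOMIC_RELAXED));

	return (uint64_t)(((unsigned __int128)tsc * scale) >> 64) + (uint64_t)offset;
}

int main(void)
{
	static struct ms_hyperv_tsc_page page;	/* all zero: takes the MSR path */
	return (int)read_reference_time(&page);
}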
 
 void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
 {
 	struct kvm_hv *hv = to_kvm_hv(kvm);
 	u64 gfn;
 
-	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
+	if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
+	    hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
 		return;
 
 	mutex_lock(&hv->hv_lock);
 
 	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
 		goto out_unlock;
 
+	/* Preserve HV_TSC_PAGE_GUEST_CHANGED/HV_TSC_PAGE_HOST_CHANGED states */
+	if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET)
+		hv->hv_tsc_page_status = HV_TSC_PAGE_UPDATING;
+
 	gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
 
 	hv->tsc_ref.tsc_sequence = 0;
-	kvm_write_guest(kvm, gfn_to_gpa(gfn),
-			&hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
+	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
+			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
+		hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
 
 out_unlock:
 	mutex_unlock(&hv->hv_lock);
 }
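The hv_tsc_page_status values tested and set throughout these hunks come from an enum this commit introduces in arch/x86/include/asm/kvm_host.h; approximately the following (see the kernel tree for the authoritative definition):

enum hv_tsc_page_status {
	/* TSC page was not set up or was disabled */
	HV_TSC_PAGE_UNSET = 0,
	/* TSC page MSR was written by the guest, update pending */
	HV_TSC_PAGE_GUEST_CHANGED,
	/* TSC page MSR was written by KVM userspace, update pending */
	HV_TSC_PAGE_HOST_CHANGED,
	/* TSC page was properly set up and is currently active */
	HV_TSC_PAGE_SET,
	/* TSC page is currently being updated and therefore is inactive */
	HV_TSC_PAGE_UPDATING,
	/* TSC page was set up with an inaccessible GPA */
	HV_TSC_PAGE_BROKEN,
};

In terms of transitions: kvm_hv_setup_tsc_page() refuses to run for UNSET or BROKEN pages, moves any pending state to SET on success and to BROKEN on any guest-memory failure, while kvm_hv_invalidate_tsc_page() parks an active page in UPDATING (deliberately preserving GUEST_CHANGED/HOST_CHANGED) so readers use the reference-count MSR until the next update completes.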
 
 static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
 			     bool host)
 {

--- 45 unchanged lines hidden ---

 		addr = data & HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK;
 		if (kvm_vcpu_write_guest(vcpu, addr, instructions, i))
 			return 1;
 		hv->hv_hypercall = data;
 		break;
 	}
 	case HV_X64_MSR_REFERENCE_TSC:
 		hv->hv_tsc_page = data;
-		if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)
+		if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) {
+			if (!host)
+				hv->hv_tsc_page_status = HV_TSC_PAGE_GUEST_CHANGED;
+			else
+				hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
 			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
+		} else {
+			hv->hv_tsc_page_status = HV_TSC_PAGE_UNSET;
+		}
 		break;
 	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
 		return kvm_hv_msr_set_crash_data(kvm,
 						 msr - HV_X64_MSR_CRASH_P0,
 						 data);
 	case HV_X64_MSR_CRASH_CTL:
 		if (host)
 			return kvm_hv_msr_set_crash_ctl(kvm, data);

--- 980 unchanged lines hidden ---
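For the HV_X64_MSR_REFERENCE_TSC hunk, the new status selection reduces to a small pure function. A hypothetical restatement (tsc_page_status_on_msr_write() is not a real kernel helper; it reuses the enum sketched earlier, and HV_X64_MSR_TSC_REFERENCE_ENABLE is bit 0 of the MSR):

#include <stdbool.h>
#include <stdint.h>

#define HV_X64_MSR_TSC_REFERENCE_ENABLE 0x00000001ULL

/* Hypothetical condensation of the HV_X64_MSR_REFERENCE_TSC case above. */
static enum hv_tsc_page_status tsc_page_status_on_msr_write(uint64_t data,
							    bool host)
{
	if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
		return HV_TSC_PAGE_UNSET;	/* disabled, nothing pending */
	/* Enabled: the deferred masterclock update will rebuild the page. */
	return host ? HV_TSC_PAGE_HOST_CHANGED : HV_TSC_PAGE_GUEST_CHANGED;
}

Recording who wrote the MSR lets later update logic tell a guest re-enable apart from a host-side restore of the same value (for instance when userspace re-loads MSRs after migration).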