/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Amit Shah <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "x86.h"
#include "lapic.h"
#include "ioapic.h"
#include "hyperv.h"

#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/apicdef.h>
#include <trace/events/kvm.h>

#include "trace.h"

static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
{
	return atomic64_read(&synic->sint[sint]);
}

static inline int synic_get_sint_vector(u64 sint_value)
{
	if (sint_value & HV_SYNIC_SINT_MASKED)
		return -1;
	return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
}

static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
				       int vector)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
			return true;
	}
	return false;
}

static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
				      int vector)
{
	int i;
	u64 sint_value;

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		sint_value = synic_read_sint(synic, i);
		if (synic_get_sint_vector(sint_value) == vector &&
		    sint_value & HV_SYNIC_SINT_AUTO_EOI)
			return true;
	}
	return false;
}

static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
			  u64 data, bool host)
{
	int vector;

	vector = data & HV_SYNIC_SINT_VECTOR_MASK;
	if (vector < 16 && !host)
		return 1;
	/*
	 * Guest may configure multiple SINTs to use the same vector, so
	 * we maintain a bitmap of vectors handled by synic, and a
	 * bitmap of vectors with auto-eoi behavior. The bitmaps are
	 * updated here, and atomically queried on fast paths.
	 */

	atomic64_set(&synic->sint[sint], data);

	if (synic_has_vector_connected(synic, vector))
		__set_bit(vector, synic->vec_bitmap);
	else
		__clear_bit(vector, synic->vec_bitmap);

	if (synic_has_vector_auto_eoi(synic, vector))
		__set_bit(vector, synic->auto_eoi_bitmap);
	else
		__clear_bit(vector, synic->auto_eoi_bitmap);

	/* Load SynIC vectors into EOI exit bitmap */
	kvm_make_request(KVM_REQ_SCAN_IOAPIC, synic_to_vcpu(synic));
	return 0;
}
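
/*
 * Resolve the SynIC for a given vCPU id; returns NULL when the vCPU does
 * not exist or its SynIC has not been activated (synic->active).
 */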
static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vcpu_id)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu_hv_synic *synic;

	if (vcpu_id >= atomic_read(&kvm->online_vcpus))
		return NULL;
	vcpu = kvm_get_vcpu(kvm, vcpu_id);
	if (!vcpu)
		return NULL;
	synic = vcpu_to_synic(vcpu);
	return (synic->active) ? synic : NULL;
}

static void synic_clear_sint_msg_pending(struct kvm_vcpu_hv_synic *synic,
					 u32 sint)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	struct page *page;
	gpa_t gpa;
	struct hv_message *msg;
	struct hv_message_page *msg_page;

	gpa = synic->msg_page & PAGE_MASK;
	page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
	if (is_error_page(page)) {
		vcpu_err(vcpu, "Hyper-V SynIC can't get msg page, gpa 0x%llx\n",
			 gpa);
		return;
	}
	msg_page = kmap_atomic(page);

	msg = &msg_page->sint_message[sint];
	msg->header.message_flags.msg_pending = 0;

	kunmap_atomic(msg_page);
	kvm_release_page_dirty(page);
	kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
}

static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_stimer *stimer;
	int gsi, idx, stimers_pending;

	trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);

	if (synic->msg_page & HV_SYNIC_SIMP_ENABLE)
		synic_clear_sint_msg_pending(synic, sint);

	/* Try to deliver pending Hyper-V SynIC timers messages */
	stimers_pending = 0;
	for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
		stimer = &hv_vcpu->stimer[idx];
		if (stimer->msg_pending &&
		    (stimer->config & HV_STIMER_ENABLE) &&
		    HV_STIMER_SINT(stimer->config) == sint) {
			set_bit(stimer->index,
				hv_vcpu->stimer_pending_bitmap);
			stimers_pending++;
		}
	}
	if (stimers_pending)
		kvm_make_request(KVM_REQ_HV_STIMER, vcpu);

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = atomic_read(&synic->sint_to_gsi[sint]);
	if (gsi != -1)
		kvm_notify_acked_gsi(kvm, gsi);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;

	hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
	hv_vcpu->exit.u.synic.msr = msr;
	hv_vcpu->exit.u.synic.control = synic->control;
	hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
	hv_vcpu->exit.u.synic.msg_page = synic->msg_page;

	kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}
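
/*
 * Guest-initiated (!host) writes to the SynIC control/page MSRs below are
 * reflected to userspace through a KVM_EXIT_HYPERV_SYNIC exit (see
 * synic_exit() above) so that userspace can track the SynIC state;
 * host-side writes only update the in-kernel copy.
 */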
static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
			 u32 msr, u64 data, bool host)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	int ret;

	if (!synic->active)
		return 1;

	trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);

	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		synic->control = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SVERSION:
		if (!host) {
			ret = 1;
			break;
		}
		synic->version = data;
		break;
	case HV_X64_MSR_SIEFP:
		if (data & HV_SYNIC_SIEFP_ENABLE)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->evt_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SIMP:
		if (data & HV_SYNIC_SIMP_ENABLE)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->msg_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_EOM: {
		int i;

		for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
			kvm_hv_notify_acked_sint(vcpu, i);
		break;
	}
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}

static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata)
{
	int ret;

	if (!synic->active)
		return 1;

	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		*pdata = synic->control;
		break;
	case HV_X64_MSR_SVERSION:
		*pdata = synic->version;
		break;
	case HV_X64_MSR_SIEFP:
		*pdata = synic->evt_page;
		break;
	case HV_X64_MSR_SIMP:
		*pdata = synic->msg_page;
		break;
	case HV_X64_MSR_EOM:
		*pdata = 0;
		break;
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		*pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}

int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	struct kvm_lapic_irq irq;
	int ret, vector;

	if (sint >= ARRAY_SIZE(synic->sint))
		return -EINVAL;

	vector = synic_get_sint_vector(synic_read_sint(synic, sint));
	if (vector < 0)
		return -ENOENT;

	memset(&irq, 0, sizeof(irq));
	irq.dest_id = kvm_apic_id(vcpu->arch.apic);
	irq.dest_mode = APIC_DEST_PHYSICAL;
	irq.delivery_mode = APIC_DM_FIXED;
	irq.vector = vector;
	irq.level = 1;

	ret = kvm_irq_delivery_to_apic(vcpu->kvm, NULL, &irq, NULL);
	trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
	return ret;
}

int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint)
{
	struct kvm_vcpu_hv_synic *synic;

	synic = synic_get(kvm, vcpu_id);
	if (!synic)
		return -EINVAL;

	return synic_set_irq(synic, sint);
}

void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
	int i;

	trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
			kvm_hv_notify_acked_sint(vcpu, i);
}

static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vcpu_id, u32 sint, int gsi)
{
	struct kvm_vcpu_hv_synic *synic;

	synic = synic_get(kvm, vcpu_id);
	if (!synic)
		return -EINVAL;

	if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
		return -EINVAL;

	atomic_set(&synic->sint_to_gsi[sint], gsi);
	return 0;
}

void kvm_hv_irq_routing_update(struct kvm *kvm)
{
	struct kvm_irq_routing_table *irq_rt;
	struct kvm_kernel_irq_routing_entry *e;
	u32 gsi;

	irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
					lockdep_is_held(&kvm->irq_lock));

	for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
		hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
			if (e->type == KVM_IRQ_ROUTING_HV_SINT)
				kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
						    e->hv_sint.sint, gsi);
		}
	}
}

static void synic_init(struct kvm_vcpu_hv_synic *synic)
{
	int i;

	memset(synic, 0, sizeof(*synic));
	synic->version = HV_SYNIC_VERSION_1;
	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
		atomic_set(&synic->sint_to_gsi[i], -1);
	}
}
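
/*
 * The partition reference time counter is in 100ns units: it is either
 * derived from kvmclock (divided by 100) when no TSC page is active, or
 * computed from the current TSC using the scale/offset published in the
 * reference TSC page.
 */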
static u64 get_time_ref_counter(struct kvm *kvm)
{
	struct kvm_hv *hv = &kvm->arch.hyperv;
	struct kvm_vcpu *vcpu;
	u64 tsc;

	/*
	 * The guest has not set up the TSC page or the clock isn't
	 * stable, fall back to get_kvmclock_ns.
	 */
	if (!hv->tsc_ref.tsc_sequence)
		return div_u64(get_kvmclock_ns(kvm), 100);

	vcpu = kvm_get_vcpu(kvm, 0);
	tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
		+ hv->tsc_ref.tsc_offset;
}

static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
				bool vcpu_kick)
{
	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

	set_bit(stimer->index,
		vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
	kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
	if (vcpu_kick)
		kvm_vcpu_kick(vcpu);
}

static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

	trace_kvm_hv_stimer_cleanup(stimer_to_vcpu(stimer)->vcpu_id,
				    stimer->index);

	hrtimer_cancel(&stimer->timer);
	clear_bit(stimer->index,
		  vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
	stimer->msg_pending = false;
	stimer->exp_time = 0;
}

static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
{
	struct kvm_vcpu_hv_stimer *stimer;

	stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
	trace_kvm_hv_stimer_callback(stimer_to_vcpu(stimer)->vcpu_id,
				     stimer->index);
	stimer_mark_pending(stimer, true);

	return HRTIMER_NORESTART;
}
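
/*
 * stimer->count and stimer->exp_time are expressed in the 100ns units of
 * the reference counter, hence the "100 *" conversions below when arming
 * the underlying nanosecond-based hrtimer.
 */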
/*
 * stimer_start() assumptions:
 * a) stimer->count is not equal to 0
 * b) stimer->config has HV_STIMER_ENABLE flag
 */
static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
{
	u64 time_now;
	ktime_t ktime_now;

	time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm);
	ktime_now = ktime_get();

	if (stimer->config & HV_STIMER_PERIODIC) {
		if (stimer->exp_time) {
			if (time_now >= stimer->exp_time) {
				u64 remainder;

				div64_u64_rem(time_now - stimer->exp_time,
					      stimer->count, &remainder);
				stimer->exp_time =
					time_now + (stimer->count - remainder);
			}
		} else
			stimer->exp_time = time_now + stimer->count;

		trace_kvm_hv_stimer_start_periodic(
					stimer_to_vcpu(stimer)->vcpu_id,
					stimer->index,
					time_now, stimer->exp_time);

		hrtimer_start(&stimer->timer,
			      ktime_add_ns(ktime_now,
					   100 * (stimer->exp_time - time_now)),
			      HRTIMER_MODE_ABS);
		return 0;
	}
	stimer->exp_time = stimer->count;
	if (time_now >= stimer->count) {
		/*
		 * Expire timer according to Hypervisor Top-Level Functional
		 * specification v4(15.3.1):
		 * "If a one shot is enabled and the specified count is in
		 * the past, it will expire immediately."
		 */
		stimer_mark_pending(stimer, false);
		return 0;
	}

	trace_kvm_hv_stimer_start_one_shot(stimer_to_vcpu(stimer)->vcpu_id,
					   stimer->index,
					   time_now, stimer->count);

	hrtimer_start(&stimer->timer,
		      ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
		      HRTIMER_MODE_ABS);
	return 0;
}

static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
			     bool host)
{
	trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id,
				       stimer->index, config, host);

	stimer_cleanup(stimer);
	if ((stimer->config & HV_STIMER_ENABLE) && HV_STIMER_SINT(config) == 0)
		config &= ~HV_STIMER_ENABLE;
	stimer->config = config;
	stimer_mark_pending(stimer, false);
	return 0;
}

static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
			    bool host)
{
	trace_kvm_hv_stimer_set_count(stimer_to_vcpu(stimer)->vcpu_id,
				      stimer->index, count, host);

	stimer_cleanup(stimer);
	stimer->count = count;
	if (stimer->count == 0)
		stimer->config &= ~HV_STIMER_ENABLE;
	else if (stimer->config & HV_STIMER_AUTOENABLE)
		stimer->config |= HV_STIMER_ENABLE;
	stimer_mark_pending(stimer, false);
	return 0;
}

static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
{
	*pconfig = stimer->config;
	return 0;
}

static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
{
	*pcount = stimer->count;
	return 0;
}
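
/*
 * Post a message into the SINT's slot in the SynIC message page. The slot
 * is claimed with a compare-and-swap on message_type: if it still holds a
 * previous message, only the "message pending" flag is set and -EAGAIN is
 * returned, so the message is retried once the guest EOMs the old one.
 */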
static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
			     struct hv_message *src_msg)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	struct page *page;
	gpa_t gpa;
	struct hv_message *dst_msg;
	int r;
	struct hv_message_page *msg_page;

	if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
		return -ENOENT;

	gpa = synic->msg_page & PAGE_MASK;
	page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
	if (is_error_page(page))
		return -EFAULT;

	msg_page = kmap_atomic(page);
	dst_msg = &msg_page->sint_message[sint];
	if (sync_cmpxchg(&dst_msg->header.message_type, HVMSG_NONE,
			 src_msg->header.message_type) != HVMSG_NONE) {
		dst_msg->header.message_flags.msg_pending = 1;
		r = -EAGAIN;
	} else {
		memcpy(&dst_msg->u.payload, &src_msg->u.payload,
		       src_msg->header.payload_size);
		dst_msg->header.message_type = src_msg->header.message_type;
		dst_msg->header.payload_size = src_msg->header.payload_size;
		r = synic_set_irq(synic, sint);
		if (r >= 1)
			r = 0;
		else if (r == 0)
			r = -EFAULT;
	}
	kunmap_atomic(msg_page);
	kvm_release_page_dirty(page);
	kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
	return r;
}

static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
	struct hv_message *msg = &stimer->msg;
	struct hv_timer_message_payload *payload =
			(struct hv_timer_message_payload *)&msg->u.payload;

	payload->expiration_time = stimer->exp_time;
	payload->delivery_time = get_time_ref_counter(vcpu->kvm);
	return synic_deliver_msg(vcpu_to_synic(vcpu),
				 HV_STIMER_SINT(stimer->config), msg);
}

static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
{
	int r;

	stimer->msg_pending = true;
	r = stimer_send_msg(stimer);
	trace_kvm_hv_stimer_expiration(stimer_to_vcpu(stimer)->vcpu_id,
				       stimer->index, r);
	if (!r) {
		stimer->msg_pending = false;
		if (!(stimer->config & HV_STIMER_PERIODIC))
			stimer->config &= ~HV_STIMER_ENABLE;
	}
}

void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_stimer *stimer;
	u64 time_now, exp_time;
	int i;

	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
			stimer = &hv_vcpu->stimer[i];
			if (stimer->config & HV_STIMER_ENABLE) {
				exp_time = stimer->exp_time;

				if (exp_time) {
					time_now =
						get_time_ref_counter(vcpu->kvm);
					if (time_now >= exp_time)
						stimer_expiration(stimer);
				}

				if ((stimer->config & HV_STIMER_ENABLE) &&
				    stimer->count)
					stimer_start(stimer);
				else
					stimer_cleanup(stimer);
			}
		}
}

void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
	int i;

	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		stimer_cleanup(&hv_vcpu->stimer[i]);
}

static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
{
	struct hv_message *msg = &stimer->msg;
	struct hv_timer_message_payload *payload =
			(struct hv_timer_message_payload *)&msg->u.payload;

	memset(&msg->header, 0, sizeof(msg->header));
	msg->header.message_type = HVMSG_TIMER_EXPIRED;
	msg->header.payload_size = sizeof(*payload);

	payload->timer_index = stimer->index;
	payload->expiration_time = 0;
	payload->delivery_time = 0;
}

static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
{
	memset(stimer, 0, sizeof(*stimer));
	stimer->index = timer_index;
	hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	stimer->timer.function = stimer_timer_callback;
	stimer_prepare_msg(stimer);
}

void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
	int i;

	synic_init(&hv_vcpu->synic);

	bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		stimer_init(&hv_vcpu->stimer[i], i);
}

int kvm_hv_activate_synic(struct kvm_vcpu *vcpu)
{
	/*
	 * Hyper-V SynIC auto EOI SINT's are
	 * not compatible with APICV, so deactivate APICV
	 */
	kvm_vcpu_deactivate_apicv(vcpu);
	vcpu_to_synic(vcpu)->active = true;
	return 0;
}
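
/*
 * MSRs classified as partition-wide below are backed by the kvm-wide
 * struct kvm_hv and accessed under kvm->lock (see kvm_hv_set_msr_common()
 * and kvm_hv_get_msr_common()); the remaining Hyper-V MSRs are per-vCPU
 * state in struct kvm_vcpu_hv.
 */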
static bool kvm_hv_msr_partition_wide(u32 msr)
{
	bool r = false;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
	case HV_X64_MSR_REFERENCE_TSC:
	case HV_X64_MSR_TIME_REF_COUNT:
	case HV_X64_MSR_CRASH_CTL:
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_RESET:
		r = true;
		break;
	}

	return r;
}

static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
				     u32 index, u64 *pdata)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
		return -EINVAL;

	*pdata = hv->hv_crash_param[index];
	return 0;
}

static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	*pdata = hv->hv_crash_ctl;
	return 0;
}

static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	if (host)
		hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY;

	if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {

		vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
			   hv->hv_crash_param[0],
			   hv->hv_crash_param[1],
			   hv->hv_crash_param[2],
			   hv->hv_crash_param[3],
			   hv->hv_crash_param[4]);

		/* Send notification about crash to user space */
		kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
	}

	return 0;
}

static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
				     u32 index, u64 data)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
		return -EINVAL;

	hv->hv_crash_param[index] = data;
	return 0;
}

/*
 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
 * between them is possible:
 *
 * kvmclock formula:
 *    nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *
 * Hyper-V formula:
 *    nsec/100 = ticks * scale / 2^64 + offset
 *
 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
 * By dividing the kvmclock formula by 100 and equating what's left we get:
 *    ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *            scale / 2^64 =         tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *            scale        =         tsc_to_system_mul * 2^(32+tsc_shift) / 100
 *
 * Now expand the kvmclock formula and divide by 100:
 *    nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
 *           - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *    nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               + system_time / 100
 *
 * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
 *    nsec/100 = ticks * scale / 2^64
 *               - tsc_timestamp * scale / 2^64
 *               + system_time / 100
 *
 * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
 *    offset = system_time / 100 - tsc_timestamp * scale / 2^64
 *
 * These two equivalencies are implemented in this function.
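 *
 * As a quick sanity check of the scale formula, take a 1 GHz guest TSC
 * (so 1 tick = 1 ns, i.e. tsc_to_system_mul * 2^(tsc_shift-32) = 1, e.g.
 * tsc_shift = 0 and tsc_to_system_mul = 2^32): then scale = 2^64 / 100
 * and ticks * scale / 2^64 = ticks / 100, i.e. one reference-counter unit
 * per 100 ticks, which is indeed 100ns at 1 GHz.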
 */
static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
					HV_REFERENCE_TSC_PAGE *tsc_ref)
{
	u64 max_mul;

	if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
		return false;

	/*
	 * check if scale would overflow, if so we use the time ref counter
	 *    tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64
	 *    tsc_to_system_mul / 100 >= 2^(32-tsc_shift)
	 *    tsc_to_system_mul >= 100 * 2^(32-tsc_shift)
	 */
	max_mul = 100ull << (32 - hv_clock->tsc_shift);
	if (hv_clock->tsc_to_system_mul >= max_mul)
		return false;

	/*
	 * Otherwise compute the scale and offset according to the formulas
	 * derived above.
	 */
	tsc_ref->tsc_scale =
		mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
				hv_clock->tsc_to_system_mul,
				100);

	tsc_ref->tsc_offset = hv_clock->system_time;
	do_div(tsc_ref->tsc_offset, 100);
	tsc_ref->tsc_offset -=
		mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
	return true;
}
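
/*
 * Republish the reference TSC page with a seqlock-style protocol:
 * invalidate it by writing a zero sequence, update the scale and offset,
 * then publish a new non-zero sequence (skipping the special values
 * 0 and 0xFFFFFFFF).
 */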
void kvm_hv_setup_tsc_page(struct kvm *kvm,
			   struct pvclock_vcpu_time_info *hv_clock)
{
	struct kvm_hv *hv = &kvm->arch.hyperv;
	u32 tsc_seq;
	u64 gfn;

	BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
	BUILD_BUG_ON(offsetof(HV_REFERENCE_TSC_PAGE, tsc_sequence) != 0);

	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
		return;

	gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
	/*
	 * Because the TSC parameters only vary when there is a
	 * change in the master clock, do not bother with caching.
	 */
	if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
				    &tsc_seq, sizeof(tsc_seq))))
		return;

	/*
	 * While we're computing and writing the parameters, force the
	 * guest to use the time reference count MSR.
	 */
	hv->tsc_ref.tsc_sequence = 0;
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
		return;

	if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
		return;

	/* Ensure sequence is zero before writing the rest of the struct.  */
	smp_wmb();
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
		return;

	/*
	 * Now switch to the TSC page mechanism by writing the sequence.
	 */
	tsc_seq++;
	if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
		tsc_seq = 1;

	/* Write the struct entirely before the non-zero sequence.  */
	smp_wmb();

	hv->tsc_ref.tsc_sequence = tsc_seq;
	kvm_write_guest(kvm, gfn_to_gpa(gfn),
			&hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
}

static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
			     bool host)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = &kvm->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		hv->hv_guest_os_id = data;
		/* setting guest os id to zero disables hypercall page */
		if (!hv->hv_guest_os_id)
			hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
		break;
	case HV_X64_MSR_HYPERCALL: {
		u64 gfn;
		unsigned long addr;
		u8 instructions[4];

		/* if guest os id is not set hypercall should remain disabled */
		if (!hv->hv_guest_os_id)
			break;
		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
			hv->hv_hypercall = data;
			break;
		}
		gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
		addr = gfn_to_hva(kvm, gfn);
		if (kvm_is_error_hva(addr))
			return 1;
		kvm_x86_ops->patch_hypercall(vcpu, instructions);
		((unsigned char *)instructions)[3] = 0xc3; /* ret */
		if (__copy_to_user((void __user *)addr, instructions, 4))
			return 1;
		hv->hv_hypercall = data;
		mark_page_dirty(kvm, gfn);
		break;
	}
	case HV_X64_MSR_REFERENCE_TSC:
		hv->hv_tsc_page = data;
		if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)
			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
		break;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_set_crash_data(vcpu,
						 msr - HV_X64_MSR_CRASH_P0,
						 data);
	case HV_X64_MSR_CRASH_CTL:
		return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
	case HV_X64_MSR_RESET:
		if (data == 1) {
			vcpu_debug(vcpu, "hyper-v reset requested\n");
			kvm_make_request(KVM_REQ_HV_RESET, vcpu);
		}
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
			    msr, data);
		return 1;
	}
	return 0;
}

/* Calculate cpu time spent by current task in 100ns units */
static u64 current_task_runtime_100ns(void)
{
	cputime_t utime, stime;

	task_cputime_adjusted(current, &utime, &stime);
	return div_u64(cputime_to_nsecs(utime + stime), 100);
}
static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_APIC_ASSIST_PAGE: {
		u64 gfn;
		unsigned long addr;

		if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
			hv->hv_vapic = data;
			if (kvm_lapic_enable_pv_eoi(vcpu, 0))
				return 1;
			break;
		}
		gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
		addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
		if (kvm_is_error_hva(addr))
			return 1;
		if (__clear_user((void __user *)addr, PAGE_SIZE))
			return 1;
		hv->hv_vapic = data;
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
		if (kvm_lapic_enable_pv_eoi(vcpu,
					    gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
			return 1;
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
	case HV_X64_MSR_VP_RUNTIME:
		if (!host)
			return 1;
		hv->runtime_offset = data - current_task_runtime_100ns();
		break;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_EOM:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return synic_set_msr(vcpu_to_synic(vcpu), msr, data, host);
	case HV_X64_MSR_STIMER0_CONFIG:
	case HV_X64_MSR_STIMER1_CONFIG:
	case HV_X64_MSR_STIMER2_CONFIG:
	case HV_X64_MSR_STIMER3_CONFIG: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

		return stimer_set_config(vcpu_to_stimer(vcpu, timer_index),
					 data, host);
	}
	case HV_X64_MSR_STIMER0_COUNT:
	case HV_X64_MSR_STIMER1_COUNT:
	case HV_X64_MSR_STIMER2_COUNT:
	case HV_X64_MSR_STIMER3_COUNT: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

		return stimer_set_count(vcpu_to_stimer(vcpu, timer_index),
					data, host);
	}
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
			    msr, data);
		return 1;
	}

	return 0;
}

static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = &kvm->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		data = hv->hv_guest_os_id;
		break;
	case HV_X64_MSR_HYPERCALL:
		data = hv->hv_hypercall;
		break;
	case HV_X64_MSR_TIME_REF_COUNT:
		data = get_time_ref_counter(kvm);
		break;
	case HV_X64_MSR_REFERENCE_TSC:
		data = hv->hv_tsc_page;
		break;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_get_crash_data(vcpu,
						 msr - HV_X64_MSR_CRASH_P0,
						 pdata);
	case HV_X64_MSR_CRASH_CTL:
		return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
	case HV_X64_MSR_RESET:
		data = 0;
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}

	*pdata = data;
	return 0;
}
static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data = 0;
	struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_VP_INDEX: {
		int r;
		struct kvm_vcpu *v;

		kvm_for_each_vcpu(r, v, vcpu->kvm) {
			if (v == vcpu) {
				data = r;
				break;
			}
		}
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
	case HV_X64_MSR_APIC_ASSIST_PAGE:
		data = hv->hv_vapic;
		break;
	case HV_X64_MSR_VP_RUNTIME:
		data = current_task_runtime_100ns() + hv->runtime_offset;
		break;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_EOM:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata);
	case HV_X64_MSR_STIMER0_CONFIG:
	case HV_X64_MSR_STIMER1_CONFIG:
	case HV_X64_MSR_STIMER2_CONFIG:
	case HV_X64_MSR_STIMER3_CONFIG: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

		return stimer_get_config(vcpu_to_stimer(vcpu, timer_index),
					 pdata);
	}
	case HV_X64_MSR_STIMER0_COUNT:
	case HV_X64_MSR_STIMER1_COUNT:
	case HV_X64_MSR_STIMER2_COUNT:
	case HV_X64_MSR_STIMER3_COUNT: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

		return stimer_get_count(vcpu_to_stimer(vcpu, timer_index),
					pdata);
	}
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}

int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&vcpu->kvm->lock);
		r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
		mutex_unlock(&vcpu->kvm->lock);
		return r;
	} else
		return kvm_hv_set_msr(vcpu, msr, data, host);
}

int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&vcpu->kvm->lock);
		r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
		mutex_unlock(&vcpu->kvm->lock);
		return r;
	} else
		return kvm_hv_get_msr(vcpu, msr, pdata);
}

bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
	return kvm->arch.hyperv.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
}

static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
	bool longmode;

	longmode = is_64_bit_mode(vcpu);
	if (longmode)
		kvm_register_write(vcpu, VCPU_REGS_RAX, result);
	else {
		kvm_register_write(vcpu, VCPU_REGS_RDX, result >> 32);
		kvm_register_write(vcpu, VCPU_REGS_RAX, result & 0xffffffff);
	}
}

static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result);
	return 1;
}
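
/*
 * Hypercall input decoding, as implemented below: in long mode the input
 * value is passed in RCX and the input/output GPAs in RDX/R8; in 32-bit
 * protected mode the same three values arrive in the EDX:EAX, EBX:ECX and
 * EDI:ESI register pairs.
 */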
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
	u64 param, ingpa, outgpa, ret;
	uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
	bool fast, longmode;

	/*
	 * hypercall generates UD from non zero cpl and real mode
	 * per HYPER-V spec
	 */
	if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	longmode = is_64_bit_mode(vcpu);

	if (!longmode) {
		param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
		ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
		outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
	}
#ifdef CONFIG_X86_64
	else {
		param = kvm_register_read(vcpu, VCPU_REGS_RCX);
		ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
		outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
	}
#endif

	code = param & 0xffff;
	fast = (param >> 16) & 0x1;
	rep_cnt = (param >> 32) & 0xfff;
	rep_idx = (param >> 48) & 0xfff;

	trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);

	/* Hypercall continuation is not supported yet */
	if (rep_cnt || rep_idx) {
		res = HV_STATUS_INVALID_HYPERCALL_CODE;
		goto set_result;
	}

	switch (code) {
	case HVCALL_NOTIFY_LONG_SPIN_WAIT:
		kvm_vcpu_on_spin(vcpu);
		break;
	case HVCALL_POST_MESSAGE:
	case HVCALL_SIGNAL_EVENT:
		/* don't bother userspace if it has no way to handle it */
		if (!vcpu_to_synic(vcpu)->active) {
			res = HV_STATUS_INVALID_HYPERCALL_CODE;
			break;
		}
		vcpu->run->exit_reason = KVM_EXIT_HYPERV;
		vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
		vcpu->run->hyperv.u.hcall.input = param;
		vcpu->run->hyperv.u.hcall.params[0] = ingpa;
		vcpu->run->hyperv.u.hcall.params[1] = outgpa;
		vcpu->arch.complete_userspace_io =
				kvm_hv_hypercall_complete_userspace;
		return 0;
	default:
		res = HV_STATUS_INVALID_HYPERCALL_CODE;
		break;
	}

set_result:
	ret = res | (((u64)rep_done & 0xfff) << 32);
	kvm_hv_hypercall_set_result(vcpu, ret);
	return 1;
}