/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Amit Shah <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "x86.h"
#include "lapic.h"
#include "ioapic.h"
#include "hyperv.h"

#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/sched/cputime.h>
#include <linux/eventfd.h>

#include <asm/apicdef.h>
#include <trace/events/kvm.h>

#include "trace.h"

#define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64)

static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
{
	return atomic64_read(&synic->sint[sint]);
}

static inline int synic_get_sint_vector(u64 sint_value)
{
	if (sint_value & HV_SYNIC_SINT_MASKED)
		return -1;
	return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
}

static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
				       int vector)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
			return true;
	}
	return false;
}

static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
				      int vector)
{
	int i;
	u64 sint_value;

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		sint_value = synic_read_sint(synic, i);
		if (synic_get_sint_vector(sint_value) == vector &&
		    sint_value & HV_SYNIC_SINT_AUTO_EOI)
			return true;
	}
	return false;
}

static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
				int vector)
{
	if (vector < HV_SYNIC_FIRST_VALID_VECTOR)
		return;

	if (synic_has_vector_connected(synic, vector))
		__set_bit(vector, synic->vec_bitmap);
	else
		__clear_bit(vector, synic->vec_bitmap);

	if (synic_has_vector_auto_eoi(synic, vector))
		__set_bit(vector, synic->auto_eoi_bitmap);
	else
		__clear_bit(vector, synic->auto_eoi_bitmap);
}

static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
			  u64 data, bool host)
{
	int vector, old_vector;
	bool masked;

	vector = data & HV_SYNIC_SINT_VECTOR_MASK;
	masked = data & HV_SYNIC_SINT_MASKED;

	/*
	 * Valid vectors are 16-255, however, nested Hyper-V attempts to write
	 * default '0x10000' value on boot and this should not #GP. We need to
	 * allow zero-initing the register from host as well.
	 */
	if (vector < HV_SYNIC_FIRST_VALID_VECTOR && !host && !masked)
		return 1;
	/*
	 * Guest may configure multiple SINTs to use the same vector, so
	 * we maintain a bitmap of vectors handled by synic, and a
	 * bitmap of vectors with auto-eoi behavior. The bitmaps are
	 * updated here, and atomically queried on fast paths.
	 */
	old_vector = synic_read_sint(synic, sint) & HV_SYNIC_SINT_VECTOR_MASK;

	atomic64_set(&synic->sint[sint], data);

	synic_update_vector(synic, old_vector);

	synic_update_vector(synic, vector);

	/* Load SynIC vectors into EOI exit bitmap */
	kvm_make_request(KVM_REQ_SCAN_IOAPIC, synic_to_vcpu(synic));
	return 0;
}
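
/*
 * Illustrative note (not from the code above): with the usual
 * hyperv-tlfs.h definitions, HV_SYNIC_SINT_VECTOR_MASK covers bits 7:0,
 * HV_SYNIC_SINT_MASKED is bit 16 and HV_SYNIC_SINT_AUTO_EOI is bit 17.
 * Under that assumption a guest write of, say, data == 0x20033 to an
 * HV_X64_MSR_SINTx register selects vector 0x33, leaves the SINT
 * unmasked and requests auto-EOI, so synic_set_sint() above would set
 * bit 0x33 in both vec_bitmap and auto_eoi_bitmap.
 */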

static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
{
	struct kvm_vcpu *vcpu = NULL;
	int i;

	if (vpidx >= KVM_MAX_VCPUS)
		return NULL;

	vcpu = kvm_get_vcpu(kvm, vpidx);
	if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
		return vcpu;
	kvm_for_each_vcpu(i, vcpu, kvm)
		if (vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
			return vcpu;
	return NULL;
}

static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu_hv_synic *synic;

	vcpu = get_vcpu_by_vpidx(kvm, vpidx);
	if (!vcpu)
		return NULL;
	synic = vcpu_to_synic(vcpu);
	return (synic->active) ? synic : NULL;
}

static void synic_clear_sint_msg_pending(struct kvm_vcpu_hv_synic *synic,
					 u32 sint)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	struct page *page;
	gpa_t gpa;
	struct hv_message *msg;
	struct hv_message_page *msg_page;

	gpa = synic->msg_page & PAGE_MASK;
	page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
	if (is_error_page(page)) {
		vcpu_err(vcpu, "Hyper-V SynIC can't get msg page, gpa 0x%llx\n",
			 gpa);
		return;
	}
	msg_page = kmap_atomic(page);

	msg = &msg_page->sint_message[sint];
	msg->header.message_flags.msg_pending = 0;

	kunmap_atomic(msg_page);
	kvm_release_page_dirty(page);
	kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
}

static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_stimer *stimer;
	int gsi, idx, stimers_pending;

	trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);

	if (synic->msg_page & HV_SYNIC_SIMP_ENABLE)
		synic_clear_sint_msg_pending(synic, sint);

	/* Try to deliver pending Hyper-V SynIC timers messages */
	stimers_pending = 0;
	for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
		stimer = &hv_vcpu->stimer[idx];
		if (stimer->msg_pending &&
		    (stimer->config & HV_STIMER_ENABLE) &&
		    HV_STIMER_SINT(stimer->config) == sint) {
			set_bit(stimer->index,
				hv_vcpu->stimer_pending_bitmap);
			stimers_pending++;
		}
	}
	if (stimers_pending)
		kvm_make_request(KVM_REQ_HV_STIMER, vcpu);

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = atomic_read(&synic->sint_to_gsi[sint]);
	if (gsi != -1)
		kvm_notify_acked_gsi(kvm, gsi);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;

	hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
	hv_vcpu->exit.u.synic.msr = msr;
	hv_vcpu->exit.u.synic.control = synic->control;
	hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
	hv_vcpu->exit.u.synic.msg_page = synic->msg_page;

	kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}

static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
			 u32 msr, u64 data, bool host)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	int ret;

	if (!synic->active && !host)
		return 1;

	trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);

	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		synic->control = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SVERSION:
		if (!host) {
			ret = 1;
			break;
		}
		synic->version = data;
		break;
	case HV_X64_MSR_SIEFP:
		if ((data & HV_SYNIC_SIEFP_ENABLE) && !host &&
		    !synic->dont_zero_synic_pages)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->evt_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SIMP:
		if ((data & HV_SYNIC_SIMP_ENABLE) && !host &&
		    !synic->dont_zero_synic_pages)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->msg_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_EOM: {
		int i;

		for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
			kvm_hv_notify_acked_sint(vcpu, i);
		break;
	}
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}

static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
			 bool host)
{
	int ret;

	if (!synic->active && !host)
		return 1;

	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		*pdata = synic->control;
		break;
	case HV_X64_MSR_SVERSION:
		*pdata = synic->version;
		break;
	case HV_X64_MSR_SIEFP:
		*pdata = synic->evt_page;
		break;
	case HV_X64_MSR_SIMP:
		*pdata = synic->msg_page;
		break;
	case HV_X64_MSR_EOM:
		*pdata = 0;
		break;
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		*pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}

static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	struct kvm_lapic_irq irq;
	int ret, vector;

	if (sint >= ARRAY_SIZE(synic->sint))
		return -EINVAL;

	vector = synic_get_sint_vector(synic_read_sint(synic, sint));
	if (vector < 0)
		return -ENOENT;

	memset(&irq, 0, sizeof(irq));
	irq.shorthand = APIC_DEST_SELF;
	irq.dest_mode = APIC_DEST_PHYSICAL;
	irq.delivery_mode = APIC_DM_FIXED;
	irq.vector = vector;
	irq.level = 1;

	ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL);
	trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
	return ret;
}

int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
{
	struct kvm_vcpu_hv_synic *synic;

	synic = synic_get(kvm, vpidx);
	if (!synic)
		return -EINVAL;

	return synic_set_irq(synic, sint);
}

void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
	int i;

	trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
			kvm_hv_notify_acked_sint(vcpu, i);
}

static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
{
	struct kvm_vcpu_hv_synic *synic;

	synic = synic_get(kvm, vpidx);
	if (!synic)
		return -EINVAL;

	if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
		return -EINVAL;

	atomic_set(&synic->sint_to_gsi[sint], gsi);
	return 0;
}

void kvm_hv_irq_routing_update(struct kvm *kvm)
{
	struct kvm_irq_routing_table *irq_rt;
	struct kvm_kernel_irq_routing_entry *e;
	u32 gsi;

	irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
					lockdep_is_held(&kvm->irq_lock));

	for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
		hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
			if (e->type == KVM_IRQ_ROUTING_HV_SINT)
				kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
						    e->hv_sint.sint, gsi);
		}
	}
}

static void synic_init(struct kvm_vcpu_hv_synic *synic)
{
	int i;

	memset(synic, 0, sizeof(*synic));
	synic->version = HV_SYNIC_VERSION_1;
	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
		atomic_set(&synic->sint_to_gsi[i], -1);
	}
}

static u64 get_time_ref_counter(struct kvm *kvm)
{
	struct kvm_hv *hv = &kvm->arch.hyperv;
	struct kvm_vcpu *vcpu;
	u64 tsc;

	/*
	 * The guest has not set up the TSC page or the clock isn't
	 * stable, fall back to get_kvmclock_ns.
	 */
	if (!hv->tsc_ref.tsc_sequence)
		return div_u64(get_kvmclock_ns(kvm), 100);

	vcpu = kvm_get_vcpu(kvm, 0);
	tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
		+ hv->tsc_ref.tsc_offset;
}
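
/*
 * For illustration: the reference counter is defined in 100ns units, so
 * on the fallback path 1 ms of kvmclock time (1,000,000 ns) reads back
 * as 10,000 ticks.  On the TSC-page path the same 100ns granularity
 * comes out of the ticks * scale / 2^64 + offset computation above.
 */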

static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
				bool vcpu_kick)
{
	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

	set_bit(stimer->index,
		vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
	kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
	if (vcpu_kick)
		kvm_vcpu_kick(vcpu);
}

static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);

	trace_kvm_hv_stimer_cleanup(stimer_to_vcpu(stimer)->vcpu_id,
				    stimer->index);

	hrtimer_cancel(&stimer->timer);
	clear_bit(stimer->index,
		  vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap);
	stimer->msg_pending = false;
	stimer->exp_time = 0;
}

static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
{
	struct kvm_vcpu_hv_stimer *stimer;

	stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
	trace_kvm_hv_stimer_callback(stimer_to_vcpu(stimer)->vcpu_id,
				     stimer->index);
	stimer_mark_pending(stimer, true);

	return HRTIMER_NORESTART;
}

/*
 * stimer_start() assumptions:
 * a) stimer->count is not equal to 0
 * b) stimer->config has HV_STIMER_ENABLE flag
 */
static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
{
	u64 time_now;
	ktime_t ktime_now;

	time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm);
	ktime_now = ktime_get();

	if (stimer->config & HV_STIMER_PERIODIC) {
		if (stimer->exp_time) {
			if (time_now >= stimer->exp_time) {
				u64 remainder;

				div64_u64_rem(time_now - stimer->exp_time,
					      stimer->count, &remainder);
				stimer->exp_time =
					time_now + (stimer->count - remainder);
			}
		} else
			stimer->exp_time = time_now + stimer->count;

		trace_kvm_hv_stimer_start_periodic(
					stimer_to_vcpu(stimer)->vcpu_id,
					stimer->index,
					time_now, stimer->exp_time);

		hrtimer_start(&stimer->timer,
			      ktime_add_ns(ktime_now,
					   100 * (stimer->exp_time - time_now)),
			      HRTIMER_MODE_ABS);
		return 0;
	}
	stimer->exp_time = stimer->count;
	if (time_now >= stimer->count) {
		/*
		 * Expire timer according to Hypervisor Top-Level Functional
		 * specification v4(15.3.1):
		 * "If a one shot is enabled and the specified count is in
		 * the past, it will expire immediately."
		 */
		stimer_mark_pending(stimer, false);
		return 0;
	}

	trace_kvm_hv_stimer_start_one_shot(stimer_to_vcpu(stimer)->vcpu_id,
					   stimer->index,
					   time_now, stimer->count);

	hrtimer_start(&stimer->timer,
		      ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
		      HRTIMER_MODE_ABS);
	return 0;
}
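
/*
 * Worked example for the periodic re-arm above (illustrative numbers,
 * all in 100ns reference-counter ticks): with count == 100000 (10 ms)
 * and a missed exp_time of 1000000, a time_now of 1230000 gives
 * remainder == 30000, so the timer is pushed to the next period
 * boundary, exp_time == 1300000, and the hrtimer is armed
 * 100 * 70000 ns == 7 ms from now.
 */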

static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
			     bool host)
{
	trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id,
				       stimer->index, config, host);

	stimer_cleanup(stimer);
	if ((stimer->config & HV_STIMER_ENABLE) && HV_STIMER_SINT(config) == 0)
		config &= ~HV_STIMER_ENABLE;
	stimer->config = config;
	stimer_mark_pending(stimer, false);
	return 0;
}

static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
			    bool host)
{
	trace_kvm_hv_stimer_set_count(stimer_to_vcpu(stimer)->vcpu_id,
				      stimer->index, count, host);

	stimer_cleanup(stimer);
	stimer->count = count;
	if (stimer->count == 0)
		stimer->config &= ~HV_STIMER_ENABLE;
	else if (stimer->config & HV_STIMER_AUTOENABLE)
		stimer->config |= HV_STIMER_ENABLE;
	stimer_mark_pending(stimer, false);
	return 0;
}

static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
{
	*pconfig = stimer->config;
	return 0;
}

static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
{
	*pcount = stimer->count;
	return 0;
}

static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
			     struct hv_message *src_msg)
{
	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
	struct page *page;
	gpa_t gpa;
	struct hv_message *dst_msg;
	int r;
	struct hv_message_page *msg_page;

	if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
		return -ENOENT;

	gpa = synic->msg_page & PAGE_MASK;
	page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
	if (is_error_page(page))
		return -EFAULT;

	msg_page = kmap_atomic(page);
	dst_msg = &msg_page->sint_message[sint];
	if (sync_cmpxchg(&dst_msg->header.message_type, HVMSG_NONE,
			 src_msg->header.message_type) != HVMSG_NONE) {
		dst_msg->header.message_flags.msg_pending = 1;
		r = -EAGAIN;
	} else {
		memcpy(&dst_msg->u.payload, &src_msg->u.payload,
		       src_msg->header.payload_size);
		dst_msg->header.message_type = src_msg->header.message_type;
		dst_msg->header.payload_size = src_msg->header.payload_size;
		r = synic_set_irq(synic, sint);
		if (r >= 1)
			r = 0;
		else if (r == 0)
			r = -EFAULT;
	}
	kunmap_atomic(msg_page);
	kvm_release_page_dirty(page);
	kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
	return r;
}

static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
	struct hv_message *msg = &stimer->msg;
	struct hv_timer_message_payload *payload =
			(struct hv_timer_message_payload *)&msg->u.payload;

	payload->expiration_time = stimer->exp_time;
	payload->delivery_time = get_time_ref_counter(vcpu->kvm);
	return synic_deliver_msg(vcpu_to_synic(vcpu),
				 HV_STIMER_SINT(stimer->config), msg);
}

static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
{
	int r;

	stimer->msg_pending = true;
	r = stimer_send_msg(stimer);
	trace_kvm_hv_stimer_expiration(stimer_to_vcpu(stimer)->vcpu_id,
				       stimer->index, r);
	if (!r) {
		stimer->msg_pending = false;
		if (!(stimer->config & HV_STIMER_PERIODIC))
			stimer->config &= ~HV_STIMER_ENABLE;
	}
}
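
/*
 * Delivery/retry flow, as implemented above (summary added for clarity):
 * synic_deliver_msg() claims the per-SINT slot in the message page with
 * a cmpxchg against HVMSG_NONE.  If the slot is still occupied it only
 * sets msg_pending and returns -EAGAIN, which leaves stimer->msg_pending
 * set.  When the guest later writes HV_X64_MSR_EOM,
 * kvm_hv_notify_acked_sint() re-queues such timers via
 * stimer_pending_bitmap and KVM_REQ_HV_STIMER, so the message is retried
 * the next time kvm_hv_process_stimers() runs.
 */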

void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_stimer *stimer;
	u64 time_now, exp_time;
	int i;

	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
			stimer = &hv_vcpu->stimer[i];
			if (stimer->config & HV_STIMER_ENABLE) {
				exp_time = stimer->exp_time;

				if (exp_time) {
					time_now =
						get_time_ref_counter(vcpu->kvm);
					if (time_now >= exp_time)
						stimer_expiration(stimer);
				}

				if ((stimer->config & HV_STIMER_ENABLE) &&
				    stimer->count) {
					if (!stimer->msg_pending)
						stimer_start(stimer);
				} else
					stimer_cleanup(stimer);
			}
		}
}

void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
	int i;

	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		stimer_cleanup(&hv_vcpu->stimer[i]);
}

bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
		return false;
	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}
EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);

bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
			    struct hv_vp_assist_page *assist_page)
{
	if (!kvm_hv_assist_page_enabled(vcpu))
		return false;
	return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
				      assist_page, sizeof(*assist_page));
}
EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);

static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
{
	struct hv_message *msg = &stimer->msg;
	struct hv_timer_message_payload *payload =
			(struct hv_timer_message_payload *)&msg->u.payload;

	memset(&msg->header, 0, sizeof(msg->header));
	msg->header.message_type = HVMSG_TIMER_EXPIRED;
	msg->header.payload_size = sizeof(*payload);

	payload->timer_index = stimer->index;
	payload->expiration_time = 0;
	payload->delivery_time = 0;
}

static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
{
	memset(stimer, 0, sizeof(*stimer));
	stimer->index = timer_index;
	hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	stimer->timer.function = stimer_timer_callback;
	stimer_prepare_msg(stimer);
}

void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
	int i;

	synic_init(&hv_vcpu->synic);

	bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		stimer_init(&hv_vcpu->stimer[i], i);
}

void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);

	hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);
}

int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
{
	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);

	/*
	 * Hyper-V SynIC auto EOI SINT's are
	 * not compatible with APICV, so deactivate APICV
	 */
	kvm_vcpu_deactivate_apicv(vcpu);
	synic->active = true;
	synic->dont_zero_synic_pages = dont_zero_synic_pages;
	return 0;
}

static bool kvm_hv_msr_partition_wide(u32 msr)
{
	bool r = false;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
	case HV_X64_MSR_REFERENCE_TSC:
	case HV_X64_MSR_TIME_REF_COUNT:
	case HV_X64_MSR_CRASH_CTL:
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_RESET:
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_STATUS:
		r = true;
		break;
	}

	return r;
}

static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
				     u32 index, u64 *pdata)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
		return -EINVAL;

	*pdata = hv->hv_crash_param[index];
	return 0;
}

static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	*pdata = hv->hv_crash_ctl;
	return 0;
}

static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	if (host)
		hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY;

	if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {

		vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
			  hv->hv_crash_param[0],
			  hv->hv_crash_param[1],
			  hv->hv_crash_param[2],
			  hv->hv_crash_param[3],
			  hv->hv_crash_param[4]);

		/* Send notification about crash to user space */
		kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
	}

	return 0;
}

static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
				     u32 index, u64 data)
{
	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;

	if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
		return -EINVAL;

	hv->hv_crash_param[index] = data;
	return 0;
}

/*
 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
 * between them is possible:
 *
 * kvmclock formula:
 *    nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *
 * Hyper-V formula:
 *    nsec/100 = ticks * scale / 2^64 + offset
 *
 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
 * By dividing the kvmclock formula by 100 and equating what's left we get:
 *    ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *    scale / 2^64 = tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *    scale = tsc_to_system_mul * 2^(32+tsc_shift) / 100
 *
 * Now expand the kvmclock formula and divide by 100:
 *    nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
 *           - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *    nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               + system_time / 100
 *
 * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
 *    nsec/100 = ticks * scale / 2^64
 *               - tsc_timestamp * scale / 2^64
 *               + system_time / 100
 *
 * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
 *    offset = system_time / 100 - tsc_timestamp * scale / 2^64
 *
 * These two equivalencies are implemented in this function.
 */
static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
					HV_REFERENCE_TSC_PAGE *tsc_ref)
{
	u64 max_mul;

	if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
		return false;

	/*
	 * check if scale would overflow, if so we use the time ref counter
	 *    tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64
	 *    tsc_to_system_mul / 100 >= 2^(32-tsc_shift)
	 *    tsc_to_system_mul >= 100 * 2^(32-tsc_shift)
	 */
	max_mul = 100ull << (32 - hv_clock->tsc_shift);
	if (hv_clock->tsc_to_system_mul >= max_mul)
		return false;

	/*
	 * Otherwise compute the scale and offset according to the formulas
	 * derived above.
	 */
	tsc_ref->tsc_scale =
		mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
				hv_clock->tsc_to_system_mul,
				100);

	tsc_ref->tsc_offset = hv_clock->system_time;
	do_div(tsc_ref->tsc_offset, 100);
	tsc_ref->tsc_offset -=
		mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
	return true;
}
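
/*
 * Worked example (illustrative numbers only): for a 1 GHz guest TSC,
 * kvmclock can encode 1 ns per tick as tsc_shift == 1 and
 * tsc_to_system_mul == 2^31.  That passes the overflow check
 * (2^31 < 100 * 2^31) and gives tsc_scale == 2^31 * 2^33 / 100 ==
 * 2^64 / 100, so ticks * scale / 2^64 == ticks / 100, i.e. the
 * nanosecond-rate TSC is correctly converted into 100ns
 * reference-counter units, with
 * offset == system_time / 100 - tsc_timestamp / 100.
 */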

void kvm_hv_setup_tsc_page(struct kvm *kvm,
			   struct pvclock_vcpu_time_info *hv_clock)
{
	struct kvm_hv *hv = &kvm->arch.hyperv;
	u32 tsc_seq;
	u64 gfn;

	BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
	BUILD_BUG_ON(offsetof(HV_REFERENCE_TSC_PAGE, tsc_sequence) != 0);

	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
		return;

	mutex_lock(&kvm->arch.hyperv.hv_lock);
	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
		goto out_unlock;

	gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
	/*
	 * Because the TSC parameters only vary when there is a
	 * change in the master clock, do not bother with caching.
	 */
	if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
				    &tsc_seq, sizeof(tsc_seq))))
		goto out_unlock;

	/*
	 * While we're computing and writing the parameters, force the
	 * guest to use the time reference count MSR.
	 */
	hv->tsc_ref.tsc_sequence = 0;
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
		goto out_unlock;

	if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
		goto out_unlock;

	/* Ensure sequence is zero before writing the rest of the struct. */
	smp_wmb();
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
		goto out_unlock;

	/*
	 * Now switch to the TSC page mechanism by writing the sequence.
	 */
	tsc_seq++;
	if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
		tsc_seq = 1;

	/* Write the struct entirely before the non-zero sequence. */
	smp_wmb();

	hv->tsc_ref.tsc_sequence = tsc_seq;
	kvm_write_guest(kvm, gfn_to_gpa(gfn),
			&hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
out_unlock:
	mutex_unlock(&kvm->arch.hyperv.hv_lock);
}
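
/*
 * For reference, the sequence handshake above pairs with a guest-side
 * read loop along these lines (sketch only, not part of KVM):
 *
 *	do {
 *		seq = READ_ONCE(tsc_page->tsc_sequence);
 *		if (!seq)
 *			break;			/- fall back to TIME_REF_COUNT -/
 *		smp_rmb();
 *		time = mul_u64_u64_shr(rdtsc(), tsc_page->tsc_scale, 64) +
 *		       tsc_page->tsc_offset;
 *		smp_rmb();
 *	} while (READ_ONCE(tsc_page->tsc_sequence) != seq);
 *
 * Zeroing tsc_sequence first therefore parks readers on the
 * TIME_REF_COUNT MSR path while scale/offset are being rewritten.
 */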

static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
			     bool host)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = &kvm->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		hv->hv_guest_os_id = data;
		/* setting guest os id to zero disables hypercall page */
		if (!hv->hv_guest_os_id)
			hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
		break;
	case HV_X64_MSR_HYPERCALL: {
		u64 gfn;
		unsigned long addr;
		u8 instructions[4];

		/* if guest os id is not set hypercall should remain disabled */
		if (!hv->hv_guest_os_id)
			break;
		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
			hv->hv_hypercall = data;
			break;
		}
		gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
		addr = gfn_to_hva(kvm, gfn);
		if (kvm_is_error_hva(addr))
			return 1;
		kvm_x86_ops->patch_hypercall(vcpu, instructions);
		((unsigned char *)instructions)[3] = 0xc3; /* ret */
		if (__copy_to_user((void __user *)addr, instructions, 4))
			return 1;
		hv->hv_hypercall = data;
		mark_page_dirty(kvm, gfn);
		break;
	}
	case HV_X64_MSR_REFERENCE_TSC:
		hv->hv_tsc_page = data;
		if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)
			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
		break;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_set_crash_data(vcpu,
						 msr - HV_X64_MSR_CRASH_P0,
						 data);
	case HV_X64_MSR_CRASH_CTL:
		return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
	case HV_X64_MSR_RESET:
		if (data == 1) {
			vcpu_debug(vcpu, "hyper-v reset requested\n");
			kvm_make_request(KVM_REQ_HV_RESET, vcpu);
		}
		break;
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
		hv->hv_reenlightenment_control = data;
		break;
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
		hv->hv_tsc_emulation_control = data;
		break;
	case HV_X64_MSR_TSC_EMULATION_STATUS:
		hv->hv_tsc_emulation_status = data;
		break;
	case HV_X64_MSR_TIME_REF_COUNT:
		/* read-only, but still ignore it if host-initiated */
		if (!host)
			return 1;
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
			    msr, data);
		return 1;
	}
	return 0;
}

/* Calculate cpu time spent by current task in 100ns units */
static u64 current_task_runtime_100ns(void)
{
	u64 utime, stime;

	task_cputime_adjusted(current, &utime, &stime);

	return div_u64(utime + stime, 100);
}

static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_VP_INDEX: {
		struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
		int vcpu_idx = kvm_vcpu_get_idx(vcpu);
		u32 new_vp_index = (u32)data;

		if (!host || new_vp_index >= KVM_MAX_VCPUS)
			return 1;

		if (new_vp_index == hv_vcpu->vp_index)
			return 0;

		/*
		 * The VP index is initialized to vcpu_idx by
		 * kvm_hv_vcpu_postcreate so they initially match.  Now the
		 * VP index is changing, adjust num_mismatched_vp_indexes if
		 * it now matches or no longer matches vcpu_idx.
		 */
		if (hv_vcpu->vp_index == vcpu_idx)
			atomic_inc(&hv->num_mismatched_vp_indexes);
		else if (new_vp_index == vcpu_idx)
			atomic_dec(&hv->num_mismatched_vp_indexes);

		hv_vcpu->vp_index = new_vp_index;
		break;
	}
	case HV_X64_MSR_VP_ASSIST_PAGE: {
		u64 gfn;
		unsigned long addr;

		if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
			hv_vcpu->hv_vapic = data;
			if (kvm_lapic_enable_pv_eoi(vcpu, 0, 0))
				return 1;
			break;
		}
		gfn = data >> HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT;
		addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
		if (kvm_is_error_hva(addr))
			return 1;

		/*
		 * Clear apic_assist portion of struct hv_vp_assist_page
		 * only, there can be valuable data in the rest which needs
		 * to be preserved e.g. on migration.
		 */
		if (__clear_user((void __user *)addr, sizeof(u32)))
			return 1;
		hv_vcpu->hv_vapic = data;
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
		if (kvm_lapic_enable_pv_eoi(vcpu,
					    gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
					    sizeof(struct hv_vp_assist_page)))
			return 1;
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
	case HV_X64_MSR_VP_RUNTIME:
		if (!host)
			return 1;
		hv_vcpu->runtime_offset = data - current_task_runtime_100ns();
		break;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_EOM:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return synic_set_msr(vcpu_to_synic(vcpu), msr, data, host);
	case HV_X64_MSR_STIMER0_CONFIG:
	case HV_X64_MSR_STIMER1_CONFIG:
	case HV_X64_MSR_STIMER2_CONFIG:
	case HV_X64_MSR_STIMER3_CONFIG: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

		return stimer_set_config(vcpu_to_stimer(vcpu, timer_index),
					 data, host);
	}
	case HV_X64_MSR_STIMER0_COUNT:
	case HV_X64_MSR_STIMER1_COUNT:
	case HV_X64_MSR_STIMER2_COUNT:
	case HV_X64_MSR_STIMER3_COUNT: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

		return stimer_set_count(vcpu_to_stimer(vcpu, timer_index),
					data, host);
	}
	case HV_X64_MSR_TSC_FREQUENCY:
	case HV_X64_MSR_APIC_FREQUENCY:
		/* read-only, but still ignore it if host-initiated */
		if (!host)
			return 1;
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
			    msr, data);
		return 1;
	}

	return 0;
}

static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = &kvm->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		data = hv->hv_guest_os_id;
		break;
	case HV_X64_MSR_HYPERCALL:
		data = hv->hv_hypercall;
		break;
	case HV_X64_MSR_TIME_REF_COUNT:
		data = get_time_ref_counter(kvm);
		break;
	case HV_X64_MSR_REFERENCE_TSC:
		data = hv->hv_tsc_page;
		break;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_get_crash_data(vcpu,
						 msr - HV_X64_MSR_CRASH_P0,
						 pdata);
	case HV_X64_MSR_CRASH_CTL:
		return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
	case HV_X64_MSR_RESET:
		data = 0;
		break;
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
		data = hv->hv_reenlightenment_control;
		break;
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
		data = hv->hv_tsc_emulation_control;
		break;
	case HV_X64_MSR_TSC_EMULATION_STATUS:
		data = hv->hv_tsc_emulation_status;
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}

	*pdata = data;
	return 0;
}

static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
			  bool host)
{
	u64 data = 0;
	struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;

	switch (msr) {
	case HV_X64_MSR_VP_INDEX:
		data = hv_vcpu->vp_index;
		break;
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
	case HV_X64_MSR_VP_ASSIST_PAGE:
		data = hv_vcpu->hv_vapic;
		break;
	case HV_X64_MSR_VP_RUNTIME:
		data = current_task_runtime_100ns() + hv_vcpu->runtime_offset;
		break;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_EOM:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata, host);
	case HV_X64_MSR_STIMER0_CONFIG:
	case HV_X64_MSR_STIMER1_CONFIG:
	case HV_X64_MSR_STIMER2_CONFIG:
	case HV_X64_MSR_STIMER3_CONFIG: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

		return stimer_get_config(vcpu_to_stimer(vcpu, timer_index),
					 pdata);
	}
	case HV_X64_MSR_STIMER0_COUNT:
	case HV_X64_MSR_STIMER1_COUNT:
	case HV_X64_MSR_STIMER2_COUNT:
	case HV_X64_MSR_STIMER3_COUNT: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

		return stimer_get_count(vcpu_to_stimer(vcpu, timer_index),
					pdata);
	}
	case HV_X64_MSR_TSC_FREQUENCY:
		data = (u64)vcpu->arch.virtual_tsc_khz * 1000;
		break;
	case HV_X64_MSR_APIC_FREQUENCY:
		data = APIC_BUS_FREQUENCY;
		break;
	default:
		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}

int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
		r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
		mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
		return r;
	} else
		return kvm_hv_set_msr(vcpu, msr, data, host);
}

int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{
	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
		r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
		mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
		return r;
	} else
		return kvm_hv_get_msr(vcpu, msr, pdata, host);
}

static __always_inline unsigned long *sparse_set_to_vcpu_mask(
	struct kvm *kvm, u64 *sparse_banks, u64 valid_bank_mask,
	u64 *vp_bitmap, unsigned long *vcpu_bitmap)
{
	struct kvm_hv *hv = &kvm->arch.hyperv;
	struct kvm_vcpu *vcpu;
	int i, bank, sbank = 0;

	memset(vp_bitmap, 0,
	       KVM_HV_MAX_SPARSE_VCPU_SET_BITS * sizeof(*vp_bitmap));
	for_each_set_bit(bank, (unsigned long *)&valid_bank_mask,
			 KVM_HV_MAX_SPARSE_VCPU_SET_BITS)
		vp_bitmap[bank] = sparse_banks[sbank++];

	if (likely(!atomic_read(&hv->num_mismatched_vp_indexes))) {
		/* for all vcpus vp_index == vcpu_idx */
		return (unsigned long *)vp_bitmap;
	}

	bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_bit(vcpu_to_hv_vcpu(vcpu)->vp_index,
			     (unsigned long *)vp_bitmap))
			__set_bit(i, vcpu_bitmap);
	}
	return vcpu_bitmap;
}

static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
			    u16 rep_cnt, bool ex)
{
	struct kvm *kvm = current_vcpu->kvm;
	struct kvm_vcpu_hv *hv_vcpu = &current_vcpu->arch.hyperv;
	struct hv_tlb_flush_ex flush_ex;
	struct hv_tlb_flush flush;
	u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
	DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
	unsigned long *vcpu_mask;
	u64 valid_bank_mask;
	u64 sparse_banks[64];
	int sparse_banks_len;
	bool all_cpus;

	if (!ex) {
		if (unlikely(kvm_read_guest(kvm, ingpa, &flush, sizeof(flush))))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;

		trace_kvm_hv_flush_tlb(flush.processor_mask,
				       flush.address_space, flush.flags);

		valid_bank_mask = BIT_ULL(0);
		sparse_banks[0] = flush.processor_mask;
		all_cpus = flush.flags & HV_FLUSH_ALL_PROCESSORS;
	} else {
		if (unlikely(kvm_read_guest(kvm, ingpa, &flush_ex,
					    sizeof(flush_ex))))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;

		trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
					  flush_ex.hv_vp_set.format,
					  flush_ex.address_space,
					  flush_ex.flags);

		valid_bank_mask = flush_ex.hv_vp_set.valid_bank_mask;
		all_cpus = flush_ex.hv_vp_set.format !=
			HV_GENERIC_SET_SPARSE_4K;

		sparse_banks_len =
			bitmap_weight((unsigned long *)&valid_bank_mask, 64) *
			sizeof(sparse_banks[0]);

		if (!sparse_banks_len && !all_cpus)
			goto ret_success;

		if (!all_cpus &&
		    kvm_read_guest(kvm,
				   ingpa + offsetof(struct hv_tlb_flush_ex,
						    hv_vp_set.bank_contents),
				   sparse_banks,
				   sparse_banks_len))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;
	}

	cpumask_clear(&hv_vcpu->tlb_flush);

	vcpu_mask = all_cpus ? NULL :
		sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
					vp_bitmap, vcpu_bitmap);

	/*
	 * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
	 * analyze it here, flush TLB regardless of the specified address space.
	 */
	kvm_make_vcpus_request_mask(kvm,
				    KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP,
				    vcpu_mask, &hv_vcpu->tlb_flush);

ret_success:
	/* We always do full TLB flush, set rep_done = rep_cnt. */
	return (u64)HV_STATUS_SUCCESS |
		((u64)rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
}
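
/*
 * Example of the sparse VP-set encoding consumed above (illustrative):
 * valid_bank_mask == 0x5 selects banks 0 and 2, and bank_contents[] ==
 * { 0x3, 0x100 } then names VP_INDEX 0 and 1 (bank 0) plus VP_INDEX 136
 * (bank 2: 2 * 64 + 8).  sparse_set_to_vcpu_mask() scatters those two
 * u64 words into vp_bitmap[0] and vp_bitmap[2], and only falls back to
 * the per-vCPU translation loop when some VP_INDEX differs from its
 * vcpu_idx.
 */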

static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector,
				 unsigned long *vcpu_bitmap)
{
	struct kvm_lapic_irq irq = {
		.delivery_mode = APIC_DM_FIXED,
		.vector = vector
	};
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu_bitmap && !test_bit(i, vcpu_bitmap))
			continue;

		/* We fail only when APIC is disabled */
		kvm_apic_set_irq(vcpu, &irq, NULL);
	}
}

static u64 kvm_hv_send_ipi(struct kvm_vcpu *current_vcpu, u64 ingpa, u64 outgpa,
			   bool ex, bool fast)
{
	struct kvm *kvm = current_vcpu->kvm;
	struct hv_send_ipi_ex send_ipi_ex;
	struct hv_send_ipi send_ipi;
	u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
	DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
	unsigned long *vcpu_mask;
	unsigned long valid_bank_mask;
	u64 sparse_banks[64];
	int sparse_banks_len;
	u32 vector;
	bool all_cpus;

	if (!ex) {
		if (!fast) {
			if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi,
						    sizeof(send_ipi))))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
			sparse_banks[0] = send_ipi.cpu_mask;
			vector = send_ipi.vector;
		} else {
			/* 'reserved' part of hv_send_ipi should be 0 */
			if (unlikely(ingpa >> 32 != 0))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
			sparse_banks[0] = outgpa;
			vector = (u32)ingpa;
		}
		all_cpus = false;
		valid_bank_mask = BIT_ULL(0);

		trace_kvm_hv_send_ipi(vector, sparse_banks[0]);
	} else {
		if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi_ex,
					    sizeof(send_ipi_ex))))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;

		trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector,
					 send_ipi_ex.vp_set.format,
					 send_ipi_ex.vp_set.valid_bank_mask);

		vector = send_ipi_ex.vector;
		valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
		sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) *
			sizeof(sparse_banks[0]);

		all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;

		if (!sparse_banks_len)
			goto ret_success;

		if (!all_cpus &&
		    kvm_read_guest(kvm,
				   ingpa + offsetof(struct hv_send_ipi_ex,
						    vp_set.bank_contents),
				   sparse_banks,
				   sparse_banks_len))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;
	}

	if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
		return HV_STATUS_INVALID_HYPERCALL_INPUT;

	vcpu_mask = all_cpus ? NULL :
		sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
					vp_bitmap, vcpu_bitmap);

	kvm_send_ipi_to_many(kvm, vector, vcpu_mask);

ret_success:
	return HV_STATUS_SUCCESS;
}
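
/*
 * Fast HVCALL_SEND_IPI example, matching the !ex && fast path above
 * (illustrative): the guest passes the IPI parameters directly in the
 * hypercall registers, so ingpa == 0x000000000000002f requests
 * vector 0x2f (the upper 32 bits must be the zero 'reserved' field) and
 * outgpa == 0x0000000000000005 is the processor mask naming VPs 0 and 2.
 */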

bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
	return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE;
}

static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
	bool longmode;

	longmode = is_64_bit_mode(vcpu);
	if (longmode)
		kvm_register_write(vcpu, VCPU_REGS_RAX, result);
	else {
		kvm_register_write(vcpu, VCPU_REGS_RDX, result >> 32);
		kvm_register_write(vcpu, VCPU_REGS_RAX, result & 0xffffffff);
	}
}

static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
{
	kvm_hv_hypercall_set_result(vcpu, result);
	++vcpu->stat.hypercalls;
	return kvm_skip_emulated_instruction(vcpu);
}

static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
	return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
}

static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
{
	struct eventfd_ctx *eventfd;

	if (unlikely(!fast)) {
		int ret;
		gpa_t gpa = param;

		if ((gpa & (__alignof__(param) - 1)) ||
		    offset_in_page(gpa) + sizeof(param) > PAGE_SIZE)
			return HV_STATUS_INVALID_ALIGNMENT;

		ret = kvm_vcpu_read_guest(vcpu, gpa, &param, sizeof(param));
		if (ret < 0)
			return HV_STATUS_INVALID_ALIGNMENT;
	}

	/*
	 * Per spec, bits 32-47 contain the extra "flag number".  However, we
	 * have no use for it, and in all known usecases it is zero, so just
	 * report lookup failure if it isn't.
	 */
	if (param & 0xffff00000000ULL)
		return HV_STATUS_INVALID_PORT_ID;
	/* remaining bits are reserved-zero */
	if (param & ~KVM_HYPERV_CONN_ID_MASK)
		return HV_STATUS_INVALID_HYPERCALL_INPUT;

	/* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */
	rcu_read_lock();
	eventfd = idr_find(&vcpu->kvm->arch.hyperv.conn_to_evt, param);
	rcu_read_unlock();
	if (!eventfd)
		return HV_STATUS_INVALID_PORT_ID;

	eventfd_signal(eventfd, 1);
	return HV_STATUS_SUCCESS;
}
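
/*
 * Parameter decoding example for the checks above (illustrative):
 * a fast HVCALL_SIGNAL_EVENT with param == 0x000000000000002a names
 * connection ID 42 with flag number 0 and is looked up in conn_to_evt,
 * whereas param == 0x0000000100000011 carries a non-zero flag number in
 * bits 32-47 and is rejected with HV_STATUS_INVALID_PORT_ID.
 */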

int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
	u64 param, ingpa, outgpa, ret = HV_STATUS_SUCCESS;
	uint16_t code, rep_idx, rep_cnt;
	bool fast, longmode, rep;

	/*
	 * hypercall generates UD from non zero cpl and real mode
	 * per HYPER-V spec
	 */
	if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

	longmode = is_64_bit_mode(vcpu);

	if (!longmode) {
		param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
		ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
		outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
			(kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
	}
#ifdef CONFIG_X86_64
	else {
		param = kvm_register_read(vcpu, VCPU_REGS_RCX);
		ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
		outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
	}
#endif

	code = param & 0xffff;
	fast = !!(param & HV_HYPERCALL_FAST_BIT);
	rep_cnt = (param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff;
	rep_idx = (param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
	rep = !!(rep_cnt || rep_idx);

	trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);

	switch (code) {
	case HVCALL_NOTIFY_LONG_SPIN_WAIT:
		if (unlikely(rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		kvm_vcpu_on_spin(vcpu, true);
		break;
	case HVCALL_SIGNAL_EVENT:
		if (unlikely(rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hvcall_signal_event(vcpu, fast, ingpa);
		if (ret != HV_STATUS_INVALID_PORT_ID)
			break;
		/* maybe userspace knows this conn_id: fall through */
	case HVCALL_POST_MESSAGE:
		/* don't bother userspace if it has no way to handle it */
		if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		vcpu->run->exit_reason = KVM_EXIT_HYPERV;
		vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
		vcpu->run->hyperv.u.hcall.input = param;
		vcpu->run->hyperv.u.hcall.params[0] = ingpa;
		vcpu->run->hyperv.u.hcall.params[1] = outgpa;
		vcpu->arch.complete_userspace_io =
				kvm_hv_hypercall_complete_userspace;
		return 0;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
		if (unlikely(fast || !rep_cnt || rep_idx)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false);
		break;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
		if (unlikely(fast || rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false);
		break;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
		if (unlikely(fast || !rep_cnt || rep_idx)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
		break;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
		if (unlikely(fast || rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
		break;
	case HVCALL_SEND_IPI:
		if (unlikely(rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, false, fast);
		break;
	case HVCALL_SEND_IPI_EX:
		if (unlikely(fast || rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, true, false);
		break;
	default:
		ret = HV_STATUS_INVALID_HYPERCALL_CODE;
		break;
	}

	return kvm_hv_hypercall_complete(vcpu, ret);
}
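
/*
 * Hypercall input decoding example for the field extraction above
 * (illustrative, assuming HV_HYPERCALL_FAST_BIT is bit 16 and the
 * rep-count/rep-start offsets are 32 and 48 as in hyperv-tlfs.h):
 * param == 0x0001000100000014 decodes to call code 0x14, fast == false,
 * rep_cnt == 1 and rep_idx == 1, so rep == true; a rep hypercall that
 * does not start at rep_idx 0 is rejected by the checks in the switch.
 */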

void kvm_hv_init_vm(struct kvm *kvm)
{
	mutex_init(&kvm->arch.hyperv.hv_lock);
	idr_init(&kvm->arch.hyperv.conn_to_evt);
}

void kvm_hv_destroy_vm(struct kvm *kvm)
{
	struct eventfd_ctx *eventfd;
	int i;

	idr_for_each_entry(&kvm->arch.hyperv.conn_to_evt, eventfd, i)
		eventfd_ctx_put(eventfd);
	idr_destroy(&kvm->arch.hyperv.conn_to_evt);
}

static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
{
	struct kvm_hv *hv = &kvm->arch.hyperv;
	struct eventfd_ctx *eventfd;
	int ret;

	eventfd = eventfd_ctx_fdget(fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&hv->hv_lock);
	ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1,
			GFP_KERNEL);
	mutex_unlock(&hv->hv_lock);

	if (ret >= 0)
		return 0;

	if (ret == -ENOSPC)
		ret = -EEXIST;
	eventfd_ctx_put(eventfd);
	return ret;
}

static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id)
{
	struct kvm_hv *hv = &kvm->arch.hyperv;
	struct eventfd_ctx *eventfd;

	mutex_lock(&hv->hv_lock);
	eventfd = idr_remove(&hv->conn_to_evt, conn_id);
	mutex_unlock(&hv->hv_lock);

	if (!eventfd)
		return -ENOENT;

	synchronize_srcu(&kvm->srcu);
	eventfd_ctx_put(eventfd);
	return 0;
}

int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
{
	if ((args->flags & ~KVM_HYPERV_EVENTFD_DEASSIGN) ||
	    (args->conn_id & ~KVM_HYPERV_CONN_ID_MASK))
		return -EINVAL;

	if (args->flags == KVM_HYPERV_EVENTFD_DEASSIGN)
		return kvm_hv_eventfd_deassign(kvm, args->conn_id);
	return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd);
}