/*
 * Copyright (C) 2001 MandrakeSoft S.A.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * MandrakeSoft S.A.
 * 43, rue d'Aboukir
 * 75002 Paris - France
 * http://www.linux-mandrake.com/
 * http://www.mandrakesoft.com/
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Yunhong Jiang <yunhong.jiang@intel.com>
 * Yaozu (Eddie) Dong <eddie.dong@intel.com>
 * Based on Xen 3.1 code.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/current.h>
#include <trace/events/kvm.h>

#include "ioapic.h"
#include "lapic.h"
#include "irq.h"

static int ioapic_service(struct kvm_ioapic *vioapic, int irq,
			  bool line_status);

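/*
 * Read the register currently addressed by IOREGSEL: the version,
 * APIC/arbitration ID, or one 32-bit half of a redirection table entry.
 * Out-of-range redirection indices read back as all ones.
 */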
static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
					  unsigned long addr,
					  unsigned long length)
{
	unsigned long result = 0;

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16)
			  | (IOAPIC_VERSION_ID & 0xff));
		break;

	case IOAPIC_REG_APIC_ID:
	case IOAPIC_REG_ARB_ID:
		result = ((ioapic->id & 0xf) << 24);
		break;

	default:
		{
			u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
			u64 redir_content;

			if (redir_index < IOAPIC_NUM_PINS)
				redir_content =
					ioapic->redirtbl[redir_index].bits;
			else
				redir_content = ~0ULL;

			result = (ioapic->ioregsel & 0x1) ?
				(redir_content >> 32) & 0xffffffff :
				redir_content & 0xffffffff;
			break;
		}
	}

	return result;
}

static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
	ioapic->rtc_status.pending_eoi = 0;
	bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID);
}

static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);

static void rtc_status_pending_eoi_check_valid(struct kvm_ioapic *ioapic)
{
	if (WARN_ON(ioapic->rtc_status.pending_eoi < 0))
		kvm_rtc_eoi_tracking_restore_all(ioapic);
}

static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
	bool new_val, old_val;
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
	union kvm_ioapic_redirect_entry *e;

	e = &ioapic->redirtbl[RTC_GSI];
	if (!kvm_apic_match_dest(vcpu, NULL, 0, e->fields.dest_id,
				 e->fields.dest_mode))
		return;

	new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
	old_val = test_bit(vcpu->vcpu_id, dest_map->map);

	if (new_val == old_val)
		return;

	if (new_val) {
		__set_bit(vcpu->vcpu_id, dest_map->map);
		dest_map->vectors[vcpu->vcpu_id] = e->fields.vector;
		ioapic->rtc_status.pending_eoi++;
	} else {
		__clear_bit(vcpu->vcpu_id, dest_map->map);
		ioapic->rtc_status.pending_eoi--;
		rtc_status_pending_eoi_check_valid(ioapic);
	}
}

void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	__rtc_irq_eoi_tracking_restore_one(vcpu);
	spin_unlock(&ioapic->lock);
}

static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (RTC_GSI >= IOAPIC_NUM_PINS)
		return;

	rtc_irq_eoi_tracking_reset(ioapic);
	kvm_for_each_vcpu(i, vcpu, ioapic->kvm)
		__rtc_irq_eoi_tracking_restore_one(vcpu);
}

static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu)
{
	if (test_and_clear_bit(vcpu->vcpu_id,
			       ioapic->rtc_status.dest_map.map)) {
		--ioapic->rtc_status.pending_eoi;
		rtc_status_pending_eoi_check_valid(ioapic);
	}
}

static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
{
	if (ioapic->rtc_status.pending_eoi > 0)
		return true; /* coalesced */

	return false;
}

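/*
 * Update the pin's level in IRR and deliver the interrupt via
 * ioapic_service() when a new edge or level is seen.  Returns 0 when
 * the interrupt is coalesced (dropped), 1 when the line is simply
 * deasserted.  Called with ioapic->lock held.
 */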
static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
			  int irq_level, bool line_status)
{
	union kvm_ioapic_redirect_entry entry;
	u32 mask = 1 << irq;
	u32 old_irr;
	int edge, ret;

	entry = ioapic->redirtbl[irq];
	edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);

	if (!irq_level) {
		ioapic->irr &= ~mask;
		ret = 1;
		goto out;
	}

	/*
	 * Return 0 for coalesced interrupts; for edge-triggered interrupts,
	 * this only happens if a previous edge has not been delivered due
	 * to masking.  For level interrupts, the remote_irr field tells
	 * us if the interrupt is waiting for an EOI.
	 *
	 * RTC is special: it is edge-triggered, but userspace likes to know
	 * if it has already been acked via EOI because coalesced RTC
	 * interrupts lead to time drift in Windows guests.  So we track
	 * EOI manually for the RTC interrupt.
	 */
	if (irq == RTC_GSI && line_status &&
	    rtc_irq_check_coalesced(ioapic)) {
		ret = 0;
		goto out;
	}

	old_irr = ioapic->irr;
	ioapic->irr |= mask;
	if (edge) {
		ioapic->irr_delivered &= ~mask;
		if (old_irr == ioapic->irr) {
			ret = 0;
			goto out;
		}
	}

	ret = ioapic_service(ioapic, irq, line_status);

out:
	trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
	return ret;
}

static void kvm_ioapic_inject_all(struct kvm_ioapic *ioapic, unsigned long irr)
{
	u32 idx;

	rtc_irq_eoi_tracking_reset(ioapic);
	for_each_set_bit(idx, &irr, IOAPIC_NUM_PINS)
		ioapic_set_irq(ioapic, idx, 1, true);

	kvm_rtc_eoi_tracking_restore_all(ioapic);
}

void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
	union kvm_ioapic_redirect_entry *e;
	int index;

	spin_lock(&ioapic->lock);

	/* Make sure we see any missing RTC EOI */
	if (test_bit(vcpu->vcpu_id, dest_map->map))
		__set_bit(dest_map->vectors[vcpu->vcpu_id],
			  ioapic_handled_vectors);

	for (index = 0; index < IOAPIC_NUM_PINS; index++) {
		e = &ioapic->redirtbl[index];
		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
		    kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
		    index == RTC_GSI) {
			if (kvm_apic_match_dest(vcpu, NULL, 0,
				    e->fields.dest_id, e->fields.dest_mode) ||
			    kvm_apic_pending_eoi(vcpu, e->fields.vector))
				__set_bit(e->fields.vector,
					  ioapic_handled_vectors);
		}
	}
	spin_unlock(&ioapic->lock);
}

void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
	if (!ioapic_in_kernel(kvm))
		return;
	kvm_make_scan_ioapic_request(kvm);
}

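/*
 * Write the register addressed by IOREGSEL.  Redirection table updates
 * preserve the read-only Remote IRR and Delivery Status bits and kick
 * the affected vCPUs so that ioapic_handled_vectors is recomputed.
 */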
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
	unsigned index;
	bool mask_before, mask_after;
	union kvm_ioapic_redirect_entry *e;
	unsigned long vcpu_bitmap;
	int old_remote_irr, old_delivery_status, old_dest_id, old_dest_mode;

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		/* Writes are ignored. */
		break;

	case IOAPIC_REG_APIC_ID:
		ioapic->id = (val >> 24) & 0xf;
		break;

	case IOAPIC_REG_ARB_ID:
		break;

	default:
		index = (ioapic->ioregsel - 0x10) >> 1;

		if (index >= IOAPIC_NUM_PINS)
			return;
		e = &ioapic->redirtbl[index];
		mask_before = e->fields.mask;
		/* Preserve read-only fields */
		old_remote_irr = e->fields.remote_irr;
		old_delivery_status = e->fields.delivery_status;
		old_dest_id = e->fields.dest_id;
		old_dest_mode = e->fields.dest_mode;
		if (ioapic->ioregsel & 1) {
			e->bits &= 0xffffffff;
			e->bits |= (u64) val << 32;
		} else {
			e->bits &= ~0xffffffffULL;
			e->bits |= (u32) val;
		}
		e->fields.remote_irr = old_remote_irr;
		e->fields.delivery_status = old_delivery_status;

		/*
		 * Some OSes (Linux, Xen) assume that the Remote IRR bit will
		 * be cleared by IOAPIC hardware when the entry is configured
		 * as edge-triggered.  This behavior is used to simulate an
		 * explicit EOI on IOAPICs that don't have the EOI register.
		 */
		if (e->fields.trig_mode == IOAPIC_EDGE_TRIG)
			e->fields.remote_irr = 0;

		mask_after = e->fields.mask;
		if (mask_before != mask_after)
			kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
		    && ioapic->irr & (1 << index))
			ioapic_service(ioapic, index, false);
		if (e->fields.delivery_mode == APIC_DM_FIXED) {
			struct kvm_lapic_irq irq;

			irq.shorthand = 0;
			irq.vector = e->fields.vector;
			irq.delivery_mode = e->fields.delivery_mode << 8;
			irq.dest_id = e->fields.dest_id;
			irq.dest_mode = e->fields.dest_mode;
			bitmap_zero(&vcpu_bitmap, 16);
			kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
						 &vcpu_bitmap);
			if (old_dest_mode != e->fields.dest_mode ||
			    old_dest_id != e->fields.dest_id) {
				/*
				 * Update vcpu_bitmap with vcpus specified in
				 * the previous request as well.  This is done
				 * to keep ioapic_handled_vectors synchronized.
				 */
				irq.dest_id = old_dest_id;
				irq.dest_mode = old_dest_mode;
				kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
							 &vcpu_bitmap);
			}
			kvm_make_scan_ioapic_request_mask(ioapic->kvm,
							  &vcpu_bitmap);
		} else {
			kvm_make_scan_ioapic_request(ioapic->kvm);
		}
		break;
	}
}

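/*
 * Deliver the interrupt programmed on a pin to the local APIC(s).
 * Returns -1 if the pin is masked or a level-triggered interrupt is
 * still awaiting its EOI (Remote IRR set).
 */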
static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
{
	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
	struct kvm_lapic_irq irqe;
	int ret;

	if (entry->fields.mask ||
	    (entry->fields.trig_mode == IOAPIC_LEVEL_TRIG &&
	     entry->fields.remote_irr))
		return -1;

	irqe.dest_id = entry->fields.dest_id;
	irqe.vector = entry->fields.vector;
	irqe.dest_mode = entry->fields.dest_mode;
	irqe.trig_mode = entry->fields.trig_mode;
	irqe.delivery_mode = entry->fields.delivery_mode << 8;
	irqe.level = 1;
	irqe.shorthand = 0;
	irqe.msi_redir_hint = false;

	if (irqe.trig_mode == IOAPIC_EDGE_TRIG)
		ioapic->irr_delivered |= 1 << irq;

	if (irq == RTC_GSI && line_status) {
		/*
		 * pending_eoi cannot ever become negative (see
		 * rtc_status_pending_eoi_check_valid) and the callers
		 * ensure that it is only called if it is >= zero, namely
		 * if rtc_irq_check_coalesced returns false.
		 */
		BUG_ON(ioapic->rtc_status.pending_eoi != 0);
		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
					       &ioapic->rtc_status.dest_map);
		ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
	} else
		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);

	if (ret && irqe.trig_mode == IOAPIC_LEVEL_TRIG)
		entry->fields.remote_irr = 1;

	return ret;
}

int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
		       int level, bool line_status)
{
	int ret, irq_level;

	BUG_ON(irq < 0 || irq >= IOAPIC_NUM_PINS);

	spin_lock(&ioapic->lock);
	irq_level = __kvm_irq_line_state(&ioapic->irq_states[irq],
					 irq_source_id, level);
	ret = ioapic_set_irq(ioapic, irq, irq_level, line_status);

	spin_unlock(&ioapic->lock);

	return ret;
}

void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id)
{
	int i;

	spin_lock(&ioapic->lock);
	for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
		__clear_bit(irq_source_id, &ioapic->irq_states[i]);
	spin_unlock(&ioapic->lock);
}

static void kvm_ioapic_eoi_inject_work(struct work_struct *work)
{
	int i;
	struct kvm_ioapic *ioapic = container_of(work, struct kvm_ioapic,
						 eoi_inject.work);
	spin_lock(&ioapic->lock);
	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

		if (ent->fields.trig_mode != IOAPIC_LEVEL_TRIG)
			continue;

		if (ioapic->irr & (1 << i) && !ent->fields.remote_irr)
			ioapic_service(ioapic, i, false);
	}
	spin_unlock(&ioapic->lock);
}

#define IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT 10000

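/*
 * Handle an EOI broadcast from a vCPU: run the ack notifiers for every
 * pin programmed with this vector, clear Remote IRR on level-triggered
 * entries and re-deliver the interrupt if the line is still asserted,
 * throttling via eoi_inject when an interrupt storm is detected.
 */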
static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
			struct kvm_ioapic *ioapic, int vector, int trigger_mode)
{
	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
	struct kvm_lapic *apic = vcpu->arch.apic;
	int i;

	/* RTC special handling */
	if (test_bit(vcpu->vcpu_id, dest_map->map) &&
	    vector == dest_map->vectors[vcpu->vcpu_id])
		rtc_irq_eoi(ioapic, vcpu);

	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

		if (ent->fields.vector != vector)
			continue;

		/*
		 * We drop the lock while calling the ack notifiers because
		 * ack notifier callbacks for assigned devices call into the
		 * IOAPIC recursively.  Since remote_irr is cleared only after
		 * the notifiers are called, if the same vector is delivered
		 * while the lock is dropped it will be put into the IRR and
		 * delivered after the ack notifier returns.
		 */
		spin_unlock(&ioapic->lock);
		kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
		spin_lock(&ioapic->lock);

		if (trigger_mode != IOAPIC_LEVEL_TRIG ||
		    kvm_lapic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
			continue;

		ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
		ent->fields.remote_irr = 0;
		if (!ent->fields.mask && (ioapic->irr & (1 << i))) {
			++ioapic->irq_eoi[i];
			if (ioapic->irq_eoi[i] == IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT) {
				/*
				 * Real hardware does not deliver the interrupt
				 * immediately during eoi broadcast, and this
				 * lets a buggy guest make slow progress
				 * even if it does not correctly handle a
				 * level-triggered interrupt.  Emulate this
				 * behavior if we detect an interrupt storm.
				 */
				schedule_delayed_work(&ioapic->eoi_inject, HZ / 100);
				ioapic->irq_eoi[i] = 0;
				trace_kvm_ioapic_delayed_eoi_inj(ent->bits);
			} else {
				ioapic_service(ioapic, i, false);
			}
		} else {
			ioapic->irq_eoi[i] = 0;
		}
	}
}

void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	__kvm_ioapic_update_eoi(vcpu, ioapic, vector, trigger_mode);
	spin_unlock(&ioapic->lock);
}

static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_ioapic, dev);
}

static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr)
{
	return ((addr >= ioapic->base_address &&
		 (addr < ioapic->base_address + IOAPIC_MEM_LENGTH)));
}

static int ioapic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			    gpa_t addr, int len, void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 result;
	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;

	ASSERT(!(addr & 0xf));	/* check alignment */

	addr &= 0xff;
	spin_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		result = ioapic->ioregsel;
		break;

	case IOAPIC_REG_WINDOW:
		result = ioapic_read_indirect(ioapic, addr, len);
		break;

	default:
		result = 0;
		break;
	}
	spin_unlock(&ioapic->lock);

	switch (len) {
	case 8:
		*(u64 *) val = result;
		break;
	case 1:
	case 2:
	case 4:
		memcpy(val, (char *)&result, len);
		break;
	default:
		printk(KERN_WARNING "ioapic: wrong length %d\n", len);
	}
	return 0;
}

static int ioapic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			     gpa_t addr, int len, const void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 data;
	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;

	ASSERT(!(addr & 0xf));	/* check alignment */

	switch (len) {
	case 8:
	case 4:
		data = *(u32 *) val;
		break;
	case 2:
		data = *(u16 *) val;
		break;
	case 1:
		data = *(u8 *) val;
		break;
	default:
		printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
		return 0;
	}

	addr &= 0xff;
	spin_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		ioapic->ioregsel = data & 0xFF; /* 8-bit register */
		break;

	case IOAPIC_REG_WINDOW:
		ioapic_write_indirect(ioapic, data);
		break;

	default:
		break;
	}
	spin_unlock(&ioapic->lock);
	return 0;
}

static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
{
	int i;

	cancel_delayed_work_sync(&ioapic->eoi_inject);
	for (i = 0; i < IOAPIC_NUM_PINS; i++)
		ioapic->redirtbl[i].fields.mask = 1;
	ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
	ioapic->ioregsel = 0;
	ioapic->irr = 0;
	ioapic->irr_delivered = 0;
	ioapic->id = 0;
	memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi));
	rtc_irq_eoi_tracking_reset(ioapic);
}

static const struct kvm_io_device_ops ioapic_mmio_ops = {
	.read	= ioapic_mmio_read,
	.write	= ioapic_mmio_write,
};

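/*
 * Allocate the emulated IOAPIC and register it on the MMIO bus at its
 * default base address.
 */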
int kvm_ioapic_init(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic;
	int ret;

	ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL_ACCOUNT);
	if (!ioapic)
		return -ENOMEM;

	spin_lock_init(&ioapic->lock);
	INIT_DELAYED_WORK(&ioapic->eoi_inject, kvm_ioapic_eoi_inject_work);
	kvm->arch.vioapic = ioapic;
	kvm_ioapic_reset(ioapic);
	kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
	ioapic->kvm = kvm;
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ioapic->base_address,
				      IOAPIC_MEM_LENGTH, &ioapic->dev);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0) {
		kvm->arch.vioapic = NULL;
		kfree(ioapic);
	}

	return ret;
}

void kvm_ioapic_destroy(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	if (!ioapic)
		return;

	cancel_delayed_work_sync(&ioapic->eoi_inject);
	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
	mutex_unlock(&kvm->slots_lock);
	kvm->arch.vioapic = NULL;
	kfree(ioapic);
}

void kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
	state->irr &= ~ioapic->irr_delivered;
	spin_unlock(&ioapic->lock);
}

void kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
	ioapic->irr = 0;
	ioapic->irr_delivered = 0;
	kvm_make_scan_ioapic_request(kvm);
	kvm_ioapic_inject_all(ioapic, state->irr);
	spin_unlock(&ioapic->lock);
}