/*
 * MSI-X device support
 *
 * This module includes support for MSI-X in PCI devices.
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Copyright (c) 2009, Red Hat Inc, Michael S. Tsirkin (mst@redhat.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "hw/xen/xen.h"
#include "sysemu/xen.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "qemu/range.h"
#include "qapi/error.h"
#include "trace.h"

#include "hw/i386/kvm/xen_evtchn.h"

/* MSI enable bit and maskall bit are in byte 1 in FLAGS register */
#define MSIX_CONTROL_OFFSET (PCI_MSIX_FLAGS + 1)
#define MSIX_ENABLE_MASK (PCI_MSIX_FLAGS_ENABLE >> 8)
#define MSIX_MASKALL_MASK (PCI_MSIX_FLAGS_MASKALL >> 8)

static MSIMessage msix_prepare_message(PCIDevice *dev, unsigned vector)
{
    uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE;
    MSIMessage msg;

    msg.address = pci_get_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR);
    msg.data = pci_get_long(table_entry + PCI_MSIX_ENTRY_DATA);
    return msg;
}

MSIMessage msix_get_message(PCIDevice *dev, unsigned vector)
{
    return dev->msix_prepare_message(dev, vector);
}

/*
 * Special API for POWER to configure the vectors through
 * a side channel. Should never be used by devices.
 */
void msix_set_message(PCIDevice *dev, int vector, struct MSIMessage msg)
{
    uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE;

    pci_set_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR, msg.address);
    pci_set_long(table_entry + PCI_MSIX_ENTRY_DATA, msg.data);
    table_entry[PCI_MSIX_ENTRY_VECTOR_CTRL] &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
}

static uint8_t msix_pending_mask(int vector)
{
    return 1 << (vector % 8);
}

static uint8_t *msix_pending_byte(PCIDevice *dev, int vector)
{
    return dev->msix_pba + vector / 8;
}

static int msix_is_pending(PCIDevice *dev, int vector)
{
    return *msix_pending_byte(dev, vector) & msix_pending_mask(vector);
}

void msix_set_pending(PCIDevice *dev, unsigned int vector)
{
    *msix_pending_byte(dev, vector) |= msix_pending_mask(vector);
}

void msix_clr_pending(PCIDevice *dev, int vector)
{
    *msix_pending_byte(dev, vector) &= ~msix_pending_mask(vector);
}
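/*
 * Illustrative note (editorial addition, not part of the original source):
 * the PBA is a plain bit array with one pending bit per vector. For
 * example, vector 13 lives in PBA byte 13 / 8 == 1 under mask
 * 1 << (13 % 8) == 0x20, which is exactly what the helpers above compute.
 */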
static bool msix_vector_masked(PCIDevice *dev, unsigned int vector, bool fmask)
{
    unsigned offset = vector * PCI_MSIX_ENTRY_SIZE;
    uint8_t *data = &dev->msix_table[offset + PCI_MSIX_ENTRY_DATA];
    /* MSIs on Xen can be remapped into pirqs. In those cases, masking
     * and unmasking go through the PV evtchn path. */
    if (xen_enabled() && xen_is_pirq_msi(pci_get_long(data))) {
        return false;
    }
    return fmask || dev->msix_table[offset + PCI_MSIX_ENTRY_VECTOR_CTRL] &
        PCI_MSIX_ENTRY_CTRL_MASKBIT;
}

bool msix_is_masked(PCIDevice *dev, unsigned int vector)
{
    return msix_vector_masked(dev, vector, dev->msix_function_masked);
}

static void msix_fire_vector_notifier(PCIDevice *dev,
                                      unsigned int vector, bool is_masked)
{
    MSIMessage msg;
    int ret;

    if (!dev->msix_vector_use_notifier) {
        return;
    }
    if (is_masked) {
        dev->msix_vector_release_notifier(dev, vector);
    } else {
        msg = msix_get_message(dev, vector);
        ret = dev->msix_vector_use_notifier(dev, vector, msg);
        assert(ret >= 0);
    }
}

static void msix_handle_mask_update(PCIDevice *dev, int vector, bool was_masked)
{
    bool is_masked = msix_is_masked(dev, vector);

    if (xen_mode == XEN_EMULATE) {
        MSIMessage msg = msix_prepare_message(dev, vector);

        xen_evtchn_snoop_msi(dev, true, vector, msg.address, msg.data,
                             is_masked);
    }

    if (is_masked == was_masked) {
        return;
    }

    msix_fire_vector_notifier(dev, vector, is_masked);

    if (!is_masked && msix_is_pending(dev, vector)) {
        msix_clr_pending(dev, vector);
        msix_notify(dev, vector);
    }
}

void msix_set_mask(PCIDevice *dev, int vector, bool mask)
{
    unsigned offset;
    bool was_masked;

    assert(vector < dev->msix_entries_nr);

    offset = vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL;

    was_masked = msix_is_masked(dev, vector);

    if (mask) {
        dev->msix_table[offset] |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
    } else {
        dev->msix_table[offset] &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
    }

    msix_handle_mask_update(dev, vector, was_masked);
}

static bool msix_masked(PCIDevice *dev)
{
    return dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] & MSIX_MASKALL_MASK;
}

static void msix_update_function_masked(PCIDevice *dev)
{
    dev->msix_function_masked = !msix_enabled(dev) || msix_masked(dev);
}
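/*
 * Illustrative note (editorial addition, not part of the original source):
 * a vector is effectively masked if either its per-entry mask bit or the
 * function-wide mask (MASKALL set, or MSI-X disabled) is in effect.
 * msix_notify() turns a trigger on a masked vector into a pending bit, and
 * msix_handle_mask_update() above replays it once the vector is unmasked.
 */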
/* Handle MSI-X capability config write. */
void msix_write_config(PCIDevice *dev, uint32_t addr,
                       uint32_t val, int len)
{
    unsigned enable_pos = dev->msix_cap + MSIX_CONTROL_OFFSET;
    int vector;
    bool was_masked;

    if (!msix_present(dev) || !range_covers_byte(addr, len, enable_pos)) {
        return;
    }

    trace_msix_write_config(dev->name, msix_enabled(dev), msix_masked(dev));

    was_masked = dev->msix_function_masked;
    msix_update_function_masked(dev);

    if (!msix_enabled(dev)) {
        return;
    }

    pci_device_deassert_intx(dev);

    if (dev->msix_function_masked == was_masked) {
        return;
    }

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        msix_handle_mask_update(dev, vector,
                                msix_vector_masked(dev, vector, was_masked));
    }
}

static uint64_t msix_table_mmio_read(void *opaque, hwaddr addr,
                                     unsigned size)
{
    PCIDevice *dev = opaque;

    assert(addr + size <= dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE);
    return pci_get_long(dev->msix_table + addr);
}

static void msix_table_mmio_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PCIDevice *dev = opaque;
    int vector = addr / PCI_MSIX_ENTRY_SIZE;
    bool was_masked;

    assert(addr + size <= dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE);

    was_masked = msix_is_masked(dev, vector);
    pci_set_long(dev->msix_table + addr, val);
    msix_handle_mask_update(dev, vector, was_masked);
}

static const MemoryRegionOps msix_table_mmio_ops = {
    .read = msix_table_mmio_read,
    .write = msix_table_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .max_access_size = 4,
    },
};

static uint64_t msix_pba_mmio_read(void *opaque, hwaddr addr,
                                   unsigned size)
{
    PCIDevice *dev = opaque;
    if (dev->msix_vector_poll_notifier) {
        unsigned vector_start = addr * 8;
        unsigned vector_end = MIN(addr + size * 8, dev->msix_entries_nr);
        dev->msix_vector_poll_notifier(dev, vector_start, vector_end);
    }

    return pci_get_long(dev->msix_pba + addr);
}

static void msix_pba_mmio_write(void *opaque, hwaddr addr,
                                uint64_t val, unsigned size)
{
}

static const MemoryRegionOps msix_pba_mmio_ops = {
    .read = msix_pba_mmio_read,
    .write = msix_pba_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .max_access_size = 4,
    },
};

static void msix_mask_all(struct PCIDevice *dev, unsigned nentries)
{
    int vector;

    for (vector = 0; vector < nentries; ++vector) {
        unsigned offset =
            vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL;
        bool was_masked = msix_is_masked(dev, vector);

        dev->msix_table[offset] |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
        msix_handle_mask_update(dev, vector, was_masked);
    }
}
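/*
 * Illustrative note (editorial addition, not part of the original source):
 * in msix_table_mmio_ops and msix_pba_mmio_ops above, .valid accepts 4- and
 * 8-byte guest accesses while .impl caps the implementation at 4 bytes, so
 * the memory core splits an 8-byte access into two 4-byte callbacks; that
 * is why the handlers can use pci_get_long()/pci_set_long() unconditionally.
 */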
/*
 * Make PCI device @dev MSI-X capable
 * @nentries is the max number of MSI-X vectors that the device supports.
 * @table_bar is the MemoryRegion in which the MSI-X table structure resides.
 * @table_bar_nr is the number of the base address register corresponding to
 * @table_bar.
 * @table_offset indicates the offset at which the MSI-X table structure
 * starts within @table_bar.
 * @pba_bar is the MemoryRegion in which the Pending Bit Array structure
 * resides.
 * @pba_bar_nr is the number of the base address register corresponding to
 * @pba_bar.
 * @pba_offset indicates the offset at which the Pending Bit Array structure
 * starts within @pba_bar.
 * A non-zero @cap_pos puts the MSI-X capability at that offset in PCI config
 * space.
 * @errp is for returning errors.
 *
 * Return 0 on success; set @errp and return -errno on error:
 * -ENOTSUP means the platform's interrupt controller lacks MSI support.
 * -EINVAL means a capability overlap, which can happen when @cap_pos is
 * non-zero; this indicates a programming error, except for device
 * assignment, which can use it to check whether real hardware is broken.
 */
int msix_init(struct PCIDevice *dev, unsigned short nentries,
              MemoryRegion *table_bar, uint8_t table_bar_nr,
              unsigned table_offset, MemoryRegion *pba_bar,
              uint8_t pba_bar_nr, unsigned pba_offset, uint8_t cap_pos,
              Error **errp)
{
    int cap;
    unsigned table_size, pba_size;
    uint8_t *config;

    /* Nothing to do if MSI is not supported by interrupt controller */
    if (!msi_nonbroken) {
        error_setg(errp, "MSI-X is not supported by interrupt controller");
        return -ENOTSUP;
    }

    if (nentries < 1 || nentries > PCI_MSIX_FLAGS_QSIZE + 1) {
        error_setg(errp, "The number of MSI-X vectors is invalid");
        return -EINVAL;
    }

    table_size = nentries * PCI_MSIX_ENTRY_SIZE;
    pba_size = QEMU_ALIGN_UP(nentries, 64) / 8;

    /* Sanity test: table & pba don't overlap, fit within BARs, min aligned */
    if ((table_bar_nr == pba_bar_nr &&
         ranges_overlap(table_offset, table_size, pba_offset, pba_size)) ||
        table_offset + table_size > memory_region_size(table_bar) ||
        pba_offset + pba_size > memory_region_size(pba_bar) ||
        (table_offset | pba_offset) & PCI_MSIX_FLAGS_BIRMASK) {
        error_setg(errp, "table & pba overlap, or they don't fit in BARs,"
                   " or don't align");
        return -EINVAL;
    }

    cap = pci_add_capability(dev, PCI_CAP_ID_MSIX,
                             cap_pos, MSIX_CAP_LENGTH, errp);
    if (cap < 0) {
        return cap;
    }

    dev->msix_cap = cap;
    dev->cap_present |= QEMU_PCI_CAP_MSIX;
    config = dev->config + cap;

    pci_set_word(config + PCI_MSIX_FLAGS, nentries - 1);
    dev->msix_entries_nr = nentries;
    dev->msix_function_masked = true;

    pci_set_long(config + PCI_MSIX_TABLE, table_offset | table_bar_nr);
    pci_set_long(config + PCI_MSIX_PBA, pba_offset | pba_bar_nr);

    /* Make flags bit writable. */
    dev->wmask[cap + MSIX_CONTROL_OFFSET] |= MSIX_ENABLE_MASK |
                                             MSIX_MASKALL_MASK;

    dev->msix_table = g_malloc0(table_size);
    dev->msix_pba = g_malloc0(pba_size);
    dev->msix_entry_used = g_malloc0(nentries * sizeof *dev->msix_entry_used);

    msix_mask_all(dev, nentries);

    memory_region_init_io(&dev->msix_table_mmio, OBJECT(dev),
                          &msix_table_mmio_ops, dev,
                          "msix-table", table_size);
    memory_region_add_subregion(table_bar, table_offset,
                                &dev->msix_table_mmio);
    memory_region_init_io(&dev->msix_pba_mmio, OBJECT(dev),
                          &msix_pba_mmio_ops, dev,
                          "msix-pba", pba_size);
    memory_region_add_subregion(pba_bar, pba_offset, &dev->msix_pba_mmio);

    dev->msix_prepare_message = msix_prepare_message;

    return 0;
}
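/*
 * Usage sketch (editorial addition; the device, its msix_bar field and the
 * chosen offsets are hypothetical, and msix_bar is assumed large enough):
 * a device's realize method typically places table and PBA in one memory
 * BAR and registers it:
 *
 *     int ret = msix_init(PCI_DEVICE(d), 8, &d->msix_bar, 1, 0,
 *                         &d->msix_bar, 1, 0x800, 0, errp);
 *     if (ret) {
 *         return;
 *     }
 *     pci_register_bar(PCI_DEVICE(d), 1, PCI_BASE_ADDRESS_SPACE_MEMORY,
 *                      &d->msix_bar);
 *
 * Devices that do not need a custom layout can use
 * msix_init_exclusive_bar() below instead.
 */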
int msix_init_exclusive_bar(PCIDevice *dev, unsigned short nentries,
                            uint8_t bar_nr, Error **errp)
{
    int ret;
    char *name;
    uint32_t bar_size = 4096;
    uint32_t bar_pba_offset = bar_size / 2;
    uint32_t bar_pba_size = QEMU_ALIGN_UP(nentries, 64) / 8;

    /*
     * Migration compatibility dictates that this remains a 4k
     * BAR with the vector table in the lower half and PBA in
     * the upper half for nentries lower than or equal to 128.
     * No need to care about using more than 65 entries for legacy
     * machine types, which have at most 64 queues.
     */
    if (nentries * PCI_MSIX_ENTRY_SIZE > bar_pba_offset) {
        bar_pba_offset = nentries * PCI_MSIX_ENTRY_SIZE;
    }

    if (bar_pba_offset + bar_pba_size > 4096) {
        bar_size = bar_pba_offset + bar_pba_size;
    }

    bar_size = pow2ceil(bar_size);

    name = g_strdup_printf("%s-msix", dev->name);
    memory_region_init(&dev->msix_exclusive_bar, OBJECT(dev), name, bar_size);
    g_free(name);

    ret = msix_init(dev, nentries, &dev->msix_exclusive_bar, bar_nr,
                    0, &dev->msix_exclusive_bar,
                    bar_nr, bar_pba_offset,
                    0, errp);
    if (ret) {
        return ret;
    }

    pci_register_bar(dev, bar_nr, PCI_BASE_ADDRESS_SPACE_MEMORY,
                     &dev->msix_exclusive_bar);

    return 0;
}
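/*
 * Worked example (editorial addition, not part of the original source):
 * with nentries = 128 the table needs 128 * 16 = 2048 bytes, so the
 * defaults hold: table at 0x000, PBA at 0x800, bar_size = 4096. With
 * nentries = 256 the table needs 4096 bytes, pushing bar_pba_offset to
 * 4096 and bar_size to pow2ceil(4096 + 32) = 8192.
 */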
static void msix_free_irq_entries(PCIDevice *dev)
{
    int vector;

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        dev->msix_entry_used[vector] = 0;
        msix_clr_pending(dev, vector);
    }
}

static void msix_clear_all_vectors(PCIDevice *dev)
{
    int vector;

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        msix_clr_pending(dev, vector);
    }
}

/* Clean up resources for the device. */
void msix_uninit(PCIDevice *dev, MemoryRegion *table_bar, MemoryRegion *pba_bar)
{
    if (!msix_present(dev)) {
        return;
    }
    pci_del_capability(dev, PCI_CAP_ID_MSIX, MSIX_CAP_LENGTH);
    dev->msix_cap = 0;
    msix_free_irq_entries(dev);
    dev->msix_entries_nr = 0;
    memory_region_del_subregion(pba_bar, &dev->msix_pba_mmio);
    g_free(dev->msix_pba);
    dev->msix_pba = NULL;
    memory_region_del_subregion(table_bar, &dev->msix_table_mmio);
    g_free(dev->msix_table);
    dev->msix_table = NULL;
    g_free(dev->msix_entry_used);
    dev->msix_entry_used = NULL;
    dev->cap_present &= ~QEMU_PCI_CAP_MSIX;
    dev->msix_prepare_message = NULL;
}

void msix_uninit_exclusive_bar(PCIDevice *dev)
{
    if (msix_present(dev)) {
        msix_uninit(dev, &dev->msix_exclusive_bar, &dev->msix_exclusive_bar);
    }
}

void msix_save(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;

    if (!msix_present(dev)) {
        return;
    }

    qemu_put_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE);
    qemu_put_buffer(f, dev->msix_pba, DIV_ROUND_UP(n, 8));
}

/* Should be called after restoring the config space. */
void msix_load(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;
    unsigned int vector;

    if (!msix_present(dev)) {
        return;
    }

    msix_clear_all_vectors(dev);
    qemu_get_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE);
    qemu_get_buffer(f, dev->msix_pba, DIV_ROUND_UP(n, 8));
    msix_update_function_masked(dev);

    for (vector = 0; vector < n; vector++) {
        msix_handle_mask_update(dev, vector, true);
    }
}

/* Does device support MSI-X? */
int msix_present(PCIDevice *dev)
{
    return dev->cap_present & QEMU_PCI_CAP_MSIX;
}

/* Is MSI-X enabled? */
int msix_enabled(PCIDevice *dev)
{
    return (dev->cap_present & QEMU_PCI_CAP_MSIX) &&
        (dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
         MSIX_ENABLE_MASK);
}

/* Send an MSI-X message */
void msix_notify(PCIDevice *dev, unsigned vector)
{
    MSIMessage msg;

    assert(vector < dev->msix_entries_nr);

    if (!dev->msix_entry_used[vector]) {
        return;
    }

    if (msix_is_masked(dev, vector)) {
        msix_set_pending(dev, vector);
        return;
    }

    msg = msix_get_message(dev, vector);

    msi_send_message(dev, msg);
}

void msix_reset(PCIDevice *dev)
{
    if (!msix_present(dev)) {
        return;
    }
    msix_clear_all_vectors(dev);
    dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &=
        ~dev->wmask[dev->msix_cap + MSIX_CONTROL_OFFSET];
    memset(dev->msix_table, 0, dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE);
    memset(dev->msix_pba, 0, QEMU_ALIGN_UP(dev->msix_entries_nr, 64) / 8);
    msix_mask_all(dev, dev->msix_entries_nr);
}

/*
 * The PCI spec suggests that devices make it possible for software to
 * configure fewer vectors than the device supports, but does not specify
 * a standard mechanism for devices to do so.
 *
 * We support this by asking devices to declare the vectors software is
 * actually going to use, and checking this on the notification path.
 * Devices that don't want to follow the spec suggestion can declare all
 * vectors as used.
 */
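/*
 * Usage sketch (editorial addition; the queue loop and field names are
 * hypothetical): a device typically declares the vectors it will use with
 * msix_vector_use() below, right after msix_init(), and raises interrupts
 * with msix_notify():
 *
 *     for (i = 0; i < nqueues; i++) {
 *         msix_vector_use(pdev, i);
 *     }
 *     ...
 *     msix_notify(pdev, q->vector);  (turns into a pending bit if masked)
 */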
/* Mark vector as used. */
void msix_vector_use(PCIDevice *dev, unsigned vector)
{
    assert(vector < dev->msix_entries_nr);
    dev->msix_entry_used[vector]++;
}

/* Mark vector as unused. */
void msix_vector_unuse(PCIDevice *dev, unsigned vector)
{
    assert(vector < dev->msix_entries_nr);
    if (!dev->msix_entry_used[vector]) {
        return;
    }
    if (--dev->msix_entry_used[vector]) {
        return;
    }
    msix_clr_pending(dev, vector);
}

void msix_unuse_all_vectors(PCIDevice *dev)
{
    if (!msix_present(dev)) {
        return;
    }
    msix_free_irq_entries(dev);
}

unsigned int msix_nr_vectors_allocated(const PCIDevice *dev)
{
    return dev->msix_entries_nr;
}

static int msix_set_notifier_for_vector(PCIDevice *dev, unsigned int vector)
{
    MSIMessage msg;

    if (msix_is_masked(dev, vector)) {
        return 0;
    }
    msg = msix_get_message(dev, vector);
    return dev->msix_vector_use_notifier(dev, vector, msg);
}

static void msix_unset_notifier_for_vector(PCIDevice *dev, unsigned int vector)
{
    if (msix_is_masked(dev, vector)) {
        return;
    }
    dev->msix_vector_release_notifier(dev, vector);
}

int msix_set_vector_notifiers(PCIDevice *dev,
                              MSIVectorUseNotifier use_notifier,
                              MSIVectorReleaseNotifier release_notifier,
                              MSIVectorPollNotifier poll_notifier)
{
    int vector, ret;

    assert(use_notifier && release_notifier);

    dev->msix_vector_use_notifier = use_notifier;
    dev->msix_vector_release_notifier = release_notifier;
    dev->msix_vector_poll_notifier = poll_notifier;

    if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
        (MSIX_ENABLE_MASK | MSIX_MASKALL_MASK)) == MSIX_ENABLE_MASK) {
        for (vector = 0; vector < dev->msix_entries_nr; vector++) {
            ret = msix_set_notifier_for_vector(dev, vector);
            if (ret < 0) {
                goto undo;
            }
        }
    }
    if (dev->msix_vector_poll_notifier) {
        dev->msix_vector_poll_notifier(dev, 0, dev->msix_entries_nr);
    }
    return 0;

undo:
    while (--vector >= 0) {
        msix_unset_notifier_for_vector(dev, vector);
    }
    dev->msix_vector_use_notifier = NULL;
    dev->msix_vector_release_notifier = NULL;
    dev->msix_vector_poll_notifier = NULL;
    return ret;
}

void msix_unset_vector_notifiers(PCIDevice *dev)
{
    int vector;

    assert(dev->msix_vector_use_notifier &&
           dev->msix_vector_release_notifier);

    if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
        (MSIX_ENABLE_MASK | MSIX_MASKALL_MASK)) == MSIX_ENABLE_MASK) {
        for (vector = 0; vector < dev->msix_entries_nr; vector++) {
            msix_unset_notifier_for_vector(dev, vector);
        }
    }
    dev->msix_vector_use_notifier = NULL;
    dev->msix_vector_release_notifier = NULL;
    dev->msix_vector_poll_notifier = NULL;
}
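/*
 * Illustrative note (editorial addition, not part of the original source):
 * the use/release notifiers let a backend learn when an unmasked vector's
 * message becomes live or goes away; for example, vfio and virtio-pci
 * register notifiers here to wire vectors to KVM irqfds. Notifiers are
 * only fired while MSI-X is enabled and the vector is unmasked.
 */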
static int put_msix_state(QEMUFile *f, void *pv, size_t size,
                          const VMStateField *field, JSONWriter *vmdesc)
{
    msix_save(pv, f);

    return 0;
}

static int get_msix_state(QEMUFile *f, void *pv, size_t size,
                          const VMStateField *field)
{
    msix_load(pv, f);
    return 0;
}

static const VMStateInfo vmstate_info_msix = {
    .name = "msix state",
    .get  = get_msix_state,
    .put  = put_msix_state,
};

const VMStateDescription vmstate_msix = {
    .name = "msix",
    .fields = (const VMStateField[]) {
        {
            .name         = "msix",
            .version_id   = 0,
            .field_exists = NULL,
            .size         = 0,   /* ouch */
            .info         = &vmstate_info_msix,
            .flags        = VMS_SINGLE,
            .offset       = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};
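/*
 * Usage sketch (editorial addition; MyDeviceState is hypothetical): devices
 * embed this descriptor via the VMSTATE_MSIX() helper from "hw/pci/msix.h",
 * placed after the PCI config space field in their VMStateDescription so
 * that msix_load() runs once config space has been restored:
 *
 *     static const VMStateDescription vmstate_my_device = {
 *         .name = "my-device",
 *         .fields = (const VMStateField[]) {
 *             VMSTATE_PCI_DEVICE(parent_obj, MyDeviceState),
 *             VMSTATE_MSIX(parent_obj, MyDeviceState),
 *             VMSTATE_END_OF_LIST()
 *         }
 *     };
 */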