/*
 * Inter-VM Shared Memory PCI device.
 *
 * Author:
 *      Cam Macdonell <cam@cs.ualberta.ca>
 *
 * Based On: cirrus_vga.c
 *          Copyright (c) 2004 Fabrice Bellard
 *          Copyright (c) 2004 Makoto Suzuki (suzu)
 *
 * and rtl8139.c
 *          Copyright (c) 2006 Igor Kovalenko
 *
 * This code is licensed under the GNU GPL v2.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "hw/hw.h"
#include "hw/i386/pc.h"
#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "sysemu/kvm.h"
#include "migration/migration.h"
#include "qemu/error-report.h"
#include "qemu/event_notifier.h"
#include "qemu/fifo8.h"
#include "sysemu/char.h"
#include "sysemu/hostmem.h"
#include "qapi/visitor.h"

#include "hw/misc/ivshmem.h"

#include <sys/mman.h>
#include <sys/types.h>
#include <limits.h>

#define PCI_VENDOR_ID_IVSHMEM   PCI_VENDOR_ID_REDHAT_QUMRANET
#define PCI_DEVICE_ID_IVSHMEM   0x1110

#define IVSHMEM_MAX_PEERS   G_MAXUINT16
#define IVSHMEM_IOEVENTFD   0
#define IVSHMEM_MSI         1

#define IVSHMEM_PEER        0
#define IVSHMEM_MASTER      1

#define IVSHMEM_REG_BAR_SIZE 0x100

//#define DEBUG_IVSHMEM
#ifdef DEBUG_IVSHMEM
#define IVSHMEM_DPRINTF(fmt, ...)        \
    do { printf("IVSHMEM: " fmt, ## __VA_ARGS__); } while (0)
#else
#define IVSHMEM_DPRINTF(fmt, ...)
#endif

#define TYPE_IVSHMEM "ivshmem"
#define IVSHMEM(obj) \
    OBJECT_CHECK(IVShmemState, (obj), TYPE_IVSHMEM)

#define IVSHMEM_MEMDEV_PROP "memdev"

typedef struct Peer {
    int nb_eventfds;
    EventNotifier *eventfds;
} Peer;

typedef struct MSIVector {
    PCIDevice *pdev;
    int virq;
} MSIVector;

typedef struct IVShmemState {
    /*< private >*/
    PCIDevice parent_obj;
    /*< public >*/

    HostMemoryBackend *hostmem;
    uint32_t intrmask;
    uint32_t intrstatus;

    CharDriverState **eventfd_chr;
    CharDriverState *server_chr;
    Fifo8 incoming_fifo;
    MemoryRegion ivshmem_mmio;

    /* We might need to register the BAR before we actually have the memory.
     * So prepare a container MemoryRegion for the BAR immediately and
     * add a subregion when we have the memory.
     */
    MemoryRegion bar;
    MemoryRegion ivshmem;
    uint64_t ivshmem_size; /* size of shared memory region */
    uint32_t ivshmem_64bit;

    Peer *peers;
    int nb_peers;               /* how many peers we have space for */

    int vm_id;
    uint32_t vectors;
    uint32_t features;
    MSIVector *msi_vectors;

    Error *migration_blocker;

    char *shmobj;
    char *sizearg;
    char *role;
    int role_val;   /* scalar to avoid multiple string comparisons */
} IVShmemState;

/* registers for the Inter-VM shared memory device */
enum ivshmem_registers {
    INTRMASK = 0,
    INTRSTATUS = 4,
    IVPOSITION = 8,
    DOORBELL = 12,
};

static inline uint32_t ivshmem_has_feature(IVShmemState *ivs,
                                           unsigned int feature) {
    return (ivs->features & (1 << feature));
}

/* accessing registers - based on rtl8139 */
static void ivshmem_update_irq(IVShmemState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    int isr;
    isr = (s->intrstatus & s->intrmask) & 0xffffffff;

    /* don't print ISR resets */
    if (isr) {
        IVSHMEM_DPRINTF("Set IRQ to %d (%04x %04x)\n",
                        isr ? 1 : 0, s->intrstatus, s->intrmask);
    }

    pci_set_irq(d, (isr != 0));
}
static void ivshmem_IntrMask_write(IVShmemState *s, uint32_t val)
{
    IVSHMEM_DPRINTF("IntrMask write(w) val = 0x%04x\n", val);

    s->intrmask = val;

    ivshmem_update_irq(s);
}

static uint32_t ivshmem_IntrMask_read(IVShmemState *s)
{
    uint32_t ret = s->intrmask;

    IVSHMEM_DPRINTF("intrmask read(w) val = 0x%04x\n", ret);

    return ret;
}

static void ivshmem_IntrStatus_write(IVShmemState *s, uint32_t val)
{
    IVSHMEM_DPRINTF("IntrStatus write(w) val = 0x%04x\n", val);

    s->intrstatus = val;

    ivshmem_update_irq(s);
}

static uint32_t ivshmem_IntrStatus_read(IVShmemState *s)
{
    uint32_t ret = s->intrstatus;

    /* reading ISR clears all interrupts */
    s->intrstatus = 0;

    ivshmem_update_irq(s);

    return ret;
}

static void ivshmem_io_write(void *opaque, hwaddr addr,
                             uint64_t val, unsigned size)
{
    IVShmemState *s = opaque;

    uint16_t dest = val >> 16;
    uint16_t vector = val & 0xff;

    addr &= 0xfc;

    IVSHMEM_DPRINTF("writing to addr " TARGET_FMT_plx "\n", addr);
    switch (addr)
    {
        case INTRMASK:
            ivshmem_IntrMask_write(s, val);
            break;

        case INTRSTATUS:
            ivshmem_IntrStatus_write(s, val);
            break;

        case DOORBELL:
            /* check that dest VM ID is reasonable */
            if (dest >= s->nb_peers) {
                IVSHMEM_DPRINTF("Invalid destination VM ID (%d)\n", dest);
                break;
            }

            /* check doorbell range */
            if (vector < s->peers[dest].nb_eventfds) {
                IVSHMEM_DPRINTF("Notifying VM %d on vector %d\n", dest, vector);
                event_notifier_set(&s->peers[dest].eventfds[vector]);
            } else {
                IVSHMEM_DPRINTF("Invalid destination vector %d on VM %d\n",
                                vector, dest);
            }
            break;
        default:
            IVSHMEM_DPRINTF("Unhandled write " TARGET_FMT_plx "\n", addr);
    }
}

static uint64_t ivshmem_io_read(void *opaque, hwaddr addr,
                                unsigned size)
{

    IVShmemState *s = opaque;
    uint32_t ret;

    switch (addr)
    {
        case INTRMASK:
            ret = ivshmem_IntrMask_read(s);
            break;

        case INTRSTATUS:
            ret = ivshmem_IntrStatus_read(s);
            break;

        case IVPOSITION:
            /* return my VM ID if the memory is mapped */
            if (memory_region_is_mapped(&s->ivshmem)) {
                ret = s->vm_id;
            } else {
                ret = -1;
            }
            break;

        default:
            IVSHMEM_DPRINTF("why are we reading " TARGET_FMT_plx "\n", addr);
            ret = 0;
    }

    return ret;
}

static const MemoryRegionOps ivshmem_mmio_ops = {
    .read = ivshmem_io_read,
    .write = ivshmem_io_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void ivshmem_receive(void *opaque, const uint8_t *buf, int size)
{
    IVShmemState *s = opaque;

    IVSHMEM_DPRINTF("ivshmem_receive 0x%02x size: %d\n", *buf, size);

    ivshmem_IntrStatus_write(s, *buf);
}

static int ivshmem_can_receive(void * opaque)
{
    return sizeof(int64_t);
}

static void ivshmem_event(void *opaque, int event)
{
    IVSHMEM_DPRINTF("ivshmem_event %d\n", event);
}
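/* Userspace fallback when KVM irqfd is not available: the eventfds for this
 * VM's own vectors are wrapped in chardevs (see create_eventfd_chr_device),
 * and this read handler forwards each notification to the guest via MSI-X.
 */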
static void fake_irqfd(void *opaque, const uint8_t *buf, int size) {

    MSIVector *entry = opaque;
    PCIDevice *pdev = entry->pdev;
    IVShmemState *s = IVSHMEM(pdev);
    int vector = entry - s->msi_vectors;

    IVSHMEM_DPRINTF("interrupt on vector %p %d\n", pdev, vector);
    msix_notify(pdev, vector);
}

static int ivshmem_vector_unmask(PCIDevice *dev, unsigned vector,
                                 MSIMessage msg)
{
    IVShmemState *s = IVSHMEM(dev);
    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
    MSIVector *v = &s->msi_vectors[vector];
    int ret;

    IVSHMEM_DPRINTF("vector unmask %p %d\n", dev, vector);

    ret = kvm_irqchip_update_msi_route(kvm_state, v->virq, msg, dev);
    if (ret < 0) {
        return ret;
    }

    return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, v->virq);
}

static void ivshmem_vector_mask(PCIDevice *dev, unsigned vector)
{
    IVShmemState *s = IVSHMEM(dev);
    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
    int ret;

    IVSHMEM_DPRINTF("vector mask %p %d\n", dev, vector);

    ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n,
                                                s->msi_vectors[vector].virq);
    if (ret != 0) {
        error_report("remove_irqfd_notifier_gsi failed");
    }
}

static void ivshmem_vector_poll(PCIDevice *dev,
                                unsigned int vector_start,
                                unsigned int vector_end)
{
    IVShmemState *s = IVSHMEM(dev);
    unsigned int vector;

    IVSHMEM_DPRINTF("vector poll %p %d-%d\n", dev, vector_start, vector_end);

    vector_end = MIN(vector_end, s->vectors);

    for (vector = vector_start; vector < vector_end; vector++) {
        EventNotifier *notifier = &s->peers[s->vm_id].eventfds[vector];

        if (!msix_is_masked(dev, vector)) {
            continue;
        }

        if (event_notifier_test_and_clear(notifier)) {
            msix_set_pending(dev, vector);
        }
    }
}

static CharDriverState* create_eventfd_chr_device(void *opaque, EventNotifier *n,
                                                  int vector)
{
    /* create an event character device based on the passed eventfd */
    IVShmemState *s = opaque;
    PCIDevice *pdev = PCI_DEVICE(s);
    int eventfd = event_notifier_get_fd(n);
    CharDriverState *chr;

    s->msi_vectors[vector].pdev = pdev;

    chr = qemu_chr_open_eventfd(eventfd);

    if (chr == NULL) {
        error_report("creating chardriver for eventfd %d failed", eventfd);
        return NULL;
    }
    qemu_chr_fe_claim_no_fail(chr);

    /* if MSI is supported we need multiple interrupts */
    if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
        s->msi_vectors[vector].pdev = PCI_DEVICE(s);

        qemu_chr_add_handlers(chr, ivshmem_can_receive, fake_irqfd,
                              ivshmem_event, &s->msi_vectors[vector]);
    } else {
        qemu_chr_add_handlers(chr, ivshmem_can_receive, ivshmem_receive,
                              ivshmem_event, s);
    }

    return chr;

}

static int check_shm_size(IVShmemState *s, int fd, Error **errp)
{
    /* check that the guest isn't going to try and map more memory than the
     * shared object has allocated; return -1 to indicate an error */

    struct stat buf;

    if (fstat(fd, &buf) < 0) {
        error_setg(errp, "exiting: fstat on fd %d failed: %s",
                   fd, strerror(errno));
        return -1;
    }

    if (s->ivshmem_size > buf.st_size) {
        error_setg(errp, "Requested memory size greater"
                   " than shared object size (%" PRIu64 " > %" PRIu64")",
                   s->ivshmem_size, (uint64_t)buf.st_size);
        return -1;
    } else {
        return 0;
    }
}
/* create the shared memory BAR when we are not using the server, so we can
 * create the BAR and map the memory immediately */
static int create_shared_memory_BAR(IVShmemState *s, int fd, uint8_t attr,
                                    Error **errp)
{
    void *ptr;

    ptr = mmap(0, s->ivshmem_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
    if (ptr == MAP_FAILED) {
        error_setg_errno(errp, errno, "Failed to mmap shared memory");
        return -1;
    }

    memory_region_init_ram_ptr(&s->ivshmem, OBJECT(s), "ivshmem.bar2",
                               s->ivshmem_size, ptr);
    vmstate_register_ram(&s->ivshmem, DEVICE(s));
    memory_region_add_subregion(&s->bar, 0, &s->ivshmem);

    /* region for shared memory */
    pci_register_bar(PCI_DEVICE(s), 2, attr, &s->bar);

    return 0;
}

static void ivshmem_add_eventfd(IVShmemState *s, int posn, int i)
{
    memory_region_add_eventfd(&s->ivshmem_mmio,
                              DOORBELL,
                              4,
                              true,
                              (posn << 16) | i,
                              &s->peers[posn].eventfds[i]);
}

static void ivshmem_del_eventfd(IVShmemState *s, int posn, int i)
{
    memory_region_del_eventfd(&s->ivshmem_mmio,
                              DOORBELL,
                              4,
                              true,
                              (posn << 16) | i,
                              &s->peers[posn].eventfds[i]);
}

static void close_peer_eventfds(IVShmemState *s, int posn)
{
    int i, n;

    if (!ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
        return;
    }
    if (posn < 0 || posn >= s->nb_peers) {
        error_report("invalid peer %d", posn);
        return;
    }

    n = s->peers[posn].nb_eventfds;

    memory_region_transaction_begin();
    for (i = 0; i < n; i++) {
        ivshmem_del_eventfd(s, posn, i);
    }
    memory_region_transaction_commit();
    for (i = 0; i < n; i++) {
        event_notifier_cleanup(&s->peers[posn].eventfds[i]);
    }

    g_free(s->peers[posn].eventfds);
    s->peers[posn].nb_eventfds = 0;
}

/* this function increases the dynamic storage needed to store data about
 * other peers */
static int resize_peers(IVShmemState *s, int new_min_size)
{

    int j, old_size;

    /* limit number of max peers */
    if (new_min_size <= 0 || new_min_size > IVSHMEM_MAX_PEERS) {
        return -1;
    }
    if (new_min_size <= s->nb_peers) {
        return 0;
    }

    old_size = s->nb_peers;
    s->nb_peers = new_min_size;

    IVSHMEM_DPRINTF("bumping storage to %d peers\n", s->nb_peers);

    s->peers = g_realloc(s->peers, s->nb_peers * sizeof(Peer));

    for (j = old_size; j < s->nb_peers; j++) {
        s->peers[j].eventfds = g_new0(EventNotifier, s->vectors);
        s->peers[j].nb_eventfds = 0;
    }

    return 0;
}

static bool fifo_update_and_get(IVShmemState *s, const uint8_t *buf, int size,
                                void *data, size_t len)
{
    const uint8_t *p;
    uint32_t num;

    assert(len <= sizeof(int64_t)); /* limitation of the fifo */
    if (fifo8_is_empty(&s->incoming_fifo) && size == len) {
        memcpy(data, buf, size);
        return true;
    }

    IVSHMEM_DPRINTF("short read of %d bytes\n", size);

    num = MIN(size, sizeof(int64_t) - fifo8_num_used(&s->incoming_fifo));
    fifo8_push_all(&s->incoming_fifo, buf, num);

    if (fifo8_num_used(&s->incoming_fifo) < len) {
        assert(num == 0);
        return false;
    }

    size -= num;
    buf += num;
    p = fifo8_pop_buf(&s->incoming_fifo, len, &num);
    assert(num == len);

    memcpy(data, p, len);

    if (size > 0) {
        fifo8_push_all(&s->incoming_fifo, buf, size);
    }

    return true;
}

static bool fifo_update_and_get_i64(IVShmemState *s,
                                    const uint8_t *buf, int size, int64_t *i64)
{
    if (fifo_update_and_get(s, buf, size, i64, sizeof(*i64))) {
        *i64 = GINT64_FROM_LE(*i64);
        return true;
    }

    return false;
}
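/* Allocate a KVM MSI routing entry (virq) for this vector so the
 * corresponding eventfd can later be bound to it as an irqfd;
 * msi_vectors[vector].pdev doubles as the "route allocated" marker.
 */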
static int ivshmem_add_kvm_msi_virq(IVShmemState *s, int vector)
{
    PCIDevice *pdev = PCI_DEVICE(s);
    MSIMessage msg = msix_get_message(pdev, vector);
    int ret;

    IVSHMEM_DPRINTF("ivshmem_add_kvm_msi_virq vector:%d\n", vector);

    if (s->msi_vectors[vector].pdev != NULL) {
        return 0;
    }

    ret = kvm_irqchip_add_msi_route(kvm_state, msg, pdev);
    if (ret < 0) {
        error_report("ivshmem: kvm_irqchip_add_msi_route failed");
        return -1;
    }

    s->msi_vectors[vector].virq = ret;
    s->msi_vectors[vector].pdev = pdev;

    return 0;
}

static void setup_interrupt(IVShmemState *s, int vector)
{
    EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
    bool with_irqfd = kvm_msi_via_irqfd_enabled() &&
        ivshmem_has_feature(s, IVSHMEM_MSI);
    PCIDevice *pdev = PCI_DEVICE(s);

    IVSHMEM_DPRINTF("setting up interrupt for vector: %d\n", vector);

    if (!with_irqfd) {
        IVSHMEM_DPRINTF("with eventfd\n");
        s->eventfd_chr[vector] = create_eventfd_chr_device(s, n, vector);
    } else if (msix_enabled(pdev)) {
        IVSHMEM_DPRINTF("with irqfd\n");
        if (ivshmem_add_kvm_msi_virq(s, vector) < 0) {
            return;
        }

        if (!msix_is_masked(pdev, vector)) {
            kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL,
                                               s->msi_vectors[vector].virq);
        }
    } else {
        /* it will be delayed until msix is enabled, in write_config */
        IVSHMEM_DPRINTF("with irqfd, delayed until msix enabled\n");
    }
}
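/* Handle a message from the ivshmem server.  Each message is a little-endian
 * int64 position, optionally accompanied by a file descriptor passed via
 * SCM_RIGHTS:
 *   posn == -1 with an fd: the shared memory region to map into BAR 2
 *   posn >= 0  with an fd: an eventfd for one vector of that peer
 *   posn >= 0  without fd: our own ID (first time) or a peer disconnect
 */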
static void ivshmem_read(void *opaque, const uint8_t *buf, int size)
{
    IVShmemState *s = opaque;
    int incoming_fd;
    int new_eventfd;
    int64_t incoming_posn;
    Error *err = NULL;
    Peer *peer;

    if (!fifo_update_and_get_i64(s, buf, size, &incoming_posn)) {
        return;
    }

    if (incoming_posn < -1) {
        IVSHMEM_DPRINTF("invalid incoming_posn %" PRId64 "\n", incoming_posn);
        return;
    }

    /* pick off s->server_chr->msgfd and store it, posn should accompany msg */
    incoming_fd = qemu_chr_fe_get_msgfd(s->server_chr);
    IVSHMEM_DPRINTF("posn is %" PRId64 ", fd is %d\n",
                    incoming_posn, incoming_fd);

    /* make sure we have enough space for this peer */
    if (incoming_posn >= s->nb_peers) {
        if (resize_peers(s, incoming_posn + 1) < 0) {
            error_report("failed to resize peers array");
            if (incoming_fd != -1) {
                close(incoming_fd);
            }
            return;
        }
    }

    peer = &s->peers[incoming_posn];

    if (incoming_fd == -1) {
        /* if posn is positive and unseen before then this is our posn */
        if (incoming_posn >= 0 && s->vm_id == -1) {
            /* receive our posn */
            s->vm_id = incoming_posn;
        } else {
            /* otherwise an fd == -1 means an existing peer has gone away */
            IVSHMEM_DPRINTF("posn %" PRId64 " has gone away\n", incoming_posn);
            close_peer_eventfds(s, incoming_posn);
        }
        return;
    }

    /* if the position is -1, then it's shared memory region fd */
    if (incoming_posn == -1) {
        void *map_ptr;

        if (memory_region_is_mapped(&s->ivshmem)) {
            error_report("shm already initialized");
            close(incoming_fd);
            return;
        }

        if (check_shm_size(s, incoming_fd, &err) == -1) {
            error_report_err(err);
            close(incoming_fd);
            return;
        }

        /* mmap the region and map into the BAR2 */
        map_ptr = mmap(0, s->ivshmem_size, PROT_READ|PROT_WRITE, MAP_SHARED,
                       incoming_fd, 0);
        if (map_ptr == MAP_FAILED) {
            error_report("Failed to mmap shared memory %s", strerror(errno));
            close(incoming_fd);
            return;
        }
        memory_region_init_ram_ptr(&s->ivshmem, OBJECT(s),
                                   "ivshmem.bar2", s->ivshmem_size, map_ptr);
        vmstate_register_ram(&s->ivshmem, DEVICE(s));

        IVSHMEM_DPRINTF("guest h/w addr = %p, size = %" PRIu64 "\n",
                        map_ptr, s->ivshmem_size);

        memory_region_add_subregion(&s->bar, 0, &s->ivshmem);

        close(incoming_fd);
        return;
    }

    /* each peer has an associated array of eventfds, and we keep
     * track of how many eventfds have been received so far */
    /* get a new eventfd: */
    if (peer->nb_eventfds >= s->vectors) {
        error_report("Too many eventfds received, device has %d vectors",
                     s->vectors);
        close(incoming_fd);
        return;
    }

    new_eventfd = peer->nb_eventfds++;

    /* this is an eventfd for a particular peer VM */
    IVSHMEM_DPRINTF("eventfds[%" PRId64 "][%d] = %d\n", incoming_posn,
                    new_eventfd, incoming_fd);
    event_notifier_init_fd(&peer->eventfds[new_eventfd], incoming_fd);
    fcntl_setfl(incoming_fd, O_NONBLOCK); /* msix/irqfd poll non block */

    if (incoming_posn == s->vm_id) {
        setup_interrupt(s, new_eventfd);
    }

    if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
        ivshmem_add_eventfd(s, incoming_posn, new_eventfd);
    }
}

static void ivshmem_check_version(void *opaque, const uint8_t *buf, int size)
{
    IVShmemState *s = opaque;
    int tmp;
    int64_t version;

    if (!fifo_update_and_get_i64(s, buf, size, &version)) {
        return;
    }

    tmp = qemu_chr_fe_get_msgfd(s->server_chr);
    if (tmp != -1 || version != IVSHMEM_PROTOCOL_VERSION) {
        fprintf(stderr, "incompatible version, you are connecting to an "
                "ivshmem-server using a different protocol, please check "
                "your setup\n");
        qemu_chr_delete(s->server_chr);
        s->server_chr = NULL;
        return;
    }

    IVSHMEM_DPRINTF("version check ok, switch to real chardev handler\n");
    qemu_chr_add_handlers(s->server_chr, ivshmem_can_receive, ivshmem_read,
                          ivshmem_event, s);
}
/* Select the MSI-X vectors used by the device.
 * ivshmem maps events to vectors statically, so
 * we just enable all vectors on init and after reset. */
static void ivshmem_use_msix(IVShmemState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    int i;

    IVSHMEM_DPRINTF("%s, msix present: %d\n", __func__, msix_present(d));
    if (!msix_present(d)) {
        return;
    }

    for (i = 0; i < s->vectors; i++) {
        msix_vector_use(d, i);
    }
}

static void ivshmem_reset(DeviceState *d)
{
    IVShmemState *s = IVSHMEM(d);

    s->intrstatus = 0;
    s->intrmask = 0;
    ivshmem_use_msix(s);
}

static int ivshmem_setup_msi(IVShmemState *s)
{
    if (msix_init_exclusive_bar(PCI_DEVICE(s), s->vectors, 1)) {
        return -1;
    }

    IVSHMEM_DPRINTF("msix initialized (%d vectors)\n", s->vectors);

    /* allocate QEMU char devices for receiving interrupts */
    s->msi_vectors = g_malloc0(s->vectors * sizeof(MSIVector));

    ivshmem_use_msix(s);
    return 0;
}

static void ivshmem_enable_irqfd(IVShmemState *s)
{
    PCIDevice *pdev = PCI_DEVICE(s);
    int i;

    for (i = 0; i < s->peers[s->vm_id].nb_eventfds; i++) {
        ivshmem_add_kvm_msi_virq(s, i);
    }

    if (msix_set_vector_notifiers(pdev,
                                  ivshmem_vector_unmask,
                                  ivshmem_vector_mask,
                                  ivshmem_vector_poll)) {
        error_report("ivshmem: msix_set_vector_notifiers failed");
    }
}

static void ivshmem_remove_kvm_msi_virq(IVShmemState *s, int vector)
{
    IVSHMEM_DPRINTF("ivshmem_remove_kvm_msi_virq vector:%d\n", vector);

    if (s->msi_vectors[vector].pdev == NULL) {
        return;
    }

    /* it was cleaned when masked in the frontend. */
    kvm_irqchip_release_virq(kvm_state, s->msi_vectors[vector].virq);

    s->msi_vectors[vector].pdev = NULL;
}

static void ivshmem_disable_irqfd(IVShmemState *s)
{
    PCIDevice *pdev = PCI_DEVICE(s);
    int i;

    for (i = 0; i < s->peers[s->vm_id].nb_eventfds; i++) {
        ivshmem_remove_kvm_msi_virq(s, i);
    }

    msix_unset_vector_notifiers(pdev);
}

static void ivshmem_write_config(PCIDevice *pdev, uint32_t address,
                                 uint32_t val, int len)
{
    IVShmemState *s = IVSHMEM(pdev);
    int is_enabled, was_enabled = msix_enabled(pdev);

    pci_default_write_config(pdev, address, val, len);
    is_enabled = msix_enabled(pdev);

    if (kvm_msi_via_irqfd_enabled() && s->vm_id != -1) {
        if (!was_enabled && is_enabled) {
            ivshmem_enable_irqfd(s);
        } else if (was_enabled && !is_enabled) {
            ivshmem_disable_irqfd(s);
        }
    }
}
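/* Realize: exactly one backing store must be configured - a 'memdev' host
 * memory backend, an ivshmem server socket ('chardev'), or a POSIX shared
 * memory object ('shm').  Registers live in BAR 0, the shared memory is
 * exposed through BAR 2.
 */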
static void pci_ivshmem_realize(PCIDevice *dev, Error **errp)
{
    IVShmemState *s = IVSHMEM(dev);
    uint8_t *pci_conf;
    uint8_t attr = PCI_BASE_ADDRESS_SPACE_MEMORY |
        PCI_BASE_ADDRESS_MEM_PREFETCH;

    if (!!s->server_chr + !!s->shmobj + !!s->hostmem != 1) {
        error_setg(errp, "You must specify either a shmobj, a chardev"
                   " or a hostmem");
        return;
    }

    if (s->hostmem) {
        MemoryRegion *mr;

        if (s->sizearg) {
            g_warning("size argument ignored with hostmem");
        }

        mr = host_memory_backend_get_memory(s->hostmem, errp);
        s->ivshmem_size = memory_region_size(mr);
    } else if (s->sizearg == NULL) {
        s->ivshmem_size = 4 << 20; /* 4 MB default */
    } else {
        char *end;
        int64_t size = qemu_strtosz(s->sizearg, &end);
        if (size < 0 || *end != '\0' || !is_power_of_2(size)) {
            error_setg(errp, "Invalid size %s", s->sizearg);
            return;
        }
        s->ivshmem_size = size;
    }

    fifo8_create(&s->incoming_fifo, sizeof(int64_t));

    /* IRQFD requires MSI */
    if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD) &&
        !ivshmem_has_feature(s, IVSHMEM_MSI)) {
        error_setg(errp, "ioeventfd/irqfd requires MSI");
        return;
    }

    /* check that role is reasonable */
    if (s->role) {
        if (strncmp(s->role, "peer", 5) == 0) {
            s->role_val = IVSHMEM_PEER;
        } else if (strncmp(s->role, "master", 7) == 0) {
            s->role_val = IVSHMEM_MASTER;
        } else {
            error_setg(errp, "'role' must be 'peer' or 'master'");
            return;
        }
    } else {
        s->role_val = IVSHMEM_MASTER; /* default */
    }

    if (s->role_val == IVSHMEM_PEER) {
        error_setg(&s->migration_blocker,
                   "Migration is disabled when using feature 'peer mode' in device 'ivshmem'");
        migrate_add_blocker(s->migration_blocker);
    }

    pci_conf = dev->config;
    pci_conf[PCI_COMMAND] = PCI_COMMAND_IO | PCI_COMMAND_MEMORY;

    pci_config_set_interrupt_pin(pci_conf, 1);

    memory_region_init_io(&s->ivshmem_mmio, OBJECT(s), &ivshmem_mmio_ops, s,
                          "ivshmem-mmio", IVSHMEM_REG_BAR_SIZE);

    /* region for registers */
    pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY,
                     &s->ivshmem_mmio);

    memory_region_init(&s->bar, OBJECT(s), "ivshmem-bar2-container",
                       s->ivshmem_size);
    if (s->ivshmem_64bit) {
        attr |= PCI_BASE_ADDRESS_MEM_TYPE_64;
    }

    if (s->hostmem != NULL) {
        MemoryRegion *mr;

        IVSHMEM_DPRINTF("using hostmem\n");

        mr = host_memory_backend_get_memory(MEMORY_BACKEND(s->hostmem), errp);
        vmstate_register_ram(mr, DEVICE(s));
        memory_region_add_subregion(&s->bar, 0, mr);
        pci_register_bar(PCI_DEVICE(s), 2, attr, &s->bar);
    } else if (s->server_chr != NULL) {
        if (strncmp(s->server_chr->filename, "unix:", 5)) {
            error_setg(errp, "chardev is not a unix client socket");
            return;
        }

        /* if we get a UNIX socket as the parameter we will talk
         * to the ivshmem server to receive the memory region */

        IVSHMEM_DPRINTF("using shared memory server (socket = %s)\n",
                        s->server_chr->filename);

        if (ivshmem_has_feature(s, IVSHMEM_MSI) &&
            ivshmem_setup_msi(s)) {
            error_setg(errp, "msix initialization failed");
            return;
        }

        /* we allocate enough space for 16 peers and grow as needed */
        resize_peers(s, 16);
        s->vm_id = -1;

        pci_register_bar(dev, 2, attr, &s->bar);

        s->eventfd_chr = g_malloc0(s->vectors * sizeof(CharDriverState *));

        qemu_chr_add_handlers(s->server_chr, ivshmem_can_receive,
                              ivshmem_check_version, ivshmem_event, s);
    } else {
        /* just map the file immediately, we're not using a server */
        int fd;

        IVSHMEM_DPRINTF("using shm_open (shm object = %s)\n", s->shmobj);

        /* try opening with O_EXCL and if it succeeds zero the memory
         * by truncating to 0 */
        if ((fd = shm_open(s->shmobj, O_CREAT|O_RDWR|O_EXCL,
                           S_IRWXU|S_IRWXG|S_IRWXO)) > 0) {
            /* truncate file to the length of the PCI device's memory */
            if (ftruncate(fd, s->ivshmem_size) != 0) {
                error_report("could not truncate shared file");
            }

        } else if ((fd = shm_open(s->shmobj, O_CREAT|O_RDWR,
                                  S_IRWXU|S_IRWXG|S_IRWXO)) < 0) {
            error_setg(errp, "could not open shared file");
            return;
        }

        if (check_shm_size(s, fd, errp) == -1) {
            return;
        }

        create_shared_memory_BAR(s, fd, attr, errp);
        close(fd);
    }
}
static void pci_ivshmem_exit(PCIDevice *dev)
{
    IVShmemState *s = IVSHMEM(dev);
    int i;

    fifo8_destroy(&s->incoming_fifo);

    if (s->migration_blocker) {
        migrate_del_blocker(s->migration_blocker);
        error_free(s->migration_blocker);
    }

    if (memory_region_is_mapped(&s->ivshmem)) {
        if (!s->hostmem) {
            void *addr = memory_region_get_ram_ptr(&s->ivshmem);

            if (munmap(addr, s->ivshmem_size) == -1) {
                error_report("Failed to munmap shared memory %s",
                             strerror(errno));
            }
        }

        vmstate_unregister_ram(&s->ivshmem, DEVICE(dev));
        memory_region_del_subregion(&s->bar, &s->ivshmem);
    }

    if (s->eventfd_chr) {
        for (i = 0; i < s->vectors; i++) {
            if (s->eventfd_chr[i]) {
                qemu_chr_free(s->eventfd_chr[i]);
            }
        }
        g_free(s->eventfd_chr);
    }

    if (s->peers) {
        for (i = 0; i < s->nb_peers; i++) {
            close_peer_eventfds(s, i);
        }
        g_free(s->peers);
    }

    if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
        msix_uninit_exclusive_bar(dev);
    }

    g_free(s->msi_vectors);
}

static bool test_msix(void *opaque, int version_id)
{
    IVShmemState *s = opaque;

    return ivshmem_has_feature(s, IVSHMEM_MSI);
}

static bool test_no_msix(void *opaque, int version_id)
{
    return !test_msix(opaque, version_id);
}

static int ivshmem_pre_load(void *opaque)
{
    IVShmemState *s = opaque;

    if (s->role_val == IVSHMEM_PEER) {
        error_report("'peer' devices are not migratable");
        return -EINVAL;
    }

    return 0;
}

static int ivshmem_post_load(void *opaque, int version_id)
{
    IVShmemState *s = opaque;

    if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
        ivshmem_use_msix(s);
    }

    return 0;
}

static int ivshmem_load_old(QEMUFile *f, void *opaque, int version_id)
{
    IVShmemState *s = opaque;
    PCIDevice *pdev = PCI_DEVICE(s);
    int ret;

    IVSHMEM_DPRINTF("ivshmem_load_old\n");

    if (version_id != 0) {
        return -EINVAL;
    }

    if (s->role_val == IVSHMEM_PEER) {
        error_report("'peer' devices are not migratable");
        return -EINVAL;
    }

    ret = pci_device_load(pdev, f);
    if (ret) {
        return ret;
    }

    if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
        msix_load(pdev, f);
        ivshmem_use_msix(s);
    } else {
        s->intrstatus = qemu_get_be32(f);
        s->intrmask = qemu_get_be32(f);
    }

    return 0;
}

static const VMStateDescription ivshmem_vmsd = {
    .name = "ivshmem",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = ivshmem_pre_load,
    .post_load = ivshmem_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, IVShmemState),

        VMSTATE_MSIX_TEST(parent_obj, IVShmemState, test_msix),
        VMSTATE_UINT32_TEST(intrstatus, IVShmemState, test_no_msix),
        VMSTATE_UINT32_TEST(intrmask, IVShmemState, test_no_msix),

        VMSTATE_END_OF_LIST()
    },
    .load_state_old = ivshmem_load_old,
    .minimum_version_id_old = 0
};
static Property ivshmem_properties[] = {
    DEFINE_PROP_CHR("chardev", IVShmemState, server_chr),
    DEFINE_PROP_STRING("size", IVShmemState, sizearg),
    DEFINE_PROP_UINT32("vectors", IVShmemState, vectors, 1),
    DEFINE_PROP_BIT("ioeventfd", IVShmemState, features, IVSHMEM_IOEVENTFD, false),
    DEFINE_PROP_BIT("msi", IVShmemState, features, IVSHMEM_MSI, true),
    DEFINE_PROP_STRING("shm", IVShmemState, shmobj),
    DEFINE_PROP_STRING("role", IVShmemState, role),
    DEFINE_PROP_UINT32("use64", IVShmemState, ivshmem_64bit, 1),
    DEFINE_PROP_END_OF_LIST(),
};

static void ivshmem_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = pci_ivshmem_realize;
    k->exit = pci_ivshmem_exit;
    k->config_write = ivshmem_write_config;
    k->vendor_id = PCI_VENDOR_ID_IVSHMEM;
    k->device_id = PCI_DEVICE_ID_IVSHMEM;
    k->class_id = PCI_CLASS_MEMORY_RAM;
    dc->reset = ivshmem_reset;
    dc->props = ivshmem_properties;
    dc->vmsd = &ivshmem_vmsd;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->desc = "Inter-VM shared memory";
}

static void ivshmem_check_memdev_is_busy(Object *obj, const char *name,
                                         Object *val, Error **errp)
{
    MemoryRegion *mr;

    mr = host_memory_backend_get_memory(MEMORY_BACKEND(val), errp);
    if (memory_region_is_mapped(mr)) {
        char *path = object_get_canonical_path_component(val);
        error_setg(errp, "can't use already busy memdev: %s", path);
        g_free(path);
    } else {
        qdev_prop_allow_set_link_before_realize(obj, name, val, errp);
    }
}

static void ivshmem_init(Object *obj)
{
    IVShmemState *s = IVSHMEM(obj);

    object_property_add_link(obj, IVSHMEM_MEMDEV_PROP, TYPE_MEMORY_BACKEND,
                             (Object **)&s->hostmem,
                             ivshmem_check_memdev_is_busy,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
}

static const TypeInfo ivshmem_info = {
    .name          = TYPE_IVSHMEM,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(IVShmemState),
    .instance_init = ivshmem_init,
    .class_init    = ivshmem_class_init,
};

static void ivshmem_register_types(void)
{
    type_register_static(&ivshmem_info);
}

type_init(ivshmem_register_types)