/*
 * Copyright (C) 2010 Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"

#include "cpu.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_host.h"
#include "hw/i386/pc.h"
#include "hw/southbridge/piix.h"
#include "hw/irq.h"
#include "hw/hw.h"
#include "hw/i386/apic-msidef.h"
#include "hw/xen/xen_common.h"
#include "hw/xen/xen-legacy-backend.h"
#include "hw/xen/xen-bus.h"
#include "hw/xen/xen-x86.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-migration.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/range.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include "sysemu/xen.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#include "exec/address-spaces.h"

#include <xen/hvm/ioreq.h>
#include <xen/hvm/e820.h>

//#define DEBUG_XEN_HVM

#ifdef DEBUG_XEN_HVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

static MemoryRegion ram_memory, ram_640k, ram_lo, ram_hi;
static MemoryRegion *framebuffer;
static bool xen_in_migration;

/* Compatibility with older versions */

/*
 * This allows QEMU to build on a system that has Xen 4.5 or earlier
 * installed.  This is here (not in hw/xen/xen_common.h) because
 * xen/hvm/ioreq.h needs to be included before this block and
 * hw/xen/xen_common.h needs to be included before xen/hvm/ioreq.h.
 */
#ifndef IOREQ_TYPE_VMWARE_PORT
#define IOREQ_TYPE_VMWARE_PORT  3
struct vmware_regs {
    uint32_t esi;
    uint32_t edi;
    uint32_t ebx;
    uint32_t ecx;
    uint32_t edx;
};
typedef struct vmware_regs vmware_regs_t;

struct shared_vmport_iopage {
    struct vmware_regs vcpu_vmport_regs[1];
};
typedef struct shared_vmport_iopage shared_vmport_iopage_t;
#endif

static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_ioreq[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_ioreq[vcpu];
}

#define BUFFER_IO_MAX_DELAY  100

typedef struct XenPhysmap {
    hwaddr start_addr;
    ram_addr_t size;
    const char *name;
    hwaddr phys_offset;

    QLIST_ENTRY(XenPhysmap) list;
} XenPhysmap;

static QLIST_HEAD(, XenPhysmap) xen_physmap;

typedef struct XenPciDevice {
    PCIDevice *pci_dev;
    uint32_t sbdf;
    QLIST_ENTRY(XenPciDevice) entry;
} XenPciDevice;

typedef struct XenIOState {
    ioservid_t ioservid;
    shared_iopage_t *shared_page;
    shared_vmport_iopage_t *shared_vmport_page;
    buffered_iopage_t *buffered_io_page;
    QEMUTimer *buffered_io_timer;
    CPUState **cpu_by_vcpu_id;
    /* the evtchn port for polling the notification */
    evtchn_port_t *ioreq_local_port;
    /* evtchn remote and local ports for buffered io */
    evtchn_port_t bufioreq_remote_port;
    evtchn_port_t bufioreq_local_port;
    /* the evtchn fd for polling */
    xenevtchn_handle *xce_handle;
    /* which vcpu we are serving */
    int send_vcpu;

    struct xs_handle *xenstore;
    MemoryListener memory_listener;
    MemoryListener io_listener;
    QLIST_HEAD(, XenPciDevice) dev_list;
    DeviceListener device_listener;
    hwaddr free_phys_offset;
    const XenPhysmap *log_for_dirtybit;
    /* Buffer used by xen_sync_dirty_bitmap */
    unsigned long *dirty_bitmap;

    Notifier exit;
    Notifier suspend;
    Notifier wakeup;
} XenIOState;
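
/*
 * Rough request flow, as implemented below: Xen traps the guest's I/O
 * access and queues an ioreq in the shared page, then signals us over an
 * event channel; cpu_handle_ioreq() picks the request up, dispatches it
 * through handle_ioreq() (PIO, MMIO-style moves, PCI config, vmport),
 * writes the result back, and notifies Xen that the response is ready.
 */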

/* Xen-specific functions for the PIIX PCI device */

int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
{
    return irq_num + (PCI_SLOT(pci_dev->devfn) << 2);
}

void xen_piix3_set_irq(void *opaque, int irq_num, int level)
{
    xen_set_pci_intx_level(xen_domid, 0, 0, irq_num >> 2,
                           irq_num & 3, level);
}

void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len)
{
    int i;

    /* Scan for updates to PCI link routes (0x60-0x63). */
    for (i = 0; i < len; i++) {
        uint8_t v = (val >> (8 * i)) & 0xff;
        if (v & 0x80) {
            v = 0;
        }
        v &= 0xf;
        if (((address + i) >= PIIX_PIRQCA) && ((address + i) <= PIIX_PIRQCD)) {
            xen_set_pci_link_route(xen_domid, address + i - PIIX_PIRQCA, v);
        }
    }
}
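
/*
 * For illustration: a guest write of 0x0a to PIRQA (register 0x60) routes
 * PCI link A to ISA IRQ 10, while any value with bit 7 set means "routing
 * disabled" and is forwarded to Xen as IRQ 0 by the loop above.
 */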

int xen_is_pirq_msi(uint32_t msi_data)
{
    /* If vector is 0, the msi is remapped into a pirq, passed as
     * dest_id.
     */
    return ((msi_data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT) == 0;
}

void xen_hvm_inject_msi(uint64_t addr, uint32_t data)
{
    xen_inject_msi(xen_domid, addr, data);
}

static void xen_suspend_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3);
}

/* Xen Interrupt Controller */

static void xen_set_irq(void *opaque, int irq, int level)
{
    xen_set_isa_irq_level(xen_domid, irq, level);
}

qemu_irq *xen_interrupt_controller_init(void)
{
    return qemu_allocate_irqs(xen_set_irq, NULL, 16);
}

/* Memory Ops */

static void xen_ram_init(PCMachineState *pcms,
                         ram_addr_t ram_size, MemoryRegion **ram_memory_p)
{
    X86MachineState *x86ms = X86_MACHINE(pcms);
    MemoryRegion *sysmem = get_system_memory();
    ram_addr_t block_len;
    uint64_t user_lowmem =
        object_property_get_uint(qdev_get_machine(),
                                 PC_MACHINE_MAX_RAM_BELOW_4G,
                                 &error_abort);

    /*
     * Handle the machine option max-ram-below-4g.  It is basically doing
     * min(xen limit, user limit).
     */
    if (!user_lowmem) {
        user_lowmem = HVM_BELOW_4G_RAM_END; /* default */
    }
    if (HVM_BELOW_4G_RAM_END <= user_lowmem) {
        user_lowmem = HVM_BELOW_4G_RAM_END;
    }

    if (ram_size >= user_lowmem) {
        x86ms->above_4g_mem_size = ram_size - user_lowmem;
        x86ms->below_4g_mem_size = user_lowmem;
    } else {
        x86ms->above_4g_mem_size = 0;
        x86ms->below_4g_mem_size = ram_size;
    }
    if (!x86ms->above_4g_mem_size) {
        block_len = ram_size;
    } else {
        /*
         * Xen does not allocate the memory contiguously, it keeps a
         * hole of the size computed above or passed in.
         */
        block_len = (4 * GiB) + x86ms->above_4g_mem_size;
    }
    memory_region_init_ram(&ram_memory, NULL, "xen.ram", block_len,
                           &error_fatal);
    *ram_memory_p = &ram_memory;

    memory_region_init_alias(&ram_640k, NULL, "xen.ram.640k",
                             &ram_memory, 0, 0xa0000);
    memory_region_add_subregion(sysmem, 0, &ram_640k);
    /*
     * Skip the VGA I/O memory space; it will be registered later by the
     * emulated VGA device.
     *
     * The area between 0xc0000 and 0x100000 will be used by SeaBIOS to load
     * the option ROMs, so it is registered here as RAM.
     */
    memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo",
                             &ram_memory, 0xc0000,
                             x86ms->below_4g_mem_size - 0xc0000);
    memory_region_add_subregion(sysmem, 0xc0000, &ram_lo);
    if (x86ms->above_4g_mem_size > 0) {
        memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi",
                                 &ram_memory, 0x100000000ULL,
                                 x86ms->above_4g_mem_size);
        memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi);
    }
}
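
/*
 * The resulting guest layout, for a guest with more RAM than fits below
 * 4GiB (illustrative; the exact split depends on max-ram-below-4g and
 * HVM_BELOW_4G_RAM_END):
 *
 *   0x00000000 - 0x0009ffff : xen.ram.640k (alias into xen.ram)
 *   0x000a0000 - 0x000bffff : VGA window, registered by the VGA device
 *   0x000c0000 - below_4g   : xen.ram.lo   (alias into xen.ram)
 *   below_4g   - 4GiB       : PCI hole
 *   4GiB       - top        : xen.ram.hi   (alias into xen.ram)
 */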

void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
                   Error **errp)
{
    unsigned long nr_pfn;
    xen_pfn_t *pfn_list;
    int i;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* RAM already populated in Xen */
        fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT
                " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n",
                __func__, size, ram_addr);
        return;
    }

    if (mr == &ram_memory) {
        return;
    }

    trace_xen_ram_alloc(ram_addr, size);

    nr_pfn = size >> TARGET_PAGE_BITS;
    pfn_list = g_malloc(sizeof(*pfn_list) * nr_pfn);

    for (i = 0; i < nr_pfn; i++) {
        pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
    }

    if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0,
                                         pfn_list)) {
        error_setg(errp, "xen: failed to populate ram at " RAM_ADDR_FMT,
                   ram_addr);
    }

    g_free(pfn_list);
}

static XenPhysmap *get_physmapping(hwaddr start_addr, ram_addr_t size)
{
    XenPhysmap *physmap = NULL;

    start_addr &= TARGET_PAGE_MASK;

    QLIST_FOREACH(physmap, &xen_physmap, list) {
        if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) {
            return physmap;
        }
    }
    return NULL;
}

static hwaddr xen_phys_offset_to_gaddr(hwaddr phys_offset, ram_addr_t size)
{
    hwaddr addr = phys_offset & TARGET_PAGE_MASK;
    XenPhysmap *physmap = NULL;

    QLIST_FOREACH(physmap, &xen_physmap, list) {
        if (range_covers_byte(physmap->phys_offset, physmap->size, addr)) {
            return physmap->start_addr + (phys_offset - physmap->phys_offset);
        }
    }

    return phys_offset;
}

#ifdef XEN_COMPAT_PHYSMAP
static int xen_save_physmap(XenIOState *state, XenPhysmap *physmap)
{
    char path[80], value[17];

    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/start_addr",
             xen_domid, (uint64_t)physmap->phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)physmap->start_addr);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/size",
             xen_domid, (uint64_t)physmap->phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)physmap->size);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    if (physmap->name) {
        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%"PRIx64"/name",
                 xen_domid, (uint64_t)physmap->phys_offset);
        if (!xs_write(state->xenstore, 0, path,
                      physmap->name, strlen(physmap->name))) {
            return -1;
        }
    }
    return 0;
}
#else
static int xen_save_physmap(XenIOState *state, XenPhysmap *physmap)
{
    return 0;
}
#endif
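
/*
 * The physmap is persisted in xenstore so that a device model restarted
 * for migration can rebuild it (see xen_read_physmap() below); the tree
 * looks roughly like this, with every value written as a hex string:
 *
 *   /local/domain/0/device-model/<domid>/physmap/<phys_offset>/start_addr
 *   /local/domain/0/device-model/<domid>/physmap/<phys_offset>/size
 *   /local/domain/0/device-model/<domid>/physmap/<phys_offset>/name
 */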

static int xen_add_to_physmap(XenIOState *state,
                              hwaddr start_addr,
                              ram_addr_t size,
                              MemoryRegion *mr,
                              hwaddr offset_within_region)
{
    unsigned long nr_pages;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr pfn, start_gpfn;
    hwaddr phys_offset = memory_region_get_ram_addr(mr);
    const char *mr_name;

    if (get_physmapping(start_addr, size)) {
        return 0;
    }
    if (size <= 0) {
        return -1;
    }

    /* Xen can only handle a single dirty log region for now and we want
     * the linear framebuffer to be that region.
     * Avoid tracking any region that is not videoram and avoid tracking
     * the legacy vga region. */
    if (mr == framebuffer && start_addr > 0xbffff) {
        goto go_physmap;
    }
    return -1;

go_physmap:
    DPRINTF("mapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
            start_addr, start_addr + size);

    mr_name = memory_region_name(mr);

    physmap = g_malloc(sizeof(XenPhysmap));

    physmap->start_addr = start_addr;
    physmap->size = size;
    physmap->name = mr_name;
    physmap->phys_offset = phys_offset;

    QLIST_INSERT_HEAD(&xen_physmap, physmap, list);

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* Now that we have a physmap entry we can replace a dummy mapping
         * with a real one of guest foreign memory. */
        uint8_t *p = xen_replace_cache_entry(phys_offset, start_addr, size);
        assert(p && p == memory_region_get_ram_ptr(mr));

        return 0;
    }

    pfn = phys_offset >> TARGET_PAGE_BITS;
    start_gpfn = start_addr >> TARGET_PAGE_BITS;
    nr_pages = size >> TARGET_PAGE_BITS;
    rc = xendevicemodel_relocate_memory(xen_dmod, xen_domid, nr_pages, pfn,
                                        start_gpfn);
    if (rc) {
        int saved_errno = errno;

        error_report("relocate_memory %lu pages from GFN %"HWADDR_PRIx
                     " to GFN %"HWADDR_PRIx" failed: %s",
                     nr_pages, pfn, start_gpfn, strerror(saved_errno));
        errno = saved_errno;
        return -1;
    }

    rc = xendevicemodel_pin_memory_cacheattr(xen_dmod, xen_domid,
                                   start_addr >> TARGET_PAGE_BITS,
                                   (start_addr + size - 1) >> TARGET_PAGE_BITS,
                                   XEN_DOMCTL_MEM_CACHEATTR_WB);
    if (rc) {
        error_report("pin_memory_cacheattr failed: %s", strerror(errno));
    }
    return xen_save_physmap(state, physmap);
}

static int xen_remove_from_physmap(XenIOState *state,
                                   hwaddr start_addr,
                                   ram_addr_t size)
{
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr phys_offset = 0;

    physmap = get_physmapping(start_addr, size);
    if (physmap == NULL) {
        return -1;
    }

    phys_offset = physmap->phys_offset;
    size = physmap->size;

    DPRINTF("unmapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx", at "
            "%"HWADDR_PRIx"\n", start_addr, start_addr + size, phys_offset);

    size >>= TARGET_PAGE_BITS;
    start_addr >>= TARGET_PAGE_BITS;
    phys_offset >>= TARGET_PAGE_BITS;
    rc = xendevicemodel_relocate_memory(xen_dmod, xen_domid, size, start_addr,
                                        phys_offset);
    if (rc) {
        int saved_errno = errno;

        error_report("relocate_memory "RAM_ADDR_FMT" pages"
                     " from GFN %"HWADDR_PRIx
                     " to GFN %"HWADDR_PRIx" failed: %s",
                     size, start_addr, phys_offset, strerror(saved_errno));
        errno = saved_errno;
        return -1;
    }

    QLIST_REMOVE(physmap, list);
    if (state->log_for_dirtybit == physmap) {
        state->log_for_dirtybit = NULL;
        g_free(state->dirty_bitmap);
        state->dirty_bitmap = NULL;
    }
    g_free(physmap);

    return 0;
}
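
/*
 * MemoryListener callbacks: every section that is added to (or deleted
 * from) the guest address space is mirrored to the ioreq server, and RAM
 * sections with VGA dirty logging enabled (in practice, the framebuffer)
 * are additionally entered into the physmap above.
 */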

static void xen_set_memory(struct MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool add)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty = memory_region_is_logging(section->mr, DIRTY_MEMORY_VGA);
    hvmmem_type_t mem_type;

    if (section->mr == &ram_memory) {
        return;
    } else {
        if (add) {
            xen_map_memory_section(xen_domid, state->ioservid,
                                   section);
        } else {
            xen_unmap_memory_section(xen_domid, state->ioservid,
                                     section);
        }
    }

    if (!memory_region_is_ram(section->mr)) {
        return;
    }

    if (log_dirty != add) {
        return;
    }

    trace_xen_client_set_memory(start_addr, size, log_dirty);

    start_addr &= TARGET_PAGE_MASK;
    size = TARGET_PAGE_ALIGN(size);

    if (add) {
        if (!memory_region_is_rom(section->mr)) {
            xen_add_to_physmap(state, start_addr, size,
                               section->mr, section->offset_within_region);
        } else {
            mem_type = HVMMEM_ram_ro;
            if (xen_set_mem_type(xen_domid, mem_type,
                                 start_addr >> TARGET_PAGE_BITS,
                                 size >> TARGET_PAGE_BITS)) {
                DPRINTF("xen_set_mem_type error, addr: "TARGET_FMT_plx"\n",
                        start_addr);
            }
        }
    } else {
        if (xen_remove_from_physmap(state, start_addr, size) < 0) {
            DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n",
                    start_addr);
        }
    }
}

static void xen_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    memory_region_ref(section->mr);
    xen_set_memory(listener, section, true);
}

static void xen_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    xen_set_memory(listener, section, false);
    memory_region_unref(section->mr);
}

static void xen_io_add(MemoryListener *listener,
                       MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, io_listener);
    MemoryRegion *mr = section->mr;

    if (mr->ops == &unassigned_io_ops) {
        return;
    }

    memory_region_ref(mr);

    xen_map_io_section(xen_domid, state->ioservid, section);
}

static void xen_io_del(MemoryListener *listener,
                       MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, io_listener);
    MemoryRegion *mr = section->mr;

    if (mr->ops == &unassigned_io_ops) {
        return;
    }

    xen_unmap_io_section(xen_domid, state->ioservid, section);

    memory_region_unref(mr);
}

static void xen_device_realize(DeviceListener *listener,
                               DeviceState *dev)
{
    XenIOState *state = container_of(listener, XenIOState, device_listener);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);
        XenPciDevice *xendev = g_new(XenPciDevice, 1);

        xendev->pci_dev = pci_dev;
        xendev->sbdf = PCI_BUILD_BDF(pci_dev_bus_num(pci_dev),
                                     pci_dev->devfn);
        QLIST_INSERT_HEAD(&state->dev_list, xendev, entry);

        xen_map_pcidev(xen_domid, state->ioservid, pci_dev);
    }
}

static void xen_device_unrealize(DeviceListener *listener,
                                 DeviceState *dev)
{
    XenIOState *state = container_of(listener, XenIOState, device_listener);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);
        XenPciDevice *xendev, *next;

        xen_unmap_pcidev(xen_domid, state->ioservid, pci_dev);

        QLIST_FOREACH_SAFE(xendev, &state->dev_list, entry, next) {
            if (xendev->pci_dev == pci_dev) {
                QLIST_REMOVE(xendev, entry);
                g_free(xendev);
                break;
            }
        }
    }
}

static void xen_sync_dirty_bitmap(XenIOState *state,
                                  hwaddr start_addr,
                                  ram_addr_t size)
{
    hwaddr npages = size >> TARGET_PAGE_BITS;
    const int width = sizeof(unsigned long) * 8;
    size_t bitmap_size = DIV_ROUND_UP(npages, width);
    int rc, i, j;
    const XenPhysmap *physmap = NULL;

    physmap = get_physmapping(start_addr, size);
    if (physmap == NULL) {
        /* not handled */
        return;
    }

    if (state->log_for_dirtybit == NULL) {
        state->log_for_dirtybit = physmap;
        state->dirty_bitmap = g_new(unsigned long, bitmap_size);
    } else if (state->log_for_dirtybit != physmap) {
        /* Only one range for dirty bitmap can be tracked. */
        return;
    }

    rc = xen_track_dirty_vram(xen_domid, start_addr >> TARGET_PAGE_BITS,
                              npages, state->dirty_bitmap);
    if (rc < 0) {
#ifndef ENODATA
#define ENODATA  ENOENT
#endif
        if (errno == ENODATA) {
            memory_region_set_dirty(framebuffer, 0, size);
            DPRINTF("xen: track_dirty_vram failed (0x" TARGET_FMT_plx
                    ", 0x" TARGET_FMT_plx "): %s\n",
                    start_addr, start_addr + size, strerror(errno));
        }
        return;
    }

    for (i = 0; i < bitmap_size; i++) {
        unsigned long map = state->dirty_bitmap[i];
        while (map != 0) {
            j = ctzl(map);
            map &= ~(1ul << j);
            memory_region_set_dirty(framebuffer,
                                    (i * width + j) * TARGET_PAGE_SIZE,
                                    TARGET_PAGE_SIZE);
        }
    }
}

static void xen_log_start(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    if (new & ~old & (1 << DIRTY_MEMORY_VGA)) {
        xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                              int128_get64(section->size));
    }
}

static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section,
                         int old, int new)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    if (old & ~new & (1 << DIRTY_MEMORY_VGA)) {
        state->log_for_dirtybit = NULL;
        g_free(state->dirty_bitmap);
        state->dirty_bitmap = NULL;
        /* Disable dirty bit tracking */
        xen_track_dirty_vram(xen_domid, 0, 0, NULL);
    }
}

static void xen_log_sync(MemoryListener *listener, MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                          int128_get64(section->size));
}

static void xen_log_global_start(MemoryListener *listener)
{
    if (xen_enabled()) {
        xen_in_migration = true;
    }
}

static void xen_log_global_stop(MemoryListener *listener)
{
    xen_in_migration = false;
}

static MemoryListener xen_memory_listener = {
    .region_add = xen_region_add,
    .region_del = xen_region_del,
    .log_start = xen_log_start,
    .log_stop = xen_log_stop,
    .log_sync = xen_log_sync,
    .log_global_start = xen_log_global_start,
    .log_global_stop = xen_log_global_stop,
    .priority = 10,
};

static MemoryListener xen_io_listener = {
    .region_add = xen_io_add,
    .region_del = xen_io_del,
    .priority = 10,
};

static DeviceListener xen_device_listener = {
    .realize = xen_device_realize,
    .unrealize = xen_device_unrealize,
};

/* Get the ioreq packet from shared memory. */
static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
{
    ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);

    if (req->state != STATE_IOREQ_READY) {
        DPRINTF("I/O request not ready: "
                "%x, ptr: %x, port: %"PRIx64", "
                "data: %"PRIx64", count: %u, size: %u\n",
                req->state, req->data_is_ptr, req->addr,
                req->data, req->count, req->size);
        return NULL;
    }

    xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */

    req->state = STATE_IOREQ_INPROCESS;
    return req;
}

/*
 * Poll the event channel for a port notification and return the pending
 * ioreq packet, or NULL if there is none.
 */
static ioreq_t *cpu_get_ioreq(XenIOState *state)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int max_cpus = ms->smp.max_cpus;
    int i;
    evtchn_port_t port;

    port = xenevtchn_pending(state->xce_handle);
    if (port == state->bufioreq_local_port) {
        timer_mod(state->buffered_io_timer,
                BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
        return NULL;
    }

    if (port != -1) {
        for (i = 0; i < max_cpus; i++) {
            if (state->ioreq_local_port[i] == port) {
                break;
            }
        }

        if (i == max_cpus) {
            hw_error("Fatal error while trying to get io event!\n");
        }

        /* unmask the wanted port again */
        xenevtchn_unmask(state->xce_handle, port);

        /* get the io packet from shared memory */
        state->send_vcpu = i;
        return cpu_get_ioreq_from_shared_memory(state, i);
    }

    /* read error or read nothing */
    return NULL;
}

static uint32_t do_inp(uint32_t addr, unsigned long size)
{
    switch (size) {
    case 1:
        return cpu_inb(addr);
    case 2:
        return cpu_inw(addr);
    case 4:
        return cpu_inl(addr);
    default:
        hw_error("inp: bad size: %04x %lx", addr, size);
    }
}

static void do_outp(uint32_t addr,
                    unsigned long size, uint32_t val)
{
    switch (size) {
    case 1:
        return cpu_outb(addr, val);
    case 2:
        return cpu_outw(addr, val);
    case 4:
        return cpu_outl(addr, val);
    default:
        hw_error("outp: bad size: %04x %lx", addr, size);
    }
}

/*
 * Helper functions which read/write an object from/to physical guest
 * memory, as part of the implementation of an ioreq.
 *
 * Equivalent to
 *   cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i,
 *                          val, req->size, 0/1)
 * except without the integer overflow problems.
 */
static void rw_phys_req_item(hwaddr addr,
                             ioreq_t *req, uint32_t i, void *val, int rw)
{
    /* Do everything unsigned so overflow just results in a truncated result
     * and accesses to undesired parts of guest memory, which is up
     * to the guest */
    hwaddr offset = (hwaddr)req->size * i;
    if (req->df) {
        addr -= offset;
    } else {
        addr += offset;
    }
    cpu_physical_memory_rw(addr, val, req->size, rw);
}

static inline void read_phys_req_item(hwaddr addr,
                                      ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 0);
}
static inline void write_phys_req_item(hwaddr addr,
                                       ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 1);
}
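
/*
 * Note that req->df mirrors the x86 direction flag for repeated string
 * I/O: with DF set (e.g. "std; rep outsb"), item i lives at a *lower*
 * guest address than item 0, hence the subtraction in rw_phys_req_item().
 */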

static void cpu_ioreq_pio(ioreq_t *req)
{
    uint32_t i;

    trace_cpu_ioreq_pio(req, req->dir, req->df, req->data_is_ptr, req->addr,
                        req->data, req->count, req->size);

    if (req->size > sizeof(uint32_t)) {
        hw_error("PIO: bad size (%u)", req->size);
    }

    if (req->dir == IOREQ_READ) {
        if (!req->data_is_ptr) {
            req->data = do_inp(req->addr, req->size);
            trace_cpu_ioreq_pio_read_reg(req, req->data, req->addr,
                                         req->size);
        } else {
            uint32_t tmp;

            for (i = 0; i < req->count; i++) {
                tmp = do_inp(req->addr, req->size);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        }
    } else if (req->dir == IOREQ_WRITE) {
        if (!req->data_is_ptr) {
            trace_cpu_ioreq_pio_write_reg(req, req->data, req->addr,
                                          req->size);
            do_outp(req->addr, req->size, req->data);
        } else {
            for (i = 0; i < req->count; i++) {
                uint32_t tmp = 0;

                read_phys_req_item(req->data, req, i, &tmp);
                do_outp(req->addr, req->size, tmp);
            }
        }
    }
}

static void cpu_ioreq_move(ioreq_t *req)
{
    uint32_t i;

    trace_cpu_ioreq_move(req, req->dir, req->df, req->data_is_ptr, req->addr,
                         req->data, req->count, req->size);

    if (req->size > sizeof(req->data)) {
        hw_error("MMIO: bad size (%u)", req->size);
    }

    if (!req->data_is_ptr) {
        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &req->data);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                write_phys_req_item(req->addr, req, i, &req->data);
            }
        }
    } else {
        uint64_t tmp;

        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &tmp);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->data, req, i, &tmp);
                write_phys_req_item(req->addr, req, i, &tmp);
            }
        }
    }
}

static void cpu_ioreq_config(XenIOState *state, ioreq_t *req)
{
    uint32_t sbdf = req->addr >> 32;
    uint32_t reg = req->addr;
    XenPciDevice *xendev;

    if (req->size != sizeof(uint8_t) && req->size != sizeof(uint16_t) &&
        req->size != sizeof(uint32_t)) {
        hw_error("PCI config access: bad size (%u)", req->size);
    }

    if (req->count != 1) {
        hw_error("PCI config access: bad count (%u)", req->count);
    }

    QLIST_FOREACH(xendev, &state->dev_list, entry) {
        if (xendev->sbdf != sbdf) {
            continue;
        }

        if (!req->data_is_ptr) {
            if (req->dir == IOREQ_READ) {
                req->data = pci_host_config_read_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    req->size);
                trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
                                            req->size, req->data);
            } else if (req->dir == IOREQ_WRITE) {
                trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
                                             req->size, req->data);
                pci_host_config_write_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    req->data, req->size);
            }
        } else {
            uint32_t tmp;

            if (req->dir == IOREQ_READ) {
                tmp = pci_host_config_read_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    req->size);
                trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
                                            req->size, tmp);
                write_phys_req_item(req->data, req, 0, &tmp);
            } else if (req->dir == IOREQ_WRITE) {
                read_phys_req_item(req->data, req, 0, &tmp);
                trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
                                             req->size, tmp);
                pci_host_config_write_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    tmp, req->size);
            }
        }
    }
}

static void regs_to_cpu(vmware_regs_t *vmport_regs, ioreq_t *req)
{
    X86CPU *cpu;
    CPUX86State *env;

    cpu = X86_CPU(current_cpu);
    env = &cpu->env;
    env->regs[R_EAX] = req->data;
    env->regs[R_EBX] = vmport_regs->ebx;
    env->regs[R_ECX] = vmport_regs->ecx;
    env->regs[R_EDX] = vmport_regs->edx;
    env->regs[R_ESI] = vmport_regs->esi;
    env->regs[R_EDI] = vmport_regs->edi;
}

static void regs_from_cpu(vmware_regs_t *vmport_regs)
{
    X86CPU *cpu = X86_CPU(current_cpu);
    CPUX86State *env = &cpu->env;

    vmport_regs->ebx = env->regs[R_EBX];
    vmport_regs->ecx = env->regs[R_ECX];
    vmport_regs->edx = env->regs[R_EDX];
    vmport_regs->esi = env->regs[R_ESI];
    vmport_regs->edi = env->regs[R_EDI];
}

static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req)
{
    vmware_regs_t *vmport_regs;

    assert(state->shared_vmport_page);
    vmport_regs =
        &state->shared_vmport_page->vcpu_vmport_regs[state->send_vcpu];
    QEMU_BUILD_BUG_ON(sizeof(*req) < sizeof(*vmport_regs));

    current_cpu = state->cpu_by_vcpu_id[state->send_vcpu];
    regs_to_cpu(vmport_regs, req);
    cpu_ioreq_pio(req);
    regs_from_cpu(vmport_regs);
    current_cpu = NULL;
}

static void handle_ioreq(XenIOState *state, ioreq_t *req)
{
    trace_handle_ioreq(req, req->type, req->dir, req->df, req->data_is_ptr,
                       req->addr, req->data, req->count, req->size);

    if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
        (req->size < sizeof(target_ulong))) {
        req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
    }

    if (req->dir == IOREQ_WRITE) {
        trace_handle_ioreq_write(req, req->type, req->df, req->data_is_ptr,
                                 req->addr, req->data, req->count, req->size);
    }

    switch (req->type) {
    case IOREQ_TYPE_PIO:
        cpu_ioreq_pio(req);
        break;
    case IOREQ_TYPE_COPY:
        cpu_ioreq_move(req);
        break;
    case IOREQ_TYPE_VMWARE_PORT:
        handle_vmport_ioreq(state, req);
        break;
    case IOREQ_TYPE_TIMEOFFSET:
        break;
    case IOREQ_TYPE_INVALIDATE:
        xen_invalidate_map_cache();
        break;
    case IOREQ_TYPE_PCI_CONFIG:
        cpu_ioreq_config(state, req);
        break;
    default:
        hw_error("Invalid ioreq type 0x%x\n", req->type);
    }
    if (req->dir == IOREQ_READ) {
        trace_handle_ioreq_read(req, req->type, req->df, req->data_is_ptr,
                                req->addr, req->data, req->count, req->size);
    }
}
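
/*
 * The buffered ioreq page is a single-producer (Xen), single-consumer
 * (QEMU) ring of IOREQ_BUFFER_SLOT_NUM slots holding compact, write-only
 * requests that need no response.  A 64-bit payload does not fit in one
 * slot, so an 8-byte request occupies two consecutive slots, the second
 * one carrying the upper 32 bits of data.
 */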

static int handle_buffered_iopage(XenIOState *state)
{
    buffered_iopage_t *buf_page = state->buffered_io_page;
    buf_ioreq_t *buf_req = NULL;
    ioreq_t req;
    int qw;

    if (!buf_page) {
        return 0;
    }

    memset(&req, 0x00, sizeof(req));
    req.state = STATE_IOREQ_READY;
    req.count = 1;
    req.dir = IOREQ_WRITE;

    for (;;) {
        uint32_t rdptr = buf_page->read_pointer, wrptr;

        xen_rmb();
        wrptr = buf_page->write_pointer;
        xen_rmb();
        if (rdptr != buf_page->read_pointer) {
            continue;
        }
        if (rdptr == wrptr) {
            break;
        }
        buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM];
        req.size = 1U << buf_req->size;
        req.addr = buf_req->addr;
        req.data = buf_req->data;
        req.type = buf_req->type;
        xen_rmb();
        qw = (req.size == 8);
        if (qw) {
            if (rdptr + 1 == wrptr) {
                hw_error("Incomplete quad word buffered ioreq");
            }
            buf_req = &buf_page->buf_ioreq[(rdptr + 1) %
                                           IOREQ_BUFFER_SLOT_NUM];
            req.data |= ((uint64_t)buf_req->data) << 32;
            xen_rmb();
        }

        handle_ioreq(state, &req);

        /* Only req.data may get updated by handle_ioreq(), albeit even that
         * should not happen as such data would never make it to the guest (we
         * can only usefully see writes here after all).
         */
        assert(req.state == STATE_IOREQ_READY);
        assert(req.count == 1);
        assert(req.dir == IOREQ_WRITE);
        assert(!req.data_is_ptr);

        qatomic_add(&buf_page->read_pointer, qw + 1);
    }

    return req.count;
}

static void handle_buffered_io(void *opaque)
{
    XenIOState *state = opaque;

    if (handle_buffered_iopage(state)) {
        timer_mod(state->buffered_io_timer,
                BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
    } else {
        timer_del(state->buffered_io_timer);
        xenevtchn_unmask(state->xce_handle, state->bufioreq_local_port);
    }
}

static void cpu_handle_ioreq(void *opaque)
{
    XenIOState *state = opaque;
    ioreq_t *req = cpu_get_ioreq(state);

    handle_buffered_iopage(state);
    if (req) {
        ioreq_t copy = *req;

        xen_rmb();
        handle_ioreq(state, &copy);
        req->data = copy.data;

        if (req->state != STATE_IOREQ_INPROCESS) {
            fprintf(stderr, "Badness in I/O request ... not in service?!: "
                    "%x, ptr: %x, port: %"PRIx64", "
                    "data: %"PRIx64", count: %u, size: %u, type: %u\n",
                    req->state, req->data_is_ptr, req->addr,
                    req->data, req->count, req->size, req->type);
            destroy_hvm_domain(false);
            return;
        }

        xen_wmb(); /* Update ioreq contents /then/ update state. */

        /*
         * We do this before we send the response so that the tools
         * have the opportunity to pick up on the reset before the
         * guest resumes and does a hlt with interrupts disabled which
         * causes Xen to powerdown the domain.
         */
        if (runstate_is_running()) {
            ShutdownCause request;

            if (qemu_shutdown_requested_get()) {
                destroy_hvm_domain(false);
            }
            request = qemu_reset_requested_get();
            if (request) {
                qemu_system_reset(request);
                destroy_hvm_domain(true);
            }
        }

        req->state = STATE_IORESP_READY;
        xenevtchn_notify(state->xce_handle,
                         state->ioreq_local_port[state->send_vcpu]);
    }
}

static void xen_main_loop_prepare(XenIOState *state)
{
    int evtchn_fd = -1;

    if (state->xce_handle != NULL) {
        evtchn_fd = xenevtchn_fd(state->xce_handle);
    }

    state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME,
                                            handle_buffered_io,
                                            state);

    if (evtchn_fd != -1) {
        CPUState *cpu_state;

        DPRINTF("%s: Init cpu_by_vcpu_id\n", __func__);
        CPU_FOREACH(cpu_state) {
            DPRINTF("%s: cpu_by_vcpu_id[%d]=%p\n",
                    __func__, cpu_state->cpu_index, cpu_state);
            state->cpu_by_vcpu_id[cpu_state->cpu_index] = cpu_state;
        }
        qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
    }
}


static void xen_hvm_change_state_handler(void *opaque, int running,
                                         RunState rstate)
{
    XenIOState *state = opaque;

    if (running) {
        xen_main_loop_prepare(state);
    }

    xen_set_ioreq_server_state(xen_domid,
                               state->ioservid,
                               (rstate == RUN_STATE_RUNNING));
}

static void xen_exit_notifier(Notifier *n, void *data)
{
    XenIOState *state = container_of(n, XenIOState, exit);

    xen_destroy_ioreq_server(xen_domid, state->ioservid);

    xenevtchn_close(state->xce_handle);
    xs_daemon_close(state->xenstore);
}

#ifdef XEN_COMPAT_PHYSMAP
static void xen_read_physmap(XenIOState *state)
{
    XenPhysmap *physmap = NULL;
    unsigned int len, num, i;
    char path[80], *value = NULL;
    char **entries = NULL;

    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap", xen_domid);
    entries = xs_directory(state->xenstore, 0, path, &num);
    if (entries == NULL) {
        return;
    }

    for (i = 0; i < num; i++) {
        physmap = g_malloc(sizeof(XenPhysmap));
        physmap->phys_offset = strtoull(entries[i], NULL, 16);
        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/start_addr",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->start_addr = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/size",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->size = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/name",
                 xen_domid, entries[i]);
        physmap->name = xs_read(state->xenstore, 0, path, &len);

        QLIST_INSERT_HEAD(&xen_physmap, physmap, list);
    }
    free(entries);
}
#else
static void xen_read_physmap(XenIOState *state)
{
}
#endif

static void xen_wakeup_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 0);
}

static int xen_map_ioreq_server(XenIOState *state)
{
    void *addr = NULL;
    xenforeignmemory_resource_handle *fres;
    xen_pfn_t ioreq_pfn;
    xen_pfn_t bufioreq_pfn;
    evtchn_port_t bufioreq_evtchn;
    int rc;

    /*
     * Attempt to map using the resource API and fall back to normal
     * foreign mapping if this is not supported.
     */
    QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_bufioreq != 0);
    QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_ioreq(0) != 1);
    fres = xenforeignmemory_map_resource(xen_fmem, xen_domid,
                                         XENMEM_resource_ioreq_server,
                                         state->ioservid, 0, 2,
                                         &addr,
                                         PROT_READ | PROT_WRITE, 0);
    if (fres != NULL) {
        trace_xen_map_resource_ioreq(state->ioservid, addr);
        state->buffered_io_page = addr;
        state->shared_page = addr + TARGET_PAGE_SIZE;
    } else if (errno != EOPNOTSUPP) {
        error_report("failed to map ioreq server resources: error %d handle=%p",
                     errno, xen_xc);
        return -1;
    }

    rc = xen_get_ioreq_server_info(xen_domid, state->ioservid,
                                   (state->shared_page == NULL) ?
                                   &ioreq_pfn : NULL,
                                   (state->buffered_io_page == NULL) ?
                                   &bufioreq_pfn : NULL,
                                   &bufioreq_evtchn);
    if (rc < 0) {
        error_report("failed to get ioreq server info: error %d handle=%p",
                     errno, xen_xc);
        return rc;
    }

    if (state->shared_page == NULL) {
        DPRINTF("shared page at pfn %lx\n", ioreq_pfn);

        state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
                                                  PROT_READ | PROT_WRITE,
                                                  1, &ioreq_pfn, NULL);
        if (state->shared_page == NULL) {
            error_report("map shared IO page returned error %d handle=%p",
                         errno, xen_xc);
        }
    }

    if (state->buffered_io_page == NULL) {
        DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn);

        state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
                                                       PROT_READ | PROT_WRITE,
                                                       1, &bufioreq_pfn,
                                                       NULL);
        if (state->buffered_io_page == NULL) {
            error_report("map buffered IO page returned error %d", errno);
            return -1;
        }
    }

    if (state->shared_page == NULL || state->buffered_io_page == NULL) {
        return -1;
    }

    DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn);

    state->bufioreq_remote_port = bufioreq_evtchn;

    return 0;
}
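
/*
 * Bring-up order below: open the event channel and xenstore handles,
 * create and map the ioreq server, bind one event channel per vcpu plus
 * one for buffered requests, initialise the map cache and guest RAM,
 * then register the memory/io/device listeners and the Xen backends.
 */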

void xen_hvm_init_pc(PCMachineState *pcms, MemoryRegion **ram_memory)
{
    MachineState *ms = MACHINE(pcms);
    unsigned int max_cpus = ms->smp.max_cpus;
    int i, rc;
    xen_pfn_t ioreq_pfn;
    XenIOState *state;

    state = g_malloc0(sizeof(XenIOState));

    state->xce_handle = xenevtchn_open(NULL, 0);
    if (state->xce_handle == NULL) {
        perror("xen: event channel open");
        goto err;
    }

    state->xenstore = xs_daemon_open();
    if (state->xenstore == NULL) {
        perror("xen: xenstore open");
        goto err;
    }

    xen_create_ioreq_server(xen_domid, &state->ioservid);

    state->exit.notify = xen_exit_notifier;
    qemu_add_exit_notifier(&state->exit);

    state->suspend.notify = xen_suspend_notifier;
    qemu_register_suspend_notifier(&state->suspend);

    state->wakeup.notify = xen_wakeup_notifier;
    qemu_register_wakeup_notifier(&state->wakeup);

    /*
     * Register wake-up support in QMP query-current-machine API
     */
    qemu_register_wakeup_support();

    rc = xen_map_ioreq_server(state);
    if (rc < 0) {
        goto err;
    }

    rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn);
    if (!rc) {
        DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn);
        state->shared_vmport_page =
            xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE,
                                 1, &ioreq_pfn, NULL);
        if (state->shared_vmport_page == NULL) {
            error_report("map shared vmport IO page returned error %d handle=%p",
                         errno, xen_xc);
            goto err;
        }
    } else if (rc != -ENOSYS) {
        error_report("get vmport regs pfn returned error %d, rc=%d",
                     errno, rc);
        goto err;
    }

    /* Note: cpus is empty at this point in init */
    state->cpu_by_vcpu_id = g_malloc0(max_cpus * sizeof(CPUState *));

    rc = xen_set_ioreq_server_state(xen_domid, state->ioservid, true);
    if (rc < 0) {
        error_report("failed to enable ioreq server info: error %d handle=%p",
                     errno, xen_xc);
        goto err;
    }

    state->ioreq_local_port = g_malloc0(max_cpus * sizeof(evtchn_port_t));

    /* FIXME: how about if we overflow the page here? */
    for (i = 0; i < max_cpus; i++) {
        rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
                                        xen_vcpu_eport(state->shared_page, i));
        if (rc == -1) {
            error_report("shared evtchn %d bind error %d", i, errno);
            goto err;
        }
        state->ioreq_local_port[i] = rc;
    }

    rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
                                    state->bufioreq_remote_port);
    if (rc == -1) {
        error_report("buffered evtchn bind error %d", errno);
        goto err;
    }
    state->bufioreq_local_port = rc;

    /* Init RAM management */
#ifdef XEN_COMPAT_PHYSMAP
    xen_map_cache_init(xen_phys_offset_to_gaddr, state);
#else
    xen_map_cache_init(NULL, state);
#endif
    xen_ram_init(pcms, ms->ram_size, ram_memory);

    qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);

    state->memory_listener = xen_memory_listener;
    memory_listener_register(&state->memory_listener, &address_space_memory);
    state->log_for_dirtybit = NULL;

    state->io_listener = xen_io_listener;
    memory_listener_register(&state->io_listener, &address_space_io);

    state->device_listener = xen_device_listener;
    QLIST_INIT(&state->dev_list);
    device_listener_register(&state->device_listener);

    xen_bus_init();

    /* Initialize backend core & drivers */
    if (xen_be_init() != 0) {
        error_report("xen backend core setup failed");
        goto err;
    }
    xen_be_register_common();

    QLIST_INIT(&xen_physmap);
    xen_read_physmap(state);

    /* Disable ACPI build because Xen handles it */
    pcms->acpi_build_enabled = false;

    return;

err:
    error_report("xen hardware virtual machine initialisation failed");
    exit(1);
}

void destroy_hvm_domain(bool reboot)
{
    xc_interface *xc_handle;
    int sts;
    int rc;

    unsigned int reason = reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff;

    if (xen_dmod) {
        rc = xendevicemodel_shutdown(xen_dmod, xen_domid, reason);
        if (!rc) {
            return;
        }
        if (errno != ENOTTY /* old Xen */) {
            perror("xendevicemodel_shutdown failed");
        }
        /* well, try the old thing then */
    }

    xc_handle = xc_interface_open(0, 0, 0);
    if (xc_handle == NULL) {
        fprintf(stderr, "Cannot acquire xenctrl handle\n");
    } else {
        sts = xc_domain_shutdown(xc_handle, xen_domid, reason);
        if (sts != 0) {
            fprintf(stderr, "xc_domain_shutdown failed to issue %s, "
                    "sts %d, %s\n", reboot ? "reboot" : "poweroff",
                    sts, strerror(errno));
        } else {
            fprintf(stderr, "Issued domain %d %s\n", xen_domid,
                    reboot ? "reboot" : "poweroff");
        }
        xc_interface_close(xc_handle);
    }
}

void xen_register_framebuffer(MemoryRegion *mr)
{
    framebuffer = mr;
}

void xen_shutdown_fatal_error(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
    fprintf(stderr, "Will destroy the domain.\n");
    /* destroy the domain */
    qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_ERROR);
}

void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length)
{
    if (unlikely(xen_in_migration)) {
        int rc;
        ram_addr_t start_pfn, nb_pages;

        start = xen_phys_offset_to_gaddr(start, length);

        if (length == 0) {
            length = TARGET_PAGE_SIZE;
        }
        start_pfn = start >> TARGET_PAGE_BITS;
        nb_pages = ((start + length + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS)
            - start_pfn;
        rc = xen_modified_memory(xen_domid, start_pfn, nb_pages);
        if (rc) {
            fprintf(stderr,
                    "%s failed for "RAM_ADDR_FMT" ("RAM_ADDR_FMT"): %i, %s\n",
                    __func__, start, nb_pages, errno, strerror(errno));
        }
    }
}

void qmp_xen_set_global_dirty_log(bool enable, Error **errp)
{
    if (enable) {
        memory_global_dirty_log_start();
    } else {
        memory_global_dirty_log_stop();
    }
}