/*
 * Copyright (C) 2010 Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "hw/pci/pci.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic-msidef.h"
#include "hw/xen/xen_common.h"
#include "hw/xen/xen_backend.h"
#include "qmp-commands.h"

#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#include "exec/address-spaces.h"

#include <xen/hvm/ioreq.h>
#include <xen/hvm/params.h>
#include <xen/hvm/e820.h>

//#define DEBUG_XEN_HVM

#ifdef DEBUG_XEN_HVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

static MemoryRegion ram_memory, ram_640k, ram_lo, ram_hi;
static MemoryRegion *framebuffer;
static bool xen_in_migration;

/* Compatibility with older versions */

/* This allows QEMU to build on a system that has Xen 4.5 or earlier
 * installed.  This is here (not in hw/xen/xen_common.h) because
 * xen/hvm/ioreq.h needs to be included before this block and
 * hw/xen/xen_common.h needs to be included before xen/hvm/ioreq.h
 */
#ifndef IOREQ_TYPE_VMWARE_PORT
#define IOREQ_TYPE_VMWARE_PORT  3
struct vmware_regs {
    uint32_t esi;
    uint32_t edi;
    uint32_t ebx;
    uint32_t ecx;
    uint32_t edx;
};
typedef struct vmware_regs vmware_regs_t;

struct shared_vmport_iopage {
    struct vmware_regs vcpu_vmport_regs[1];
};
typedef struct shared_vmport_iopage shared_vmport_iopage_t;
#endif

static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_ioreq[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_ioreq[vcpu];
}

#define BUFFER_IO_MAX_DELAY  100

typedef struct XenPhysmap {
    hwaddr start_addr;
    ram_addr_t size;
    const char *name;
    hwaddr phys_offset;

    QLIST_ENTRY(XenPhysmap) list;
} XenPhysmap;

typedef struct XenIOState {
    ioservid_t ioservid;
    shared_iopage_t *shared_page;
    shared_vmport_iopage_t *shared_vmport_page;
    buffered_iopage_t *buffered_io_page;
    QEMUTimer *buffered_io_timer;
    CPUState **cpu_by_vcpu_id;
    /* the evtchn ports for polling the notification */
    evtchn_port_t *ioreq_local_port;
    /* evtchn local port for buffered io */
    evtchn_port_t bufioreq_local_port;
    /* the evtchn fd for polling */
    xenevtchn_handle *xce_handle;
    /* which vcpu we are serving */
    int send_vcpu;

    struct xs_handle *xenstore;
    MemoryListener memory_listener;
    MemoryListener io_listener;
    DeviceListener device_listener;
    QLIST_HEAD(, XenPhysmap) physmap;
    hwaddr free_phys_offset;
    const XenPhysmap *log_for_dirtybit;

    Notifier exit;
    Notifier suspend;
    Notifier wakeup;
} XenIOState;

/* Xen-specific functions for the PIIX PCI bridge */

int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
{
    return irq_num + ((pci_dev->devfn >> 3) << 2);
}

void xen_piix3_set_irq(void *opaque, int irq_num, int level)
{
    xen_set_pci_intx_level(xen_domid, 0, 0, irq_num >> 2,
                           irq_num & 3,
                           level);
}

void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len)
{
    int i;

    /* Scan for updates to PCI link routes (0x60-0x63). */
    for (i = 0; i < len; i++) {
        uint8_t v = (val >> (8 * i)) & 0xff;
        if (v & 0x80) {
            v = 0;
        }
        v &= 0xf;
        if (((address + i) >= 0x60) && ((address + i) <= 0x63)) {
            xen_set_pci_link_route(xen_domid, address + i - 0x60, v);
        }
    }
}

int xen_is_pirq_msi(uint32_t msi_data)
{
    /* If vector is 0, the msi is remapped into a pirq, passed as
     * dest_id.
     */
    return ((msi_data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT) == 0;
}

void xen_hvm_inject_msi(uint64_t addr, uint32_t data)
{
    xen_inject_msi(xen_domid, addr, data);
}

static void xen_suspend_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3);
}

/* Xen Interrupt Controller */

static void xen_set_irq(void *opaque, int irq, int level)
{
    xen_set_isa_irq_level(xen_domid, irq, level);
}

qemu_irq *xen_interrupt_controller_init(void)
{
    return qemu_allocate_irqs(xen_set_irq, NULL, 16);
}

/* Memory Ops */

static void xen_ram_init(PCMachineState *pcms,
                         ram_addr_t ram_size, MemoryRegion **ram_memory_p)
{
    MemoryRegion *sysmem = get_system_memory();
    ram_addr_t block_len;
    uint64_t user_lowmem = object_property_get_uint(qdev_get_machine(),
                                                    PC_MACHINE_MAX_RAM_BELOW_4G,
                                                    &error_abort);

    /* Handle the machine opt max-ram-below-4g.  It is basically doing
     * min(xen limit, user limit).
     */
    if (!user_lowmem) {
        user_lowmem = HVM_BELOW_4G_RAM_END; /* default */
    }
    if (HVM_BELOW_4G_RAM_END <= user_lowmem) {
        user_lowmem = HVM_BELOW_4G_RAM_END;
    }

    if (ram_size >= user_lowmem) {
        pcms->above_4g_mem_size = ram_size - user_lowmem;
        pcms->below_4g_mem_size = user_lowmem;
    } else {
        pcms->above_4g_mem_size = 0;
        pcms->below_4g_mem_size = ram_size;
    }
    if (!pcms->above_4g_mem_size) {
        block_len = ram_size;
    } else {
        /*
         * Xen does not allocate the memory contiguously, it keeps a
         * hole of the size computed above or passed in.
         */
        block_len = (1ULL << 32) + pcms->above_4g_mem_size;
    }
    memory_region_init_ram(&ram_memory, NULL, "xen.ram", block_len,
                           &error_fatal);
    *ram_memory_p = &ram_memory;
    vmstate_register_ram_global(&ram_memory);

    memory_region_init_alias(&ram_640k, NULL, "xen.ram.640k",
                             &ram_memory, 0, 0xa0000);
    memory_region_add_subregion(sysmem, 0, &ram_640k);
    /* Skip the VGA IO memory space; it will be registered later by the
     * emulated VGA device.
     *
     * The area between 0xc0000 and 0x100000 will be used by SeaBIOS to load
     * option ROMs, so it is registered here as RAM.
     */
    memory_region_init_alias(&ram_lo, NULL, "xen.ram.lo",
                             &ram_memory, 0xc0000,
                             pcms->below_4g_mem_size - 0xc0000);
    memory_region_add_subregion(sysmem, 0xc0000, &ram_lo);
    if (pcms->above_4g_mem_size > 0) {
        memory_region_init_alias(&ram_hi, NULL, "xen.ram.hi",
                                 &ram_memory, 0x100000000ULL,
                                 pcms->above_4g_mem_size);
        memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi);
    }
}

void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
                   Error **errp)
{
    unsigned long nr_pfn;
    xen_pfn_t *pfn_list;
    int i;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* RAM already populated in Xen */
        fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT
                " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n",
                __func__, size, ram_addr);
        return;
    }

    if (mr == &ram_memory) {
        return;
    }

    trace_xen_ram_alloc(ram_addr, size);

    nr_pfn = size >> TARGET_PAGE_BITS;
    pfn_list = g_malloc(sizeof (*pfn_list) * nr_pfn);

    for (i = 0; i < nr_pfn; i++) {
        pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
    }

    if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) {
        error_setg(errp, "xen: failed to populate ram at " RAM_ADDR_FMT,
                   ram_addr);
    }

    g_free(pfn_list);
}

static XenPhysmap *get_physmapping(XenIOState *state,
                                   hwaddr start_addr, ram_addr_t size)
{
    XenPhysmap *physmap = NULL;

    start_addr &= TARGET_PAGE_MASK;

    QLIST_FOREACH(physmap, &state->physmap, list) {
        if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) {
            return physmap;
        }
    }
    return NULL;
}

static hwaddr xen_phys_offset_to_gaddr(hwaddr start_addr,
                                       ram_addr_t size, void *opaque)
{
    hwaddr addr = start_addr & TARGET_PAGE_MASK;
    XenIOState *xen_io_state = opaque;
    XenPhysmap *physmap = NULL;

    QLIST_FOREACH(physmap, &xen_io_state->physmap, list) {
        if (range_covers_byte(physmap->phys_offset, physmap->size, addr)) {
            return physmap->start_addr;
        }
    }

    return start_addr;
}

static int xen_add_to_physmap(XenIOState *state,
                              hwaddr start_addr,
                              ram_addr_t size,
                              MemoryRegion *mr,
                              hwaddr offset_within_region)
{
    unsigned long i = 0;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr pfn, start_gpfn;
    hwaddr phys_offset = memory_region_get_ram_addr(mr);
    char path[80], value[17];
    const char *mr_name;

    if (get_physmapping(state, start_addr, size)) {
        return 0;
    }
    if (size <= 0) {
        return -1;
    }

    /* Xen can only handle a single dirty log region for now and we want
     * the linear framebuffer to be that region.
     * Avoid tracking any region that is not videoram and avoid tracking
     * the legacy vga region.
     */
    if (mr == framebuffer && start_addr > 0xbffff) {
        goto go_physmap;
    }
    return -1;

go_physmap:
    DPRINTF("mapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx"\n",
            start_addr, start_addr + size);

    pfn = phys_offset >> TARGET_PAGE_BITS;
    start_gpfn = start_addr >> TARGET_PAGE_BITS;
    for (i = 0; i < size >> TARGET_PAGE_BITS; i++) {
        unsigned long idx = pfn + i;
        xen_pfn_t gpfn = start_gpfn + i;

        rc = xen_xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
        if (rc) {
            DPRINTF("add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
                    PRI_xen_pfn" failed: %d (errno: %d)\n", idx, gpfn, rc, errno);
            return -rc;
        }
    }

    mr_name = memory_region_name(mr);

    physmap = g_malloc(sizeof (XenPhysmap));

    physmap->start_addr = start_addr;
    physmap->size = size;
    physmap->name = mr_name;
    physmap->phys_offset = phys_offset;

    QLIST_INSERT_HEAD(&state->physmap, physmap, list);

    xc_domain_pin_memory_cacheattr(xen_xc, xen_domid,
                                   start_addr >> TARGET_PAGE_BITS,
                                   (start_addr + size - 1) >> TARGET_PAGE_BITS,
                                   XEN_DOMCTL_MEM_CACHEATTR_WB);

    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/start_addr",
             xen_domid, (uint64_t)phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)start_addr);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap/%"PRIx64"/size",
             xen_domid, (uint64_t)phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)size);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    if (mr_name) {
        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%"PRIx64"/name",
                 xen_domid, (uint64_t)phys_offset);
        if (!xs_write(state->xenstore, 0, path, mr_name, strlen(mr_name))) {
            return -1;
        }
    }

    return 0;
}

static int xen_remove_from_physmap(XenIOState *state,
                                   hwaddr start_addr,
                                   ram_addr_t size)
{
    unsigned long i = 0;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    hwaddr phys_offset = 0;

    physmap = get_physmapping(state, start_addr, size);
    if (physmap == NULL) {
        return -1;
    }

    phys_offset = physmap->phys_offset;
    size = physmap->size;

    DPRINTF("unmapping vram to %"HWADDR_PRIx" - %"HWADDR_PRIx", at "
            "%"HWADDR_PRIx"\n", start_addr, start_addr + size, phys_offset);

    size >>= TARGET_PAGE_BITS;
    start_addr >>= TARGET_PAGE_BITS;
    phys_offset >>= TARGET_PAGE_BITS;
    for (i = 0; i < size; i++) {
        xen_pfn_t idx = start_addr + i;
        xen_pfn_t gpfn = phys_offset + i;

        rc = xen_xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
        if (rc) {
            fprintf(stderr, "add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
                    PRI_xen_pfn" failed: %d (errno: %d)\n", idx, gpfn, rc, errno);
            return -rc;
        }
    }

    QLIST_REMOVE(physmap, list);
    if (state->log_for_dirtybit == physmap) {
        state->log_for_dirtybit = NULL;
    }
    g_free(physmap);

    return 0;
}

static void xen_set_memory(struct MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool add)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    bool log_dirty =
        memory_region_is_logging(section->mr, DIRTY_MEMORY_VGA);
    hvmmem_type_t mem_type;

    if (section->mr == &ram_memory) {
        return;
    } else {
        if (add) {
            xen_map_memory_section(xen_domid, state->ioservid,
                                   section);
        } else {
            xen_unmap_memory_section(xen_domid, state->ioservid,
                                     section);
        }
    }

    if (!memory_region_is_ram(section->mr)) {
        return;
    }

    if (log_dirty != add) {
        return;
    }

    trace_xen_client_set_memory(start_addr, size, log_dirty);

    start_addr &= TARGET_PAGE_MASK;
    size = TARGET_PAGE_ALIGN(size);

    if (add) {
        if (!memory_region_is_rom(section->mr)) {
            xen_add_to_physmap(state, start_addr, size,
                               section->mr, section->offset_within_region);
        } else {
            mem_type = HVMMEM_ram_ro;
            if (xen_set_mem_type(xen_domid, mem_type,
                                 start_addr >> TARGET_PAGE_BITS,
                                 size >> TARGET_PAGE_BITS)) {
                DPRINTF("xen_set_mem_type error, addr: "TARGET_FMT_plx"\n",
                        start_addr);
            }
        }
    } else {
        if (xen_remove_from_physmap(state, start_addr, size) < 0) {
            DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr);
        }
    }
}

static void xen_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    memory_region_ref(section->mr);
    xen_set_memory(listener, section, true);
}

static void xen_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    xen_set_memory(listener, section, false);
    memory_region_unref(section->mr);
}

static void xen_io_add(MemoryListener *listener,
                       MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, io_listener);
    MemoryRegion *mr = section->mr;

    if (mr->ops == &unassigned_io_ops) {
        return;
    }

    memory_region_ref(mr);

    xen_map_io_section(xen_domid, state->ioservid, section);
}

static void xen_io_del(MemoryListener *listener,
                       MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, io_listener);
    MemoryRegion *mr = section->mr;

    if (mr->ops == &unassigned_io_ops) {
        return;
    }

    xen_unmap_io_section(xen_domid, state->ioservid, section);

    memory_region_unref(mr);
}

static void xen_device_realize(DeviceListener *listener,
                               DeviceState *dev)
{
    XenIOState *state = container_of(listener, XenIOState, device_listener);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);

        xen_map_pcidev(xen_domid, state->ioservid, pci_dev);
    }
}

static void xen_device_unrealize(DeviceListener *listener,
                                 DeviceState *dev)
{
    XenIOState *state = container_of(listener, XenIOState, device_listener);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);

        xen_unmap_pcidev(xen_domid, state->ioservid, pci_dev);
    }
}

static void xen_sync_dirty_bitmap(XenIOState *state,
                                  hwaddr start_addr,
                                  ram_addr_t size)
{
    hwaddr npages = size >> TARGET_PAGE_BITS;
    const int width = sizeof(unsigned long) * 8;
    unsigned long bitmap[DIV_ROUND_UP(npages, width)];
    int rc, i, j;
    const XenPhysmap *physmap = NULL;

    physmap = get_physmapping(state, start_addr, size);
    if (physmap == NULL) {
        /* not handled */
        return;
    }

    if (state->log_for_dirtybit == NULL) {
        state->log_for_dirtybit = physmap;
    } else if
               (state->log_for_dirtybit != physmap) {
        /* Only one range for dirty bitmap can be tracked. */
        return;
    }

    rc = xen_track_dirty_vram(xen_domid, start_addr >> TARGET_PAGE_BITS,
                              npages, bitmap);
    if (rc < 0) {
#ifndef ENODATA
#define ENODATA  ENOENT
#endif
        if (errno == ENODATA) {
            memory_region_set_dirty(framebuffer, 0, size);
            DPRINTF("xen: track_dirty_vram failed (0x" TARGET_FMT_plx
                    ", 0x" TARGET_FMT_plx "): %s\n",
                    start_addr, start_addr + size, strerror(errno));
        }
        return;
    }

    for (i = 0; i < ARRAY_SIZE(bitmap); i++) {
        unsigned long map = bitmap[i];
        while (map != 0) {
            j = ctzl(map);
            map &= ~(1ul << j);
            memory_region_set_dirty(framebuffer,
                                    (i * width + j) * TARGET_PAGE_SIZE,
                                    TARGET_PAGE_SIZE);
        };
    }
}

static void xen_log_start(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    if (new & ~old & (1 << DIRTY_MEMORY_VGA)) {
        xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                              int128_get64(section->size));
    }
}

static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section,
                         int old, int new)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    if (old & ~new & (1 << DIRTY_MEMORY_VGA)) {
        state->log_for_dirtybit = NULL;
        /* Disable dirty bit tracking */
        xen_track_dirty_vram(xen_domid, 0, 0, NULL);
    }
}

static void xen_log_sync(MemoryListener *listener, MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                          int128_get64(section->size));
}

static void xen_log_global_start(MemoryListener *listener)
{
    if (xen_enabled()) {
        xen_in_migration = true;
    }
}

static void xen_log_global_stop(MemoryListener *listener)
{
    xen_in_migration = false;
}

static MemoryListener xen_memory_listener = {
    .region_add = xen_region_add,
    .region_del = xen_region_del,
    .log_start = xen_log_start,
    .log_stop = xen_log_stop,
    .log_sync = xen_log_sync,
    .log_global_start = xen_log_global_start,
    .log_global_stop = xen_log_global_stop,
    .priority = 10,
};

static MemoryListener xen_io_listener = {
    .region_add = xen_io_add,
    .region_del = xen_io_del,
    .priority = 10,
};

static DeviceListener xen_device_listener = {
    .realize = xen_device_realize,
    .unrealize = xen_device_unrealize,
};

/* get the ioreq packets from shared memory */
static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
{
    ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);

    if (req->state != STATE_IOREQ_READY) {
        DPRINTF("I/O request not ready: "
                "%x, ptr: %x, port: %"PRIx64", "
                "data: %"PRIx64", count: %u, size: %u\n",
                req->state, req->data_is_ptr, req->addr,
                req->data, req->count, req->size);
        return NULL;
    }

    xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */

    req->state = STATE_IOREQ_INPROCESS;
    return req;
}

/* Poll the event channel for a port notification and return the pending
 * ioreq for the signalled vcpu, or NULL if there is nothing to handle. */
static ioreq_t *cpu_get_ioreq(XenIOState *state)
{
    int i;
    evtchn_port_t port;

    port =
        xenevtchn_pending(state->xce_handle);
    if (port == state->bufioreq_local_port) {
        timer_mod(state->buffered_io_timer,
                  BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
        return NULL;
    }

    if (port != -1) {
        for (i = 0; i < max_cpus; i++) {
            if (state->ioreq_local_port[i] == port) {
                break;
            }
        }

        if (i == max_cpus) {
            hw_error("Fatal error while trying to get io event!\n");
        }

        /* unmask the wanted port again */
        xenevtchn_unmask(state->xce_handle, port);

        /* get the io packet from shared memory */
        state->send_vcpu = i;
        return cpu_get_ioreq_from_shared_memory(state, i);
    }

    /* read error or read nothing */
    return NULL;
}

static uint32_t do_inp(uint32_t addr, unsigned long size)
{
    switch (size) {
        case 1:
            return cpu_inb(addr);
        case 2:
            return cpu_inw(addr);
        case 4:
            return cpu_inl(addr);
        default:
            hw_error("inp: bad size: %04x %lx", addr, size);
    }
}

static void do_outp(uint32_t addr,
                    unsigned long size, uint32_t val)
{
    switch (size) {
        case 1:
            return cpu_outb(addr, val);
        case 2:
            return cpu_outw(addr, val);
        case 4:
            return cpu_outl(addr, val);
        default:
            hw_error("outp: bad size: %04x %lx", addr, size);
    }
}

/*
 * Helper functions which read/write an object from/to physical guest
 * memory, as part of the implementation of an ioreq.
 *
 * Equivalent to
 *   cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i,
 *                          val, req->size, 0/1)
 * except without the integer overflow problems.
 */
static void rw_phys_req_item(hwaddr addr,
                             ioreq_t *req, uint32_t i, void *val, int rw)
{
    /* Do everything unsigned so overflow just results in a truncated result
     * and accesses to undesired parts of guest memory, which is up
     * to the guest */
    hwaddr offset = (hwaddr)req->size * i;
    if (req->df) {
        addr -= offset;
    } else {
        addr += offset;
    }
    cpu_physical_memory_rw(addr, val, req->size, rw);
}

static inline void read_phys_req_item(hwaddr addr,
                                      ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 0);
}
static inline void write_phys_req_item(hwaddr addr,
                                       ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 1);
}


static void cpu_ioreq_pio(ioreq_t *req)
{
    uint32_t i;

    trace_cpu_ioreq_pio(req, req->dir, req->df, req->data_is_ptr, req->addr,
                        req->data, req->count, req->size);

    if (req->size > sizeof(uint32_t)) {
        hw_error("PIO: bad size (%u)", req->size);
    }

    if (req->dir == IOREQ_READ) {
        if (!req->data_is_ptr) {
            req->data = do_inp(req->addr, req->size);
            trace_cpu_ioreq_pio_read_reg(req, req->data, req->addr,
                                         req->size);
        } else {
            uint32_t tmp;

            for (i = 0; i < req->count; i++) {
                tmp = do_inp(req->addr, req->size);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        }
    } else if (req->dir == IOREQ_WRITE) {
        if (!req->data_is_ptr) {
            trace_cpu_ioreq_pio_write_reg(req, req->data, req->addr,
                                          req->size);
            do_outp(req->addr, req->size, req->data);
        } else {
            for (i = 0; i < req->count; i++) {
                uint32_t tmp = 0;

                read_phys_req_item(req->data, req, i, &tmp);
                do_outp(req->addr, req->size, tmp);
            }
        }
    }
}

static void cpu_ioreq_move(ioreq_t *req)
{
    uint32_t i;

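    /* Handle an MMIO (IOREQ_TYPE_COPY) request: move req->count items of
     * req->size bytes between guest memory at req->addr and req->data.
     * When data_is_ptr is set, req->data is itself a guest physical
     * address and the copy goes through a local bounce variable;
     * otherwise req->data carries the value inline.
     */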
    trace_cpu_ioreq_move(req, req->dir, req->df, req->data_is_ptr, req->addr,
                         req->data, req->count, req->size);

    if (req->size > sizeof(req->data)) {
        hw_error("MMIO: bad size (%u)", req->size);
    }

    if (!req->data_is_ptr) {
        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &req->data);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                write_phys_req_item(req->addr, req, i, &req->data);
            }
        }
    } else {
        uint64_t tmp;

        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &tmp);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->data, req, i, &tmp);
                write_phys_req_item(req->addr, req, i, &tmp);
            }
        }
    }
}

static void regs_to_cpu(vmware_regs_t *vmport_regs, ioreq_t *req)
{
    X86CPU *cpu;
    CPUX86State *env;

    cpu = X86_CPU(current_cpu);
    env = &cpu->env;
    env->regs[R_EAX] = req->data;
    env->regs[R_EBX] = vmport_regs->ebx;
    env->regs[R_ECX] = vmport_regs->ecx;
    env->regs[R_EDX] = vmport_regs->edx;
    env->regs[R_ESI] = vmport_regs->esi;
    env->regs[R_EDI] = vmport_regs->edi;
}

static void regs_from_cpu(vmware_regs_t *vmport_regs)
{
    X86CPU *cpu = X86_CPU(current_cpu);
    CPUX86State *env = &cpu->env;

    vmport_regs->ebx = env->regs[R_EBX];
    vmport_regs->ecx = env->regs[R_ECX];
    vmport_regs->edx = env->regs[R_EDX];
    vmport_regs->esi = env->regs[R_ESI];
    vmport_regs->edi = env->regs[R_EDI];
}

static void handle_vmport_ioreq(XenIOState *state, ioreq_t *req)
{
    vmware_regs_t *vmport_regs;

    assert(state->shared_vmport_page);
    vmport_regs =
        &state->shared_vmport_page->vcpu_vmport_regs[state->send_vcpu];
    QEMU_BUILD_BUG_ON(sizeof(*req) < sizeof(*vmport_regs));

    current_cpu = state->cpu_by_vcpu_id[state->send_vcpu];
    regs_to_cpu(vmport_regs, req);
    cpu_ioreq_pio(req);
    regs_from_cpu(vmport_regs);
    current_cpu = NULL;
}

static void handle_ioreq(XenIOState *state, ioreq_t *req)
{
    trace_handle_ioreq(req, req->type, req->dir, req->df, req->data_is_ptr,
                       req->addr, req->data, req->count, req->size);

    if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
        (req->size < sizeof (target_ulong))) {
        req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
    }

    if (req->dir == IOREQ_WRITE)
        trace_handle_ioreq_write(req, req->type, req->df, req->data_is_ptr,
                                 req->addr, req->data, req->count, req->size);

    switch (req->type) {
        case IOREQ_TYPE_PIO:
            cpu_ioreq_pio(req);
            break;
        case IOREQ_TYPE_COPY:
            cpu_ioreq_move(req);
            break;
        case IOREQ_TYPE_VMWARE_PORT:
            handle_vmport_ioreq(state, req);
            break;
        case IOREQ_TYPE_TIMEOFFSET:
            break;
        case IOREQ_TYPE_INVALIDATE:
            xen_invalidate_map_cache();
            break;
        case IOREQ_TYPE_PCI_CONFIG: {
            uint32_t sbdf = req->addr >> 32;
            uint32_t val;

            /* Fake a write to port 0xCF8 so that
             * the config space access will target the
             * correct device model.
             */
            val = (1u << 31) |
                  ((req->addr & 0x0f00) << 16) |
                  ((sbdf & 0xffff) << 8) |
                  (req->addr & 0xfc);
            do_outp(0xcf8, 4, val);

            /* Now issue the config space access via
             * port 0xCFC
             */
            req->addr = 0xcfc | (req->addr & 0x03);
            cpu_ioreq_pio(req);
            break;
        }
        default:
            hw_error("Invalid ioreq type 0x%x\n", req->type);
    }
    if (req->dir == IOREQ_READ) {
        trace_handle_ioreq_read(req, req->type, req->df, req->data_is_ptr,
                                req->addr, req->data, req->count, req->size);
    }
}

static int handle_buffered_iopage(XenIOState *state)
{
    buffered_iopage_t *buf_page = state->buffered_io_page;
    buf_ioreq_t *buf_req = NULL;
    ioreq_t req;
    int qw;

    if (!buf_page) {
        return 0;
    }

    memset(&req, 0x00, sizeof(req));
    req.state = STATE_IOREQ_READY;
    req.count = 1;
    req.dir = IOREQ_WRITE;

    for (;;) {
        uint32_t rdptr = buf_page->read_pointer, wrptr;

        xen_rmb();
        wrptr = buf_page->write_pointer;
        xen_rmb();
        if (rdptr != buf_page->read_pointer) {
            continue;
        }
        if (rdptr == wrptr) {
            break;
        }
        buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM];
        req.size = 1U << buf_req->size;
        req.addr = buf_req->addr;
        req.data = buf_req->data;
        req.type = buf_req->type;
        xen_rmb();
        qw = (req.size == 8);
        if (qw) {
            if (rdptr + 1 == wrptr) {
                hw_error("Incomplete quad word buffered ioreq");
            }
            buf_req = &buf_page->buf_ioreq[(rdptr + 1) %
                                           IOREQ_BUFFER_SLOT_NUM];
            req.data |= ((uint64_t)buf_req->data) << 32;
            xen_rmb();
        }

        handle_ioreq(state, &req);

        /* Only req.data may get updated by handle_ioreq(), albeit even that
         * should not happen as such data would never make it to the guest (we
         * can only usefully see writes here after all).
         */
        assert(req.state == STATE_IOREQ_READY);
        assert(req.count == 1);
        assert(req.dir == IOREQ_WRITE);
        assert(!req.data_is_ptr);

        atomic_add(&buf_page->read_pointer, qw + 1);
    }

    return req.count;
}

static void handle_buffered_io(void *opaque)
{
    XenIOState *state = opaque;

    if (handle_buffered_iopage(state)) {
        timer_mod(state->buffered_io_timer,
                  BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
    } else {
        timer_del(state->buffered_io_timer);
        xenevtchn_unmask(state->xce_handle, state->bufioreq_local_port);
    }
}

static void cpu_handle_ioreq(void *opaque)
{
    XenIOState *state = opaque;
    ioreq_t *req = cpu_get_ioreq(state);

    handle_buffered_iopage(state);
    if (req) {
        ioreq_t copy = *req;

        xen_rmb();
        handle_ioreq(state, &copy);
        req->data = copy.data;

        if (req->state != STATE_IOREQ_INPROCESS) {
            fprintf(stderr, "Badness in I/O request ... not in service?!: "
                    "%x, ptr: %x, port: %"PRIx64", "
                    "data: %"PRIx64", count: %u, size: %u, type: %u\n",
                    req->state, req->data_is_ptr, req->addr,
                    req->data, req->count, req->size, req->type);
            destroy_hvm_domain(false);
            return;
        }

        xen_wmb(); /* Update ioreq contents /then/ update state. */

        /*
         * We do this before we send the response so that the tools
         * have the opportunity to pick up on the reset before the
         * guest resumes and does a hlt with interrupts disabled which
         * causes Xen to powerdown the domain.
         */
        if (runstate_is_running()) {
            ShutdownCause request;

            if (qemu_shutdown_requested_get()) {
                destroy_hvm_domain(false);
            }
            request = qemu_reset_requested_get();
            if (request) {
                qemu_system_reset(request);
                destroy_hvm_domain(true);
            }
        }

        req->state = STATE_IORESP_READY;
        xenevtchn_notify(state->xce_handle,
                         state->ioreq_local_port[state->send_vcpu]);
    }
}

static void xen_main_loop_prepare(XenIOState *state)
{
    int evtchn_fd = -1;

    if (state->xce_handle != NULL) {
        evtchn_fd = xenevtchn_fd(state->xce_handle);
    }

    state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME, handle_buffered_io,
                                            state);

    if (evtchn_fd != -1) {
        CPUState *cpu_state;

        DPRINTF("%s: Init cpu_by_vcpu_id\n", __func__);
        CPU_FOREACH(cpu_state) {
            DPRINTF("%s: cpu_by_vcpu_id[%d]=%p\n",
                    __func__, cpu_state->cpu_index, cpu_state);
            state->cpu_by_vcpu_id[cpu_state->cpu_index] = cpu_state;
        }
        qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
    }
}


static void xen_hvm_change_state_handler(void *opaque, int running,
                                         RunState rstate)
{
    XenIOState *state = opaque;

    if (running) {
        xen_main_loop_prepare(state);
    }

    xen_set_ioreq_server_state(xen_domid,
                               state->ioservid,
                               (rstate == RUN_STATE_RUNNING));
}

static void xen_exit_notifier(Notifier *n, void *data)
{
    XenIOState *state = container_of(n, XenIOState, exit);

    xenevtchn_close(state->xce_handle);
    xs_daemon_close(state->xenstore);
}

static void xen_read_physmap(XenIOState *state)
{
    XenPhysmap *physmap = NULL;
    unsigned int len, num, i;
    char path[80], *value = NULL;
    char **entries = NULL;

    snprintf(path, sizeof(path),
             "/local/domain/0/device-model/%d/physmap", xen_domid);
    entries = xs_directory(state->xenstore, 0, path, &num);
    if (entries == NULL)
        return;

    for (i = 0; i < num; i++) {
        physmap = g_malloc(sizeof (XenPhysmap));
        physmap->phys_offset = strtoull(entries[i], NULL, 16);
        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/start_addr",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->start_addr = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/size",
                 xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->size = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                 "/local/domain/0/device-model/%d/physmap/%s/name",
                 xen_domid, entries[i]);
        physmap->name = xs_read(state->xenstore, 0, path, &len);

        QLIST_INSERT_HEAD(&state->physmap, physmap, list);
    }
    free(entries);
}

static void xen_wakeup_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 0);
}

void xen_hvm_init(PCMachineState *pcms, MemoryRegion **ram_memory)
{
    int i, rc;
    xen_pfn_t ioreq_pfn;
    xen_pfn_t bufioreq_pfn;
    evtchn_port_t bufioreq_evtchn;
    XenIOState *state;

    state = g_malloc0(sizeof
                      (XenIOState));

    state->xce_handle = xenevtchn_open(NULL, 0);
    if (state->xce_handle == NULL) {
        perror("xen: event channel open");
        goto err;
    }

    state->xenstore = xs_daemon_open();
    if (state->xenstore == NULL) {
        perror("xen: xenstore open");
        goto err;
    }

    if (xen_domid_restrict) {
        rc = xen_restrict(xen_domid);
        if (rc < 0) {
            error_report("failed to restrict: error %d", errno);
            goto err;
        }
    }

    xen_create_ioreq_server(xen_domid, &state->ioservid);

    state->exit.notify = xen_exit_notifier;
    qemu_add_exit_notifier(&state->exit);

    state->suspend.notify = xen_suspend_notifier;
    qemu_register_suspend_notifier(&state->suspend);

    state->wakeup.notify = xen_wakeup_notifier;
    qemu_register_wakeup_notifier(&state->wakeup);

    rc = xen_get_ioreq_server_info(xen_domid, state->ioservid,
                                   &ioreq_pfn, &bufioreq_pfn,
                                   &bufioreq_evtchn);
    if (rc < 0) {
        error_report("failed to get ioreq server info: error %d handle=%p",
                     errno, xen_xc);
        goto err;
    }

    DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
    DPRINTF("buffered io page at pfn %lx\n", bufioreq_pfn);
    DPRINTF("buffered io evtchn is %x\n", bufioreq_evtchn);

    state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
                                              PROT_READ|PROT_WRITE,
                                              1, &ioreq_pfn, NULL);
    if (state->shared_page == NULL) {
        error_report("map shared IO page returned error %d handle=%p",
                     errno, xen_xc);
        goto err;
    }

    rc = xen_get_vmport_regs_pfn(xen_xc, xen_domid, &ioreq_pfn);
    if (!rc) {
        DPRINTF("shared vmport page at pfn %lx\n", ioreq_pfn);
        state->shared_vmport_page =
            xenforeignmemory_map(xen_fmem, xen_domid, PROT_READ|PROT_WRITE,
                                 1, &ioreq_pfn, NULL);
        if (state->shared_vmport_page == NULL) {
            error_report("map shared vmport IO page returned error %d handle=%p",
                         errno, xen_xc);
            goto err;
        }
    } else if (rc != -ENOSYS) {
        error_report("get vmport regs pfn returned error %d, rc=%d",
                     errno, rc);
        goto err;
    }

    state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
                                                   PROT_READ|PROT_WRITE,
                                                   1, &bufioreq_pfn, NULL);
    if (state->buffered_io_page == NULL) {
        error_report("map buffered IO page returned error %d", errno);
        goto err;
    }

    /* Note: cpus is empty at this point in init */
    state->cpu_by_vcpu_id = g_malloc0(max_cpus * sizeof(CPUState *));

    rc = xen_set_ioreq_server_state(xen_domid, state->ioservid, true);
    if (rc < 0) {
        error_report("failed to enable ioreq server info: error %d handle=%p",
                     errno, xen_xc);
        goto err;
    }

    state->ioreq_local_port = g_malloc0(max_cpus * sizeof (evtchn_port_t));

    /* FIXME: how about if we overflow the page here?
     */
    for (i = 0; i < max_cpus; i++) {
        rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
                                        xen_vcpu_eport(state->shared_page, i));
        if (rc == -1) {
            error_report("shared evtchn %d bind error %d", i, errno);
            goto err;
        }
        state->ioreq_local_port[i] = rc;
    }

    rc = xenevtchn_bind_interdomain(state->xce_handle, xen_domid,
                                    bufioreq_evtchn);
    if (rc == -1) {
        error_report("buffered evtchn bind error %d", errno);
        goto err;
    }
    state->bufioreq_local_port = rc;

    /* Init RAM management */
    xen_map_cache_init(xen_phys_offset_to_gaddr, state);
    xen_ram_init(pcms, ram_size, ram_memory);

    qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);

    state->memory_listener = xen_memory_listener;
    QLIST_INIT(&state->physmap);
    memory_listener_register(&state->memory_listener, &address_space_memory);
    state->log_for_dirtybit = NULL;

    state->io_listener = xen_io_listener;
    memory_listener_register(&state->io_listener, &address_space_io);

    state->device_listener = xen_device_listener;
    device_listener_register(&state->device_listener);

    /* Initialize backend core & drivers */
    if (xen_be_init() != 0) {
        error_report("xen backend core setup failed");
        goto err;
    }
    xen_be_register_common();
    xen_read_physmap(state);

    /* Disable ACPI build because Xen handles it */
    pcms->acpi_build_enabled = false;

    return;

err:
    error_report("xen hardware virtual machine initialisation failed");
    exit(1);
}

void destroy_hvm_domain(bool reboot)
{
    xc_interface *xc_handle;
    int sts;

    xc_handle = xc_interface_open(0, 0, 0);
    if (xc_handle == NULL) {
        fprintf(stderr, "Cannot acquire xenctrl handle\n");
    } else {
        sts = xc_domain_shutdown(xc_handle, xen_domid,
                                 reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff);
        if (sts != 0) {
            fprintf(stderr, "xc_domain_shutdown failed to issue %s, "
                    "sts %d, %s\n", reboot ? "reboot" : "poweroff",
                    sts, strerror(errno));
        } else {
            fprintf(stderr, "Issued domain %d %s\n", xen_domid,
                    reboot ? "reboot" : "poweroff");
        }
        xc_interface_close(xc_handle);
    }
}

void xen_register_framebuffer(MemoryRegion *mr)
{
    framebuffer = mr;
}

void xen_shutdown_fatal_error(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
    fprintf(stderr, "Will destroy the domain.\n");
    /* destroy the domain */
    qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_ERROR);
}

void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length)
{
    if (unlikely(xen_in_migration)) {
        int rc;
        ram_addr_t start_pfn, nb_pages;

        if (length == 0) {
            length = TARGET_PAGE_SIZE;
        }
        start_pfn = start >> TARGET_PAGE_BITS;
        nb_pages = ((start + length + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS)
            - start_pfn;
        rc = xen_modified_memory(xen_domid, start_pfn, nb_pages);
        if (rc) {
            fprintf(stderr,
                    "%s failed for "RAM_ADDR_FMT" ("RAM_ADDR_FMT"): %i, %s\n",
                    __func__, start, nb_pages, rc, strerror(-rc));
        }
    }
}

void qmp_xen_set_global_dirty_log(bool enable, Error **errp)
{
    if (enable) {
        memory_global_dirty_log_start();
    } else {
        memory_global_dirty_log_stop();
    }
}