#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "trace.h"

#include "hw/pci/pci_host.h"
#include "hw/xen/xen-hvm-common.h"
#include "hw/xen/xen-bus.h"
#include "hw/boards.h"
#include "hw/xen/arch_hvm.h"

MemoryRegion ram_memory;

void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
                   Error **errp)
{
    unsigned long nr_pfn;
    xen_pfn_t *pfn_list;
    int i;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* RAM already populated in Xen */
        warn_report("%s: do not alloc "RAM_ADDR_FMT
                    " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE",
                    __func__, size, ram_addr);
        return;
    }

    if (mr == &ram_memory) {
        return;
    }

    trace_xen_ram_alloc(ram_addr, size);

    nr_pfn = size >> TARGET_PAGE_BITS;
    pfn_list = g_new(xen_pfn_t, nr_pfn);

    for (i = 0; i < nr_pfn; i++) {
        pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
    }

    if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0,
                                         pfn_list)) {
        error_setg(errp, "xen: failed to populate ram at " RAM_ADDR_FMT,
                   ram_addr);
    }

    g_free(pfn_list);
}
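
/*
 * A worked example of the arithmetic above (illustration only; the numbers
 * assume 4 KiB target pages, i.e. TARGET_PAGE_BITS == 12):
 */
#if 0
    ram_addr_t ram_addr = 0x40000000;                    /* 1 GiB boundary */
    ram_addr_t size = 2 * MiB;
    unsigned long nr_pfn = size >> TARGET_PAGE_BITS;     /* 512 frames */
    xen_pfn_t first = ram_addr >> TARGET_PAGE_BITS;      /* gfn 0x40000 */
    /*
     * pfn_list[] then holds 0x40000 .. 0x401ff and the whole range is
     * populated with a single xc_domain_populate_physmap_exact() call.
     */
#endif
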
static void xen_set_memory(struct MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool add)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    if (section->mr == &ram_memory) {
        return;
    } else {
        if (add) {
            xen_map_memory_section(xen_domid, state->ioservid,
                                   section);
        } else {
            xen_unmap_memory_section(xen_domid, state->ioservid,
                                     section);
        }
    }

    arch_xen_set_memory(state, section, add);
}

void xen_region_add(MemoryListener *listener,
                    MemoryRegionSection *section)
{
    memory_region_ref(section->mr);
    xen_set_memory(listener, section, true);
}

void xen_region_del(MemoryListener *listener,
                    MemoryRegionSection *section)
{
    xen_set_memory(listener, section, false);
    memory_region_unref(section->mr);
}

void xen_io_add(MemoryListener *listener,
                MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, io_listener);
    MemoryRegion *mr = section->mr;

    if (mr->ops == &unassigned_io_ops) {
        return;
    }

    memory_region_ref(mr);

    xen_map_io_section(xen_domid, state->ioservid, section);
}

void xen_io_del(MemoryListener *listener,
                MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, io_listener);
    MemoryRegion *mr = section->mr;

    if (mr->ops == &unassigned_io_ops) {
        return;
    }

    xen_unmap_io_section(xen_domid, state->ioservid, section);

    memory_region_unref(mr);
}

void xen_device_realize(DeviceListener *listener,
                        DeviceState *dev)
{
    XenIOState *state = container_of(listener, XenIOState, device_listener);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);
        XenPciDevice *xendev = g_new(XenPciDevice, 1);

        xendev->pci_dev = pci_dev;
        xendev->sbdf = PCI_BUILD_BDF(pci_dev_bus_num(pci_dev),
                                     pci_dev->devfn);
        QLIST_INSERT_HEAD(&state->dev_list, xendev, entry);

        xen_map_pcidev(xen_domid, state->ioservid, pci_dev);
    }
}

void xen_device_unrealize(DeviceListener *listener,
                          DeviceState *dev)
{
    XenIOState *state = container_of(listener, XenIOState, device_listener);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);
        XenPciDevice *xendev, *next;

        xen_unmap_pcidev(xen_domid, state->ioservid, pci_dev);

        QLIST_FOREACH_SAFE(xendev, &state->dev_list, entry, next) {
            if (xendev->pci_dev == pci_dev) {
                QLIST_REMOVE(xendev, entry);
                g_free(xendev);
                break;
            }
        }
    }
}

MemoryListener xen_io_listener = {
    .name = "xen-io",
    .region_add = xen_io_add,
    .region_del = xen_io_del,
    .priority = MEMORY_LISTENER_PRIORITY_ACCEL,
};

DeviceListener xen_device_listener = {
    .realize = xen_device_realize,
    .unrealize = xen_device_unrealize,
};

/* Get the ioreq packet from shared memory. */
static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
{
    ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);

    if (req->state != STATE_IOREQ_READY) {
        trace_cpu_get_ioreq_from_shared_memory_req_not_ready(req->state,
                                                             req->data_is_ptr,
                                                             req->addr,
                                                             req->data,
                                                             req->count,
                                                             req->size);
        return NULL;
    }

    xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */

    req->state = STATE_IOREQ_INPROCESS;
    return req;
}

/*
 * Use the event channel to find out which vCPU has a pending ioreq and
 * fetch it from shared memory.  Returns the pending ioreq packet, or NULL
 * if there is nothing to handle (only buffered I/O, a spurious wakeup, or
 * a read error).
 */
static ioreq_t *cpu_get_ioreq(XenIOState *state)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int max_cpus = ms->smp.max_cpus;
    int i;
    evtchn_port_t port;

    port = qemu_xen_evtchn_pending(state->xce_handle);
    if (port == state->bufioreq_local_port) {
        timer_mod(state->buffered_io_timer,
                  BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
        return NULL;
    }

    if (port != -1) {
        for (i = 0; i < max_cpus; i++) {
            if (state->ioreq_local_port[i] == port) {
                break;
            }
        }

        if (i == max_cpus) {
            hw_error("Fatal error while trying to get io event!\n");
        }

        /* unmask the wanted port again */
        qemu_xen_evtchn_unmask(state->xce_handle, port);

        /* get the io packet from shared memory */
        state->send_vcpu = i;
        return cpu_get_ioreq_from_shared_memory(state, i);
    }

    /* read error or read nothing */
    return NULL;
}

static uint32_t do_inp(uint32_t addr, unsigned long size)
{
    switch (size) {
    case 1:
        return cpu_inb(addr);
    case 2:
        return cpu_inw(addr);
    case 4:
        return cpu_inl(addr);
    default:
        hw_error("inp: bad size: %04x %lx", addr, size);
    }
}

static void do_outp(uint32_t addr,
                    unsigned long size, uint32_t val)
{
    switch (size) {
    case 1:
        return cpu_outb(addr, val);
    case 2:
        return cpu_outw(addr, val);
    case 4:
        return cpu_outl(addr, val);
    default:
        hw_error("outp: bad size: %04x %lx", addr, size);
    }
}

/*
 * Helper functions which read/write an object from/to physical guest
 * memory, as part of the implementation of an ioreq.
 *
 * Equivalent to
 *   cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i,
 *                          val, req->size, 0/1)
 * except without the integer overflow problems.
 */
static void rw_phys_req_item(hwaddr addr,
                             ioreq_t *req, uint32_t i, void *val, int rw)
{
    /* Do everything unsigned so overflow just results in a truncated result
     * and accesses to undesired parts of guest memory, which is up
     * to the guest */
    hwaddr offset = (hwaddr)req->size * i;
    if (req->df) {
        addr -= offset;
    } else {
        addr += offset;
    }
    cpu_physical_memory_rw(addr, val, req->size, rw);
}

static inline void read_phys_req_item(hwaddr addr,
                                      ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 0);
}
static inline void write_phys_req_item(hwaddr addr,
                                       ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 1);
}


void cpu_ioreq_pio(ioreq_t *req)
{
    uint32_t i;

    trace_cpu_ioreq_pio(req, req->dir, req->df, req->data_is_ptr, req->addr,
                        req->data, req->count, req->size);

    if (req->size > sizeof(uint32_t)) {
        hw_error("PIO: bad size (%u)", req->size);
    }

    if (req->dir == IOREQ_READ) {
        if (!req->data_is_ptr) {
            req->data = do_inp(req->addr, req->size);
            trace_cpu_ioreq_pio_read_reg(req, req->data, req->addr,
                                         req->size);
        } else {
            uint32_t tmp;

            for (i = 0; i < req->count; i++) {
                tmp = do_inp(req->addr, req->size);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        }
    } else if (req->dir == IOREQ_WRITE) {
        if (!req->data_is_ptr) {
            trace_cpu_ioreq_pio_write_reg(req, req->data, req->addr,
                                          req->size);
            do_outp(req->addr, req->size, req->data);
        } else {
            for (i = 0; i < req->count; i++) {
                uint32_t tmp = 0;

                read_phys_req_item(req->data, req, i, &tmp);
                do_outp(req->addr, req->size, tmp);
            }
        }
    }
}

static void cpu_ioreq_move(ioreq_t *req)
{
    uint32_t i;

    trace_cpu_ioreq_move(req, req->dir, req->df, req->data_is_ptr, req->addr,
                         req->data, req->count, req->size);

    if (req->size > sizeof(req->data)) {
        hw_error("MMIO: bad size (%u)", req->size);
    }

    if (!req->data_is_ptr) {
        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &req->data);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                write_phys_req_item(req->addr, req, i, &req->data);
            }
        }
    } else {
        uint64_t tmp;

        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &tmp);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->data, req, i, &tmp);
                write_phys_req_item(req->addr, req, i, &tmp);
            }
        }
    }
}

static void cpu_ioreq_config(XenIOState *state, ioreq_t *req)
{
    uint32_t sbdf = req->addr >> 32;
    uint32_t reg = req->addr;
    XenPciDevice *xendev;

    if (req->size != sizeof(uint8_t) && req->size != sizeof(uint16_t) &&
        req->size != sizeof(uint32_t)) {
        hw_error("PCI config access: bad size (%u)", req->size);
    }

    if (req->count != 1) {
        hw_error("PCI config access: bad count (%u)", req->count);
    }

    QLIST_FOREACH(xendev, &state->dev_list, entry) {
        if (xendev->sbdf != sbdf) {
            continue;
        }

        if (!req->data_is_ptr) {
            if (req->dir == IOREQ_READ) {
                req->data = pci_host_config_read_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    req->size);
                trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
                                            req->size, req->data);
            } else if (req->dir == IOREQ_WRITE) {
                trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
                                             req->size, req->data);
                pci_host_config_write_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    req->data, req->size);
            }
        } else {
            uint32_t tmp;

            if (req->dir == IOREQ_READ) {
                tmp = pci_host_config_read_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    req->size);
                trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
                                            req->size, tmp);
                write_phys_req_item(req->data, req, 0, &tmp);
            } else if (req->dir == IOREQ_WRITE) {
                read_phys_req_item(req->data, req, 0, &tmp);
                trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
                                             req->size, tmp);
                pci_host_config_write_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    tmp, req->size);
            }
        }
    }
}
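
/*
 * For illustration: a PCI config ioreq packs its target into req->addr as
 * decoded above, with the SBDF (the same bus/devfn value that
 * xen_device_realize() stored in xendev->sbdf via PCI_BUILD_BDF()) in the
 * upper 32 bits and the config space offset in the lower 32 bits.  A minimal
 * sketch, assuming a device at 00:04.0 and a read of its vendor/device ID
 * dword:
 */
#if 0
    uint64_t addr = ((uint64_t)PCI_BUILD_BDF(0, PCI_DEVFN(4, 0)) << 32)
                    | PCI_VENDOR_ID;                     /* offset 0x00 */
    uint32_t sbdf = addr >> 32;                          /* 0x00000020 */
    uint32_t reg = addr;                                 /* 0x00000000 */
#endif
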
static void handle_ioreq(XenIOState *state, ioreq_t *req)
{
    trace_handle_ioreq(req, req->type, req->dir, req->df, req->data_is_ptr,
                       req->addr, req->data, req->count, req->size);

    if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
        (req->size < sizeof(target_ulong))) {
        req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
    }

    if (req->dir == IOREQ_WRITE) {
        trace_handle_ioreq_write(req, req->type, req->df, req->data_is_ptr,
                                 req->addr, req->data, req->count, req->size);
    }

    switch (req->type) {
    case IOREQ_TYPE_PIO:
        cpu_ioreq_pio(req);
        break;
    case IOREQ_TYPE_COPY:
        cpu_ioreq_move(req);
        break;
    case IOREQ_TYPE_TIMEOFFSET:
        break;
    case IOREQ_TYPE_INVALIDATE:
        xen_invalidate_map_cache();
        break;
    case IOREQ_TYPE_PCI_CONFIG:
        cpu_ioreq_config(state, req);
        break;
    default:
        arch_handle_ioreq(state, req);
    }
    if (req->dir == IOREQ_READ) {
        trace_handle_ioreq_read(req, req->type, req->df, req->data_is_ptr,
                                req->addr, req->data, req->count, req->size);
    }
}

static bool handle_buffered_iopage(XenIOState *state)
{
    buffered_iopage_t *buf_page = state->buffered_io_page;
    buf_ioreq_t *buf_req = NULL;
    bool handled_ioreq = false;
    ioreq_t req;
    int qw;

    if (!buf_page) {
        return false;
    }

    memset(&req, 0x00, sizeof(req));
    req.state = STATE_IOREQ_READY;
    req.count = 1;
    req.dir = IOREQ_WRITE;

    for (;;) {
        uint32_t rdptr = buf_page->read_pointer, wrptr;

        xen_rmb();
        wrptr = buf_page->write_pointer;
        xen_rmb();
        if (rdptr != buf_page->read_pointer) {
            continue;
        }
        if (rdptr == wrptr) {
            break;
        }
        buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM];
        req.size = 1U << buf_req->size;
        req.addr = buf_req->addr;
        req.data = buf_req->data;
        req.type = buf_req->type;
        xen_rmb();
        qw = (req.size == 8);
        if (qw) {
            if (rdptr + 1 == wrptr) {
                hw_error("Incomplete quad word buffered ioreq");
            }
            buf_req = &buf_page->buf_ioreq[(rdptr + 1) %
                                           IOREQ_BUFFER_SLOT_NUM];
            req.data |= ((uint64_t)buf_req->data) << 32;
            xen_rmb();
        }

        handle_ioreq(state, &req);

        /* Only req.data may get updated by handle_ioreq(), albeit even that
         * should not happen as such data would never make it to the guest (we
         * can only usefully see writes here after all).
         */
        assert(req.state == STATE_IOREQ_READY);
        assert(req.count == 1);
        assert(req.dir == IOREQ_WRITE);
        assert(!req.data_is_ptr);

        qatomic_add(&buf_page->read_pointer, qw + 1);
        handled_ioreq = true;
    }

    return handled_ioreq;
}
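
/*
 * Illustration of the slot accounting above (a sketch, not code that is
 * built): the ring has IOREQ_BUFFER_SLOT_NUM fixed-size slots and is indexed
 * modulo that size, so the second half of an 8-byte ("qw") request may wrap
 * around to slot 0 while read_pointer itself keeps counting upwards; the
 * consumer therefore advances read_pointer by qw + 1, i.e. two slots for an
 * 8-byte request and one slot otherwise.
 */
#if 0
    /* e.g. rdptr == IOREQ_BUFFER_SLOT_NUM - 1 with req.size == 8: */
    unsigned lo_slot = rdptr % IOREQ_BUFFER_SLOT_NUM;        /* last slot */
    unsigned hi_slot = (rdptr + 1) % IOREQ_BUFFER_SLOT_NUM;  /* wraps to 0 */
#endif
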
static void handle_buffered_io(void *opaque)
{
    XenIOState *state = opaque;

    if (handle_buffered_iopage(state)) {
        timer_mod(state->buffered_io_timer,
                  BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
    } else {
        timer_del(state->buffered_io_timer);
        qemu_xen_evtchn_unmask(state->xce_handle, state->bufioreq_local_port);
    }
}

static void cpu_handle_ioreq(void *opaque)
{
    XenIOState *state = opaque;
    ioreq_t *req = cpu_get_ioreq(state);

    handle_buffered_iopage(state);
    if (req) {
        ioreq_t copy = *req;

        xen_rmb();
        handle_ioreq(state, &copy);
        req->data = copy.data;

        if (req->state != STATE_IOREQ_INPROCESS) {
            warn_report("Badness in I/O request ... not in service?!: "
                        "%x, ptr: %x, port: %"PRIx64", "
                        "data: %"PRIx64", count: %u, size: %u, type: %u",
                        req->state, req->data_is_ptr, req->addr,
                        req->data, req->count, req->size, req->type);
            destroy_hvm_domain(false);
            return;
        }

        xen_wmb(); /* Update ioreq contents /then/ update state. */

        /*
         * We do this before we send the response so that the tools
         * have the opportunity to pick up on the reset before the
         * guest resumes and does a hlt with interrupts disabled which
         * causes Xen to powerdown the domain.
         */
        if (runstate_is_running()) {
            ShutdownCause request;

            if (qemu_shutdown_requested_get()) {
                destroy_hvm_domain(false);
            }
            request = qemu_reset_requested_get();
            if (request) {
                qemu_system_reset(request);
                destroy_hvm_domain(true);
            }
        }

        req->state = STATE_IORESP_READY;
        qemu_xen_evtchn_notify(state->xce_handle,
                               state->ioreq_local_port[state->send_vcpu]);
    }
}
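
/*
 * Summary of the ioreq handshake driven by cpu_handle_ioreq() above (the
 * standard Xen ioreq state machine as used by this file): Xen sets
 * req->state = STATE_IOREQ_READY and notifies the per-vcpu event channel;
 * cpu_get_ioreq_from_shared_memory() claims the request
 * (STATE_IOREQ_INPROCESS); handle_ioreq() emulates it and fills in req->data
 * for reads; the response is then published by setting STATE_IORESP_READY
 * before notifying the event channel back, with xen_rmb()/xen_wmb() ordering
 * the payload against the state field.
 */
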
static void xen_main_loop_prepare(XenIOState *state)
{
    int evtchn_fd = -1;

    if (state->xce_handle != NULL) {
        evtchn_fd = qemu_xen_evtchn_fd(state->xce_handle);
    }

    state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME, handle_buffered_io,
                                            state);

    if (evtchn_fd != -1) {
        CPUState *cpu_state;

        CPU_FOREACH(cpu_state) {
            trace_xen_main_loop_prepare_init_cpu(cpu_state->cpu_index,
                                                 cpu_state);
            state->cpu_by_vcpu_id[cpu_state->cpu_index] = cpu_state;
        }
        qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
    }
}


void xen_hvm_change_state_handler(void *opaque, bool running,
                                  RunState rstate)
{
    XenIOState *state = opaque;

    if (running) {
        xen_main_loop_prepare(state);
    }

    xen_set_ioreq_server_state(xen_domid,
                               state->ioservid,
                               running);
}

void xen_exit_notifier(Notifier *n, void *data)
{
    XenIOState *state = container_of(n, XenIOState, exit);

    xen_destroy_ioreq_server(xen_domid, state->ioservid);
    if (state->fres != NULL) {
        xenforeignmemory_unmap_resource(xen_fmem, state->fres);
    }

    qemu_xen_evtchn_close(state->xce_handle);
    xs_daemon_close(state->xenstore);
}

static int xen_map_ioreq_server(XenIOState *state)
{
    void *addr = NULL;
    xen_pfn_t ioreq_pfn;
    xen_pfn_t bufioreq_pfn;
    evtchn_port_t bufioreq_evtchn;
    int rc;

    /*
     * Attempt to map using the resource API and fall back to normal
     * foreign mapping if this is not supported.
     */
    QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_bufioreq != 0);
    QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_ioreq(0) != 1);
    state->fres = xenforeignmemory_map_resource(xen_fmem, xen_domid,
                                                XENMEM_resource_ioreq_server,
                                                state->ioservid, 0, 2,
                                                &addr,
                                                PROT_READ | PROT_WRITE, 0);
    if (state->fres != NULL) {
        trace_xen_map_resource_ioreq(state->ioservid, addr);
        state->buffered_io_page = addr;
        state->shared_page = addr + XC_PAGE_SIZE;
    } else if (errno != EOPNOTSUPP) {
        error_report("failed to map ioreq server resources: error %d handle=%p",
                     errno, xen_xc);
        return -1;
    }

    rc = xen_get_ioreq_server_info(xen_domid, state->ioservid,
                                   (state->shared_page == NULL) ?
                                   &ioreq_pfn : NULL,
                                   (state->buffered_io_page == NULL) ?
                                   &bufioreq_pfn : NULL,
                                   &bufioreq_evtchn);
    if (rc < 0) {
        error_report("failed to get ioreq server info: error %d handle=%p",
                     errno, xen_xc);
        return rc;
    }

    if (state->shared_page == NULL) {
        trace_xen_map_ioreq_server_shared_page(ioreq_pfn);

        state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
                                                  PROT_READ | PROT_WRITE,
                                                  1, &ioreq_pfn, NULL);
        if (state->shared_page == NULL) {
            error_report("map shared IO page returned error %d handle=%p",
                         errno, xen_xc);
        }
    }

    if (state->buffered_io_page == NULL) {
        trace_xen_map_ioreq_server_buffered_io_page(bufioreq_pfn);

        state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
                                                       PROT_READ | PROT_WRITE,
                                                       1, &bufioreq_pfn,
                                                       NULL);
        if (state->buffered_io_page == NULL) {
            error_report("map buffered IO page returned error %d", errno);
            return -1;
        }
    }

    if (state->shared_page == NULL || state->buffered_io_page == NULL) {
        return -1;
    }

    trace_xen_map_ioreq_server_buffered_io_evtchn(bufioreq_evtchn);

    state->bufioreq_remote_port = bufioreq_evtchn;

    return 0;
}

void destroy_hvm_domain(bool reboot)
{
    xc_interface *xc_handle;
    int sts;
    int rc;

    unsigned int reason = reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff;

    if (xen_dmod) {
        rc = xendevicemodel_shutdown(xen_dmod, xen_domid, reason);
        if (!rc) {
            return;
        }
        if (errno != ENOTTY /* old Xen */) {
            error_report("xendevicemodel_shutdown failed with error %d", errno);
        }
        /* well, try the old thing then */
    }

    xc_handle = xc_interface_open(0, 0, 0);
    if (xc_handle == NULL) {
        trace_destroy_hvm_domain_cannot_acquire_handle();
    } else {
        sts = xc_domain_shutdown(xc_handle, xen_domid, reason);
        if (sts != 0) {
            trace_destroy_hvm_domain_failed_action(
                reboot ? "reboot" : "poweroff", sts, strerror(errno)
            );
        } else {
            trace_destroy_hvm_domain_action(
                xen_domid, reboot ? "reboot" : "poweroff"
            );
        }
        xc_interface_close(xc_handle);
    }
}

void xen_shutdown_fatal_error(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    error_vreport(fmt, ap);
    va_end(ap);
    error_report("Will destroy the domain.");
    /* destroy the domain */
    qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_ERROR);
}

static void xen_do_ioreq_register(XenIOState *state,
                                  unsigned int max_cpus,
                                  const MemoryListener *xen_memory_listener)
{
    int i, rc;

    state->exit.notify = xen_exit_notifier;
    qemu_add_exit_notifier(&state->exit);

    /*
     * Register wake-up support in QMP query-current-machine API
     */
    qemu_register_wakeup_support();

    rc = xen_map_ioreq_server(state);
    if (rc < 0) {
        goto err;
    }

    /* Note: cpus is empty at this point in init */
    state->cpu_by_vcpu_id = g_new0(CPUState *, max_cpus);

    rc = xen_set_ioreq_server_state(xen_domid, state->ioservid, true);
    if (rc < 0) {
        error_report("failed to enable ioreq server info: error %d handle=%p",
                     errno, xen_xc);
        goto err;
    }

    state->ioreq_local_port = g_new0(evtchn_port_t, max_cpus);

    /* FIXME: how about if we overflow the page here? */
    for (i = 0; i < max_cpus; i++) {
        rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid,
                                              xen_vcpu_eport(state->shared_page,
                                                             i));
        if (rc == -1) {
            error_report("shared evtchn %d bind error %d", i, errno);
            goto err;
        }
        state->ioreq_local_port[i] = rc;
    }

    rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid,
                                          state->bufioreq_remote_port);
    if (rc == -1) {
        error_report("buffered evtchn bind error %d", errno);
        goto err;
    }
    state->bufioreq_local_port = rc;

    /* Init RAM management */
#ifdef XEN_COMPAT_PHYSMAP
    xen_map_cache_init(xen_phys_offset_to_gaddr, state);
#else
    xen_map_cache_init(NULL, state);
#endif

    qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);

    state->memory_listener = *xen_memory_listener;
    memory_listener_register(&state->memory_listener, &address_space_memory);

    state->io_listener = xen_io_listener;
    memory_listener_register(&state->io_listener, &address_space_io);

    state->device_listener = xen_device_listener;
    QLIST_INIT(&state->dev_list);
    device_listener_register(&state->device_listener);

    return;

err:
    error_report("xen hardware virtual machine initialisation failed");
    exit(1);
}

void xen_register_ioreq(XenIOState *state, unsigned int max_cpus,
                        const MemoryListener *xen_memory_listener)
{
    int rc;

    setup_xen_backend_ops();

    state->xce_handle = qemu_xen_evtchn_open();
    if (state->xce_handle == NULL) {
        error_report("xen: event channel open failed with error %d", errno);
        goto err;
    }

    state->xenstore = xs_daemon_open();
    if (state->xenstore == NULL) {
        error_report("xen: xenstore open failed with error %d", errno);
        goto err;
    }

    rc = xen_create_ioreq_server(xen_domid, &state->ioservid);
    if (!rc) {
        xen_do_ioreq_register(state, max_cpus, xen_memory_listener);
    } else {
        warn_report("xen: failed to create ioreq server");
    }

    xen_bus_init();

    xen_be_init();

    return;

err:
    error_report("xen hardware virtual machine backend registration failed");
    exit(1);
}
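
/*
 * A minimal usage sketch (illustration only; the accelerator/machine init
 * function, the xen_memory_listener instance and the XenIOState allocation
 * below are assumptions, not part of this file): a caller supplies its own
 * MemoryListener for guest RAM sections, built on xen_region_add()/
 * xen_region_del() above, and hands it to xen_register_ioreq() together
 * with the number of vCPUs.
 */
#if 0
static MemoryListener xen_memory_listener = {
    .name       = "xen-memory",
    .region_add = xen_region_add,
    .region_del = xen_region_del,
    .priority   = MEMORY_LISTENER_PRIORITY_ACCEL,
};

static void example_xen_accel_init(MachineState *ms)
{
    XenIOState *state = g_new0(XenIOState, 1);

    /* Creates the ioreq server, maps its pages and binds the event channels. */
    xen_register_ioreq(state, ms->smp.max_cpus, &xen_memory_listener);
}
#endif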