xref: /openbmc/qemu/hw/xen/xen-hvm-common.c (revision f21f0cbc)
1 #include "qemu/osdep.h"
2 #include "qemu/units.h"
3 #include "qapi/error.h"
4 #include "exec/target_page.h"
5 #include "trace.h"
6 
7 #include "hw/pci/pci_host.h"
8 #include "hw/xen/xen-hvm-common.h"
9 #include "hw/xen/xen-bus.h"
10 #include "hw/boards.h"
11 #include "hw/xen/arch_hvm.h"
12 
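/* The guest RAM MemoryRegion; initialised by the per-architecture Xen code. */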
MemoryRegion xen_memory;

/* Check whether a memory region is the Xen guest RAM region. */
bool xen_mr_is_memory(MemoryRegion *mr)
{
    return mr == &xen_memory;
}

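/*
 * Ask Xen to populate the guest-physical pages backing a newly allocated RAM
 * block.  Skipped during incoming migration (the RAM is already populated in
 * Xen) and for the xen_memory region itself.
 */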
void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr,
                   Error **errp)
{
    unsigned target_page_bits = qemu_target_page_bits();
    unsigned long nr_pfn;
    xen_pfn_t *pfn_list;
    int i;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* RAM already populated in Xen */
        warn_report("%s: do not alloc "RAM_ADDR_FMT
                " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE",
                __func__, size, ram_addr);
        return;
    }

    if (xen_mr_is_memory(mr)) {
        return;
    }

    trace_xen_ram_alloc(ram_addr, size);

    nr_pfn = size >> target_page_bits;
    pfn_list = g_new(xen_pfn_t, nr_pfn);

    for (i = 0; i < nr_pfn; i++) {
        pfn_list[i] = (ram_addr >> target_page_bits) + i;
    }

    if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) {
        error_setg(errp, "xen: failed to populate ram at " RAM_ADDR_FMT,
                   ram_addr);
    }

    g_free(pfn_list);
}

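/*
 * Register or unregister a memory section with the ioreq server, then give
 * the per-architecture hook a chance to react as well.
 */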
static void xen_set_memory(struct MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool add)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    if (xen_mr_is_memory(section->mr)) {
        return;
    } else {
        if (add) {
            xen_map_memory_section(xen_domid, state->ioservid,
                                   section);
        } else {
            xen_unmap_memory_section(xen_domid, state->ioservid,
                                     section);
        }
    }

    arch_xen_set_memory(state, section, add);
}

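/*
 * MemoryListener callbacks for the RAM/MMIO address space: keep a reference
 * on the region for as long as it is registered with the ioreq server.
 */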
void xen_region_add(MemoryListener *listener,
                    MemoryRegionSection *section)
{
    memory_region_ref(section->mr);
    xen_set_memory(listener, section, true);
}

void xen_region_del(MemoryListener *listener,
                    MemoryRegionSection *section)
{
    xen_set_memory(listener, section, false);
    memory_region_unref(section->mr);
}

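/*
 * MemoryListener callbacks for the port I/O address space; regions backed by
 * unassigned_io_ops are ignored.
 */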
void xen_io_add(MemoryListener *listener,
                MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, io_listener);
    MemoryRegion *mr = section->mr;

    if (mr->ops == &unassigned_io_ops) {
        return;
    }

    memory_region_ref(mr);

    xen_map_io_section(xen_domid, state->ioservid, section);
}

void xen_io_del(MemoryListener *listener,
                MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, io_listener);
    MemoryRegion *mr = section->mr;

    if (mr->ops == &unassigned_io_ops) {
        return;
    }

    xen_unmap_io_section(xen_domid, state->ioservid, section);

    memory_region_unref(mr);
}

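/*
 * DeviceListener callbacks: track PCI devices on a local list (keyed by
 * SBDF) so that config-space ioreqs can be routed to them, and register
 * them with the ioreq server.
 */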
void xen_device_realize(DeviceListener *listener,
                        DeviceState *dev)
{
    XenIOState *state = container_of(listener, XenIOState, device_listener);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);
        XenPciDevice *xendev = g_new(XenPciDevice, 1);

        xendev->pci_dev = pci_dev;
        xendev->sbdf = PCI_BUILD_BDF(pci_dev_bus_num(pci_dev),
                                     pci_dev->devfn);
        QLIST_INSERT_HEAD(&state->dev_list, xendev, entry);

        xen_map_pcidev(xen_domid, state->ioservid, pci_dev);
    }
}

void xen_device_unrealize(DeviceListener *listener,
                          DeviceState *dev)
{
    XenIOState *state = container_of(listener, XenIOState, device_listener);

    if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE)) {
        PCIDevice *pci_dev = PCI_DEVICE(dev);
        XenPciDevice *xendev, *next;

        xen_unmap_pcidev(xen_domid, state->ioservid, pci_dev);

        QLIST_FOREACH_SAFE(xendev, &state->dev_list, entry, next) {
            if (xendev->pci_dev == pci_dev) {
                QLIST_REMOVE(xendev, entry);
                g_free(xendev);
                break;
            }
        }
    }
}

MemoryListener xen_io_listener = {
    .name = "xen-io",
    .region_add = xen_io_add,
    .region_del = xen_io_del,
    .priority = MEMORY_LISTENER_PRIORITY_ACCEL,
};

DeviceListener xen_device_listener = {
    .realize = xen_device_realize,
    .unrealize = xen_device_unrealize,
};

/* Get the pending ioreq packet for a vcpu from the shared ioreq page. */
static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
{
    ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);

    if (req->state != STATE_IOREQ_READY) {
        trace_cpu_get_ioreq_from_shared_memory_req_not_ready(req->state,
                                                             req->data_is_ptr,
                                                             req->addr,
                                                             req->data,
                                                             req->count,
                                                             req->size);
        return NULL;
    }

    xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */

    req->state = STATE_IOREQ_INPROCESS;
    return req;
}

/*
 * Poll the event channel for a pending port notification and return the
 * matching ioreq packet from shared memory, or NULL if there is nothing
 * to service (or the notification was for the buffered ioreq ring).
 */
static ioreq_t *cpu_get_ioreq(XenIOState *state)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int max_cpus = ms->smp.max_cpus;
    int i;
    evtchn_port_t port;

    port = qemu_xen_evtchn_pending(state->xce_handle);
    if (port == state->bufioreq_local_port) {
        timer_mod(state->buffered_io_timer,
                BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
        return NULL;
    }

    if (port != -1) {
        for (i = 0; i < max_cpus; i++) {
            if (state->ioreq_local_port[i] == port) {
                break;
            }
        }

        if (i == max_cpus) {
            hw_error("Fatal error while trying to get io event!\n");
        }

        /* unmask the wanted port again */
        qemu_xen_evtchn_unmask(state->xce_handle, port);

        /* get the io packet from shared memory */
        state->send_vcpu = i;
        return cpu_get_ioreq_from_shared_memory(state, i);
    }

    /* read error or read nothing */
    return NULL;
}

static uint32_t do_inp(uint32_t addr, unsigned long size)
{
    switch (size) {
        case 1:
            return cpu_inb(addr);
        case 2:
            return cpu_inw(addr);
        case 4:
            return cpu_inl(addr);
        default:
            hw_error("inp: bad size: %04x %lx", addr, size);
    }
}

static void do_outp(uint32_t addr,
        unsigned long size, uint32_t val)
{
    switch (size) {
        case 1:
            return cpu_outb(addr, val);
        case 2:
            return cpu_outw(addr, val);
        case 4:
            return cpu_outl(addr, val);
        default:
            hw_error("outp: bad size: %04x %lx", addr, size);
    }
}

/*
 * Helper functions which read/write an object from/to physical guest
 * memory, as part of the implementation of an ioreq.
 *
 * Equivalent to
 *   cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i,
 *                          val, req->size, 0/1)
 * except without the integer overflow problems.
 */
static void rw_phys_req_item(hwaddr addr,
                             ioreq_t *req, uint32_t i, void *val, int rw)
{
    /* Do everything unsigned so overflow just results in a truncated result
     * and accesses to undesired parts of guest memory, which is up
     * to the guest */
    hwaddr offset = (hwaddr)req->size * i;
    if (req->df) {
        addr -= offset;
    } else {
        addr += offset;
    }
    cpu_physical_memory_rw(addr, val, req->size, rw);
}

static inline void read_phys_req_item(hwaddr addr,
                                      ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 0);
}
static inline void write_phys_req_item(hwaddr addr,
                                       ioreq_t *req, uint32_t i, void *val)
{
    rw_phys_req_item(addr, req, i, val, 1);
}

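/*
 * Service a port I/O request.  req->data either holds the value directly or,
 * when data_is_ptr is set, points at a guest buffer accessed one item at a
 * time via {read,write}_phys_req_item().
 */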
void cpu_ioreq_pio(ioreq_t *req)
{
    uint32_t i;

    trace_cpu_ioreq_pio(req, req->dir, req->df, req->data_is_ptr, req->addr,
                         req->data, req->count, req->size);

    if (req->size > sizeof(uint32_t)) {
        hw_error("PIO: bad size (%u)", req->size);
    }

    if (req->dir == IOREQ_READ) {
        if (!req->data_is_ptr) {
            req->data = do_inp(req->addr, req->size);
            trace_cpu_ioreq_pio_read_reg(req, req->data, req->addr,
                                         req->size);
        } else {
            uint32_t tmp;

            for (i = 0; i < req->count; i++) {
                tmp = do_inp(req->addr, req->size);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        }
    } else if (req->dir == IOREQ_WRITE) {
        if (!req->data_is_ptr) {
            trace_cpu_ioreq_pio_write_reg(req, req->data, req->addr,
                                          req->size);
            do_outp(req->addr, req->size, req->data);
        } else {
            for (i = 0; i < req->count; i++) {
                uint32_t tmp = 0;

                read_phys_req_item(req->data, req, i, &tmp);
                do_outp(req->addr, req->size, tmp);
            }
        }
    }
}

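/*
 * Service an MMIO "copy" request, moving up to req->count items of req->size
 * bytes between req->addr and req->data (or the guest buffer it points at).
 */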
static void cpu_ioreq_move(ioreq_t *req)
{
    uint32_t i;

    trace_cpu_ioreq_move(req, req->dir, req->df, req->data_is_ptr, req->addr,
                         req->data, req->count, req->size);

    if (req->size > sizeof(req->data)) {
        hw_error("MMIO: bad size (%u)", req->size);
    }

    if (!req->data_is_ptr) {
        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &req->data);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                write_phys_req_item(req->addr, req, i, &req->data);
            }
        }
    } else {
        uint64_t tmp;

        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->addr, req, i, &tmp);
                write_phys_req_item(req->data, req, i, &tmp);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                read_phys_req_item(req->data, req, i, &tmp);
                write_phys_req_item(req->addr, req, i, &tmp);
            }
        }
    }
}

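/*
 * Service a PCI config-space request: find the emulated device whose SBDF
 * matches the upper half of req->addr and forward the access to the PCI
 * host bridge helpers.
 */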
static void cpu_ioreq_config(XenIOState *state, ioreq_t *req)
{
    uint32_t sbdf = req->addr >> 32;
    uint32_t reg = req->addr;
    XenPciDevice *xendev;

    if (req->size != sizeof(uint8_t) && req->size != sizeof(uint16_t) &&
        req->size != sizeof(uint32_t)) {
        hw_error("PCI config access: bad size (%u)", req->size);
    }

    if (req->count != 1) {
        hw_error("PCI config access: bad count (%u)", req->count);
    }

    QLIST_FOREACH(xendev, &state->dev_list, entry) {
        if (xendev->sbdf != sbdf) {
            continue;
        }

        if (!req->data_is_ptr) {
            if (req->dir == IOREQ_READ) {
                req->data = pci_host_config_read_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    req->size);
                trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
                                            req->size, req->data);
            } else if (req->dir == IOREQ_WRITE) {
                trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
                                             req->size, req->data);
                pci_host_config_write_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    req->data, req->size);
            }
        } else {
            uint32_t tmp;

            if (req->dir == IOREQ_READ) {
                tmp = pci_host_config_read_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    req->size);
                trace_cpu_ioreq_config_read(req, xendev->sbdf, reg,
                                            req->size, tmp);
                write_phys_req_item(req->data, req, 0, &tmp);
            } else if (req->dir == IOREQ_WRITE) {
                read_phys_req_item(req->data, req, 0, &tmp);
                trace_cpu_ioreq_config_write(req, xendev->sbdf, reg,
                                             req->size, tmp);
                pci_host_config_write_common(
                    xendev->pci_dev, reg, PCI_CONFIG_SPACE_SIZE,
                    tmp, req->size);
            }
        }
    }
}

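/* Dispatch a single ioreq to the handler matching its type. */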
static void handle_ioreq(XenIOState *state, ioreq_t *req)
{
    trace_handle_ioreq(req, req->type, req->dir, req->df, req->data_is_ptr,
                       req->addr, req->data, req->count, req->size);

    if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
            (req->size < sizeof(target_ulong))) {
        req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
    }

    if (req->dir == IOREQ_WRITE) {
        trace_handle_ioreq_write(req, req->type, req->df, req->data_is_ptr,
                                 req->addr, req->data, req->count, req->size);
    }

    switch (req->type) {
        case IOREQ_TYPE_PIO:
            cpu_ioreq_pio(req);
            break;
        case IOREQ_TYPE_COPY:
            cpu_ioreq_move(req);
            break;
        case IOREQ_TYPE_TIMEOFFSET:
            break;
        case IOREQ_TYPE_INVALIDATE:
            xen_invalidate_map_cache();
            break;
        case IOREQ_TYPE_PCI_CONFIG:
            cpu_ioreq_config(state, req);
            break;
        default:
            arch_handle_ioreq(state, req);
    }
    if (req->dir == IOREQ_READ) {
        trace_handle_ioreq_read(req, req->type, req->df, req->data_is_ptr,
                                req->addr, req->data, req->count, req->size);
    }
}

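/*
 * Drain the buffered ioreq ring, turning each slot into a synthetic
 * write-only ioreq.  Returns true if at least one request was handled.
 */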
static bool handle_buffered_iopage(XenIOState *state)
{
    buffered_iopage_t *buf_page = state->buffered_io_page;
    buf_ioreq_t *buf_req = NULL;
    bool handled_ioreq = false;
    ioreq_t req;
    int qw;

    if (!buf_page) {
        return false;
    }

    memset(&req, 0x00, sizeof(req));
    req.state = STATE_IOREQ_READY;
    req.count = 1;
    req.dir = IOREQ_WRITE;

    for (;;) {
        uint32_t rdptr = buf_page->read_pointer, wrptr;

        xen_rmb();
        wrptr = buf_page->write_pointer;
        xen_rmb();
        if (rdptr != buf_page->read_pointer) {
            continue;
        }
        if (rdptr == wrptr) {
            break;
        }
        buf_req = &buf_page->buf_ioreq[rdptr % IOREQ_BUFFER_SLOT_NUM];
        req.size = 1U << buf_req->size;
        req.addr = buf_req->addr;
        req.data = buf_req->data;
        req.type = buf_req->type;
        xen_rmb();
        qw = (req.size == 8);
        if (qw) {
            if (rdptr + 1 == wrptr) {
                hw_error("Incomplete quad word buffered ioreq");
            }
            buf_req = &buf_page->buf_ioreq[(rdptr + 1) %
                                           IOREQ_BUFFER_SLOT_NUM];
            req.data |= ((uint64_t)buf_req->data) << 32;
            xen_rmb();
        }

        handle_ioreq(state, &req);

        /* Only req.data may get updated by handle_ioreq(), albeit even that
         * should not happen as such data would never make it to the guest (we
         * can only usefully see writes here after all).
         */
        assert(req.state == STATE_IOREQ_READY);
        assert(req.count == 1);
        assert(req.dir == IOREQ_WRITE);
        assert(!req.data_is_ptr);

        qatomic_add(&buf_page->read_pointer, qw + 1);
        handled_ioreq = true;
    }

    return handled_ioreq;
}

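/*
 * Timer callback: keep draining the buffered ioreq ring while work remains;
 * once it is empty, stop the timer and unmask the buffered ioreq event
 * channel again.
 */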
static void handle_buffered_io(void *opaque)
{
    XenIOState *state = opaque;

    if (handle_buffered_iopage(state)) {
        timer_mod(state->buffered_io_timer,
                BUFFER_IO_MAX_DELAY + qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
    } else {
        timer_del(state->buffered_io_timer);
        qemu_xen_evtchn_unmask(state->xce_handle, state->bufioreq_local_port);
    }
}

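/*
 * Event-channel fd handler: service any buffered ioreqs, then handle the
 * pending synchronous ioreq (if any) and notify the guest vcpu once the
 * response is ready.
 */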
static void cpu_handle_ioreq(void *opaque)
{
    XenIOState *state = opaque;
    ioreq_t *req = cpu_get_ioreq(state);

    handle_buffered_iopage(state);
    if (req) {
        ioreq_t copy = *req;

        xen_rmb();
        handle_ioreq(state, &copy);
        req->data = copy.data;

        if (req->state != STATE_IOREQ_INPROCESS) {
            warn_report("Badness in I/O request ... not in service?!: "
                    "%x, ptr: %x, port: %"PRIx64", "
                    "data: %"PRIx64", count: %u, size: %u, type: %u",
                    req->state, req->data_is_ptr, req->addr,
                    req->data, req->count, req->size, req->type);
            destroy_hvm_domain(false);
            return;
        }

        xen_wmb(); /* Update ioreq contents /then/ update state. */

        /*
         * We do this before we send the response so that the tools
         * have the opportunity to pick up on the reset before the
         * guest resumes and does a hlt with interrupts disabled which
         * causes Xen to powerdown the domain.
         */
        if (runstate_is_running()) {
            ShutdownCause request;

            if (qemu_shutdown_requested_get()) {
                destroy_hvm_domain(false);
            }
            request = qemu_reset_requested_get();
            if (request) {
                qemu_system_reset(request);
                destroy_hvm_domain(true);
            }
        }

        req->state = STATE_IORESP_READY;
        qemu_xen_evtchn_notify(state->xce_handle,
                               state->ioreq_local_port[state->send_vcpu]);
    }
}

static void xen_main_loop_prepare(XenIOState *state)
{
    int evtchn_fd = -1;

    if (state->xce_handle != NULL) {
        evtchn_fd = qemu_xen_evtchn_fd(state->xce_handle);
    }

    state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME,
                                            handle_buffered_io, state);

    if (evtchn_fd != -1) {
        CPUState *cpu_state;

        CPU_FOREACH(cpu_state) {
            trace_xen_main_loop_prepare_init_cpu(cpu_state->cpu_index,
                                                 cpu_state);
            state->cpu_by_vcpu_id[cpu_state->cpu_index] = cpu_state;
        }
        qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
    }
}

void xen_hvm_change_state_handler(void *opaque, bool running,
                                  RunState rstate)
{
    XenIOState *state = opaque;

    if (running) {
        xen_main_loop_prepare(state);
    }

    xen_set_ioreq_server_state(xen_domid,
                               state->ioservid,
                               running);
}

void xen_exit_notifier(Notifier *n, void *data)
{
    XenIOState *state = container_of(n, XenIOState, exit);

    xen_destroy_ioreq_server(xen_domid, state->ioservid);
    if (state->fres != NULL) {
        xenforeignmemory_unmap_resource(xen_fmem, state->fres);
    }

    qemu_xen_evtchn_close(state->xce_handle);
    xs_daemon_close(state->xenstore);
}

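/*
 * Map the ioreq server's shared and buffered ioreq pages and record the
 * buffered ioreq event channel.  Returns 0 on success, negative on failure.
 */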
static int xen_map_ioreq_server(XenIOState *state)
{
    void *addr = NULL;
    xen_pfn_t ioreq_pfn;
    xen_pfn_t bufioreq_pfn;
    evtchn_port_t bufioreq_evtchn;
    int rc;

    /*
     * Attempt to map using the resource API and fall back to normal
     * foreign mapping if this is not supported.
     */
    QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_bufioreq != 0);
    QEMU_BUILD_BUG_ON(XENMEM_resource_ioreq_server_frame_ioreq(0) != 1);
    state->fres = xenforeignmemory_map_resource(xen_fmem, xen_domid,
                                         XENMEM_resource_ioreq_server,
                                         state->ioservid, 0, 2,
                                         &addr,
                                         PROT_READ | PROT_WRITE, 0);
    if (state->fres != NULL) {
        trace_xen_map_resource_ioreq(state->ioservid, addr);
        state->buffered_io_page = addr;
        state->shared_page = addr + XC_PAGE_SIZE;
    } else if (errno != EOPNOTSUPP) {
        error_report("failed to map ioreq server resources: error %d handle=%p",
                     errno, xen_xc);
        return -1;
    }

    rc = xen_get_ioreq_server_info(xen_domid, state->ioservid,
                                   (state->shared_page == NULL) ?
                                   &ioreq_pfn : NULL,
                                   (state->buffered_io_page == NULL) ?
                                   &bufioreq_pfn : NULL,
                                   &bufioreq_evtchn);
    if (rc < 0) {
        error_report("failed to get ioreq server info: error %d handle=%p",
                     errno, xen_xc);
        return rc;
    }

    if (state->shared_page == NULL) {
        trace_xen_map_ioreq_server_shared_page(ioreq_pfn);

        state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid,
                                                  PROT_READ | PROT_WRITE,
                                                  1, &ioreq_pfn, NULL);
        if (state->shared_page == NULL) {
            error_report("map shared IO page returned error %d handle=%p",
                         errno, xen_xc);
        }
    }

    if (state->buffered_io_page == NULL) {
        trace_xen_map_ioreq_server_buffered_io_page(bufioreq_pfn);

        state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid,
                                                       PROT_READ | PROT_WRITE,
                                                       1, &bufioreq_pfn,
                                                       NULL);
        if (state->buffered_io_page == NULL) {
            error_report("map buffered IO page returned error %d", errno);
            return -1;
        }
    }

    if (state->shared_page == NULL || state->buffered_io_page == NULL) {
        return -1;
    }

    trace_xen_map_ioreq_server_buffered_io_evtchn(bufioreq_evtchn);

    state->bufioreq_remote_port = bufioreq_evtchn;

    return 0;
}

void destroy_hvm_domain(bool reboot)
{
    xc_interface *xc_handle;
    int sts;
    int rc;

    unsigned int reason = reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff;

    if (xen_dmod) {
        rc = xendevicemodel_shutdown(xen_dmod, xen_domid, reason);
        if (!rc) {
            return;
        }
        if (errno != ENOTTY /* old Xen */) {
            error_report("xendevicemodel_shutdown failed with error %d", errno);
        }
        /* well, try the old thing then */
    }

    xc_handle = xc_interface_open(0, 0, 0);
    if (xc_handle == NULL) {
        trace_destroy_hvm_domain_cannot_acquire_handle();
    } else {
        sts = xc_domain_shutdown(xc_handle, xen_domid, reason);
        if (sts != 0) {
            trace_destroy_hvm_domain_failed_action(
                reboot ? "reboot" : "poweroff", sts, strerror(errno)
            );
        } else {
            trace_destroy_hvm_domain_action(
                xen_domid, reboot ? "reboot" : "poweroff"
            );
        }
        xc_interface_close(xc_handle);
    }
}

void xen_shutdown_fatal_error(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    error_vreport(fmt, ap);
    va_end(ap);
    error_report("Will destroy the domain.");
    /* destroy the domain */
    qemu_system_shutdown_request(SHUTDOWN_CAUSE_HOST_ERROR);
}

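/*
 * Wire up the ioreq server: map its pages, bind the per-vcpu and buffered
 * event channels, initialise the map cache and register the memory, I/O and
 * device listeners.  Any failure here is fatal.
 */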
static void xen_do_ioreq_register(XenIOState *state,
                                  unsigned int max_cpus,
                                  const MemoryListener *xen_memory_listener)
{
    int i, rc;

    state->exit.notify = xen_exit_notifier;
    qemu_add_exit_notifier(&state->exit);

    /*
     * Register wake-up support in QMP query-current-machine API
     */
    qemu_register_wakeup_support();

    rc = xen_map_ioreq_server(state);
    if (rc < 0) {
        goto err;
    }

    /* Note: cpus is empty at this point in init */
    state->cpu_by_vcpu_id = g_new0(CPUState *, max_cpus);

    rc = xen_set_ioreq_server_state(xen_domid, state->ioservid, true);
    if (rc < 0) {
        error_report("failed to enable ioreq server info: error %d handle=%p",
                     errno, xen_xc);
        goto err;
    }

    state->ioreq_local_port = g_new0(evtchn_port_t, max_cpus);

    /* FIXME: how about if we overflow the page here? */
    for (i = 0; i < max_cpus; i++) {
        rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid,
                                              xen_vcpu_eport(state->shared_page,
                                                             i));
        if (rc == -1) {
            error_report("shared evtchn %d bind error %d", i, errno);
            goto err;
        }
        state->ioreq_local_port[i] = rc;
    }

    rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid,
                                          state->bufioreq_remote_port);
    if (rc == -1) {
        error_report("buffered evtchn bind error %d", errno);
        goto err;
    }
    state->bufioreq_local_port = rc;

    /* Init RAM management */
#ifdef XEN_COMPAT_PHYSMAP
    xen_map_cache_init(xen_phys_offset_to_gaddr, state);
#else
    xen_map_cache_init(NULL, state);
#endif

    qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);

    state->memory_listener = *xen_memory_listener;
    memory_listener_register(&state->memory_listener, &address_space_memory);

    state->io_listener = xen_io_listener;
    memory_listener_register(&state->io_listener, &address_space_io);

    state->device_listener = xen_device_listener;
    QLIST_INIT(&state->dev_list);
    device_listener_register(&state->device_listener);

    return;

err:
    error_report("xen hardware virtual machine initialisation failed");
    exit(1);
}

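/*
 * Entry point used by the per-architecture Xen HVM setup code: open the
 * event channel and xenstore handles, create an ioreq server and register
 * all the listeners above.
 *
 * A rough usage sketch (the caller, listener name and CPU count below are
 * illustrative assumptions, not definitions from this file):
 *
 *     XenIOState *state = g_new0(XenIOState, 1);
 *     xen_register_ioreq(state, machine->smp.max_cpus,
 *                        &xen_memory_listener);
 */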
void xen_register_ioreq(XenIOState *state, unsigned int max_cpus,
                        const MemoryListener *xen_memory_listener)
{
    int rc;

    setup_xen_backend_ops();

    state->xce_handle = qemu_xen_evtchn_open();
    if (state->xce_handle == NULL) {
        error_report("xen: event channel open failed with error %d", errno);
        goto err;
    }

    state->xenstore = xs_daemon_open();
    if (state->xenstore == NULL) {
        error_report("xen: xenstore open failed with error %d", errno);
        goto err;
    }

    rc = xen_create_ioreq_server(xen_domid, &state->ioservid);
    if (!rc) {
        xen_do_ioreq_register(state, max_cpus, xen_memory_listener);
    } else {
        warn_report("xen: failed to create ioreq server");
    }

    xen_bus_init();

    return;

err:
    error_report("xen hardware virtual machine backend registration failed");
    exit(1);
}